From 351d7805ca3fbfba0810447c8234278006590c42 Mon Sep 17 00:00:00 2001
From: Scott Lee
Date: Fri, 15 Nov 2024 14:06:02 -0800
Subject: [PATCH 001/107] [Data] [Dashboard] Fix `Rows Outputted` being zero
 on Ray Data Dashboard (#48745)

## Why are these changes needed?

Currently, there are some cases where the `Rows Outputted` value on the Ray
Job page's `Ray Data Overview` section says "0", even after the dataset
execution completes.

The root cause of the bug is that we clear iteration/execution metrics after
the dataset completes. This was previously used to "reset" the metrics to 0
after dataset completion, so that the last emitted value would not persist on
the dashboard after the job finishes. Now that we display rates on the
dashboard, this hack is no longer needed, and we can skip the metrics
clearing.

Fixed result: Screenshot at Nov 14 12-11-24

## Related issue number

Closes https://github.com/ray-project/ray/issues/44635

## Checks

- [x] I've signed off every commit (by using the -s flag, i.e., `git commit -s`)
  in this PR.
- [x] I've run `scripts/format.sh` to lint the changes in this PR.
- [ ] I've included any doc changes needed for https://docs.ray.io/en/master/.
- [ ] I've added any new APIs to the API Reference. For example, if I added a
  method in Tune, I've added it in `doc/source/tune/api/` under the
  corresponding `.rst` file.
- [x] I've made sure the tests are passing. Note that there might be a few
  flaky tests, see the recent failures at https://flakey-tests.ray.io/
- Testing Strategy
   - [x] Unit tests
   - [ ] Release tests
   - [ ] This PR is not tested :(

---------

Signed-off-by: Scott Lee
---
 .../_internal/execution/streaming_executor.py |  8 ++--
 python/ray/data/_internal/stats.py            | 39 ++-----------------
 python/ray/data/tests/test_stats.py           |  9 ++---
 3 files changed, 10 insertions(+), 46 deletions(-)

diff --git a/python/ray/data/_internal/execution/streaming_executor.py b/python/ray/data/_internal/execution/streaming_executor.py
index 238f6f9421cc..ca48d7766c35 100644
--- a/python/ray/data/_internal/execution/streaming_executor.py
+++ b/python/ray/data/_internal/execution/streaming_executor.py
@@ -188,11 +188,9 @@ def shutdown(self, execution_completed: bool = True):
             state="FINISHED" if execution_completed else "FAILED",
             force_update=True,
         )
-        # Clears metrics for this dataset so that they do
-        # not persist in the grafana dashboard after execution
-        StatsManager.clear_execution_metrics(
-            self._dataset_tag, self._get_operator_tags()
-        )
+        # Once Dataset execution completes, mark it as complete
+        # and remove last cached execution stats.
+        StatsManager.clear_last_execution_stats(self._dataset_tag)
         # Freeze the stats and save it.
self._final_stats = self._generate_stats() stats_summary_string = self._final_stats.to_summary().to_string( diff --git a/python/ray/data/_internal/stats.py b/python/ray/data/_internal/stats.py index 46435ec9ceb4..fc6903cd92e2 100644 --- a/python/ray/data/_internal/stats.py +++ b/python/ray/data/_internal/stats.py @@ -378,33 +378,6 @@ def update_iteration_metrics( self.iter_user_s.set(stats.iter_user_s.get(), tags) self.iter_initialize_s.set(stats.iter_initialize_s.get(), tags) - def clear_execution_metrics(self, dataset_tag: str, operator_tags: List[str]): - for operator_tag in operator_tags: - tags = self._create_tags(dataset_tag, operator_tag) - self.spilled_bytes.set(0, tags) - self.allocated_bytes.set(0, tags) - self.freed_bytes.set(0, tags) - self.current_bytes.set(0, tags) - self.output_bytes.set(0, tags) - self.output_rows.set(0, tags) - self.cpu_usage_cores.set(0, tags) - self.gpu_usage_cores.set(0, tags) - - for prom_metric in self.execution_metrics_inputs.values(): - prom_metric.set(0, tags) - - for prom_metric in self.execution_metrics_outputs.values(): - prom_metric.set(0, tags) - - for prom_metric in self.execution_metrics_tasks.values(): - prom_metric.set(0, tags) - - for prom_metric in self.execution_metrics_obj_store_memory.values(): - prom_metric.set(0, tags) - - for prom_metric in self.execution_metrics_misc.values(): - prom_metric.set(0, tags) - def register_dataset(self, job_id: str, dataset_tag: str, operator_tags: List[str]): self.datasets[dataset_tag] = { "job_id": job_id, @@ -593,19 +566,13 @@ def update_execution_metrics( self._last_execution_stats[dataset_tag] = args self._start_thread_if_not_running() - def clear_execution_metrics(self, dataset_tag: str, operator_tags: List[str]): + def clear_last_execution_stats(self, dataset_tag: str): + # After dataset completes execution, remove cached execution stats. + # Marks the dataset as finished on job page's Ray Data Overview. with self._stats_lock: if dataset_tag in self._last_execution_stats: del self._last_execution_stats[dataset_tag] - try: - self._stats_actor( - create_if_not_exists=False - ).clear_execution_metrics.remote(dataset_tag, operator_tags) - except Exception: - # Cluster may be shut down. - pass - # Iteration methods def update_iteration_metrics(self, stats: "DatasetStats", dataset_tag: str): diff --git a/python/ray/data/tests/test_stats.py b/python/ray/data/tests/test_stats.py index d8d85515092c..cb0d31f22774 100644 --- a/python/ray/data/tests/test_stats.py +++ b/python/ray/data/tests/test_stats.py @@ -1663,8 +1663,9 @@ def test_stats_manager(shutdown_only): datasets = [None] * num_threads # Mock clear methods so that _last_execution_stats and _last_iteration_stats # are not cleared. We will assert on them afterwards. 
-    with patch.object(StatsManager, "clear_execution_metrics"), patch.object(
-        StatsManager, "clear_iteration_metrics"
+    with (
+        patch.object(StatsManager, "clear_last_execution_stats"),
+        patch.object(StatsManager, "clear_iteration_metrics"),
     ):
 
         def update_stats_manager(i):
@@ -1689,9 +1690,7 @@ def update_stats_manager(i):
         dataset_tag = create_dataset_tag(dataset._name, dataset._uuid)
         assert dataset_tag in StatsManager._last_execution_stats
         assert dataset_tag in StatsManager._last_iteration_stats
-        StatsManager.clear_execution_metrics(
-            dataset_tag, ["Input0", "ReadRange->MapBatches(<lambda>)1"]
-        )
+        StatsManager.clear_last_execution_stats(dataset_tag)
         StatsManager.clear_iteration_metrics(dataset_tag)
 
     wait_for_condition(lambda: not StatsManager._update_thread.is_alive())

From 3435c25f8f2baaa1be171c6ae8dd40056dffed2a Mon Sep 17 00:00:00 2001
From: mohitjain2504 <87856435+mohitjain2504@users.noreply.github.com>
Date: Sat, 16 Nov 2024 03:54:34 +0530
Subject: [PATCH 002/107] Update kubernetes.md (#48082)

## Why are these changes needed?

Fixes a typo in the sample `kubectl get pods` output: the worker pod's name
was missing its leading "s" (`ervice-sample-...` instead of
`service-sample-...`).

## Related issue number

## Checks

- [ ] I've signed off every commit (by using the -s flag, i.e., `git commit -s`)
  in this PR.
- [ ] I've run `scripts/format.sh` to lint the changes in this PR.
- [ ] I've included any doc changes needed for https://docs.ray.io/en/master/.
- [ ] I've added any new APIs to the API Reference. For example, if I added a
  method in Tune, I've added it in `doc/source/tune/api/` under the
  corresponding `.rst` file.
- [ ] I've made sure the tests are passing. Note that there might be a few
  flaky tests, see the recent failures at https://flakey-tests.ray.io/
- Testing Strategy
   - [ ] Unit tests
   - [ ] Release tests
   - [ ] This PR is not tested :(

---------

Signed-off-by: mohitjain2504 <87856435+mohitjain2504@users.noreply.github.com>
Signed-off-by: Richard Liaw
Co-authored-by: Richard Liaw
Co-authored-by: Gene Der Su
---
 doc/source/serve/production-guide/kubernetes.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/source/serve/production-guide/kubernetes.md b/doc/source/serve/production-guide/kubernetes.md
index f8b55a1f116f..eab728781a77 100644
--- a/doc/source/serve/production-guide/kubernetes.md
+++ b/doc/source/serve/production-guide/kubernetes.md
@@ -72,7 +72,7 @@ rayservice-sample   7s
 $ kubectl get pods
 NAME                                                      READY   STATUS    RESTARTS   AGE
-ervice-sample-raycluster-454c4-worker-small-group-b6mmg   1/1     Running   0          XXs
+service-sample-raycluster-454c4-worker-small-group-b6mmg  1/1     Running   0          XXs
 kuberay-operator-7fbdbf8c89-4lrnr                         1/1     Running   0          XXs
 rayservice-sample-raycluster-454c4-head-krk9d             1/1     Running   0          XXs
 

From 107a8e33df844ba36fafde92181bf6969b3b373b Mon Sep 17 00:00:00 2001
From: Dhyey Shah
Date: Sat, 16 Nov 2024 00:16:00 -0500
Subject: [PATCH 003/107] [core] Change m4's to m6i's for core release tests
 (#48753)

Signed-off-by: dayshah
---
 release/benchmarks/object_store.yaml                          | 4 ++--
 .../stress_tests/placement_group_tests_compute.yaml           | 7 +++----
 release/nightly_tests/stress_tests/smoke_test_compute.yaml    | 4 ++--
 release/nightly_tests/stress_tests/stress_tests_compute.yaml  | 4 ++--
 .../stress_tests/stress_tests_compute_large.yaml              | 4 ++--
 5 files changed, 11 insertions(+), 12 deletions(-)

diff --git a/release/benchmarks/object_store.yaml b/release/benchmarks/object_store.yaml
index 6908c9e9bf7f..5353a1009c6b 100644
--- a/release/benchmarks/object_store.yaml
+++ b/release/benchmarks/object_store.yaml
@@ -5,14 +5,14 @@ max_workers: 49
 
 head_node_type:
   name: head_node
-  instance_type: m4.16xlarge
+  instance_type: m6i.16xlarge
resources: custom_resources: node: 1 worker_node_types: - name: worker_node - instance_type: m4.2xlarge + instance_type: m6i.2xlarge min_workers: 49 max_workers: 49 use_spot: false diff --git a/release/nightly_tests/stress_tests/placement_group_tests_compute.yaml b/release/nightly_tests/stress_tests/placement_group_tests_compute.yaml index 275b0a0a7d36..280587323949 100644 --- a/release/nightly_tests/stress_tests/placement_group_tests_compute.yaml +++ b/release/nightly_tests/stress_tests/placement_group_tests_compute.yaml @@ -12,18 +12,17 @@ aws: head_node_type: name: head_node - instance_type: m4.16xlarge + instance_type: m6i.16xlarge resources: cpu: 64 worker_node_types: - name: worker_node - instance_type: m4.large + instance_type: m6i.large min_workers: 5 max_workers: 5 use_spot: false resources: - cpu: 2 + cpu: 2 custom_resources: pg_custom: 666 - diff --git a/release/nightly_tests/stress_tests/smoke_test_compute.yaml b/release/nightly_tests/stress_tests/smoke_test_compute.yaml index 0af96e62373b..bc1844203b9f 100644 --- a/release/nightly_tests/stress_tests/smoke_test_compute.yaml +++ b/release/nightly_tests/stress_tests/smoke_test_compute.yaml @@ -12,11 +12,11 @@ aws: head_node_type: name: head_node - instance_type: m4.4xlarge + instance_type: m6i.4xlarge worker_node_types: - name: worker_node - instance_type: m4.large + instance_type: m6i.large min_workers: 4 max_workers: 4 use_spot: false diff --git a/release/nightly_tests/stress_tests/stress_tests_compute.yaml b/release/nightly_tests/stress_tests/stress_tests_compute.yaml index 5e9acaf7f7e8..abede244ae28 100644 --- a/release/nightly_tests/stress_tests/stress_tests_compute.yaml +++ b/release/nightly_tests/stress_tests/stress_tests_compute.yaml @@ -12,13 +12,13 @@ aws: head_node_type: name: head_node - instance_type: m4.16xlarge + instance_type: m6i.16xlarge resources: cpu: 64 worker_node_types: - name: worker_node - instance_type: m4.large + instance_type: m6i.large min_workers: 100 max_workers: 100 use_spot: false diff --git a/release/nightly_tests/stress_tests/stress_tests_compute_large.yaml b/release/nightly_tests/stress_tests/stress_tests_compute_large.yaml index 3784bac975be..4d8d2fdeef36 100644 --- a/release/nightly_tests/stress_tests/stress_tests_compute_large.yaml +++ b/release/nightly_tests/stress_tests/stress_tests_compute_large.yaml @@ -12,13 +12,13 @@ aws: head_node_type: name: head_node - instance_type: m4.16xlarge + instance_type: m6i.16xlarge resources: cpu: 64 worker_node_types: - name: worker_node - instance_type: m4.16xlarge + instance_type: m6i.16xlarge min_workers: 6 max_workers: 6 use_spot: false From 161849364a784442cc659fb9780f1a6adee85fce Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Fri, 15 Nov 2024 23:20:22 -0800 Subject: [PATCH 004/107] [image] remove azure packages from base image (#48762) too ancient; and there are no tests. 
this should remove the outdated paramiko package from the ray image Signed-off-by: Lonnie Liu --- docker/base-deps/Dockerfile | 13 ++++--------- python/ray/autoscaler/azure/example-full.yaml | 5 ++--- python/ray/autoscaler/azure/example-gpu-docker.yaml | 5 ++--- 3 files changed, 8 insertions(+), 15 deletions(-) diff --git a/docker/base-deps/Dockerfile b/docker/base-deps/Dockerfile index eec1e564d8c0..5b53760712a8 100644 --- a/docker/base-deps/Dockerfile +++ b/docker/base-deps/Dockerfile @@ -13,7 +13,7 @@ ENV LANG=C.UTF-8 # TODO(ilr) $HOME seems to point to result in "" instead of "/home/ray" ENV PATH "/home/ray/anaconda3/bin:$PATH" ARG DEBIAN_FRONTEND=noninteractive -ARG PYTHON_VERSION=3.8.16 +ARG PYTHON_VERSION=3.9 ARG HOSTTYPE=${HOSTTYPE:-x86_64} ARG RAY_UID=1000 @@ -39,7 +39,7 @@ RUN sudo apt-get update -y && sudo apt-get upgrade -y \ cmake \ g++ \ zlib1g-dev \ - $(if [ "$AUTOSCALER" = "autoscaler" ]; then echo \ + $(if [[ "$AUTOSCALER" == "autoscaler" ]]; then echo \ tmux \ screen \ rsync \ @@ -74,7 +74,7 @@ RUN sudo apt-get update -y && sudo apt-get upgrade -y \ g++; fi) \ && sudo rm -rf /var/lib/apt/lists/* \ && sudo apt-get clean \ - && (if [ "$AUTOSCALER" = "autoscaler" ]; \ + && (if [[ "$AUTOSCALER" == "autoscaler" ]]; \ then $HOME/anaconda3/bin/pip --no-cache-dir install \ "redis>=3.5.0,<4.0.0" \ "six==1.13.0" \ @@ -83,12 +83,7 @@ RUN sudo apt-get update -y && sudo apt-get upgrade -y \ "cryptography==38.0.1" \ "google-api-python-client==1.7.8" \ "google-oauth" \ - "azure-cli-core==2.40.0" \ - "azure-identity==1.10.0" \ - "azure-mgmt-compute==23.1.0" \ - "azure-mgmt-network==19.0.0" \ - "azure-mgmt-resource==20.0.0" \ - "msrestazure==0.6.4"; \ + ; \ fi;) WORKDIR $HOME diff --git a/python/ray/autoscaler/azure/example-full.yaml b/python/ray/autoscaler/azure/example-full.yaml index a043603bfaaa..41d7fbfd60d1 100644 --- a/python/ray/autoscaler/azure/example-full.yaml +++ b/python/ray/autoscaler/azure/example-full.yaml @@ -164,9 +164,8 @@ setup_commands: [] # - pip install -U "ray[default] @ https://s3-us-west-2.amazonaws.com/ray-wheels/latest/ray-3.0.0.dev0-cp38-cp38-manylinux2014_x86_64.whl" # Custom commands that will be run on the head node after common setup. -# NOTE: rayproject/ray-ml:latest has azure packages bundled -head_setup_commands: [] - # - pip install -U azure-cli-core==2.22.0 azure-mgmt-compute==14.0.0 azure-mgmt-msi==1.0.0 azure-mgmt-network==10.2.0 azure-mgmt-resource==13.0.0 +head_setup_commands: + - pip install -U azure-cli-core==2.29.1 azure-identity==1.7.0 azure-mgmt-compute==23.1.0 azure-mgmt-network==19.0.0 azure-mgmt-resource==20.0.0 msrestazure==0.6.4 # Custom commands that will be run on worker nodes after common setup. worker_setup_commands: [] diff --git a/python/ray/autoscaler/azure/example-gpu-docker.yaml b/python/ray/autoscaler/azure/example-gpu-docker.yaml index 6faaed48fb64..3ebc763e7d26 100644 --- a/python/ray/autoscaler/azure/example-gpu-docker.yaml +++ b/python/ray/autoscaler/azure/example-gpu-docker.yaml @@ -117,9 +117,8 @@ setup_commands: [] # - pip install -U "ray[default] @ https://s3-us-west-2.amazonaws.com/ray-wheels/latest/ray-3.0.0.dev0-cp38-cp38-manylinux2014_x86_64.whl" # Custom commands that will be run on the head node after common setup. 
-# NOTE: rayproject/ray-ml:latest has azure packages bundled
-head_setup_commands: []
-    # - pip install -U azure-cli-core==2.22.0 azure-mgmt-compute==14.0.0 azure-mgmt-msi==1.0.0 azure-mgmt-network==10.2.0 azure-mgmt-resource==13.0.0
+head_setup_commands:
+    - pip install -U azure-cli-core==2.29.1 azure-identity==1.7.0 azure-mgmt-compute==23.1.0 azure-mgmt-network==19.0.0 azure-mgmt-resource==20.0.0 msrestazure==0.6.4
 
 # Custom commands that will be run on worker nodes after common setup.
 worker_setup_commands: []

From ae0aae479fb2956f94fdc76614424cf9b6027809 Mon Sep 17 00:00:00 2001
From: dentiny
Date: Sun, 17 Nov 2024 00:17:33 -0800
Subject: [PATCH 005/107] [core] Fix security issue for retry package (#48767)

I randomly found a lancedb issue (https://github.com/lancedb/lancedb/issues/1480)
which discloses a high-severity CVE in the `retry` package.

Since, like lancedb, Ray has only one use case for the `retry` package, I took
the same approach as https://github.com/lancedb/lancedb/pull/1749 and replaced
the dependency with an in-house implementation, which also names all variables
better, with their units and default values.

---------

Signed-off-by: dentiny
---
 release/BUILD.bazel                        | 16 +++++-
 .../ray_release/cluster_manager/minimal.py |  9 ++-
 release/ray_release/retry.py               | 42 +++++++++++
 release/ray_release/tests/test_retry.py    | 75 +++++++++++++++++++
 release/requirements_buildkite.in          |  1 -
 release/requirements_buildkite.txt         | 12 +--
 6 files changed, 140 insertions(+), 15 deletions(-)
 create mode 100644 release/ray_release/retry.py
 create mode 100644 release/ray_release/tests/test_retry.py

diff --git a/release/BUILD.bazel b/release/BUILD.bazel
index a09070d9b313..f269add55138 100644
--- a/release/BUILD.bazel
+++ b/release/BUILD.bazel
@@ -309,7 +309,6 @@ py_library(
         bk_require("pybuildkite"),
         bk_require("pygithub"),
         bk_require("requests"),
-        bk_require("retry"),
     ],
 )
 
@@ -624,3 +623,18 @@ py_test(
         bk_require("pytest"),
     ],
 )
+
+py_test(
+    name = "test_retry",
+    size = "small",
+    srcs = ["ray_release/tests/test_retry.py"],
+    exec_compatible_with = ["//:hermetic_python"],
+    tags = [
+        "release_unit",
+        "team:ci",
+    ],
+    deps = [
+        ":ray_release",
+        bk_require("pytest"),
+    ],
+)
diff --git a/release/ray_release/cluster_manager/minimal.py b/release/ray_release/cluster_manager/minimal.py
index 8b202c247753..1cfe14c1e2f2 100644
--- a/release/ray_release/cluster_manager/minimal.py
+++ b/release/ray_release/cluster_manager/minimal.py
@@ -9,7 +9,7 @@ from ray_release.logger import logger
 from ray_release.cluster_manager.cluster_manager import ClusterManager
 from ray_release.util import format_link, anyscale_cluster_env_build_url
-from retry import retry
+from ray_release.retry import retry
 
 REPORT_S = 30.0
 
@@ -20,7 +20,12 @@ class MinimalClusterManager(ClusterManager):
     Builds app config and compute template but does not start or stop session.
     """
 
-    @retry((ClusterEnvCreateError), delay=10, jitter=5, tries=2)
+    @retry(
+        init_delay_sec=10,
+        jitter_sec=5,
+        max_retry_count=2,
+        exceptions=(ClusterEnvCreateError,),
+    )
     def create_cluster_env(self):
         assert self.cluster_env_id is None
 
diff --git a/release/ray_release/retry.py b/release/ray_release/retry.py
new file mode 100644
index 000000000000..dec0bd9be925
--- /dev/null
+++ b/release/ray_release/retry.py
@@ -0,0 +1,42 @@
+"""Utils on retry."""
+
+import time
+from functools import wraps
+from typing import Tuple
+
+# Default configuration for retry.
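+#
+# With the defaults below, retry attempt k (0-indexed) sleeps for
+# min(init_delay_sec * backoff**k + jitter_sec, max_delay_sec) seconds
+# before the next attempt, i.e. 2, 3, 5, 9, 17, 30, 30, ... seconds;
+# the last failed attempt re-raises instead of sleeping.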
+_DEFAULT_MAX_RETRY_COUNT: int = 10
+_DEFAULT_INIT_DELAY_SEC: int = 1
+_DEFAULT_MAX_DELAY_SEC: int = 30
+_DEFAULT_BACKOFF: int = 2
+_DEFAULT_JITTER_SEC: int = 1
+_DEFAULT_EXCEPTIONS: Tuple[Exception] = (Exception,)
+
+
+def retry(
+    max_retry_count: int = _DEFAULT_MAX_RETRY_COUNT,
+    init_delay_sec: int = _DEFAULT_INIT_DELAY_SEC,
+    max_delay_sec: int = _DEFAULT_MAX_DELAY_SEC,
+    backoff: int = _DEFAULT_BACKOFF,
+    jitter_sec: int = _DEFAULT_JITTER_SEC,
+    exceptions: Tuple[Exception] = _DEFAULT_EXCEPTIONS,
+):
+    def wrapper(fn):
+        @wraps(fn)
+        def wrapped(*args, **kwargs):
+            for cur_retry_count in range(max_retry_count):
+                try:
+                    return fn(*args, **kwargs)
+                except exceptions:
+                    if cur_retry_count + 1 == max_retry_count:
+                        raise
+
+                    sleep_sec = min(
+                        init_delay_sec * (backoff**cur_retry_count) + jitter_sec,
+                        max_delay_sec,
+                    )
+                    time.sleep(sleep_sec)
+
+        return wrapped
+
+    return wrapper
diff --git a/release/ray_release/tests/test_retry.py b/release/ray_release/tests/test_retry.py
new file mode 100644
index 000000000000..b630e19f2dd0
--- /dev/null
+++ b/release/ray_release/tests/test_retry.py
@@ -0,0 +1,75 @@
+from ray_release import retry
+
+import sys
+import pytest
+
+
+def test_retry_with_no_error():
+    invocation_count = 0
+
+    # Function doesn't raise exception; use a dummy value to check invocation.
+    @retry.retry()
+    def no_error_func() -> int:
+        nonlocal invocation_count
+        invocation_count += 1
+        return 1
+
+    assert no_error_func() == 1
+    assert invocation_count == 1
+
+
+# Test scenario: exception count is less than retry count.
+def test_retry_with_limited_error():
+    invocation_count = 0
+
+    # Function raises once, then succeeds; use a dummy value to check invocation.
+    @retry.retry(init_delay_sec=1, jitter_sec=1)
+    def limited_error() -> int:
+        nonlocal invocation_count
+
+        invocation_count += 1
+
+        if invocation_count == 1:
+            raise Exception("Manual exception")
+        return 1
+
+    assert limited_error() == 1
+    assert invocation_count == 2
+
+
+# Test scenario: exception count exceeds retry count.
+def test_retry_with_unlimited_error():
+    invocation_count = 0
+
+    @retry.retry(init_delay_sec=1, jitter_sec=1, backoff=1, max_retry_count=3)
+    def unlimited_error() -> int:
+        nonlocal invocation_count
+
+        invocation_count += 1
+        raise Exception("Manual exception")
+
+    with pytest.raises(Exception, match="Manual exception"):
+        unlimited_error()
+    assert invocation_count == 3
+
+
+def test_retry_on_certain_errors():
+    invocation_count = 0
+
+    # Function raises a retryable KeyError once, then succeeds.
+    @retry.retry(init_delay_sec=1, jitter_sec=1, exceptions=(KeyError,))
+    def limited_error() -> int:
+        nonlocal invocation_count
+
+        invocation_count += 1
+
+        if invocation_count == 1:
+            raise KeyError("Manual exception")
+        return 1
+
+    assert limited_error() == 1
+    assert invocation_count == 2
+
+
+if __name__ == "__main__":
+    sys.exit(pytest.main(["-sv", __file__]))
diff --git a/release/requirements_buildkite.in b/release/requirements_buildkite.in
index 96d05e27bb96..0c20af4d9088 100644
--- a/release/requirements_buildkite.in
+++ b/release/requirements_buildkite.in
@@ -14,7 +14,6 @@ pyyaml
 pybuildkite
 PyGithub
 requests
-retry
 twine == 5.0.0
 
 docker >= 7.1.0
diff --git a/release/requirements_buildkite.txt b/release/requirements_buildkite.txt
index 9bfaada37880..659db4c5e8db 100644
--- a/release/requirements_buildkite.txt
+++ b/release/requirements_buildkite.txt
@@ -424,9 +424,7 @@ debugpy==1.8.2 \
 decorator==5.1.1 \
     --hash=sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330 \
     --hash=sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186
-    # via
-    #   ipython
-    #   retry
+    # via ipython
 deprecated==1.2.14 \
     --hash=sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c \
     --hash=sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3
@@ -1174,10 +1172,6 @@ pure-eval==0.2.3 \
     --hash=sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0 \
     --hash=sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42
     # via stack-data
-py==1.11.0 \
-    --hash=sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719 \
-    --hash=sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378
-    # via retry
 pyasn1==0.6.0 \
     --hash=sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c \
     --hash=sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473
@@ -1539,10 +1533,6 @@ requests-toolbelt==1.0.0 \
     --hash=sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6 \
     --hash=sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06
     # via twine
-retry==0.9.2 \
-    --hash=sha256:ccddf89761fa2c726ab29391837d4327f819ea14d244c232a1d24c67a2f98606 \
-    --hash=sha256:f8bfa8b99b69c4506d6f5bd3b0aabf77f98cdb17f3c9fc3f5ca820033336fba4
-    # via -r release/requirements_buildkite.in
 rfc3986==2.0.0 \
     --hash=sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd \
     --hash=sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c

From 7a07263e9e7eb3ffe11d3805a4c0ad55877c0aa0 Mon Sep 17 00:00:00 2001
From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com>
Date: Sun, 17 Nov 2024 14:14:09 -0800
Subject: [PATCH 006/107] [vm launcher] change nightly and latest launcher test
 to manual (#48774)

nightly is inherently unstable, and latest is just the last stable release.
neither of them should block releases Signed-off-by: Lonnie Liu --- release/release_tests.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/release/release_tests.yaml b/release/release_tests.yaml index 33dc2486eae6..9f27a1a71d93 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -4956,7 +4956,7 @@ group: cluster-launcher-test working_dir: ../python/ray/autoscaler/ - frequency: nightly + frequency: manual team: clusters cluster: byod: {} @@ -4971,7 +4971,7 @@ group: cluster-launcher-test working_dir: ../python/ray/autoscaler/ - frequency: nightly + frequency: manual team: clusters cluster: byod: {} From 1576af11ac0cd064e0df3f8a6aa08db6160ec413 Mon Sep 17 00:00:00 2001 From: Xianyan Jia Date: Tue, 19 Nov 2024 02:23:35 +0800 Subject: [PATCH 007/107] fix NCCL_BFLOAT16 typo in TORCH_NCCL_DTYPE_MAP (#48590) fix NCCL_BFLOAT16 typo in TORCH_NCCL_DTYPE_MAP --- python/ray/util/collective/collective_group/nccl_util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/ray/util/collective/collective_group/nccl_util.py b/python/ray/util/collective/collective_group/nccl_util.py index 05b05ef33a27..221d5885c411 100644 --- a/python/ray/util/collective/collective_group/nccl_util.py +++ b/python/ray/util/collective/collective_group/nccl_util.py @@ -63,7 +63,7 @@ } # Older versions of cupy don't support bfloat16. - if hasattr(nccl, "NCCL_BFlOAT16"): + if hasattr(nccl, "NCCL_BFLOAT16"): TORCH_NCCL_DTYPE_MAP[torch.bfloat16] = nccl.NCCL_BFLOAT16 TORCH_NUMPY_DTYPE_MAP = { From e70b37a435122609f88e02ce3377b8dd7f780e6b Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Mon, 18 Nov 2024 15:48:35 -0800 Subject: [PATCH 008/107] [image] comply with constraint file in base image (#48764) also uses 1.3 syntax and heredoc for multiline commands Signed-off-by: Lonnie Liu --- ci/docker/ray-ml.cpu.base.wanda.yaml | 1 - ci/docker/ray-ml.cuda.base.wanda.yaml | 1 - ci/docker/ray.cpu.base.aarch64.wanda.yaml | 2 + ci/docker/ray.cpu.base.wanda.yaml | 2 + ci/docker/ray.cuda.base.aarch64.wanda.yaml | 2 + ci/docker/ray.cuda.base.wanda.yaml | 2 + docker/base-deps/Dockerfile | 162 +++++++++++------- docker/ray-ml/Dockerfile | 14 +- .../ray-ml/install-ml-docker-requirements.sh | 3 +- docker/ray/Dockerfile | 3 +- 10 files changed, 117 insertions(+), 75 deletions(-) diff --git a/ci/docker/ray-ml.cpu.base.wanda.yaml b/ci/docker/ray-ml.cpu.base.wanda.yaml index 53dc0700a973..29838a2a3c98 100644 --- a/ci/docker/ray-ml.cpu.base.wanda.yaml +++ b/ci/docker/ray-ml.cpu.base.wanda.yaml @@ -3,7 +3,6 @@ froms: ["cr.ray.io/rayproject/ray-py$PYTHON_VERSION-cpu-base"] dockerfile: docker/ray-ml/Dockerfile srcs: - python/requirements.txt - - python/requirements_compiled.txt - python/requirements/ml/dl-cpu-requirements.txt - python/requirements/ml/dl-gpu-requirements.txt - python/requirements/ml/core-requirements.txt diff --git a/ci/docker/ray-ml.cuda.base.wanda.yaml b/ci/docker/ray-ml.cuda.base.wanda.yaml index 723374e90210..b3aa908c4b5f 100644 --- a/ci/docker/ray-ml.cuda.base.wanda.yaml +++ b/ci/docker/ray-ml.cuda.base.wanda.yaml @@ -3,7 +3,6 @@ froms: ["cr.ray.io/rayproject/ray-py$PYTHON_VERSION-cu$CUDA_VERSION-base"] dockerfile: docker/ray-ml/Dockerfile srcs: - python/requirements.txt - - python/requirements_compiled.txt - python/requirements/ml/dl-cpu-requirements.txt - python/requirements/ml/dl-gpu-requirements.txt - python/requirements/ml/core-requirements.txt diff --git a/ci/docker/ray.cpu.base.aarch64.wanda.yaml 
b/ci/docker/ray.cpu.base.aarch64.wanda.yaml index 43321ccb7ba5..1726fb261825 100644 --- a/ci/docker/ray.cpu.base.aarch64.wanda.yaml +++ b/ci/docker/ray.cpu.base.aarch64.wanda.yaml @@ -1,6 +1,8 @@ name: "ray-py$PYTHON_VERSION-cpu-base-aarch64" froms: ["ubuntu:22.04"] dockerfile: docker/base-deps/Dockerfile +srcs: + - python/requirements_compiled.txt build_args: - PYTHON_VERSION - BASE_IMAGE=ubuntu:22.04 diff --git a/ci/docker/ray.cpu.base.wanda.yaml b/ci/docker/ray.cpu.base.wanda.yaml index 4310a1820957..895605ed8f71 100644 --- a/ci/docker/ray.cpu.base.wanda.yaml +++ b/ci/docker/ray.cpu.base.wanda.yaml @@ -1,6 +1,8 @@ name: "ray-py$PYTHON_VERSION-cpu-base" froms: ["ubuntu:22.04"] dockerfile: docker/base-deps/Dockerfile +srcs: + - python/requirements_compiled.txt build_args: - PYTHON_VERSION - BASE_IMAGE=ubuntu:22.04 diff --git a/ci/docker/ray.cuda.base.aarch64.wanda.yaml b/ci/docker/ray.cuda.base.aarch64.wanda.yaml index 51fe8a870814..1d1d6df12787 100644 --- a/ci/docker/ray.cuda.base.aarch64.wanda.yaml +++ b/ci/docker/ray.cuda.base.aarch64.wanda.yaml @@ -1,6 +1,8 @@ name: "ray-py$PYTHON_VERSION-cu$CUDA_VERSION-base-aarch64" froms: ["nvidia/cuda:$CUDA_VERSION-devel-ubuntu22.04"] dockerfile: docker/base-deps/Dockerfile +srcs: + - python/requirements_compiled.txt build_args: - PYTHON_VERSION - BASE_IMAGE=nvidia/cuda:$CUDA_VERSION-devel-ubuntu22.04 diff --git a/ci/docker/ray.cuda.base.wanda.yaml b/ci/docker/ray.cuda.base.wanda.yaml index 3b2cbf4c3081..0bcd7611c921 100644 --- a/ci/docker/ray.cuda.base.wanda.yaml +++ b/ci/docker/ray.cuda.base.wanda.yaml @@ -1,6 +1,8 @@ name: "ray-py$PYTHON_VERSION-cu$CUDA_VERSION-base" froms: ["nvidia/cuda:$CUDA_VERSION-devel-ubuntu22.04"] dockerfile: docker/base-deps/Dockerfile +srcs: + - python/requirements_compiled.txt build_args: - PYTHON_VERSION - BASE_IMAGE=nvidia/cuda:$CUDA_VERSION-devel-ubuntu22.04 diff --git a/docker/base-deps/Dockerfile b/docker/base-deps/Dockerfile index 5b53760712a8..496d8a05e58a 100644 --- a/docker/base-deps/Dockerfile +++ b/docker/base-deps/Dockerfile @@ -1,3 +1,5 @@ +# syntax=docker/dockerfile:1.3-labs + # The base-deps Docker image installs main libraries needed to run Ray # The GPU options are NVIDIA CUDA developer images. 
@@ -19,71 +21,109 @@ ARG HOSTTYPE=${HOSTTYPE:-x86_64}
 ARG RAY_UID=1000
 ARG RAY_GID=100
 
-RUN apt-get update -y \
-    && apt-get install -y sudo tzdata \
-    && useradd -ms /bin/bash -d /home/ray ray --uid $RAY_UID --gid $RAY_GID \
-    && usermod -aG sudo ray \
-    && echo 'ray ALL=NOPASSWD: ALL' >> /etc/sudoers \
-    && rm -rf /var/lib/apt/lists/* \
-    && apt-get clean
+RUN <<EOF
+#!/bin/bash
+
+apt-get update -y
+apt-get install -y sudo tzdata
+
+useradd -ms /bin/bash -d /home/ray ray --uid $RAY_UID --gid $RAY_GID
+usermod -aG sudo ray
+echo 'ray ALL=NOPASSWD: ALL' >> /etc/sudoers
+
+EOF
 
 USER $RAY_UID
 ENV HOME=/home/ray
 
+COPY python/requirements_compiled.txt /home/ray/requirements_compiled.txt
+
 SHELL ["/bin/bash", "-c"]
-RUN sudo apt-get update -y && sudo apt-get upgrade -y \
-    && sudo apt-get install -y \
-        git \
-        libjemalloc-dev \
-        wget \
-        cmake \
-        g++ \
-        zlib1g-dev \
-        $(if [[ "$AUTOSCALER" == "autoscaler" ]]; then echo \
-        tmux \
-        screen \
-        rsync \
-        netbase \
-        openssh-client \
-        gnupg; fi) \
-    && wget --quiet \
-        "https://repo.anaconda.com/miniconda/Miniconda3-py311_24.4.0-0-Linux-${HOSTTYPE}.sh" \
-        -O /tmp/miniconda.sh \
-    && /bin/bash /tmp/miniconda.sh -b -u -p $HOME/anaconda3 \
-    && $HOME/anaconda3/bin/conda init \
-    && echo 'export PATH=$HOME/anaconda3/bin:$PATH' >> /home/ray/.bashrc \
-    && rm /tmp/miniconda.sh \
-    && $HOME/anaconda3/bin/conda install -y libgcc-ng python=$PYTHON_VERSION \
-    && $HOME/anaconda3/bin/conda install -y -c conda-forge libffi=3.4.2 \
-    && $HOME/anaconda3/bin/conda clean -y --all \
-    && $HOME/anaconda3/bin/pip install --no-cache-dir \
-        flatbuffers \
-        cython==0.29.37 \
-        # Necessary for Dataset to work properly.
-        numpy\>=1.20 \
-        psutil \
-        # Required a recent version of setuptools to be compatible with python 3.12+.
-        setuptools==71.1.0 \
-    # To avoid the following error on Jenkins:
-    # AttributeError: 'numpy.ufunc' object has no attribute '__module__'
-    && $HOME/anaconda3/bin/pip uninstall -y dask \
-    # We install cmake temporarily to get psutil
-    && sudo apt-get autoremove -y cmake zlib1g-dev \
-    # We keep g++ on GPU images, because uninstalling removes CUDA Devel tooling
-        $(if [[ "$BASE_IMAGE" == "ubuntu:22.04" && "$HOSTTYPE" == "x86_64" ]]; then echo \
-        g++; fi) \
-    && sudo rm -rf /var/lib/apt/lists/* \
-    && sudo apt-get clean \
-    && (if [[ "$AUTOSCALER" == "autoscaler" ]]; \
-    then $HOME/anaconda3/bin/pip --no-cache-dir install \
-        "redis>=3.5.0,<4.0.0" \
-        "six==1.13.0" \
-        "boto3==1.26.76" \
-        "pyOpenSSL==22.1.0" \
-        "cryptography==38.0.1" \
-        "google-api-python-client==1.7.8" \
-        "google-oauth" \
-        ; \
-    fi;)
+
+RUN <<EOF
+#!/bin/bash
+
+sudo apt-get update -y
+sudo apt-get upgrade -y
+
+APT_PKGS=(
+    git
+    libjemalloc-dev
+    wget
+    cmake
+    g++
+    zlib1g-dev
+)
+if [[ "$AUTOSCALER" == "autoscaler" ]]; then
+    APT_PKGS+=(
+        tmux
+        screen
+        rsync
+        netbase
+        openssh-client
+        gnupg
+    )
+fi
+sudo apt-get install -y "${APT_PKGS[@]}"
+
+wget --quiet \
+    "https://repo.anaconda.com/miniconda/Miniconda3-py311_24.4.0-0-Linux-${HOSTTYPE}.sh" \
+    -O /tmp/miniconda.sh
+/bin/bash /tmp/miniconda.sh -b -u -p $HOME/anaconda3
+$HOME/anaconda3/bin/conda init
+echo 'export PATH=$HOME/anaconda3/bin:$PATH' >> /home/ray/.bashrc
+rm /tmp/miniconda.sh
+$HOME/anaconda3/bin/conda install -y libgcc-ng python=$PYTHON_VERSION
+$HOME/anaconda3/bin/conda install -y -c conda-forge libffi=3.4.2
+$HOME/anaconda3/bin/conda clean -y --all
+
+PIP_PKGS=(
+    # Required a recent version of setuptools to be compatible with python 3.12+.
+    setuptools==71.1.0
+
+    flatbuffers
+    cython
+    numpy  # Necessary for Dataset to work properly.
+ psutil +) +if [[ "$AUTOSCALER" == "autoscaler" ]]; then + PIP_PKGS+=( + redis + six + boto3 + pyopenssl + cryptography + google-api-python-client + google-oauth + ) +fi + +$HOME/anaconda3/bin/pip install --no-cache-dir \ + -c $HOME/requirements_compiled.txt \ + "${PIP_PKGS[@]}" + +# To avoid the following error on Jenkins: +# AttributeError: 'numpy.ufunc' object has no attribute '__module__' +$HOME/anaconda3/bin/pip uninstall -y dask + +# We install cmake temporarily to get psutil +sudo apt-get autoremove -y cmake zlib1g-dev + +# We keep g++ on GPU images, because uninstalling removes CUDA Devel tooling +if [[ "$BASE_IMAGE" == "ubuntu:22.04" && "$HOSTTYPE" == "x86_64" ]]; then + sudo apt-get autoremove -y g++ +fi + +sudo rm -rf /var/lib/apt/lists/* +sudo apt-get clean + +EOF WORKDIR $HOME diff --git a/docker/ray-ml/Dockerfile b/docker/ray-ml/Dockerfile index 67ee790389a6..42743924a118 100644 --- a/docker/ray-ml/Dockerfile +++ b/docker/ray-ml/Dockerfile @@ -1,19 +1,13 @@ +# syntax=docker/dockerfile:1.3-labs + ARG BASE_IMAGE ARG FULL_BASE_IMAGE=rayproject/ray:nightly"$BASE_IMAGE" FROM "$FULL_BASE_IMAGE" -# The python/* paths only exist in civ2, so we put them as non-first arguments. Docker -# will ignore non-existent paths if they are non-first arguments. -# -# TODO(can): simplify this once civ1 is completely deprecated. -COPY *requirements.txt \ - python/*requirements.txt \ +COPY python/*requirements.txt \ python/requirements/ml/*requirements.txt \ python/requirements/docker/*requirements.txt ./ -COPY *requirements_compiled.txt \ - python/*requirements_compiled.txt ./ -COPY *install-ml-docker-requirements.sh \ - docker/ray-ml/*install-ml-docker-requirements.sh ./ +COPY docker/ray-ml/install-ml-docker-requirements.sh ./ RUN sudo chmod +x install-ml-docker-requirements.sh \ && ./install-ml-docker-requirements.sh diff --git a/docker/ray-ml/install-ml-docker-requirements.sh b/docker/ray-ml/install-ml-docker-requirements.sh index d6744a13d2aa..0763b4d9589e 100755 --- a/docker/ray-ml/install-ml-docker-requirements.sh +++ b/docker/ray-ml/install-ml-docker-requirements.sh @@ -52,7 +52,8 @@ pip --no-cache-dir install \ sudo apt-get clean -sudo rm ./*requirements*.txt +# requirements_compiled.txt will be kept. +sudo rm ./*requirements.txt requirements_compiled_gpu.txt # MuJoCo Installation. export MUJOCO_GL=osmesa diff --git a/docker/ray/Dockerfile b/docker/ray/Dockerfile index 888183223609..9e54302603c9 100644 --- a/docker/ray/Dockerfile +++ b/docker/ray/Dockerfile @@ -1,3 +1,5 @@ +# syntax=docker/dockerfile:1.3-labs + ARG BASE_IMAGE ARG FULL_BASE_IMAGE=rayproject/ray-deps:nightly"$BASE_IMAGE" FROM $FULL_BASE_IMAGE @@ -6,7 +8,6 @@ ARG WHEEL_PATH ARG FIND_LINKS_PATH=".whl" ARG CONSTRAINTS_FILE="requirements_compiled.txt" -COPY requirements_compiled.txt ./ COPY $WHEEL_PATH . COPY $FIND_LINKS_PATH $FIND_LINKS_PATH From 36682c1738bc8b25209eb79771d0616832c94d85 Mon Sep 17 00:00:00 2001 From: Shiyan Xu <2701446+xushiyan@users.noreply.github.com> Date: Mon, 18 Nov 2024 15:12:38 -1000 Subject: [PATCH 009/107] [data][api] implement `HudiDataSource` (#46273) Support read from Hudi table into Ray dataset. 
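
A minimal usage sketch (the local path below is illustrative; it assumes the
bundled example table from `python/ray/data/examples/data/hudi-tables/` has
been unzipped there):

```python
import ray

# Read a copy-on-write Hudi table into a Ray Dataset.
ds = ray.data.read_hudi(table_uri="/tmp/hudi-tables/0.x_cow_partitioned")

print(ds.schema())  # Hudi meta fields plus the table's data columns.
print(ds.count())
```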
--------- Signed-off-by: Shiyan Xu <2701446+xushiyan@users.noreply.github.com> --- .../config/vocabularies/Data/accept.txt | 1 + doc/source/data/api/input_output.rst | 9 ++ python/ray/data/BUILD | 8 ++ python/ray/data/__init__.py | 2 + .../_internal/datasource/hudi_datasource.py | 91 ++++++++++++++ .../data/hudi-tables/0.x_cow_partitioned.zip | Bin 0 -> 46757 bytes python/ray/data/read_api.py | 53 ++++++++ python/ray/data/tests/test_hudi.py | 114 ++++++++++++++++++ .../ml/data-test-requirements.txt | 3 +- python/requirements_compiled.txt | 2 + 10 files changed, 282 insertions(+), 1 deletion(-) create mode 100644 python/ray/data/_internal/datasource/hudi_datasource.py create mode 100644 python/ray/data/examples/data/hudi-tables/0.x_cow_partitioned.zip create mode 100644 python/ray/data/tests/test_hudi.py diff --git a/.vale/styles/config/vocabularies/Data/accept.txt b/.vale/styles/config/vocabularies/Data/accept.txt index 8ec78bd70bce..1104d6f3cd41 100644 --- a/.vale/styles/config/vocabularies/Data/accept.txt +++ b/.vale/styles/config/vocabularies/Data/accept.txt @@ -7,6 +7,7 @@ Data('s)? [Dd]iscretizer(s)? dtype [Gg]roupby +[Hh]udi [Ii]ndexable [Ii]ngest [Ii]nqueue(s)? diff --git a/doc/source/data/api/input_output.rst b/doc/source/data/api/input_output.rst index bb8d791d98b2..51bd7ecedb13 100644 --- a/doc/source/data/api/input_output.rst +++ b/doc/source/data/api/input_output.rst @@ -186,6 +186,15 @@ Delta Sharing read_delta_sharing_tables +Hudi +---- + +.. autosummary:: + :nosignatures: + :toctree: doc/ + + read_hudi + Iceberg ------- diff --git a/python/ray/data/BUILD b/python/ray/data/BUILD index d232ab352ba0..21b4e0d829b6 100644 --- a/python/ray/data/BUILD +++ b/python/ray/data/BUILD @@ -225,6 +225,14 @@ py_test( deps = ["//:ray_lib", ":conftest"], ) +py_test( + name = "test_hudi", + size = "small", + srcs = ["tests/test_hudi.py"], + tags = ["team:data", "exclusive"], + deps = ["//:ray_lib", ":conftest"], +) + py_test( name = "test_image", size = "small", diff --git a/python/ray/data/__init__.py b/python/ray/data/__init__.py index 89d531aa2ee5..5883ae6c542c 100644 --- a/python/ray/data/__init__.py +++ b/python/ray/data/__init__.py @@ -48,6 +48,7 @@ read_databricks_tables, read_datasource, read_delta_sharing_tables, + read_hudi, read_iceberg, read_images, read_json, @@ -139,6 +140,7 @@ "read_csv", "read_datasource", "read_delta_sharing_tables", + "read_hudi", "read_iceberg", "read_images", "read_json", diff --git a/python/ray/data/_internal/datasource/hudi_datasource.py b/python/ray/data/_internal/datasource/hudi_datasource.py new file mode 100644 index 000000000000..828d9baada7f --- /dev/null +++ b/python/ray/data/_internal/datasource/hudi_datasource.py @@ -0,0 +1,91 @@ +import logging +import os +from typing import Dict, Iterator, List, Optional + +from ray.data._internal.util import _check_import +from ray.data.block import BlockMetadata +from ray.data.datasource.datasource import Datasource, ReadTask + +logger = logging.getLogger(__name__) + + +class HudiDatasource(Datasource): + """Hudi datasource, for reading Apache Hudi table.""" + + def __init__( + self, + table_uri: str, + storage_options: Optional[Dict[str, str]] = None, + ): + _check_import(self, module="hudi", package="hudi-python") + + self._table_uri = table_uri + self._storage_options = storage_options + + def get_read_tasks(self, parallelism: int) -> List["ReadTask"]: + import pyarrow + from hudi import HudiTable + + def _perform_read( + table_uri: str, + base_file_paths: List[str], + options: Dict[str, str], + ) -> 
Iterator["pyarrow.Table"]:
+            from hudi import HudiFileGroupReader
+
+            for p in base_file_paths:
+                file_group_reader = HudiFileGroupReader(table_uri, options)
+                batch = file_group_reader.read_file_slice_by_base_file_path(p)
+                yield pyarrow.Table.from_batches([batch])
+
+        hudi_table = HudiTable(self._table_uri, self._storage_options)
+
+        reader_options = {
+            **hudi_table.storage_options(),
+            **hudi_table.hudi_options(),
+        }
+
+        schema = hudi_table.get_schema()
+        read_tasks = []
+        for file_slices_split in hudi_table.split_file_slices(parallelism):
+            if len(file_slices_split) == 0:
+                # when the table is empty, this will be an empty split
+                continue
+
+            num_rows = 0
+            relative_paths = []
+            input_files = []
+            size_bytes = 0
+            for file_slice in file_slices_split:
+                # A file slice in a Hudi table is a logical group of data files
+                # within a physical partition. Records stored in a file slice
+                # are associated with a commit on the Hudi table's timeline.
+                # For more info, see https://hudi.apache.org/docs/file_layouts
+                num_rows += file_slice.num_records
+                relative_path = file_slice.base_file_relative_path()
+                relative_paths.append(relative_path)
+                full_path = os.path.join(self._table_uri, relative_path)
+                input_files.append(full_path)
+                size_bytes += file_slice.base_file_size
+
+            metadata = BlockMetadata(
+                num_rows=num_rows,
+                schema=schema,
+                input_files=input_files,
+                size_bytes=size_bytes,
+                exec_stats=None,
+            )
+
+            read_task = ReadTask(
+                read_fn=lambda paths=relative_paths: _perform_read(
+                    self._table_uri, paths, reader_options
+                ),
+                metadata=metadata,
+            )
+            read_tasks.append(read_task)
+
+        return read_tasks
+
+    def estimate_inmemory_data_size(self) -> Optional[int]:
+        # TODO(xushiyan) add APIs to provide estimated in-memory size
+        return None
diff --git a/python/ray/data/examples/data/hudi-tables/0.x_cow_partitioned.zip b/python/ray/data/examples/data/hudi-tables/0.x_cow_partitioned.zip
new file mode 100644
index 0000000000000000000000000000000000000000..9f78c06de94524454782e38dcc6bd71418f5aaa6
GIT binary patch
literal 46757
[base85-encoded binary payload (46,757 bytes) omitted]
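
The remaining wiring in this patch adds the public `read_hudi` entry point in
`read_api.py`. As a rough sketch of how such an entry point wraps the
datasource (the signature and docstring below are an approximation for
illustration, not the PR's verbatim code):

```python
from typing import Any, Dict, Optional

from ray.data._internal.datasource.hudi_datasource import HudiDatasource
from ray.data.dataset import Dataset
from ray.data.read_api import read_datasource


def read_hudi(
    table_uri: str,
    *,
    storage_options: Optional[Dict[str, str]] = None,
    ray_remote_args: Optional[Dict[str, Any]] = None,
    concurrency: Optional[int] = None,
    override_num_blocks: Optional[int] = None,
) -> Dataset:
    """Create a Dataset from an Apache Hudi table, reading the base files
    of the latest committed snapshot."""
    datasource = HudiDatasource(
        table_uri=table_uri,
        storage_options=storage_options,
    )
    # Delegate scheduling and block creation to the generic datasource reader.
    return read_datasource(
        datasource,
        ray_remote_args=ray_remote_args,
        concurrency=concurrency,
        override_num_blocks=override_num_blocks,
    )
```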
z*w=O20Z2LTCcKk0FOErgZl=Nq8U7E_G5SkJp(?Z{@JLv`?$FFtg923 zP9X5DDDjo)7ed3GY{=#sD%_JK_;v{eWr$>m*bsa3a1Li+fBaq|*fLD(cEpaonVZv_ zZ|}S5fGzBsJG-*pa-|G>`}M&u!ES(J>q_hN0oKRL1&oT}GrzI}_l;QorqlMNtT^1;z`7lP z2t~iB?dJZMV2nRDT^zN+vi%lL{{PMn`&EcT7lYnTfIt9plwCyHO#dgCn1!DCFoM~i z-3(638heO|ZAj6U6XSUsh=!j++8SPoKd-scHyDs_8`okJ8+3)O|17TCMB$KHfqOn0 zl$PrPs@rv0yi}bFK0fy^6KH?F)9O)WAT(xup$0xj9=rDj&U<$Y4GD=C`z)urTlQJJ z<~JJJU6yv8!9XWL$X4`Hj&7qV@lYU!yZJR|J9ra05K+^cQrSJNOJ{=p0_wuD^EimO zY_WK*Mj@}+#M^Yn-=;QyAWN5nV|O^DxA&>jPT!IS9;P>y>3PvgKp8xXF^?pzCY!>z zv~Z%jcep5n&vA2Z)&{Y%;Izau2cN5YVYk1~&$?#gh_ssm+&0%U0&Jxqj1dk-kVQgY z3MX6g!lk4$ZrI)GRx3e-EzJiH#o-Uc;oHIx*U89xv@fO0rUv|o73p?Ywgi?v1rN60 z)~dK6vo|rSzT1b2&w)XU#*5SW+Ukj+dNbjnE}Z>d4tY`e7IsV3w8&3)iF~*vR5wf87l}Thblb5 zZz1ui2d`qjt!=6VrHqR==&RXuKgrrL3zV71Dj;~BZ}cfuuoz?40gQ?&WESw=zD3TK zg;k+NYSKrxsC^`Z7}abZk|1-&{fs+*(#8xZ8kB|1(P4tz0Duez95ze*fZFnm`*ySl zRacONoEz_+i<2~{s@Rij0it%$Z0XhYtGgi)Ih!Kw6i+n40(FJ z0F=lSGDqCBk%*S zJVRtdy*SuL%}&Y{P6U0}x!wH%k(URN2snSa*JD$HjwAvA!t><-Zf^=NXzGF-4xWrO zqq~QC#jZ6AH%`snyN;!vr!`w+vhCRi5J&0CP0*^NA zp+$`O6Z4JP*|^yic`s5R%>dgIBNB5&#WTz6iZoNKoOrgJWd4jK{zD})sd(Z+k5jT! zEb1COu4`AX2C&u3n%X8;8$znY*A5AsjG?Z85pIvrVmIc@Y8Md*gjpP zrF&haG|$RrDrj&ar4m1CKkGx86>>9BeznTjHav=ca6#C20O4z0?G7dHC`1}gXQjHX z-rLq9su}ZY!4h>}bW)yuPEfX*CoWQa&8(2(Vw(cC#rRaoCPL8ETafsi0r9MTW4&2xTkjy7+Na2Dp*At_ zC=0v#w3`Csy4sY7h6!e|zE(+aFvWCQnC2YLKMeS>5N9la?r(lC2b(WKHGPA6>+M~i0gIDgL@ML!cp@)~wA}mt{Hq7zaqoCfe!Ca6KM!0Mm4M1&B-XQnGhzgrN9p zvcap?>FqWSP^B!UXy9z|D1U*iyxoWC)*Fx#163Dw3rqcVM@vU%K3*QOjeEftL+$8G z#h~~FL$>`B;`k~2DN-Qi{VdDXJjB`s^VJRYp(g24Io)IdyHe|(rmT5*XS-}lNt-v= zVnIP*WyY#t)*F&B1m5V~Znw5+$O1ZTpz;rKy1|fZLDg%toIL6@l5)@tnOpkF@-fn; zpw_It2D>sBxTy-K{!rL?9tNYrep|^Y-?BV0D=VJG!L2?U`2q}VY`|L`*mjdy=q4|@pWWEgiM(_J&`>x*JFE>*+D9V08xt|Zxj!H-k zadSU4EZ;!L^Zl8B{#k}?-=O-r5P8l;Zvr@~-4kv(sc5*z#Ox1`JsZo*={<#F@ItZ~ z>oe_h!7q3>DLW5YcI2>U{{!CjPW#eG=8jhlZJ!-zAgB%j|360t7T_XJg<8A}NU2T& zkD7HtC|#~a-|W`>;&~*#)resXe3;PgZS6)A)e^_z?K#rBMc%kvF!Y`thMbhvCEh^? zRBuV+iw7CMEvuIUI5y9TCi;$TvunF=t6m3^NKEWl)@QXjU*~+&=$8=WzP~@i>A$Iu zTVHYKx*$_={#t$4UGsH~yRcC0^869fdbgj#nCWNZTKWCe>N+PElNJ{{7Y9A7rVbN5 z8<(ygy(WhyGrbNA3o|Dd8@skHo8}km>7iTdpMl~DZnq1A-+>~DE{2T>5-6U}x#B^Y ztLkK`_~2Y|@66uad~V4*xvERGw(Xnwb2eOoOxi*y&tH3=?Hqo`RT*&p!Fqs)h@3&> zC8k^6J60MxHFWehDhDOWi!)ApcqVu zF~BaH$U1NdF%eIwE-=7dW;vU#Z#IaGUV+R^y1VG42NQJ1Cx*DJ&d+R@5Lb|MytEUr z-vtv-rkYQ72&(03_f{m(x~^3V(yNtdkKs?L7LAKz3oZlo8T&V*u~paWtssbPIA~Et zaj|q~)kWGFz6RlUU5Z8{Qy(Z;^Y0ezHm!GdaM(-s1vqZB0aZZ<$pGxs2-n&zkNuGX zq=V}mtNzg5m;G{x$+A17ZKr!iK@7Z2It*ep!6_ffu<*BCOz!zU5MfE z0r#zWKk0?uoB|gJ$i-=?7tB~-2kUI6HEXU_7*9;lDj#Dsh2NR79?UnFw1+-VbS+qB z(q&CtxLORTb{BS9!XPU&H5iPU32UqjczIAb)>aGQ8_>nJO(QC1B$nJ4HXo_7Q;cMH zLhz3uGzK?gWheO#0GkYE>aMj#<*R_SVu(5W*kq>W@44`aG1b0`@iL*8%-3 zaq8E~X!4yWjE41O+uK`xd{eLQ)!~ta3^ZBzj=V9N+}YXCL1r8N*4{@+n(IvHp&{dC>Px5`LygF_3Z&Y2`c=@7}Q4M$m4GifdC9sXd zBb)S%rlORe)b&O8tdBO^(!WaJoRF!Wmw0ZCjK4)c)Ki?hQNL+mr`pp;j2^BsReDKN zwv;*A8`j=B#K%;-ASkNHSGfGD=3WYJh+T5ct%gOW*R|M@m;KK!@De9KpDbMs7K#0W z5}6>AEYe7n_^AeZf6sy_zsjwIi4Jw4ggE8$l+v!z=Rv=_;|ZSaJz^ORHNF0uP&Ni0~FBQIJ5REJz$ zFG>!rwtBpEXQNHV^xhP^D8O{VX|yd0UKg{R{b64tYxcB)ZS=#9e&W#pc9B|C|M_hz zo!A)F+AQUTZ9bfU^ezoT4?%1|msVGbw5N*4xU1 zC0wO!2H=BrqnR-JLK!DvXBBIc4?7ZR`3=Sv1_0B2Sm+$Mu&|WCj#233SaD?zbD3dr0!h=V*TiF*uOpPL zO0&wi5wlmsks#4PT>N8U5^EkMK7S}LZNPNPw3dX3lYC=ReM@vp`Ull9S^{w+f! 
zQPmY2QQEySG(j}+${>Q0k7>Cs;`s~h`Q9=XENVt2pXfs&{(4k>4Es%(rW1qIJZ#7W zeODEuLdjuxWn@4SQc1tp_gx;x&)+l60kqrBU~&%Gw>ekSK$5gG6kOs+oYFX#5Vh({ zetvstp-7-NK1>XR1ruMP$E#_WQ*0pcYCHsqRQ=h*!is9#aPPbnFTtqu$}LGn{T$MIt5LyX z9ahSVuG9O3p=yF$T!qQ{UBk0Ylye-ISet&BA!*6Z&B18vp(?%Q8}4I8e*Es=-u=kW zWvM*S&@nV|JNBaHj z+duw5$&K%TbA#;&36w|LHs9(6D5lJQd3Km-Z&r{Cu{X{9t%7f#=0d3%cF2=q(&jCa zZ-xKu^K;2S-#@;(2*XvoPg`-!81PuO=a-dSoeRIuzA6=qB6iX-)NO-ZQ4%+uN`mS~s?1~b z>}SzMS@vR8WUAAGdSz;rEPC@zEN*BJX>@n>b`6H9+K$xexU%KYL4jiWtZMs%Gtt(H zW;cT*x8ge}>eCTJL*osBLvk^+{h`h4eyIj7mWG!|5YlhpQglk3g&*hK9*LhiO?Kw1 zYv<4D9WYS7`H);fS-5*Bt+l10xs`^srk05=awpU|lrx|IoqqCYjX&pVz&lps{hw;o zHqbRQ(=_~A^Wh$=f2Bq`=ygD2+n{?UNHSGO|q+saV}|53&t)O~W-;5a7wr(e|k@2>~{-k$eowI6oN zLhFUS=ST(9BWFX(V@*wj^!+|bsuNPCB1s)aM*2;!I!x0P-NPnJjU46|P*G6mj*)_6 zo=obynBQGUzKZ!}gq#ou=;#RfIWg4L?}|Mfa~{a$my?A1mK6#q(I4t>M=B3F7``7@ zj@94$tNOnt5kgj9@+*aX?H4~+fAd)NbjXFn6WKqk{#gE?|4xbN7$u*-q;x!W(0`{& ze~jw--%>q{bY-Y>n1tqcB!A2@8S?s~Pmx`H3t4o$531mttkUm&@cYitBYp6MTr*$$ z;8%)=?p62F-KXr31MD2K*B_rtuM_-%;`d(vBT*Pq%0tho6Gz&~9f#zYVh>MF>^R@k zBK=N}Cr?@5q&gf5anBWgLE+zeJSNp2-giF_2hw9wfqp^rNEW95F4l1p_*YI=-1p2+ zUwYs#XTh(U`}sBUZNf!CIh>PB{zT&c<-H>DU15if)cCigj>p(%LI&ACl0o@s-bJK- ze<}Qr;YTibFfEFN;9)mVP=x;B=qs>zjLZpjQpfzR-*3YIyB{6l=u0E~a!`J4gv0gS z@3S`YpW~XrLvr^2&c{!h`6ases+q(0@aaNMvK{0Up?Cdf&4m9gN55^W-*?ZieEd6$ zI&xegkFPCsiRLdgb230U%z<{|wDpztzjJ&arM*b|m$bk4j_-5l&%NUhul2(z`-{;3 z{5jq`3Xv`_|E+hN#K%v)BFA4c^fyrJs|F6+z|UC^X^v@tm+Nme z@Er{Pa&VkXkNp@u;D4g`Gdlc_r0*Uhjm7<6L_I?KUvTk1vLSGc4e$Rm8^`eDf8^mf zWIcskjr*&q;YS8aKcgm!?+hI(K^|!z`mNr-1jGHqck!?D%%SARb5wqvXAWNlanG~=j?nSgyT8v^ z`FEh{c&4PIEP?O+mF)j9ZxWKLUqYxKiaH)#`72jnfx-=izvJpRp_c!wn8UU@9vbHnO44z6I{#Vs{bMW|J9m$@_c#R^40k-s>j0$Af@^X#UBqUa8&WPtbUg2 zX#M}I5CebK{o!CdzP0Trmk(Ti$L0TQqucLB=gD1!?Vl0ywTpy${f^E5Yd85y`QLud zM-`;{>UWg?QbB+Adj4}e9u1Vo^OB0=Ksp&=2u)E+VoIR*pWY@NP|jR E2UXLG>;M1& literal 0 HcmV?d00001 diff --git a/python/ray/data/read_api.py b/python/ray/data/read_api.py index 60eee8571c1d..d60a89858512 100644 --- a/python/ray/data/read_api.py +++ b/python/ray/data/read_api.py @@ -27,6 +27,7 @@ from ray.data._internal.datasource.delta_sharing_datasource import ( DeltaSharingDatasource, ) +from ray.data._internal.datasource.hudi_datasource import HudiDatasource from ray.data._internal.datasource.iceberg_datasource import IcebergDatasource from ray.data._internal.datasource.image_datasource import ( ImageDatasource, @@ -2312,6 +2313,58 @@ def get_dbutils(): ) +@PublicAPI(stability="alpha") +def read_hudi( + table_uri: str, + *, + storage_options: Optional[Dict[str, str]] = None, + ray_remote_args: Optional[Dict[str, Any]] = None, + concurrency: Optional[int] = None, + override_num_blocks: Optional[int] = None, +) -> Dataset: + """ + Create a :class:`~ray.data.Dataset` from an + `Apache Hudi table `_. + + Examples: + >>> import ray + >>> ds = ray.data.read_hudi( # doctest: +SKIP + ... table_uri="/hudi/trips", + ... ) + + Args: + table_uri: The URI of the Hudi table to read from. Local file paths, S3, and GCS + are supported. + storage_options: Extra options that make sense for a particular storage + connection. This is used to store connection parameters like credentials, + endpoint, etc. See more explanation + `here `_. + ray_remote_args: kwargs passed to :meth:`~ray.remote` in the read tasks. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run or the total number of output blocks. 
By default, + concurrency is dynamically decided based on the available resources. + override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. + + Returns: + A :class:`~ray.data.Dataset` producing records read from the Hudi table. + """ # noqa: E501 + datasource = HudiDatasource( + table_uri=table_uri, + storage_options=storage_options, + ) + + return read_datasource( + datasource=datasource, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + override_num_blocks=override_num_blocks, + ) + + @PublicAPI def from_dask(df: "dask.dataframe.DataFrame") -> MaterializedDataset: """Create a :class:`~ray.data.Dataset` from a diff --git a/python/ray/data/tests/test_hudi.py b/python/ray/data/tests/test_hudi.py new file mode 100644 index 000000000000..af8035cc315f --- /dev/null +++ b/python/ray/data/tests/test_hudi.py @@ -0,0 +1,114 @@ +import os +import zipfile + +import pytest +from packaging.version import parse as parse_version +from pytest_lazyfixture import lazy_fixture + +import ray +from ray._private.utils import _get_pyarrow_version +from ray.data.datasource.path_util import ( + _resolve_paths_and_filesystem, + _unwrap_protocol, +) +from ray.data.tests.conftest import * # noqa +from ray.data.tests.mock_http_server import * # noqa +from ray.tests.conftest import * # noqa + +MIN_PYARROW_VERSION_FOR_HUDI = parse_version("11.0.0") +_VER = _get_pyarrow_version() +PYARROW_VERSION = parse_version(_VER) if _VER else None +PYARROW_VERSION_MEETS_REQUIREMENT = ( + PYARROW_VERSION and PYARROW_VERSION >= MIN_PYARROW_VERSION_FOR_HUDI +) +PYARROW_HUDI_TEST_SKIP_REASON = ( + f"Hudi only supported if pyarrow >= {MIN_PYARROW_VERSION_FOR_HUDI}" +) + + +def _extract_testing_table(fixture_path: str, table_dir: str, target_dir: str) -> str: + with zipfile.ZipFile(fixture_path, "r") as zip_ref: + zip_ref.extractall(target_dir) + return os.path.join(target_dir, table_dir) + + +@pytest.mark.skipif( + not PYARROW_VERSION_MEETS_REQUIREMENT, + reason=PYARROW_HUDI_TEST_SKIP_REASON, +) +@pytest.mark.parametrize( + "fs,data_path", + [ + (None, lazy_fixture("local_path")), + (lazy_fixture("local_fs"), lazy_fixture("local_path")), + ], +) +def test_read_hudi_simple_cow_table(ray_start_regular_shared, fs, data_path): + setup_data_path = _unwrap_protocol(data_path) + target_testing_dir = os.path.join(setup_data_path, "test_hudi") + fixture_path, _ = _resolve_paths_and_filesystem( + "example://hudi-tables/0.x_cow_partitioned.zip", fs + ) + target_table_path = _extract_testing_table( + fixture_path[0], "trips_table", target_testing_dir + ) + + ds = ray.data.read_hudi(target_table_path) + + assert ds.schema().names == [ + "_hoodie_commit_time", + "_hoodie_commit_seqno", + "_hoodie_record_key", + "_hoodie_partition_path", + "_hoodie_file_name", + "ts", + "uuid", + "rider", + "driver", + "fare", + "city", + ] + assert ds.count() == 5 + rows = ( + ds.select_columns(["_hoodie_commit_time", "ts", "uuid", "fare"]) + .sort("fare") + .take_all() + ) + assert rows == [ + { + "_hoodie_commit_time": "20240402123035233", + "ts": 1695115999911, + "uuid": "c8abbe79-8d89-47ea-b4ce-4d224bae5bfa", + "fare": 17.85, + }, + { + "_hoodie_commit_time": "20240402123035233", + "ts": 1695159649087, + "uuid": "334e26e9-8355-45cc-97c6-c31daf0df330", + "fare": 19.1, + }, + { + "_hoodie_commit_time": "20240402123035233", + "ts": 1695091554788, + "uuid": 
"e96c4396-3fad-413a-a942-4cb36106d721", + "fare": 27.7, + }, + { + "_hoodie_commit_time": "20240402123035233", + "ts": 1695516137016, + "uuid": "e3cf430c-889d-4015-bc98-59bdce1e530c", + "fare": 34.15, + }, + { + "_hoodie_commit_time": "20240402144910683", + "ts": 1695046462179, + "uuid": "9909a8b1-2d15-4d3d-8ec9-efc48c536a00", + "fare": 339.0, + }, + ] + + +if __name__ == "__main__": + import sys + + sys.exit(pytest.main(["-v", __file__])) diff --git a/python/requirements/ml/data-test-requirements.txt b/python/requirements/ml/data-test-requirements.txt index d2d435b09d88..9ad22340d031 100644 --- a/python/requirements/ml/data-test-requirements.txt +++ b/python/requirements/ml/data-test-requirements.txt @@ -18,4 +18,5 @@ delta-sharing pytest-mock decord snowflake-connector-python -pyiceberg[sql-sqlite]==0.7.0 \ No newline at end of file +pyiceberg[sql-sqlite]==0.7.0 +hudi==0.2.0rc1 diff --git a/python/requirements_compiled.txt b/python/requirements_compiled.txt index 1347afee24c5..45d40d81b7a0 100644 --- a/python/requirements_compiled.txt +++ b/python/requirements_compiled.txt @@ -745,6 +745,8 @@ httpx==0.24.1 # -r /ray/ci/../python/requirements/test-requirements.txt # gradio # gradio-client +hudi==0.2.0rc1 + # via -r /ray/ci/../python/requirements/ml/data-test-requirements.txt huggingface-hub==0.19.4 # via # accelerate From acc1b0728058103dad747de8bbf0304b995ff3d9 Mon Sep 17 00:00:00 2001 From: Dongjun Na Date: Tue, 19 Nov 2024 10:45:12 +0900 Subject: [PATCH 010/107] [Core] Map aarch64 to arm64 in get_system_info function for Prometheus download (#48756) Signed-off-by: Dongjun Na --- .../dashboard/modules/metrics/install_and_start_prometheus.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/python/ray/dashboard/modules/metrics/install_and_start_prometheus.py b/python/ray/dashboard/modules/metrics/install_and_start_prometheus.py index a65050212950..cf7cb31c3607 100644 --- a/python/ray/dashboard/modules/metrics/install_and_start_prometheus.py +++ b/python/ray/dashboard/modules/metrics/install_and_start_prometheus.py @@ -26,6 +26,9 @@ def get_system_info(): if architecture == "x86_64": # In the Prometheus filename, it's called amd64 architecture = "amd64" + elif architecture == "aarch64": + # In the Prometheus filename, it's called arm64 + architecture = "arm64" return os_type, architecture From 73c956e07e3a6a3587b9826927aa79287a7af0af Mon Sep 17 00:00:00 2001 From: Rui Qiao <161574667+ruisearch42@users.noreply.github.com> Date: Mon, 18 Nov 2024 20:11:53 -0800 Subject: [PATCH 011/107] [core][compiled graphs] Fix do_profile_tasks (#48782) Fix do_profile_tasks by adding missing param. 
--- python/ray/dag/compiled_dag_node.py | 2 +- .../tests/experimental/test_torch_tensor_dag.py | 14 ++++++++++++-- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/python/ray/dag/compiled_dag_node.py b/python/ray/dag/compiled_dag_node.py index acec6c2672cd..c964b890f3d4 100644 --- a/python/ray/dag/compiled_dag_node.py +++ b/python/ray/dag/compiled_dag_node.py @@ -185,7 +185,7 @@ def do_profile_tasks( """ try: for task in tasks: - task.prepare() + task.prepare(overlap_gpu_communication=overlap_gpu_communication) if not hasattr(self, "__ray_adag_events"): self.__ray_adag_events = [] diff --git a/python/ray/dag/tests/experimental/test_torch_tensor_dag.py b/python/ray/dag/tests/experimental/test_torch_tensor_dag.py index d1ac1c68063f..1797068e7e2d 100644 --- a/python/ray/dag/tests/experimental/test_torch_tensor_dag.py +++ b/python/ray/dag/tests/experimental/test_torch_tensor_dag.py @@ -182,7 +182,11 @@ def test_torch_tensor_as_dag_input(ray_start_regular): @pytest.mark.parametrize("ray_start_regular", [{"num_cpus": 4}], indirect=True) -def test_torch_tensor_nccl(ray_start_regular): +@pytest.mark.parametrize("enable_profiling", [False, True]) +@pytest.mark.parametrize("overlap_gpu_communication", [False, True]) +def test_torch_tensor_nccl( + ray_start_regular, monkeypatch, enable_profiling, overlap_gpu_communication +): if not USE_GPU: pytest.skip("NCCL tests require GPUs") @@ -190,6 +194,10 @@ def test_torch_tensor_nccl(ray_start_regular): sum(node["Resources"].get("GPU", 0) for node in ray.nodes()) > 1 ), "This test requires at least 2 GPUs" + monkeypatch.setattr( + ray.dag.constants, "RAY_ADAG_ENABLE_PROFILING", enable_profiling + ) + actor_cls = TorchTensorWorker.options(num_cpus=0, num_gpus=1) sender = actor_cls.remote() @@ -204,7 +212,9 @@ def test_torch_tensor_nccl(ray_start_regular): dag = dag.with_type_hint(TorchTensorType(transport="nccl")) dag = receiver.recv.bind(dag) - compiled_dag = dag.experimental_compile() + compiled_dag = dag.experimental_compile( + _overlap_gpu_communication=overlap_gpu_communication + ) # Test that we can pass different shapes and data. for i in range(3): From b5934e54c446fd4f5c28cf9b848c402f9a044357 Mon Sep 17 00:00:00 2001 From: Alexey Kudinkin Date: Mon, 18 Nov 2024 21:57:19 -0800 Subject: [PATCH 012/107] [Arrow] Unifying Arrow conversion handling, fixing handling of >2Gb objects (#48487) ## Why are these changes needed? Addresses #48419 Currently, we defer to Pyarrow to infer corresponding data-type to represent column values returned by the Map-based operators. However, Arrow is somehow not inferring the `large_*` kinds of types even in somewhat trivial cases of strings, byte-strings etc. resulting in `ArrowCapacityError` when you try to add a single string/byte-string >2Gb. 
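For illustration, a minimal standalone repro of the failure mode (not part of this change; it allocates a payload just over 2GiB):

```python
import pyarrow as pa

payload = b"x" * (2**31)  # a single byte-string just over 2GiB

# Type inference picks `binary`, whose int32 offsets cannot address >2GiB:
try:
    pa.array([payload])
except pa.lib.ArrowCapacityError as e:
    print(f"inferred `binary` type overflows: {e}")

# Explicitly using the large variant (int64 offsets) succeeds:
arr = pa.array([payload], type=pa.large_binary())
assert arr.type == pa.large_binary()
```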
This change addresses that by - Unifying handling of conversion to Numpy/Arrow in a single place (unifying it across different code-paths) - Fixing incorrect fallbacks to `ArrowPythonObjectType` - Upscaling `binary`, `string` to their Large counterparts (ie `large_list`, etc) if objects we're adding to the Arrow array > 2Gb --------- Signed-off-by: Alexey Kudinkin --- python/ray/air/BUILD | 8 + python/ray/air/tests/test_arrow.py | 71 ++++++++ python/ray/air/tests/test_object_extension.py | 10 +- .../ray/air/util/object_extensions/arrow.py | 6 +- .../ray/air/util/tensor_extensions/arrow.py | 152 +++++++++++++++++- .../ray/air/util/tensor_extensions/utils.py | 25 ++- python/ray/data/BUILD | 2 +- python/ray/data/_internal/arrow_block.py | 81 ++-------- .../_internal/arrow_ops/transform_pyarrow.py | 2 + python/ray/data/_internal/numpy_support.py | 76 +++++---- python/ray/data/_internal/pandas_block.py | 28 ++-- python/ray/data/_internal/table_block.py | 19 +-- python/ray/data/_internal/util.py | 8 +- python/ray/data/block.py | 15 +- python/ray/data/context.py | 11 ++ python/ray/data/extensions/__init__.py | 4 +- .../ray/data/extensions/object_extension.py | 2 +- .../data/tests/preprocessors/test_encoder.py | 2 +- python/ray/data/tests/test_arrow_block.py | 4 +- .../data/tests/test_arrow_serialization.py | 6 +- python/ray/data/tests/test_binary.py | 63 ++++++++ .../data/tests/test_dynamic_block_split.py | 4 +- python/ray/data/tests/test_numpy_support.py | 129 ++++++++++++--- python/ray/data/tests/test_pandas_block.py | 4 +- python/ray/data/tests/test_strict_mode.py | 4 +- .../ray/data/tests/test_transform_pyarrow.py | 70 +++++--- 26 files changed, 602 insertions(+), 204 deletions(-) create mode 100644 python/ray/air/tests/test_arrow.py diff --git a/python/ray/air/BUILD b/python/ray/air/BUILD index 0799ef871078..58951ee92889 100644 --- a/python/ray/air/BUILD +++ b/python/ray/air/BUILD @@ -46,6 +46,14 @@ py_test( deps = [":ml_lib"] ) +py_test( + name = "test_arrow", + size = "small", + srcs = ["tests/test_arrow.py"], + tags = ["team:ml", "team:data", "ray_data", "exclusive"], + deps = [":ml_lib"] +) + py_test( name = "test_air_usage", size = "small", diff --git a/python/ray/air/tests/test_arrow.py b/python/ray/air/tests/test_arrow.py new file mode 100644 index 000000000000..efe68937836b --- /dev/null +++ b/python/ray/air/tests/test_arrow.py @@ -0,0 +1,71 @@ +from dataclasses import dataclass, field + +import pyarrow as pa +import pytest + +from ray.air.util.tensor_extensions.arrow import ( + ArrowConversionError, + _convert_to_pyarrow_native_array, + _infer_pyarrow_type, + convert_to_pyarrow_array, +) +from ray.air.util.tensor_extensions.utils import create_ragged_ndarray + + +@dataclass +class UserObj: + i: int = field() + + +def test_pa_infer_type_failing_to_infer(): + # Represent a single column that will be using `ArrowPythonObjectExtension` type + # to ser/de native Python objects into bytes + column_vals = create_ragged_ndarray( + [ + "hi", + 1, + None, + [[[[]]]], + {"a": [[{"b": 2, "c": UserObj(i=123)}]]}, + UserObj(i=456), + ] + ) + + inferred_dtype = _infer_pyarrow_type(column_vals) + + # Arrow (17.0) seem to fallback to assume the dtype of the first element + assert pa.string().equals(inferred_dtype) + + +def test_convert_to_pyarrow_array_object_ext_type_fallback(): + column_values = create_ragged_ndarray( + [ + "hi", + 1, + None, + [[[[]]]], + {"a": [[{"b": 2, "c": UserObj(i=123)}]]}, + UserObj(i=456), + ] + ) + column_name = "py_object_column" + + # First, assert that straightforward 
conversion into Arrow native types fails + with pytest.raises(ArrowConversionError) as exc_info: + _convert_to_pyarrow_native_array(column_values, column_name) + + assert ( + str(exc_info.value) + == "Error converting data to Arrow: ['hi' 1 None list([[[[]]]]) {'a': [[{'b': 2, 'c': UserObj(i=123)}]]}\n UserObj(i=456)]" # noqa: E501 + ) + + # Subsequently, assert that fallback to `ArrowObjectExtensionType` succeeds + pa_array = convert_to_pyarrow_array(column_values, column_name) + + assert pa_array.to_pylist() == column_values.tolist() + + +if __name__ == "__main__": + import sys + + sys.exit(pytest.main(["-v", "-x", __file__])) diff --git a/python/ray/air/tests/test_object_extension.py b/python/ray/air/tests/test_object_extension.py index b1479dbc4ac0..64600bafc69c 100644 --- a/python/ray/air/tests/test_object_extension.py +++ b/python/ray/air/tests/test_object_extension.py @@ -7,13 +7,13 @@ from ray.air.util.object_extensions.arrow import ( ArrowPythonObjectArray, ArrowPythonObjectType, - object_extension_type_allowed, + _object_extension_type_allowed, ) from ray.air.util.object_extensions.pandas import PythonObjectArray @pytest.mark.skipif( - not object_extension_type_allowed(), reason="Object extension not supported." + not _object_extension_type_allowed(), reason="Object extension not supported." ) def test_object_array_validation(): # Test unknown input type raises TypeError. @@ -25,7 +25,7 @@ def test_object_array_validation(): @pytest.mark.skipif( - not object_extension_type_allowed(), reason="Object extension not supported." + not _object_extension_type_allowed(), reason="Object extension not supported." ) def test_arrow_scalar_object_array_roundtrip(): arr = np.array( @@ -41,7 +41,7 @@ def test_arrow_scalar_object_array_roundtrip(): @pytest.mark.skipif( - not object_extension_type_allowed(), reason="Object extension not supported." + not _object_extension_type_allowed(), reason="Object extension not supported." ) def test_arrow_python_object_array_slice(): arr = np.array(["test", 20, "test2", 40, "test3", 60], dtype=object) @@ -51,7 +51,7 @@ def test_arrow_python_object_array_slice(): @pytest.mark.skipif( - not object_extension_type_allowed(), reason="Object extension not supported." + not _object_extension_type_allowed(), reason="Object extension not supported." 
) def test_arrow_pandas_roundtrip(): obj = types.SimpleNamespace(a=1, b="test") diff --git a/python/ray/air/util/object_extensions/arrow.py b/python/ray/air/util/object_extensions/arrow.py index c3158bbff68b..a56a04869855 100644 --- a/python/ray/air/util/object_extensions/arrow.py +++ b/python/ray/air/util/object_extensions/arrow.py @@ -16,7 +16,7 @@ PYARROW_VERSION = None if _VER is None else parse_version(_VER) -def object_extension_type_allowed() -> bool: +def _object_extension_type_allowed() -> bool: return ( PYARROW_VERSION is not None and PYARROW_VERSION >= MIN_PYARROW_VERSION_SCALAR_SUBCLASS @@ -104,7 +104,9 @@ def from_objects( arr = pa.array(all_dumped_bytes, type=type_.storage_type) return ArrowPythonObjectArray.from_storage(type_, arr) - def to_numpy(self, zero_copy_only: bool = False) -> np.ndarray: + def to_numpy( + self, zero_copy_only: bool = False, writable: bool = False + ) -> np.ndarray: arr = np.empty(len(self), dtype=object) arr[:] = self.to_pylist() return arr diff --git a/python/ray/air/util/tensor_extensions/arrow.py b/python/ray/air/util/tensor_extensions/arrow.py index bef940c136f9..1dc1a4984826 100644 --- a/python/ray/air/util/tensor_extensions/arrow.py +++ b/python/ray/air/util/tensor_extensions/arrow.py @@ -10,10 +10,14 @@ from packaging.version import parse as parse_version from ray._private.utils import _get_pyarrow_version +from ray.air.constants import TENSOR_COLUMN_NAME from ray.air.util.tensor_extensions.utils import ( + _is_ndarray_tensor, _is_ndarray_variable_shaped_tensor, create_ragged_ndarray, ) +from ray.data._internal.util import GiB +from ray.util import log_once from ray.util.annotations import DeveloperAPI, PublicAPI PYARROW_VERSION = _get_pyarrow_version() @@ -85,14 +89,152 @@ def pyarrow_table_from_pydict( raise ArrowConversionError(str(pydict)) from e -@DeveloperAPI -def convert_list_to_pyarrow_array( - val: List[Any], enclosing_dict: Dict[str, Any] +@DeveloperAPI(stability="alpha") +def convert_to_pyarrow_array(column_values: np.ndarray, column_name: str) -> pa.Array: + """Converts provided NumPy `ndarray` into PyArrow's `array` while utilizing + both Arrow's natively supported types as well as custom extension types: + + - ArrowTensorArray (for tensors) + - ArrowPythonObjectArray (for user-defined python class objects, as well as + any python object that aren't represented by a corresponding Arrow's native + scalar type) + """ + + try: + # Since Arrow does NOT support tensors (aka multidimensional arrays) natively, + # we have to make sure that we handle this case utilizing `ArrowTensorArray` + # extension type + if column_name == TENSOR_COLUMN_NAME or _is_ndarray_tensor(column_values): + from ray.data.extensions.tensor_extension import ArrowTensorArray + + return ArrowTensorArray.from_numpy(column_values, column_name) + else: + return _convert_to_pyarrow_native_array(column_values, column_name) + + except ArrowConversionError as ace: + from ray.data.extensions.object_extension import ( + ArrowPythonObjectArray, + _object_extension_type_allowed, + ) + + if not _object_extension_type_allowed(): + should_serialize_as_object_ext_type = False + object_ext_type_detail = ( + "skipping fallback to serialize as pickled python" + f" objects (due to unsupported Arrow version {PYARROW_VERSION}, " + f"min required version is {MIN_PYARROW_VERSION_SCALAR_SUBCLASS})" + ) + else: + from ray.data import DataContext + + if not DataContext.get_current().enable_fallback_to_arrow_object_ext_type: + should_serialize_as_object_ext_type = False + 
object_ext_type_detail = ( + "skipping fallback to serialize as pickled python objects " + "(due to DataContext.enable_fallback_to_arrow_object_ext_type " + "= False)" + ) + else: + should_serialize_as_object_ext_type = True + object_ext_type_detail = ( + "falling back to serialize as pickled python objects" + ) + + # NOTE: To avoid logging following warning for every block it's + # only going to be logged in following cases + # - When fallback is disabled, or + # - It's being logged for the first time + if not should_serialize_as_object_ext_type or log_once( + "_fallback_to_arrow_object_extension_type_warning" + ): + logger.warning( + f"Failed to convert column '{column_name}' into pyarrow " + f"array due to: {ace}; {object_ext_type_detail}", + exc_info=ace, + ) + + # If `ArrowPythonObjectType` is not supported raise original exception + if not should_serialize_as_object_ext_type: + raise + + # Otherwise, attempt to fall back to serialize as python objects + return ArrowPythonObjectArray.from_objects(column_values) + + +def _convert_to_pyarrow_native_array( + column_values: np.ndarray, column_name: str ) -> pa.Array: + """Converts provided NumPy `ndarray` into PyArrow's `array` while only utilizing + Arrow's natively supported types (ie no custom extension types)""" + try: - return pa.array(val) + # NOTE: We explicitly infer PyArrow `DataType` so that + # we can perform upcasting to be able to accommodate + # blocks that are larger than 2Gb in size (limited + # by int32 offsets used by Arrow internally) + dtype = _infer_pyarrow_type(column_values) + + logger.log( + logging.getLevelName("TRACE"), + f"Inferred dtype of '{dtype}' for column '{column_name}'", + ) + + return pa.array(column_values, type=dtype) except Exception as e: - raise ArrowConversionError(str(enclosing_dict)) from e + raise ArrowConversionError(str(column_values)) from e + + +def _infer_pyarrow_type(column_values: np.ndarray) -> Optional[pa.DataType]: + """Infers target Pyarrow `DataType` based on the provided + columnar values. + + NOTE: This is a wrapper on top of `pa.infer_type(...)` utility + performing up-casting of `binary` and `string` types to + corresponding `large_binary` and `large_string` types in case + any of the array elements exceeds 2Gb in size therefore + making it impossible for original types to accommodate such + values. + + Unfortunately, for unknown reasons PA doesn't perform + that upcasting itself henceforth we have to do perform + it manually + + Args: + column_values: List of columnar values + + Returns: + Instance of PyArrow's `DataType` based on the provided + column values + """ + + if len(column_values) == 0: + return None + + inferred_pa_dtype = pa.infer_type(column_values) + + def _len_gt_2gb(obj: Any) -> bool: + # NOTE: This utility could be seeing objects other than strings or bytes in + # cases when column contains non-scalar non-homogeneous object types as + # column values, therefore making Arrow unable to infer corresponding + # column type appropriately, therefore falling back to assume the type + # of the first element in the list. + # + # Check out test cases for this method for an additional context. 
+ if isinstance(obj, (str, bytes)): + return len(obj) > 2 * GiB + + return False + + if pa.types.is_binary(inferred_pa_dtype) and any( + [_len_gt_2gb(v) for v in column_values] + ): + return pa.large_binary() + elif pa.types.is_string(inferred_pa_dtype) and any( + [_len_gt_2gb(v) for v in column_values] + ): + return pa.large_string() + + return inferred_pa_dtype @DeveloperAPI diff --git a/python/ray/air/util/tensor_extensions/utils.py b/python/ray/air/util/tensor_extensions/utils.py index be250d89a04d..dfaa95a0acab 100644 --- a/python/ray/air/util/tensor_extensions/utils.py +++ b/python/ray/air/util/tensor_extensions/utils.py @@ -9,9 +9,28 @@ from pandas.core.dtypes.generic import ABCSeries +def _is_ndarray_tensor(arr: np.ndarray) -> bool: + """Return whether the provided NumPy ndarray is comprised of tensors. + + NOTE: Tensor is defined as a NumPy array such that `len(arr.shape) > 1` + """ + + # Case of uniform-shaped (ie non-ragged) tensor + if arr.ndim > 1: + return True + + # Case of ragged tensor (as produced by `create_ragged_ndarray` utility) + elif ( + arr.dtype.type is np.object_ and len(arr) > 0 and isinstance(arr[0], np.ndarray) + ): + return True + + return False + + def _is_ndarray_variable_shaped_tensor(arr: np.ndarray) -> bool: - """Return whether the provided NumPy ndarray is representing a variable-shaped - tensor. + """Return whether the provided NumPy ndarray is comprised of variable-shaped + tensors. NOTE: This is an O(rows) check. """ @@ -69,7 +88,7 @@ def _create_possibly_ragged_ndarray( @PublicAPI(stability="alpha") -def create_ragged_ndarray(values: Sequence[np.ndarray]) -> np.ndarray: +def create_ragged_ndarray(values: Sequence[Any]) -> np.ndarray: """Create an array that contains arrays of different length If you're working with variable-length arrays like images, use this function to diff --git a/python/ray/data/BUILD b/python/ray/data/BUILD index 21b4e0d829b6..e5b7845d9454 100644 --- a/python/ray/data/BUILD +++ b/python/ray/data/BUILD @@ -163,7 +163,7 @@ py_test( py_test( name = "test_binary", - size = "small", + size = "medium", srcs = ["tests/test_binary.py"], tags = ["team:data", "exclusive"], deps = ["//:ray_lib", ":conftest"], diff --git a/python/ray/data/_internal/arrow_block.py b/python/ray/data/_internal/arrow_block.py index f12f89d8cceb..02acc2eb5769 100644 --- a/python/ray/data/_internal/arrow_block.py +++ b/python/ray/data/_internal/arrow_block.py @@ -21,15 +21,11 @@ from ray._private.utils import _get_pyarrow_version from ray.air.constants import TENSOR_COLUMN_NAME from ray.air.util.tensor_extensions.arrow import ( - ArrowConversionError, - convert_list_to_pyarrow_array, + convert_to_pyarrow_array, pyarrow_table_from_pydict, ) from ray.data._internal.arrow_ops import transform_polars, transform_pyarrow -from ray.data._internal.numpy_support import ( - convert_udf_returns_to_numpy, - validate_numpy_batch, -) +from ray.data._internal.numpy_support import convert_to_numpy from ray.data._internal.row import TableRow from ray.data._internal.table_block import TableBlockAccessor, TableBlockBuilder from ray.data._internal.util import NULL_SENTINEL, find_partitions @@ -43,7 +39,6 @@ U, ) from ray.data.context import DataContext -from ray.util.debug import log_once try: import pyarrow @@ -61,17 +56,6 @@ T = TypeVar("T") logger = logging.getLogger(__name__) -ARROW_OBJECT_FIXABLE_ERRORS = ( - pyarrow.lib.ArrowTypeError, - pyarrow.lib.ArrowNotImplementedError, - pyarrow.lib.ArrowInvalid, -) - - -def is_object_fixable_error(e: ArrowConversionError) -> bool: - 
"""Returns whether this error can be fixed by using an ArrowPythonObjectArray""" - return isinstance(e.__cause__, ARROW_OBJECT_FIXABLE_ERRORS) - # We offload some transformations to polars for performance. def get_sort_transform(context: DataContext) -> Callable: @@ -151,27 +135,14 @@ def __init__(self): @staticmethod def _table_from_pydict(columns: Dict[str, List[Any]]) -> Block: - for col_name, col in columns.items(): - try: - if col_name == TENSOR_COLUMN_NAME or isinstance( - next(iter(col), None), np.ndarray - ): - from ray.data.extensions.tensor_extension import ArrowTensorArray - - columns[col_name] = ArrowTensorArray.from_numpy(col, col_name) - else: - columns[col_name] = convert_list_to_pyarrow_array(col, columns) - except ArrowConversionError as e: - from ray.data.extensions.object_extension import ( - ArrowPythonObjectArray, - object_extension_type_allowed, - ) + pa_cols: Dict[str, pyarrow.Array] = dict() + + for col_name, col_vals in columns.items(): + np_col_vals = convert_to_numpy(col_vals) + + pa_cols[col_name] = convert_to_pyarrow_array(np_col_vals, col_name) - if object_extension_type_allowed() and is_object_fixable_error(e): - columns[col_name] = ArrowPythonObjectArray.from_objects(col) - else: - raise - return pyarrow_table_from_pydict(columns) + return pyarrow_table_from_pydict(pa_cols) @staticmethod def _concat_tables(tables: List[Block]) -> Block: @@ -216,40 +187,6 @@ def from_bytes(cls, data: bytes) -> "ArrowBlockAccessor": reader = pyarrow.ipc.open_stream(data) return cls(reader.read_all()) - @staticmethod - def numpy_to_block( - batch: Union[Dict[str, np.ndarray], Dict[str, list]], - ) -> "pyarrow.Table": - from ray.data.extensions.object_extension import ( - ArrowPythonObjectArray, - object_extension_type_allowed, - ) - from ray.data.extensions.tensor_extension import ArrowTensorArray - - validate_numpy_batch(batch) - - new_batch = {} - for col_name, col in batch.items(): - # Coerce to np.ndarray format if possible. - col = convert_udf_returns_to_numpy(col) - # Use Arrow's native *List types for 1-dimensional ndarrays. - if col.dtype.type is np.object_ or col.ndim > 1: - try: - col = ArrowTensorArray.from_numpy(col, col_name) - except ArrowConversionError as e: - if object_extension_type_allowed() and is_object_fixable_error(e): - if log_once(f"arrow_object_pickle_{col_name}"): - logger.debug( - f"Failed to interpret {col_name} as " - "multi-dimensional arrays. It will be pickled." 
- ) - col = ArrowPythonObjectArray.from_objects(col) - else: - raise - - new_batch[col_name] = col - return pyarrow_table_from_pydict(new_batch) - @staticmethod def _build_tensor_row( row: ArrowRow, col_name: str = TENSOR_COLUMN_NAME diff --git a/python/ray/data/_internal/arrow_ops/transform_pyarrow.py b/python/ray/data/_internal/arrow_ops/transform_pyarrow.py index 093588ca8f34..d51f9a990ad1 100644 --- a/python/ray/data/_internal/arrow_ops/transform_pyarrow.py +++ b/python/ray/data/_internal/arrow_ops/transform_pyarrow.py @@ -236,6 +236,7 @@ def concat(blocks: List["pyarrow.Table"]) -> "pyarrow.Table": schema = unify_schemas(schemas_to_unify) except Exception as e: raise ArrowConversionError(str(blocks)) from e + if ( any(isinstance(type_, pa.ExtensionType) for type_ in schema.types) or cols_with_null_list @@ -246,6 +247,7 @@ def concat(blocks: List["pyarrow.Table"]) -> "pyarrow.Table": col_chunked_arrays = [] for block in blocks: col_chunked_arrays.append(block.column(col_name)) + if isinstance(schema.field(col_name).type, tensor_types): # For our tensor extension types, manually construct a chunked array # containing chunks from all blocks. This is to handle diff --git a/python/ray/data/_internal/numpy_support.py b/python/ray/data/_internal/numpy_support.py index 9e6a7c305dfb..d04060fc831e 100644 --- a/python/ray/data/_internal/numpy_support.py +++ b/python/ray/data/_internal/numpy_support.py @@ -1,4 +1,5 @@ import collections +import logging from datetime import datetime from typing import Any, Dict, List, Union @@ -7,6 +8,8 @@ from ray.air.util.tensor_extensions.utils import create_ragged_ndarray from ray.data._internal.util import _truncated_repr +logger = logging.getLogger(__name__) + def is_array_like(value: Any) -> bool: """Checks whether objects are array-like, excluding numpy scalars.""" @@ -66,7 +69,7 @@ def _convert_datetime_list_to_array(datetime_list: List[datetime]) -> np.ndarray ) -def convert_udf_returns_to_numpy(udf_return_col: Any) -> Any: +def convert_to_numpy(column_values: Any) -> np.ndarray: """Convert UDF columns (output of map_batches) to numpy, if possible. This includes lists of scalars, objects supporting the array protocol, and lists @@ -80,36 +83,31 @@ def convert_udf_returns_to_numpy(udf_return_col: Any) -> Any: ValueError if an input was array-like but we failed to convert it to an array. """ - if isinstance(udf_return_col, np.ndarray): + if isinstance(column_values, np.ndarray): # No copy/conversion needed, just keep it verbatim. - return udf_return_col + return column_values - if isinstance(udf_return_col, list): - if len(udf_return_col) == 1 and isinstance(udf_return_col[0], np.ndarray): + elif isinstance(column_values, list): + if len(column_values) == 1 and isinstance(column_values[0], np.ndarray): # Optimization to avoid conversion overhead from list to np.array. - udf_return_col = np.expand_dims(udf_return_col[0], axis=0) - return udf_return_col + return np.expand_dims(column_values[0], axis=0) - if all(isinstance(elem, datetime) for elem in udf_return_col): - return _convert_datetime_list_to_array(udf_return_col) + if all(isinstance(elem, datetime) for elem in column_values): + return _convert_datetime_list_to_array(column_values) # Try to convert list values into an numpy array via # np.array(), so users don't need to manually cast. # NOTE: we don't cast generic iterables, since types like # `str` are also Iterable. 
try: - # Try to cast the inner scalars to numpy as well, to avoid unnecessarily - # creating an inefficient array of array of object dtype. - # But don't convert if the list is nested. Because if sub-lists have - # heterogeneous shapes, we need to create a ragged ndarray. - if not is_nested_list(udf_return_col) and all( - is_valid_udf_return(e) for e in udf_return_col - ): + # Convert array-like objects (like torch.Tensor) to `np.ndarray`s + if all(is_array_like(e) for e in column_values): # Use np.asarray() instead of np.array() to avoid copying if possible. - udf_return_col = [np.asarray(e) for e in udf_return_col] + column_values = [np.asarray(e) for e in column_values] + shapes = set() has_object = False - for e in udf_return_col: + for e in column_values: if isinstance(e, np.ndarray): shapes.add((e.dtype, e.shape)) elif isinstance(e, bytes): @@ -122,24 +120,48 @@ def convert_udf_returns_to_numpy(udf_return_col: Any) -> Any: has_object = True elif not np.isscalar(e): has_object = True + + # When column values are + # - Arrays of heterogeneous shapes + # - Byte-strings (viewed as arrays of heterogeneous shapes) + # - Non-scalar objects (tuples, lists, arbitrary object types) + # + # Custom "ragged ndarray" is created, represented as an array of + # references (ie ndarray with dtype=object) if has_object or len(shapes) > 1: # This util works around some limitations of np.array(dtype=object). - udf_return_col = create_ragged_ndarray(udf_return_col) + return create_ragged_ndarray(column_values) else: - udf_return_col = np.array(udf_return_col) + return np.array(column_values) + except Exception as e: + logger.error( + f"Failed to convert column values to numpy array: " + f"{_truncated_repr(column_values)}", + exc_info=e, + ) + raise ValueError( "Failed to convert column values to numpy array: " - f"({_truncated_repr(udf_return_col)}): {e}." - ) - elif hasattr(udf_return_col, "__array__"): + f"({_truncated_repr(column_values)}): {e}." + ) from e + + elif is_array_like(column_values): # Converts other array-like objects such as torch.Tensor. try: - udf_return_col = np.array(udf_return_col) + # Use np.asarray() instead of np.array() to avoid copying if possible. + return np.asarray(column_values) except Exception as e: + logger.error( + f"Failed to convert column values to numpy array: " + f"{_truncated_repr(column_values)}", + exc_info=e, + ) + raise ValueError( "Failed to convert column values to numpy array: " - f"({_truncated_repr(udf_return_col)}): {e}." - ) + f"({_truncated_repr(column_values)}): {e}." 
+ ) from e - return udf_return_col + else: + return column_values diff --git a/python/ray/data/_internal/pandas_block.py b/python/ray/data/_internal/pandas_block.py index 04ff4a35a7e0..119469b46c1b 100644 --- a/python/ray/data/_internal/pandas_block.py +++ b/python/ray/data/_internal/pandas_block.py @@ -17,10 +17,8 @@ import numpy as np from ray.air.constants import TENSOR_COLUMN_NAME -from ray.data._internal.numpy_support import ( - convert_udf_returns_to_numpy, - validate_numpy_batch, -) +from ray.air.util.tensor_extensions.utils import _is_ndarray_tensor +from ray.data._internal.numpy_support import convert_to_numpy, validate_numpy_batch from ray.data._internal.row import TableRow from ray.data._internal.table_block import TableBlockAccessor, TableBlockBuilder from ray.data._internal.util import find_partitions @@ -114,14 +112,20 @@ def __init__(self): @staticmethod def _table_from_pydict(columns: Dict[str, List[Any]]) -> "pandas.DataFrame": pandas = lazy_import_pandas() - for key, value in columns.items(): - if key == TENSOR_COLUMN_NAME or isinstance( - next(iter(value), None), np.ndarray - ): + + pd_columns: Dict[str, Any] = {} + + for col_name, col_vals in columns.items(): + np_col_vals = convert_to_numpy(col_vals) + + if col_name == TENSOR_COLUMN_NAME or _is_ndarray_tensor(np_col_vals): from ray.data.extensions.tensor_extension import TensorArray - columns[key] = TensorArray(value) - return pandas.DataFrame(columns) + pd_columns[col_name] = TensorArray(np_col_vals) + else: + pd_columns[col_name] = np_col_vals + + return pandas.DataFrame(pd_columns) @staticmethod def _concat_tables(tables: List["pandas.DataFrame"]) -> "pandas.DataFrame": @@ -283,10 +287,6 @@ def numpy_to_block( ) -> "pandas.DataFrame": validate_numpy_batch(batch) - batch = { - column_name: convert_udf_returns_to_numpy(column) - for column_name, column in batch.items() - } block = PandasBlockBuilder._table_from_pydict(batch) return block diff --git a/python/ray/data/_internal/table_block.py b/python/ray/data/_internal/table_block.py index 55ea4fcc553a..a8995fc6703b 100644 --- a/python/ray/data/_internal/table_block.py +++ b/python/ray/data/_internal/table_block.py @@ -15,9 +15,10 @@ from ray.air.constants import TENSOR_COLUMN_NAME from ray.data._internal.block_builder import BlockBuilder -from ray.data._internal.numpy_support import convert_udf_returns_to_numpy, is_array_like +from ray.data._internal.numpy_support import is_array_like from ray.data._internal.row import TableRow from ray.data._internal.size_estimator import SizeEstimator +from ray.data._internal.util import MiB from ray.data.block import Block, BlockAccessor if TYPE_CHECKING: @@ -28,7 +29,7 @@ # The max size of Python tuples to buffer before compacting them into a # table in the BlockBuilder. 
-MAX_UNCOMPACTED_SIZE_BYTES = 50 * 1024 * 1024 +MAX_UNCOMPACTED_SIZE_BYTES = 50 * MiB class TableBlockBuilder(BlockBuilder): @@ -121,14 +122,13 @@ def will_build_yield_copy(self) -> bool: return self._concat_would_copy() and len(self._tables) > 1 def build(self) -> Block: - columns = { - key: convert_udf_returns_to_numpy(col) for key, col in self._columns.items() - } - if columns: - tables = [self._table_from_pydict(columns)] + if self._columns: + tables = [self._table_from_pydict(self._columns)] else: tables = [] + tables.extend(self._tables) + if len(tables) > 0: return self._concat_tables(tables) else: @@ -149,10 +149,7 @@ def _compact_if_needed(self) -> None: assert self._columns if self._uncompacted_size.size_bytes() < MAX_UNCOMPACTED_SIZE_BYTES: return - columns = { - key: convert_udf_returns_to_numpy(col) for key, col in self._columns.items() - } - block = self._table_from_pydict(columns) + block = self._table_from_pydict(self._columns) self.add_block(block) self._uncompacted_size = SizeEstimator() self._columns.clear() diff --git a/python/ray/data/_internal/util.py b/python/ray/data/_internal/util.py index 5e8c921c3733..1d0b70cf6a6c 100644 --- a/python/ray/data/_internal/util.py +++ b/python/ray/data/_internal/util.py @@ -26,7 +26,6 @@ import ray from ray._private.utils import _get_pyarrow_version -from ray.data._internal.arrow_ops.transform_pyarrow import unify_schemas from ray.data.context import DEFAULT_READ_OP_MIN_NUM_BLOCKS, WARN_PREFIX, DataContext if TYPE_CHECKING: @@ -41,6 +40,12 @@ logger = logging.getLogger(__name__) + +KiB = 1024 # bytes +MiB = 1024 * KiB +GiB = 1024 * MiB + + # NOTE: Make sure that these lower and upper bounds stay in sync with version # constraints given in python/setup.py. # Inclusive minimum pyarrow version. @@ -707,6 +712,7 @@ def unify_block_metadata_schema( """ # Some blocks could be empty, in which case we cannot get their schema. # TODO(ekl) validate schema is the same across different blocks. + from ray.data._internal.arrow_ops.transform_pyarrow import unify_schemas # First check if there are blocks with computed schemas, then unify # valid schemas from all such blocks. 
diff --git a/python/ray/data/block.py b/python/ray/data/block.py index 15cf6b68b20c..56a3c6dbc40d 100644 --- a/python/ray/data/block.py +++ b/python/ray/data/block.py @@ -1,4 +1,5 @@ import collections +import logging import os import time from dataclasses import dataclass @@ -25,6 +26,7 @@ from ray.air.util.tensor_extensions.arrow import ArrowConversionError from ray.data._internal.util import _check_pyarrow_version, _truncated_repr from ray.types import ObjectRef +from ray.util import log_once from ray.util.annotations import DeveloperAPI import psutil @@ -57,6 +59,9 @@ Block = Union["pyarrow.Table", "pandas.DataFrame"] +logger = logging.getLogger(__name__) + + @DeveloperAPI class BlockType(Enum): ARROW = "arrow" @@ -374,6 +379,12 @@ def batch_to_block( try: return cls.batch_to_arrow_block(batch) except ArrowConversionError as e: + if log_once("_fallback_to_pandas_block_warning"): + logger.warning( + f"Failed to convert batch to Arrow due to: {e}; " + f"falling back to Pandas block" + ) + if block_type is None: return cls.batch_to_pandas_block(batch) else: @@ -386,9 +397,9 @@ def batch_to_block( @classmethod def batch_to_arrow_block(cls, batch: Dict[str, Any]) -> Block: """Create an Arrow block from user-facing data formats.""" - from ray.data._internal.arrow_block import ArrowBlockAccessor + from ray.data._internal.arrow_block import ArrowBlockBuilder - return ArrowBlockAccessor.numpy_to_block(batch) + return ArrowBlockBuilder._table_from_pydict(batch) @classmethod def batch_to_pandas_block(cls, batch: Dict[str, Any]) -> Block: diff --git a/python/ray/data/context.py b/python/ray/data/context.py index 5ed9b4fe68ef..347d3da68372 100644 --- a/python/ray/data/context.py +++ b/python/ray/data/context.py @@ -80,6 +80,8 @@ # V2 in turn relies on int64 offsets, therefore having a limit of ~9Eb (exabytes) DEFAULT_USE_ARROW_TENSOR_V2 = env_bool("RAY_DATA_USE_ARROW_TENSOR_V2", True) +DEFAULT_ENABLE_FALLBACK_TO_ARROW_OBJECT_EXT_TYPE = True + DEFAULT_AUTO_LOG_STATS = False DEFAULT_VERBOSE_STATS_LOG = False @@ -222,6 +224,12 @@ class DataContext: read_op_min_num_blocks: Minimum number of read output blocks for a dataset. enable_tensor_extension_casting: Whether to automatically cast NumPy ndarray columns in Pandas DataFrames to tensor extension columns. + use_arrow_tensor_v2: Config enabling V2 version of ArrowTensorArray supporting + tensors > 2Gb in size (off by default) + enable_fallback_to_arrow_object_ext_type: Enables fallback to serialize column + values not suppported by Arrow natively (like user-defined custom Python + classes for ex, etc) using `ArrowPythonObjectType` (simply serializing + these as bytes) enable_auto_log_stats: Whether to automatically log stats after execution. If disabled, you can still manually print stats with ``Dataset.stats()``. verbose_stats_logs: Whether stats logs should be verbose. 
This includes fields @@ -293,6 +301,9 @@ class DataContext: read_op_min_num_blocks: int = DEFAULT_READ_OP_MIN_NUM_BLOCKS enable_tensor_extension_casting: bool = DEFAULT_ENABLE_TENSOR_EXTENSION_CASTING use_arrow_tensor_v2: bool = DEFAULT_USE_ARROW_TENSOR_V2 + enable_fallback_to_arrow_object_ext_type = ( + DEFAULT_ENABLE_FALLBACK_TO_ARROW_OBJECT_EXT_TYPE + ) enable_auto_log_stats: bool = DEFAULT_AUTO_LOG_STATS verbose_stats_logs: bool = DEFAULT_VERBOSE_STATS_LOG trace_allocations: bool = DEFAULT_TRACE_ALLOCATIONS diff --git a/python/ray/data/extensions/__init__.py b/python/ray/data/extensions/__init__.py index bebf3c2b2a5c..517b4fe7a3a2 100644 --- a/python/ray/data/extensions/__init__.py +++ b/python/ray/data/extensions/__init__.py @@ -8,7 +8,7 @@ ArrowPythonObjectType, PythonObjectArray, PythonObjectDtype, - object_extension_type_allowed, + _object_extension_type_allowed, ) from ray.data.extensions.tensor_extension import ( ArrowConversionError, @@ -40,6 +40,6 @@ "ArrowPythonObjectScalar", "PythonObjectArray", "PythonObjectDtype", - "object_extension_type_allowed", + "_object_extension_type_allowed", "get_arrow_extension_tensor_types", ] diff --git a/python/ray/data/extensions/object_extension.py b/python/ray/data/extensions/object_extension.py index a5daf4811a05..42ab20a231c6 100644 --- a/python/ray/data/extensions/object_extension.py +++ b/python/ray/data/extensions/object_extension.py @@ -2,7 +2,7 @@ ArrowPythonObjectArray, ArrowPythonObjectScalar, ArrowPythonObjectType, - object_extension_type_allowed, + _object_extension_type_allowed, ) from ray.air.util.object_extensions.pandas import ( # noqa: F401 PythonObjectArray, diff --git a/python/ray/data/tests/preprocessors/test_encoder.py b/python/ray/data/tests/preprocessors/test_encoder.py index 46b719ba6e83..bfae00596439 100644 --- a/python/ray/data/tests/preprocessors/test_encoder.py +++ b/python/ray/data/tests/preprocessors/test_encoder.py @@ -298,7 +298,7 @@ def test_one_hot_encoder_with_max_categories(): expected_df = pd.DataFrame( { "A": col_a, - "B": [[0, 1], [1, 0], [0, 0], [1, 0]], + "B": [[0, 0], [1, 0], [0, 1], [1, 0]], "C": [[1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1]], } ) diff --git a/python/ray/data/tests/test_arrow_block.py b/python/ray/data/tests/test_arrow_block.py index 43888ace8c57..e72e6f2c4fc4 100644 --- a/python/ray/data/tests/test_arrow_block.py +++ b/python/ray/data/tests/test_arrow_block.py @@ -7,7 +7,7 @@ import ray from ray._private.test_utils import run_string_as_driver from ray.data._internal.arrow_block import ArrowBlockAccessor -from ray.data.extensions.object_extension import object_extension_type_allowed +from ray.data.extensions.object_extension import _object_extension_type_allowed def test_append_column(ray_start_regular_shared): @@ -46,7 +46,7 @@ def test_register_arrow_types(tmp_path): @pytest.mark.skipif( - not object_extension_type_allowed(), reason="Object extension type not supported." + not _object_extension_type_allowed(), reason="Object extension type not supported." 
 )
 def test_dict_doesnt_fallback_to_pandas_block(ray_start_regular_shared):
     # If the UDF returns a column with dict, previously, we would
diff --git a/python/ray/data/tests/test_arrow_serialization.py b/python/ray/data/tests/test_arrow_serialization.py
index b3e8b10c19b1..232ed32cc749 100644
--- a/python/ray/data/tests/test_arrow_serialization.py
+++ b/python/ray/data/tests/test_arrow_serialization.py
@@ -26,7 +26,7 @@
 from ray._private.utils import _get_pyarrow_version
 from ray.data.extensions.object_extension import (
     ArrowPythonObjectArray,
-    object_extension_type_allowed,
+    _object_extension_type_allowed,
 )
 from ray.data.extensions.tensor_extension import (
     ArrowTensorArray,
@@ -423,7 +423,7 @@ def pickled_objects_array():
     (lazy_fixture("complex_nested_array"), 0.1),
 ]

-if object_extension_type_allowed():
+if _object_extension_type_allowed():
     pytest_custom_serialization_arrays.append(
         # Array of pickled objects
         (lazy_fixture("pickled_objects_array"), 0.1),
@@ -550,7 +550,7 @@ def fn(batch: list):


 @pytest.mark.skipif(
-    not object_extension_type_allowed(), reason="Object extension not supported."
+    not _object_extension_type_allowed(), reason="Object extension not supported."
 )
 def test_arrow_object_and_array_support(ray_start_regular_shared):
     obj = types.SimpleNamespace(some_attribute="test")
diff --git a/python/ray/data/tests/test_binary.py b/python/ray/data/tests/test_binary.py
index f1735da802f7..ba0eb7b7a9ba 100644
--- a/python/ray/data/tests/test_binary.py
+++ b/python/ray/data/tests/test_binary.py
@@ -1,5 +1,7 @@
 import os
 from io import BytesIO
+from tempfile import TemporaryDirectory
+from typing import Optional

 import pandas as pd
 import pyarrow as pa
@@ -9,6 +11,7 @@

 import ray
 from ray.data import Schema
+from ray.data._internal.util import GiB, MiB
 from ray.data.datasource import (
     BaseFileMetadataProvider,
     FastFileMetadataProvider,
@@ -195,6 +198,69 @@ def skip_unpartitioned(kv_dict):
     )


+def _gen_chunked_binary(
+    dir_path: str, total_size: int, max_file_size: Optional[int] = None
+):
+    # NOTE: This util writes even a single large binary file in chunks,
+    #       to reduce the memory required while doing so
+    chunk_size = max_file_size or 256 * MiB
+    num_chunks = total_size // chunk_size
+    remainder = total_size % chunk_size
+
+    if max_file_size is not None and max_file_size < total_size:
+        for i in range(num_chunks):
+            filename = f"part_{i}.bin"
+            with open(f"{dir_path}/{filename}", "wb") as f:
+                f.write(b"a" * chunk_size)
+
+            print(f">>> Written file: {filename}")
+
+        if remainder:
+            with open(f"{dir_path}/part_{num_chunks}.bin", "wb") as f:
+                f.write(b"a" * remainder)
+
+    else:
+        with open(f"{dir_path}/chunk.bin", "wb") as f:
+            for i in range(num_chunks):
+                f.write(b"a" * chunk_size)
+
+                print(f">>> Written chunk #{i}")
+
+            if remainder:
+                f.write(b"a" * remainder)
+
+    print(f">>> Wrote chunked dataset at: {dir_path}")
+
+
+@pytest.mark.parametrize(
+    "col_name",
+    [
+        "bytes",
+        # TODO fix numpy conversion
+        # "text",
+    ],
+)
+def test_single_row_gt_2gb(ray_start_regular_shared, col_name):
+    with TemporaryDirectory() as tmp_dir:
+        target_binary_size_gb = 2.1
+
+        # Write out a single file > 2 GiB
+        _gen_chunked_binary(tmp_dir, total_size=int(target_binary_size_gb * GiB))
+
+        def _id(row):
+            bs = row[col_name]
+            assert round(len(bs) / GiB, 1) == target_binary_size_gb
+            return row
+
+        if col_name == "text":
+            ds = ray.data.read_text(tmp_dir)
+        elif col_name == "bytes":
+            ds = ray.data.read_binary_files(tmp_dir)
+
+        total = ds.map(_id).count()
+
+        assert total == 1
+
+
 if __name__ == "__main__":
     import sys
diff --git a/python/ray/data/tests/test_dynamic_block_split.py
b/python/ray/data/tests/test_dynamic_block_split.py index cda00239331f..398831aaea9c 100644 --- a/python/ray/data/tests/test_dynamic_block_split.py +++ b/python/ray/data/tests/test_dynamic_block_split.py @@ -10,7 +10,7 @@ import ray from ray.data import Dataset -from ray.data._internal.arrow_block import ArrowBlockAccessor +from ray.data._internal.arrow_block import ArrowBlockBuilder from ray.data._internal.datasource.csv_datasource import CSVDatasource from ray.data.block import BlockMetadata from ray.data.datasource import Datasource @@ -68,7 +68,7 @@ def _blocks_generator(): (self.num_rows_per_batch, self.row_size), dtype=np.uint8 ) } - block = ArrowBlockAccessor.numpy_to_block(batch) + block = ArrowBlockBuilder._table_from_pydict(batch) yield block else: yield pd.DataFrame( diff --git a/python/ray/data/tests/test_numpy_support.py b/python/ray/data/tests/test_numpy_support.py index c14038918c0a..ec67bcf689bb 100644 --- a/python/ray/data/tests/test_numpy_support.py +++ b/python/ray/data/tests/test_numpy_support.py @@ -6,6 +6,7 @@ import ray from ray.air.util.tensor_extensions.utils import create_ragged_ndarray +from ray.data import DataContext from ray.data.tests.conftest import * # noqa from ray.tests.conftest import * # noqa @@ -27,22 +28,31 @@ def assert_structure_equals(a, b): assert a.dtype == b.dtype assert a.shape == b.shape for i in range(len(a)): - assert np.array_equiv(a[i], b[i]), (i, a, b) + assert np.array_equal(a[i], b[i]), (i, a[i], b[i]) -def test_list_of_scalars(ray_start_regular_shared): +def test_list_of_scalars(ray_start_regular_shared, restore_data_context): + # Disable (automatic) fallback to `ArrowPythonObjectType` extension type + DataContext.get_current().enable_fallback_to_arrow_object_ext_type = False + data = [1, 2, 3] output = do_map_batches(data) assert_structure_equals(output, np.array([1, 2, 3], dtype=np.int64)) -def test_list_of_numpy_scalars(ray_start_regular_shared): +def test_list_of_numpy_scalars(ray_start_regular_shared, restore_data_context): + # Disable (automatic) fallback to `ArrowPythonObjectType` extension type + DataContext.get_current().enable_fallback_to_arrow_object_ext_type = False + data = [np.int64(1), np.int64(2), np.int64(3)] output = do_map_batches(data) assert_structure_equals(output, np.array([1, 2, 3], dtype=np.int64)) -def test_list_of_objects(ray_start_regular_shared): +def test_list_of_objects(ray_start_regular_shared, restore_data_context): + # NOTE: Fallback is enabled by default, this is purely for notational purposes + DataContext.get_current().enable_fallback_to_arrow_object_ext_type = True + data = [1, 2, 3, UserObj()] output = do_map_batches(data) assert_structure_equals(output, np.array([1, 2, 3, UserObj()])) @@ -88,34 +98,51 @@ def test_list_of_objects(ray_start_regular_shared): ), ], ) -def test_list_of_datetimes(data, expected_output, ray_start_regular_shared): +def test_list_of_datetimes( + data, expected_output, ray_start_regular_shared, restore_data_context +): + # Disable (automatic) fallback to `ArrowPythonObjectType` extension type + DataContext.get_current().enable_fallback_to_arrow_object_ext_type = False + output = do_map_batches(data) assert_structure_equals(output, expected_output) -def test_array_like(ray_start_regular_shared): +def test_array_like(ray_start_regular_shared, restore_data_context): + # Disable (automatic) fallback to `ArrowPythonObjectType` extension type + DataContext.get_current().enable_fallback_to_arrow_object_ext_type = False + data = torch.Tensor([1, 2, 3]) output = 
do_map_batches(data) assert_structure_equals(output, np.array([1.0, 2.0, 3.0], dtype=np.float32)) -def test_list_of_arrays(ray_start_regular_shared): +def test_list_of_arrays(ray_start_regular_shared, restore_data_context): + # Disable (automatic) fallback to `ArrowPythonObjectType` extension type + DataContext.get_current().enable_fallback_to_arrow_object_ext_type = False + data = [np.array([1, 2, 3]), np.array([4, 5, 6])] output = do_map_batches(data) assert_structure_equals(output, np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int64)) -def test_list_of_array_like(ray_start_regular_shared): +def test_list_of_array_like(ray_start_regular_shared, restore_data_context): + # Disable (automatic) fallback to `ArrowPythonObjectType` extension type + DataContext.get_current().enable_fallback_to_arrow_object_ext_type = False + data = [torch.Tensor([1, 2, 3]), torch.Tensor([4, 5, 6])] output = do_map_batches(data) assert_structure_equals(output, np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)) -def test_ragged_array_like(ray_start_regular_shared): +def test_ragged_tensors_map_batches(ray_start_regular_shared, restore_data_context): + # Disable (automatic) fallback to `ArrowPythonObjectType` extension type + DataContext.get_current().enable_fallback_to_arrow_object_ext_type = False + data = [torch.Tensor([1, 2, 3]), torch.Tensor([1, 2])] output = do_map_batches(data) assert_structure_equals( - output, np.array([np.array([1, 2, 3]), np.array([1, 2])], dtype=object) + output, create_ragged_ndarray([np.array([1, 2, 3]), np.array([1, 2])]) ) data = [torch.zeros((3, 5, 10)), torch.zeros((3, 8, 8))] @@ -125,23 +152,42 @@ def test_ragged_array_like(ray_start_regular_shared): ) -def test_scalar_nested_arrays(ray_start_regular_shared): +def test_scalar_nested_arrays(ray_start_regular_shared, restore_data_context): + # Disable (automatic) fallback to `ArrowPythonObjectType` extension type + DataContext.get_current().enable_fallback_to_arrow_object_ext_type = False + data = [[[1]], [[2]]] output = do_map_batches(data) - assert_structure_equals(output, create_ragged_ndarray(data)) + + assert_structure_equals( + output, + create_ragged_ndarray( + [np.array([1], dtype=np.object_), np.array([2], dtype=np.object_)] + ), + ) -def test_scalar_lists_not_converted(ray_start_regular_shared): +def test_scalar_lists_not_converted(ray_start_regular_shared, restore_data_context): + # Disable (automatic) fallback to `ArrowPythonObjectType` extension type + DataContext.get_current().enable_fallback_to_arrow_object_ext_type = False + data = [[1, 2], [1, 2]] output = do_map_batches(data) - assert_structure_equals(output, create_ragged_ndarray([[1, 2], [1, 2]])) + assert_structure_equals( + output, create_ragged_ndarray([np.array([1, 2]), np.array([1, 2])]) + ) data = [[1, 2, 3], [1, 2]] output = do_map_batches(data) - assert_structure_equals(output, create_ragged_ndarray([[1, 2, 3], [1, 2]])) + assert_structure_equals( + output, create_ragged_ndarray([np.array([1, 2, 3]), np.array([1, 2])]) + ) -def test_scalar_numpy(ray_start_regular_shared): +def test_scalar_numpy(ray_start_regular_shared, restore_data_context): + # Disable (automatic) fallback to `ArrowPythonObjectType` extension type + DataContext.get_current().enable_fallback_to_arrow_object_ext_type = False + data = np.int64(1) ds = ray.data.range(2, override_num_blocks=1) ds = ds.map(lambda x: {"output": data}) @@ -149,7 +195,10 @@ def test_scalar_numpy(ray_start_regular_shared): assert_structure_equals(output, np.array([1, 1], dtype=np.int64)) -def 
test_scalar_arrays(ray_start_regular_shared): +def test_scalar_arrays(ray_start_regular_shared, restore_data_context): + # Disable (automatic) fallback to `ArrowPythonObjectType` extension type + DataContext.get_current().enable_fallback_to_arrow_object_ext_type = False + data = np.array([1, 2, 3]) ds = ray.data.range(2, override_num_blocks=1) ds = ds.map(lambda x: {"output": data}) @@ -157,7 +206,10 @@ def test_scalar_arrays(ray_start_regular_shared): assert_structure_equals(output, np.array([[1, 2, 3], [1, 2, 3]], dtype=np.int64)) -def test_bytes(ray_start_regular_shared): +def test_bytes(ray_start_regular_shared, restore_data_context): + # Disable (automatic) fallback to `ArrowPythonObjectType` extension type + DataContext.get_current().enable_fallback_to_arrow_object_ext_type = False + """Tests that bytes are converted to object dtype instead of zero-terminated.""" data = b"\x1a\n\x00\n\x1a" ds = ray.data.range(1, override_num_blocks=1) @@ -166,7 +218,10 @@ def test_bytes(ray_start_regular_shared): assert_structure_equals(output, np.array([b"\x1a\n\x00\n\x1a"], dtype=object)) -def test_scalar_array_like(ray_start_regular_shared): +def test_uniform_tensors(ray_start_regular_shared, restore_data_context): + # Disable (automatic) fallback to `ArrowPythonObjectType` extension type + DataContext.get_current().enable_fallback_to_arrow_object_ext_type = False + data = torch.Tensor([1, 2, 3]) ds = ray.data.range(2, override_num_blocks=1) ds = ds.map(lambda x: {"output": data}) @@ -174,17 +229,24 @@ def test_scalar_array_like(ray_start_regular_shared): assert_structure_equals(output, np.array([[1, 2, 3], [1, 2, 3]], dtype=np.float32)) -def test_scalar_ragged_arrays(ray_start_regular_shared): +def test_scalar_ragged_arrays(ray_start_regular_shared, restore_data_context): + # Disable (automatic) fallback to `ArrowPythonObjectType` extension type + DataContext.get_current().enable_fallback_to_arrow_object_ext_type = False + data = [np.array([1, 2, 3]), np.array([1, 2])] ds = ray.data.range(2, override_num_blocks=1) ds = ds.map(lambda x: {"output": data[x["id"]]}) output = ds.take_batch()["output"] + assert_structure_equals( output, np.array([np.array([1, 2, 3]), np.array([1, 2])], dtype=object) ) -def test_scalar_ragged_array_like(ray_start_regular_shared): +def test_ragged_tensors(ray_start_regular_shared, restore_data_context): + # Disable (automatic) fallback to `ArrowPythonObjectType` extension type + DataContext.get_current().enable_fallback_to_arrow_object_ext_type = False + data = [torch.Tensor([1, 2, 3]), torch.Tensor([1, 2])] ds = ray.data.range(2, override_num_blocks=1) ds = ds.map(lambda x: {"output": data[x["id"]]}) @@ -202,7 +264,10 @@ def test_scalar_ragged_array_like(ray_start_regular_shared): ) -def test_nested_ragged_arrays(ray_start_regular_shared): +def test_nested_ragged_arrays(ray_start_regular_shared, restore_data_context): + # Disable (automatic) fallback to `ArrowPythonObjectType` extension type + DataContext.get_current().enable_fallback_to_arrow_object_ext_type = False + data = [ {"a": [[1], [2, 3]]}, {"a": [[4, 5], [6]]}, @@ -216,10 +281,26 @@ def f(row): # https://github.com/ray-project/ray/issues/35340 -def test_complex_ragged_arrays(ray_start_regular_shared): +def test_complex_ragged_arrays(ray_start_regular_shared, restore_data_context): + # Disable (automatic) fallback to `ArrowPythonObjectType` extension type + DataContext.get_current().enable_fallback_to_arrow_object_ext_type = False + data = [[{"a": 1}, {"a": 2}, {"a": 3}], [{"b": 1}]] output = 
do_map_batches(data) - assert_structure_equals(output, create_ragged_ndarray(data)) + + # Assert resulting objects are coerced to appropriate shape, following + # table's schema + assert_structure_equals( + output, + create_ragged_ndarray( + [ + np.array( + [{"a": 1, "b": None}, {"a": 2, "b": None}, {"a": 3, "b": None}] + ), + np.array([{"a": None, "b": 1}]), + ] + ), + ) data = ["hi", 1, None, [[[[]]]], {"a": [[{"b": 2, "c": UserObj()}]]}, UserObj()] output = do_map_batches(data) diff --git a/python/ray/data/tests/test_pandas_block.py b/python/ray/data/tests/test_pandas_block.py index 725d2c8ce1bb..4585d0e2a133 100644 --- a/python/ray/data/tests/test_pandas_block.py +++ b/python/ray/data/tests/test_pandas_block.py @@ -4,7 +4,7 @@ import ray import ray.data from ray.data._internal.pandas_block import PandasBlockAccessor -from ray.data.extensions.object_extension import object_extension_type_allowed +from ray.data.extensions.object_extension import _object_extension_type_allowed def test_append_column(ray_start_regular_shared): @@ -20,7 +20,7 @@ def test_append_column(ray_start_regular_shared): @pytest.mark.skipif( - object_extension_type_allowed(), reason="Objects can be put into Arrow" + _object_extension_type_allowed(), reason="Objects can be put into Arrow" ) def test_dict_fallback_to_pandas_block(ray_start_regular_shared): # If the UDF returns a column with dict, this throws diff --git a/python/ray/data/tests/test_strict_mode.py b/python/ray/data/tests/test_strict_mode.py index 49b4b9cc4e37..e34c2f428329 100644 --- a/python/ray/data/tests/test_strict_mode.py +++ b/python/ray/data/tests/test_strict_mode.py @@ -181,7 +181,7 @@ def test_strict_schema(ray_start_regular_shared): from ray.data._internal.pandas_block import PandasBlockSchema from ray.data.extensions.object_extension import ( ArrowPythonObjectType, - object_extension_type_allowed, + _object_extension_type_allowed, ) from ray.data.extensions.tensor_extension import ArrowTensorType @@ -199,7 +199,7 @@ def test_strict_schema(ray_start_regular_shared): ds = ray.data.from_items([{"x": 2, "y": object(), "z": [1, 2]}]) schema = ds.schema() - if object_extension_type_allowed(): + if _object_extension_type_allowed(): assert isinstance(schema.base_schema, pa.lib.Schema) assert schema.names == ["x", "y", "z"] assert schema.types == [ diff --git a/python/ray/data/tests/test_transform_pyarrow.py b/python/ray/data/tests/test_transform_pyarrow.py index 570bd8f6592b..a221bd6c7683 100644 --- a/python/ray/data/tests/test_transform_pyarrow.py +++ b/python/ray/data/tests/test_transform_pyarrow.py @@ -18,7 +18,7 @@ ArrowTensorArray, ArrowTensorType, ArrowVariableShapedTensorType, - object_extension_type_allowed, + _object_extension_type_allowed, ) @@ -199,7 +199,7 @@ def test_arrow_concat_tensor_extension_uniform_but_different(): @pytest.mark.skipif( - not object_extension_type_allowed(), reason="Object extension type not supported." + not _object_extension_type_allowed(), reason="Object extension type not supported." 
 )
 def test_arrow_concat_with_objects():
     obj = types.SimpleNamespace(a=1, b="test")
@@ -458,9 +458,10 @@ def map(x):
     assert op == "map_batches"

     def map_batches(x):
+        row_id = x["id"][0]
         return {
             "id": x["id"],
-            "my_data": data[x["id"][0]],
+            "my_data": [data[row_id]],
         }

     ds = ds.map_batches(map_batches, batch_size=None)
@@ -472,14 +473,14 @@ def map_batches(x):


 @pytest.mark.skipif(
-    object_extension_type_allowed(), reason="Arrow table supports pickled objects"
+    _object_extension_type_allowed(), reason="Arrow table supports pickled objects"
 )
 @pytest.mark.parametrize(
     "op, data",
     [
         ("map", [UnsupportedType(), 1]),
-        ("map_batches", [[None], [1]]),
-        ("map_batches", [[{"a": 1}], [{"a": 2}]]),
+        ("map_batches", [None, 1]),
+        ("map_batches", [{"a": 1}, {"a": 2}]),
     ],
 )
 def test_fallback_to_pandas_on_incompatible_data(
@@ -497,34 +498,59 @@


 @pytest.mark.parametrize(
-    "op, data",
+    "op, data, should_fail, expected_type",
     [
-        ("map", [1, 2**100]),
-        ("map_batches", [[1.0], [2**4]]),
+        # Case A: Upon serializing to Arrow, fall back to `ArrowPythonObjectType`
+        ("map_batches", [1, 2**100], False, ArrowPythonObjectType()),
+        ("map_batches", [1.0, 2**100], False, ArrowPythonObjectType()),
+        ("map_batches", ["1.0", 2**100], False, ArrowPythonObjectType()),
+        # Case B: No fallback to `ArrowPythonObjectType`, hence Arrow enforces the
+        # deduced schema
+        ("map_batches", [1.0, 2**4], True, None),
+        ("map_batches", ["1.0", 2**4], True, None),
     ],
 )
-def test_pyarrow_conversion_error_detailed_info(
+def test_pyarrow_conversion_error_handling(
     ray_start_regular_shared,
     op,
     data,
+    should_fail: bool,
+    expected_type: pa.DataType,
 ):
     # Ray Data infers the block type (arrow or pandas) and the block schema
-    # based on the first UDF output.
-    # In one of the following cases, an error will be raised:
-    # * The first UDF output is compatible with Arrow, but the second is not.
-    # * Both UDF outputs are compatible with Arrow, but the second has a different
-    # schema.
-    # Check that we'll raise an ArrowConversionError with detailed information
-    # about the incompatible data.
+    # based on the first *block* produced by the UDF.
+    #
+    # These tests simulate the following scenarios:
+    # 1. (Case A) The value in the first block is deduced as an Arrow scalar
+    # type, but the second block carries a value that overflows the pa.int64
+    # representation; hence the column will be serialized as
+    # `ArrowPythonObjectType`, coercing the first block to it as well
+    #
    # 2.
(Case B) Both blocks carry proper Arrow scalars that, however, have
    # diverging types, and therefore Arrow fails while merging these blocks
    # into one

     ds = _create_dataset(op, data)

-    with pytest.raises(Exception) as e:
+    if should_fail:
+        with pytest.raises(Exception) as e:
+            ds.materialize()
+
+        error_msg = str(e.value)
+        expected_msg = "ArrowConversionError: Error converting data to Arrow:"
+
+        assert expected_msg in error_msg
+        assert "my_data" in error_msg
+
+    else:
         ds.materialize()

-    error_msg = str(e.value)
-    expected_msg = "ArrowConversionError: Error converting data to Arrow:"
-    assert expected_msg in error_msg, error_msg
-    assert "my_data" in error_msg, error_msg
+        assert ds.schema().base_schema == pa.schema(
+            [pa.field("id", pa.int64()), pa.field("my_data", expected_type)]
+        )
+
+        assert ds.take_all() == [
+            {"id": i, "my_data": data[i]} for i in range(len(data))
+        ]


 if __name__ == "__main__":

From aaac19c8307038021dd96ffc4c2e616fbbf14896 Mon Sep 17 00:00:00 2001
From: Artur Niederfahrenhorst
Date: Tue, 19 Nov 2024 12:32:27 +0100
Subject: [PATCH 013/107] [RLlib] Make opencv-python-headless default over
 opencv-python (#48776)

## Why are these changes needed?

Make `opencv-python-headless` the default requirement for RLlib. This is
necessary because `opencv-python` may not work in many headless environments.

---
 .../ml/rllib-test-requirements.txt |  2 +-
 rllib/utils/images.py | 19 ++++++++++++++++++-
 2 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/python/requirements/ml/rllib-test-requirements.txt b/python/requirements/ml/rllib-test-requirements.txt
index 027c57446e60..c67bf2cec445 100644
--- a/python/requirements/ml/rllib-test-requirements.txt
+++ b/python/requirements/ml/rllib-test-requirements.txt
@@ -5,7 +5,7 @@
 # Atari
 ale_py==0.10.1
 imageio==2.34.2
-opencv-python==4.8.1.78
+opencv-python-headless==4.8.1.78

 # For testing MuJoCo envs with gymnasium.
 mujoco==3.2.4
diff --git a/rllib/utils/images.py b/rllib/utils/images.py
index 0716ea5c45b4..7b0f1601d574 100644
--- a/rllib/utils/images.py
+++ b/rllib/utils/images.py
@@ -1,4 +1,5 @@
 import logging
+import importlib.metadata

 import numpy as np

@@ -6,13 +7,29 @@

 logger = logging.getLogger(__name__)


+@DeveloperAPI
+def is_package_installed(package_name):
+    try:
+        importlib.metadata.version(package_name)
+        return True
+    except importlib.metadata.PackageNotFoundError:
+        return False
+
+
 try:
     import cv2

     cv2.ocl.setUseOpenCL(False)

     logger.debug("CV2 found for image processing.")
-except ImportError:
+except ImportError as e:
+    if is_package_installed("opencv-python"):
+        raise ImportError(
+            f"OpenCV is installed, but we failed to import it. This may be because "
+            f"you need to install `opencv-python-headless` instead of "
+            f"`opencv-python`. 
Error message: {e}", + ) cv2 = None From 856b01d774f844326d91cc5a7eeedce7d8e9f445 Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Tue, 19 Nov 2024 05:39:14 -0800 Subject: [PATCH 014/107] [ci] update requirements for rllib changes (#48787) sync with ci generated compile results; remove packages that are no longer being declared Signed-off-by: Lonnie Liu --- python/requirements_compiled.txt | 114 +++---------------------------- 1 file changed, 11 insertions(+), 103 deletions(-) diff --git a/python/requirements_compiled.txt b/python/requirements_compiled.txt index 45d40d81b7a0..97216e3e74e2 100644 --- a/python/requirements_compiled.txt +++ b/python/requirements_compiled.txt @@ -4,24 +4,18 @@ absl-py==1.4.0 # via # array-record - # chex # dm-control # dm-env - # dopamine-rl # etils # labmaze # ml-collections # mujoco # open-spiel - # optax - # orbax-checkpoint - # recsim # tensorboard # tensorflow # tensorflow-datasets # tensorflow-metadata # tensorflow-probability - # tf-slim accelerate==0.28.0 # via -r /ray/ci/../python/requirements/ml/core-requirements.txt adagio==0.2.4 @@ -76,9 +70,7 @@ aiosqlite==0.19.0 alabaster==0.7.13 # via sphinx ale-py==0.10.1 - # via - # -r /ray/ci/../python/requirements/ml/rllib-test-requirements.txt - # gymnasium + # via -r /ray/ci/../python/requirements/ml/rllib-test-requirements.txt alembic==1.12.1 # via # aim @@ -147,10 +139,6 @@ attrs==21.4.0 # open-spiel # sarif-om # semgrep -autorom==0.6.1 ; platform_machine != "arm64" - # via -r /ray/ci/../python/requirements/ml/rllib-test-requirements.txt -autorom-accept-rom-license==0.6.1 - # via autorom aws-sam-translator==1.81.0 # via cfn-lint aws-xray-sdk==2.12.1 @@ -242,8 +230,6 @@ braceexpand==0.1.7 # via webdataset bracex==2.4 # via wcmatch -cached-property==1.5.2 - # via orbax-checkpoint cachetools==5.3.2 # via # aim @@ -272,16 +258,12 @@ charset-normalizer==3.3.2 # via # requests # snowflake-connector-python -chex==0.1.7 - # via optax clang-format==12.0.1 # via -r /ray/ci/../python/requirements/lint-requirements.txt click==8.1.7 # via # -r /ray/ci/../python/requirements.txt # aim - # autorom - # autorom-accept-rom-license # black # click-option-group # dask @@ -429,7 +411,6 @@ dm-env==1.6 dm-tree==0.1.8 # via # -r /ray/ci/../python/requirements.txt - # chex # dm-control # dm-env # tensorflow-datasets @@ -450,10 +431,6 @@ docutils==0.19 # -r /ray/ci/../python/requirements/lint-requirements.txt # myst-parser # sphinx -dopamine-rl==4.0.5 ; (sys_platform != "darwin" or platform_machine != "arm64") and python_version < "3.12" - # via - # -r /ray/ci/../python/requirements/ml/rllib-test-requirements.txt - # recsim dulwich==0.21.6 # via comet-ml ecdsa==0.18.0 @@ -472,7 +449,7 @@ etils==1.5.2 ; python_version < "3.12" # via # -r /ray/ci/../python/requirements/ml/dl-cpu-requirements.txt # array-record - # orbax-checkpoint + # mujoco # tensorflow-datasets evaluate==0.4.0 # via -r /ray/ci/../python/requirements/ml/train-test-requirements.txt @@ -545,8 +522,6 @@ flatbuffers==23.5.26 # onnxruntime # tensorflow # tf2onnx -flax==0.7.2 - # via dopamine-rl fonttools==4.45.1 # via matplotlib fqdn==1.5.1 @@ -590,10 +565,6 @@ gast==0.4.0 # tensorflow-probability gcs-oauth2-boto-plugin==3.0 # via gsutil -gin-config==0.5.0 - # via - # dopamine-rl - # recsim gitdb==4.0.11 # via gitpython gitpython==3.1.40 @@ -677,6 +648,8 @@ gradio-client==0.6.1 # via gradio graphql-core==3.2.3 # via moto +graphviz==0.20.3 + # via -r /ray/ci/../python/requirements/test-requirements.txt greenlet==3.0.1 # via 
sqlalchemy grpcio==1.66.2 ; sys_platform != "darwin" @@ -704,7 +677,6 @@ gunicorn==20.1.0 gymnasium==1.0.0 # via # -r /ray/ci/../python/requirements.txt - # -r /ray/ci/../python/requirements/ml/rllib-test-requirements.txt # minigrid # pettingzoo # shimmy @@ -773,7 +745,7 @@ idna==3.7 # snowflake-connector-python # trustme # yarl -imageio==2.34.2 ; python_version < "3.12" +imageio==2.34.2 # via # -r /ray/ci/../python/requirements/ml/rllib-test-requirements.txt # moviepy @@ -790,10 +762,8 @@ importlib-metadata==6.11.0 # myst-nb importlib-resources==5.13.0 # via - # ale-py # etils # gradio - # orbax-checkpoint # prophet iniconfig==2.0.0 # via pytest @@ -831,21 +801,6 @@ isort==5.10.1 # via -r /ray/ci/../python/requirements/lint-requirements.txt itsdangerous==2.1.2 # via flask -jax==0.4.13 - # via - # chex - # dopamine-rl - # flax - # optax - # orbax-checkpoint -jax-jumpy==1.0.0 - # via gymnasium -jaxlib==0.4.13 - # via - # chex - # dopamine-rl - # optax - # orbax-checkpoint jedi==0.19.1 # via ipython jinja2==3.1.2 @@ -910,7 +865,6 @@ jsonschema==4.17.3 # jsonschema-spec # jupyter-events # jupyterlab-server - # kaggle-environments # nbformat # openapi-schema-validator # openapi-spec-validator @@ -968,8 +922,6 @@ jupyterlab-widgets==3.0.11 # via ipywidgets jupytext==1.16.3 # via -r /ray/ci/../python/requirements/test-requirements.txt -kaggle-environments==1.7.11 - # via -r /ray/ci/../python/requirements/ml/rllib-test-requirements.txt keras==2.15.0 # via tensorflow kiwisolver==1.4.5 @@ -1062,11 +1014,7 @@ mistune==0.8.4 ml-collections==0.1.1 # via open-spiel ml-dtypes==0.3.2 - # via - # jax - # jaxlib - # tensorflow - # tensorstore + # via tensorflow mlagents-envs==0.28.0 # via -r /ray/ci/../python/requirements/ml/rllib-test-requirements.txt mlflow==2.9.2 @@ -1107,9 +1055,7 @@ msgpack==1.0.7 # -r /ray/ci/../python/requirements.txt # -r /ray/ci/../python/requirements/ml/rllib-requirements.txt # distributed - # flax # msgpack-numpy - # orbax-checkpoint # ray msgpack-numpy==0.4.8 # via -r /ray/ci/../python/requirements/ml/rllib-requirements.txt @@ -1179,7 +1125,6 @@ nest-asyncio==1.5.8 # nbclassic # nbclient # notebook - # orbax-checkpoint netifaces==0.11.0 # via # hpbandster @@ -1220,7 +1165,6 @@ numpy==1.26.4 # altair # bayesian-optimization # bokeh - # chex # cma # cmaes # cmdstanpy @@ -1233,10 +1177,8 @@ numpy==1.26.4 # deepspeed # dm-control # dm-env - # dopamine-rl # etils # evaluate - # flax # gpy # gradio # gymnasium @@ -1244,9 +1186,6 @@ numpy==1.26.4 # hpbandster # hyperopt # imageio - # jax - # jax-jumpy - # jaxlib # labmaze # lightgbm # matplotlib @@ -1268,9 +1207,7 @@ numpy==1.26.4 # open-spiel # opencv-python # opt-einsum - # optax # optuna - # orbax-checkpoint # pandas # paramz # patsy @@ -1295,7 +1232,6 @@ numpy==1.26.4 # tensorflow # tensorflow-datasets # tensorflow-probability - # tensorstore # tf2onnx # tifffile # tinyscaler @@ -1338,7 +1274,7 @@ opencensus-context==0.1.3 opencensus-proto==0.1.0 # via opentelemetry-exporter-opencensus opencv-python==4.8.1.78 - # via dopamine-rl + # via -r /ray/ci/../python/requirements/ml/rllib-test-requirements.txt openpyxl==3.0.10 # via -r /ray/ci/../python/requirements/test-requirements.txt opentelemetry-api==1.1.0 @@ -1371,15 +1307,10 @@ opentelemetry-semantic-conventions==0.20b0 # via opentelemetry-sdk opt-einsum==3.3.0 # via - # jax # pyro-ppl # tensorflow -optax==0.1.7 - # via flax optuna==3.2.0 # via -r /ray/ci/../python/requirements/ml/tune-requirements.txt -orbax-checkpoint==0.2.3 - # via flax orjson==3.9.10 # via gradio packaging==23.0 
@@ -1441,7 +1372,6 @@ pandas==1.5.3 ; python_version < "3.12" # dask # datasets # delta-sharing - # dopamine-rl # evaluate # gradio # mlflow @@ -1493,7 +1423,6 @@ pillow==10.3.0 ; platform_system != "Windows" # -r /ray/ci/../python/requirements/test-requirements.txt # aim # bokeh - # dopamine-rl # gradio # imageio # matplotlib @@ -1640,7 +1569,6 @@ pyflakes==2.3.1 pygame==2.5.2 # via # -r /ray/ci/../python/requirements/test-requirements.txt - # dopamine-rl # minigrid pyglet==1.5.15 # via -r /ray/ci/../python/requirements/ml/rllib-requirements.txt @@ -1811,7 +1739,6 @@ pyyaml==6.0.1 # dask # datasets # distributed - # flax # gradio # huggingface-hub # jsonschema-spec @@ -1828,7 +1755,6 @@ pyyaml==6.0.1 # myst-nb # myst-parser # optuna - # orbax-checkpoint # pymars # pytorch-lightning # ray @@ -1864,8 +1790,6 @@ requests==2.31.0 # -r /ray/ci/../python/requirements.txt # adal # aim - # autorom - # autorom-accept-rom-license # azure-cli-core # azure-core # comet-ml @@ -1939,7 +1863,6 @@ rich==13.3.2 # -r /ray/ci/../python/requirements.txt # -r /ray/ci/../python/requirements/ml/rllib-requirements.txt # comet-ml - # flax # memray # pyiceberg # semgrep @@ -1991,8 +1914,6 @@ scipy==1.11.4 # gpy # hpbandster # hyperopt - # jax - # jaxlib # lightgbm # linear-operator # medpy @@ -2000,7 +1921,6 @@ scipy==1.11.4 # open-spiel # paramz # pymars - # recsim # scikit-image # scikit-learn # statsforecast @@ -2156,6 +2076,8 @@ statsmodels==0.14.0 # via # hpbandster # statsforecast +strictyaml==1.7.3 + # via pyiceberg supersuit==3.9.3 # via -r /ray/ci/../python/requirements/ml/rllib-test-requirements.txt sympy==1.13.1 @@ -2190,10 +2112,7 @@ tensorboardx==2.6.2.2 # -r /ray/ci/../python/requirements/test-requirements.txt # pytorch-lightning tensorflow==2.15.1 ; python_version < "3.12" and (sys_platform != "darwin" or platform_machine != "arm64") - # via - # -r /ray/ci/../python/requirements/ml/dl-cpu-requirements.txt - # dopamine-rl - # recsim + # via -r /ray/ci/../python/requirements/ml/dl-cpu-requirements.txt tensorflow-datasets==4.9.3 ; python_version < "3.12" # via # -r /ray/ci/../python/requirements/ml/data-test-requirements.txt @@ -2209,13 +2128,7 @@ tensorflow-io-gcs-filesystem==0.31.0 ; python_version < "3.12" tensorflow-metadata==1.14.0 # via tensorflow-datasets tensorflow-probability==0.23.0 ; python_version < "3.12" - # via - # -r /ray/ci/../python/requirements/ml/dl-cpu-requirements.txt - # dopamine-rl -tensorstore==0.1.63 - # via - # flax - # orbax-checkpoint + # via -r /ray/ci/../python/requirements/ml/dl-cpu-requirements.txt termcolor==2.4.0 # via # pytest-shutil @@ -2229,8 +2142,6 @@ terminado==0.18.1 # notebook testfixtures==7.0.0 # via -r /ray/ci/../python/requirements/test-requirements.txt -tf-slim==1.1.0 - # via dopamine-rl tf2onnx==1.15.1 ; sys_platform != "darwin" or platform_machine != "arm64" # via -r /ray/ci/../python/requirements/ml/rllib-requirements.txt threadpoolctl==3.1.0 @@ -2260,7 +2171,6 @@ tomlkit==0.13.0 toolz==0.12.1 # via # altair - # chex # dask # distributed # partd @@ -2396,7 +2306,6 @@ typing-extensions==4.8.0 # configspace # etils # fastapi - # flax # gradio # gradio-client # gymnasium @@ -2405,7 +2314,6 @@ typing-extensions==4.8.0 # mypy # myst-nb # nevergrad - # orbax-checkpoint # pydantic # pydantic-core # pytorch-lightning From b75cb793e437aa617d61dcb13e5f5d2fcc83ee68 Mon Sep 17 00:00:00 2001 From: Jiajun Yao Date: Tue, 19 Nov 2024 07:14:55 -0800 Subject: [PATCH 015/107] [Core] Support labels for ray.remote (#48715) Signed-off-by: Jiajun Yao --- 
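Before the diff itself, a quick usage sketch of what this commit enables (an editor's illustration derived from the tests below; `_labels` is an underscored, internal-facing option, and both keys and values must be strings):

```python
import ray

# Attach string key-value labels to a task ...
@ray.remote(_labels={"team": "data", "critical": "true"})
def f():
    return 1

# ... or to an actor; the labels travel with the serialized ActorHandle.
@ray.remote(_labels={"team": "serving"})
class Worker:
    def ping(self):
        return "pong"

ray.get(f.remote())
w = Worker.remote()
ray.get(w.ping.remote())

# prepare_labels() in _raylet.pyx rejects non-string keys or values at
# submission time, e.g.:
# @ray.remote(_labels={"retries": 3})  ->  ValueError: Label value must be string
```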
python/ray/_private/ray_option_utils.py | 1 + python/ray/_private/worker.py | 2 +- python/ray/_raylet.pyx | 36 ++++++++++++--- python/ray/actor.py | 2 + python/ray/includes/common.pxd | 6 ++- python/ray/remote_function.py | 3 ++ python/ray/tests/test_advanced.py | 58 ++++++++++++++++++++----- src/ray/common/task/task_util.h | 4 +- src/ray/core_worker/actor_handle.cc | 11 +++-- src/ray/core_worker/actor_handle.h | 7 ++- src/ray/core_worker/actor_manager.h | 1 + src/ray/core_worker/common.h | 22 ++++++---- src/ray/core_worker/core_worker.cc | 17 +++++--- src/ray/core_worker/core_worker.h | 3 +- src/ray/core_worker/task_manager.cc | 15 +++++-- src/ray/core_worker/task_manager.h | 4 +- src/ray/protobuf/common.proto | 10 ++++- src/ray/protobuf/core_worker.proto | 3 ++ 18 files changed, 159 insertions(+), 46 deletions(-) diff --git a/python/ray/_private/ray_option_utils.py b/python/ray/_private/ray_option_utils.py index 91345e536446..61c898aff8c4 100644 --- a/python/ray/_private/ray_option_utils.py +++ b/python/ray/_private/ray_option_utils.py @@ -147,6 +147,7 @@ def _validate_resources(resources: Optional[Dict[str, float]]) -> Optional[str]: ), "_metadata": Option((dict, type(None))), "enable_task_events": Option(bool, default_value=True), + "_labels": Option((dict, type(None))), } diff --git a/python/ray/_private/worker.py b/python/ray/_private/worker.py index 118c556ec966..e62375c5e523 100644 --- a/python/ray/_private/worker.py +++ b/python/ray/_private/worker.py @@ -3549,7 +3549,7 @@ def method(self): for more details. _metadata: Extended options for Ray libraries. For example, _metadata={"workflows.io/options": } for Ray workflows. - + _labels: The key-value labels of a task or actor. """ # "callable" returns true for both function and class. if len(args) == 1 and len(kwargs) == 0 and callable(args[0]): diff --git a/python/ray/_raylet.pyx b/python/ray/_raylet.pyx index 3ddf101189dc..944adba8fc33 100644 --- a/python/ray/_raylet.pyx +++ b/python/ray/_raylet.pyx @@ -737,11 +737,26 @@ cdef class Language: JAVA = Language.from_native(LANGUAGE_JAVA) +cdef int prepare_labels( + dict label_dict, + unordered_map[c_string, c_string] *label_map) except -1: + + if label_dict is None: + return 0 + + for key, value in label_dict.items(): + if not isinstance(key, str): + raise ValueError(f"Label key must be string, but got {type(key)}") + if not isinstance(value, str): + raise ValueError(f"Label value must be string, but got {type(value)}") + label_map[0][key.encode("utf-8")] = value.encode("utf-8") + + return 0 + cdef int prepare_resources( dict resource_dict, unordered_map[c_string, double] *resource_map) except -1: cdef: - unordered_map[c_string, double] out c_string resource_name list unit_resources @@ -4009,10 +4024,12 @@ cdef class CoreWorker: c_string debugger_breakpoint, c_string serialized_runtime_env_info, int64_t generator_backpressure_num_objects, - c_bool enable_task_events + c_bool enable_task_events, + labels, ): cdef: unordered_map[c_string, double] c_resources + unordered_map[c_string, c_string] c_labels CRayFunction ray_function CTaskOptions task_options c_vector[unique_ptr[CTaskArg]] args_vector @@ -4032,6 +4049,7 @@ cdef class CoreWorker: with self.profile_event(b"submit_task"): prepare_resources(resources, &c_resources) + prepare_labels(labels, &c_labels) ray_function = CRayFunction( language.lang, function_descriptor.descriptor) prepare_args_and_increment_put_refs( @@ -4043,7 +4061,9 @@ cdef class CoreWorker: b"", generator_backpressure_num_objects, serialized_runtime_env_info, - 
enable_task_events) + enable_task_events, + c_labels, + ) current_c_task_id = current_task.native() @@ -4089,6 +4109,7 @@ cdef class CoreWorker: int32_t max_pending_calls, scheduling_strategy, c_bool enable_task_events, + labels, ): cdef: CRayFunction ray_function @@ -4101,6 +4122,7 @@ cdef class CoreWorker: CSchedulingStrategy c_scheduling_strategy c_vector[CObjectID] incremented_put_arg_ids optional[c_bool] is_detached_optional = nullopt + unordered_map[c_string, c_string] c_labels self.python_scheduling_strategy_to_c( scheduling_strategy, &c_scheduling_strategy) @@ -4108,6 +4130,7 @@ cdef class CoreWorker: with self.profile_event(b"submit_task"): prepare_resources(resources, &c_resources) prepare_resources(placement_resources, &c_placement_resources) + prepare_labels(labels, &c_labels) ray_function = CRayFunction( language.lang, function_descriptor.descriptor) prepare_args_and_increment_put_refs( @@ -4136,7 +4159,8 @@ cdef class CoreWorker: # async or threaded actors. is_asyncio or max_concurrency > 1, max_pending_calls, - enable_task_events), + enable_task_events, + c_labels), extension_data, &c_actor_id) @@ -4247,6 +4271,7 @@ cdef class CoreWorker: TaskID current_task = self.get_current_task_id() c_string serialized_retry_exception_allowlist c_string serialized_runtime_env = b"{}" + unordered_map[c_string, c_string] c_labels serialized_retry_exception_allowlist = serialize_retry_exception_allowlist( retry_exception_allowlist, @@ -4275,7 +4300,8 @@ cdef class CoreWorker: concurrency_group_name, generator_backpressure_num_objects, serialized_runtime_env, - enable_task_events), + enable_task_events, + c_labels), max_retries, retry_exceptions, serialized_retry_exception_allowlist, diff --git a/python/ray/actor.py b/python/ray/actor.py index 222f52c24b5f..824de9efad73 100644 --- a/python/ray/actor.py +++ b/python/ray/actor.py @@ -924,6 +924,7 @@ def _remote(self, args=None, kwargs=None, **actor_options): scheduling_strategy: Strategy about how to schedule this actor. enable_task_events: True if tracing is enabled, i.e., task events from the actor should be reported. Defaults to True. + _labels: The key-value labels of the actor. Returns: A handle to the newly created actor. 
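Since `_remote()` reads the labels out of `actor_options` (next hunk), they can presumably also be supplied per-instantiation through `.options()`; a hypothetical, self-contained sketch:

```python
import ray


@ray.remote
class Replica:
    def ping(self):
        return "pong"


# Hypothetical: override labels for one instantiation via .options(), since
# _remote() below fetches them with actor_options.get("_labels").
handle = Replica.options(_labels={"tier": "gold"}).remote()
ray.get(handle.ping.remote())
```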
@@ -1197,6 +1198,7 @@ def _remote(self, args=None, kwargs=None, **actor_options): max_pending_calls=max_pending_calls, scheduling_strategy=scheduling_strategy, enable_task_events=enable_task_events, + labels=actor_options.get("_labels"), ) if _actor_launch_hook: diff --git a/python/ray/includes/common.pxd b/python/ray/includes/common.pxd index 7d4b6ece9e7a..f5c6d4655ac9 100644 --- a/python/ray/includes/common.pxd +++ b/python/ray/includes/common.pxd @@ -329,7 +329,8 @@ cdef extern from "ray/core_worker/common.h" nogil: unordered_map[c_string, double] &resources, c_string concurrency_group_name, int64_t generator_backpressure_num_objects, - c_string serialized_runtime_env, c_bool enable_task_events) + c_string serialized_runtime_env, c_bool enable_task_events, + const unordered_map[c_string, c_string] &labels) cdef cppclass CActorCreationOptions "ray::core::ActorCreationOptions": CActorCreationOptions() @@ -347,7 +348,8 @@ cdef extern from "ray/core_worker/common.h" nogil: const c_vector[CConcurrencyGroup] &concurrency_groups, c_bool execute_out_of_order, int32_t max_pending_calls, - c_bool enable_task_events) + c_bool enable_task_events, + const unordered_map[c_string, c_string] &labels) cdef cppclass CPlacementGroupCreationOptions \ "ray::core::PlacementGroupCreationOptions": diff --git a/python/ray/remote_function.py b/python/ray/remote_function.py index 72c07fd63b86..311ba1a5f8c2 100644 --- a/python/ray/remote_function.py +++ b/python/ray/remote_function.py @@ -217,6 +217,7 @@ def options(self, **task_options): _metadata: Extended options for Ray libraries. For example, _metadata={"workflows.io/options": } for Ray workflows. + _labels: The key-value labels of a task. Examples: @@ -417,6 +418,7 @@ def _remote(self, args=None, kwargs=None, **task_options): # Override enable_task_events to default for actor if not specified (i.e. None) enable_task_events = task_options.get("enable_task_events") + labels = task_options.get("_labels") def invocation(args, kwargs): if self._is_cross_language: @@ -447,6 +449,7 @@ def invocation(args, kwargs): serialized_runtime_env_info or "{}", generator_backpressure_num_objects, enable_task_events, + labels, ) # Reset worker's debug context from the last "remote" command # (which applies only to this .remote call). 
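The test changes below exercise this end to end; in miniature, the introspection helper used there returns (task proto, count) pairs whose labels now come from the task itself or, for actor tasks, from the owning actor. A sketch of consuming that output (the helper's import path is not shown in this diff, so it is left abstract here):

```python
# Sketch: summarize ongoing lineage-reconstruction tasks. The pairs come from
# the private helper `get_local_ongoing_lineage_reconstruction_tasks()` used
# in test_advanced.py below; its import lives outside this diff.
def summarize(lineage_tasks):
    for task_proto, count in lineage_tasks:
        # After this commit, the proto carries `labels` (instead of the
        # previous `resources` map) alongside the task name and status.
        print(task_proto.name, dict(task_proto.labels), task_proto.status, count)

# summarize(get_local_ongoing_lineage_reconstruction_tasks())
```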
diff --git a/python/ray/tests/test_advanced.py b/python/ray/tests/test_advanced.py index a3cc669c8c90..8642099b042b 100644 --- a/python/ray/tests/test_advanced.py +++ b/python/ray/tests/test_advanced.py @@ -125,11 +125,11 @@ def test_internal_get_local_ongoing_lineage_reconstruction_tasks( ray_start_cluster_enabled, ): cluster = ray_start_cluster_enabled - cluster.add_node(resources={"head": 1}) + cluster.add_node(resources={"head": 2}) ray.init(address=cluster.address) - worker1 = cluster.add_node(resources={"worker": 1}) + worker1 = cluster.add_node(resources={"worker": 2}) - @ray.remote(resources={"head": 1}) + @ray.remote(num_cpus=0, resources={"head": 1}) class Counter: def __init__(self): self.count = 0 @@ -138,7 +138,9 @@ def inc(self): self.count = self.count + 1 return self.count - @ray.remote(max_retries=-1, num_cpus=0, resources={"worker": 1}) + @ray.remote( + max_retries=-1, num_cpus=0, resources={"worker": 1}, _labels={"key1": "value1"} + ) def task(counter): count = ray.get(counter.inc.remote()) if count > 1: @@ -146,10 +148,31 @@ def task(counter): time.sleep(100000) return [1] * 1024 * 1024 - counter = Counter.remote() - obj = task.remote(counter) + @ray.remote( + max_restarts=-1, + max_task_retries=-1, + num_cpus=0, + resources={"worker": 1}, + _labels={"key2": "value2"}, + ) + class Actor: + def run(self, counter): + count = ray.get(counter.inc.remote()) + if count > 1: + # lineage reconstruction + time.sleep(100000) + return [1] * 1024 * 1024 + + counter1 = Counter.remote() + obj1 = task.remote(counter1) # Wait for task to finish - ray.wait([obj], fetch_local=False) + ray.wait([obj1], fetch_local=False) + + counter2 = Counter.remote() + actor = Actor.remote() + obj2 = actor.run.remote(counter2) + # Wait for actor task to finish + ray.wait([obj2], fetch_local=False) assert len(get_local_ongoing_lineage_reconstruction_tasks()) == 0 @@ -158,16 +181,27 @@ def task(counter): def verify(expected_task_status): lineage_reconstruction_tasks = get_local_ongoing_lineage_reconstruction_tasks() - return ( - len(lineage_reconstruction_tasks) == 1 - and lineage_reconstruction_tasks[0][0].name == "task" - and lineage_reconstruction_tasks[0][0].resources == {"worker": 1.0} + lineage_reconstruction_tasks.sort(key=lambda task: task[0].name) + assert len(lineage_reconstruction_tasks) == 2 + assert [ + lineage_reconstruction_tasks[0][0].name, + lineage_reconstruction_tasks[1][0].name, + ] == ["Actor.run", "task"] + assert ( + lineage_reconstruction_tasks[0][0].labels == {"key2": "value2"} and lineage_reconstruction_tasks[0][0].status == expected_task_status and lineage_reconstruction_tasks[0][1] == 1 ) + assert ( + lineage_reconstruction_tasks[1][0].labels == {"key1": "value1"} + and lineage_reconstruction_tasks[1][0].status == expected_task_status + and lineage_reconstruction_tasks[1][1] == 1 + ) + + return True wait_for_condition(lambda: verify(common_pb2.TaskStatus.PENDING_NODE_ASSIGNMENT)) - cluster.add_node(resources={"worker": 1}) + cluster.add_node(resources={"worker": 2}) wait_for_condition(lambda: verify(common_pb2.TaskStatus.SUBMITTED_TO_WORKER)) diff --git a/src/ray/common/task/task_util.h b/src/ray/common/task/task_util.h index 488c52069aa4..4ecfab358c7b 100644 --- a/src/ray/common/task/task_util.h +++ b/src/ray/common/task/task_util.h @@ -135,7 +135,8 @@ class TaskSpecBuilder { const TaskID &submitter_task_id, const std::shared_ptr runtime_env_info = nullptr, const std::string &concurrency_group_name = "", - bool enable_task_events = true) { + bool enable_task_events = true, + 
const std::unordered_map<std::string, std::string> &labels = {}) {
     message_->set_type(TaskType::NORMAL_TASK);
     message_->set_name(name);
     message_->set_language(language);
@@ -165,6 +166,7 @@ class TaskSpecBuilder {
     }
     message_->set_concurrency_group_name(concurrency_group_name);
     message_->set_enable_task_events(enable_task_events);
+    message_->mutable_labels()->insert(labels.begin(), labels.end());
     return *this;
   }

diff --git a/src/ray/core_worker/actor_handle.cc b/src/ray/core_worker/actor_handle.cc
index 4228cd392351..39257bbb7fcc 100644
--- a/src/ray/core_worker/actor_handle.cc
+++ b/src/ray/core_worker/actor_handle.cc
@@ -33,7 +33,8 @@ rpc::ActorHandle CreateInnerActorHandle(
     const std::string &ray_namespace,
     int32_t max_pending_calls,
     bool execute_out_of_order,
-    absl::optional<bool> enable_task_events) {
+    absl::optional<bool> enable_task_events,
+    const std::unordered_map<std::string, std::string> &labels) {
   rpc::ActorHandle inner;
   inner.set_actor_id(actor_id.Data(), actor_id.Size());
   inner.set_owner_id(owner_id.Binary());
@@ -50,6 +51,7 @@ rpc::ActorHandle CreateInnerActorHandle(
   inner.set_execute_out_of_order(execute_out_of_order);
   inner.set_max_pending_calls(max_pending_calls);
   inner.set_enable_task_events(enable_task_events.value_or(kDefaultTaskEventEnabled));
+  inner.mutable_labels()->insert(labels.begin(), labels.end());
   return inner;
 }

@@ -82,6 +84,7 @@ rpc::ActorHandle CreateInnerActorHandleFromActorData(
   inner.set_execute_out_of_order(
       task_spec.actor_creation_task_spec().execute_out_of_order());
   inner.set_max_pending_calls(task_spec.actor_creation_task_spec().max_pending_calls());
+  inner.mutable_labels()->insert(task_spec.labels().begin(), task_spec.labels().end());
   return inner;
 }
 }  // namespace
@@ -100,7 +103,8 @@ ActorHandle::ActorHandle(
     const std::string &ray_namespace,
     int32_t max_pending_calls,
     bool execute_out_of_order,
-    absl::optional<bool> enable_task_events)
+    absl::optional<bool> enable_task_events,
+    const std::unordered_map<std::string, std::string> &labels)
     : ActorHandle(CreateInnerActorHandle(actor_id,
                                          owner_id,
                                          owner_address,
@@ -114,7 +118,8 @@ ActorHandle::ActorHandle(
                                          ray_namespace,
                                          max_pending_calls,
                                          execute_out_of_order,
-                                         enable_task_events)) {}
+                                         enable_task_events,
+                                         labels)) {}

 ActorHandle::ActorHandle(const std::string &serialized)
     : ActorHandle(CreateInnerActorHandleFromString(serialized)) {}
diff --git a/src/ray/core_worker/actor_handle.h b/src/ray/core_worker/actor_handle.h
index 22f00c066dff..98306cb6d6b6 100644
--- a/src/ray/core_worker/actor_handle.h
+++ b/src/ray/core_worker/actor_handle.h
@@ -45,7 +45,8 @@ class ActorHandle {
       const std::string &ray_namespace,
       int32_t max_pending_calls,
       bool execute_out_of_order = false,
-      absl::optional<bool> enable_task_events = absl::nullopt);
+      absl::optional<bool> enable_task_events = absl::nullopt,
+      const std::unordered_map<std::string, std::string> &labels = {});

   /// Constructs an ActorHandle from a serialized string.
   explicit ActorHandle(const std::string &serialized);
@@ -105,6 +106,10 @@ class ActorHandle {

   bool ExecuteOutOfOrder() const { return inner_.execute_out_of_order(); }

+  const ::google::protobuf::Map<std::string, std::string> &GetLabels() const {
+    return inner_.labels();
+  }
+
 private:
  // Protobuf-defined persistent state of the actor handle.
 const rpc::ActorHandle inner_;
diff --git a/src/ray/core_worker/actor_manager.h b/src/ray/core_worker/actor_manager.h
index dcfe8e11a68d..a42cdcc13d6b 100644
--- a/src/ray/core_worker/actor_manager.h
+++ b/src/ray/core_worker/actor_manager.h
@@ -20,6 +20,7 @@
 #include "ray/core_worker/actor_creator.h"
 #include "ray/core_worker/actor_handle.h"
 #include "ray/core_worker/reference_count.h"
+#include "ray/core_worker/transport/actor_task_submitter.h"
 #include "ray/core_worker/transport/task_receiver.h"
 #include "ray/gcs/gcs_client/gcs_client.h"

 namespace ray {
diff --git a/src/ray/core_worker/common.h b/src/ray/core_worker/common.h
index 3a160cd302d8..bc4c18c22bb1 100644
--- a/src/ray/core_worker/common.h
+++ b/src/ray/core_worker/common.h
@@ -67,14 +67,16 @@ struct TaskOptions {
               const std::string &concurrency_group_name = "",
               int64_t generator_backpressure_num_objects = -1,
               const std::string &serialized_runtime_env_info = "{}",
-              bool enable_task_events = kDefaultTaskEventEnabled)
+              bool enable_task_events = kDefaultTaskEventEnabled,
+              const std::unordered_map<std::string, std::string> &labels = {})
       : name(name),
         num_returns(num_returns),
         resources(resources),
         concurrency_group_name(concurrency_group_name),
         serialized_runtime_env_info(serialized_runtime_env_info),
         generator_backpressure_num_objects(generator_backpressure_num_objects),
-        enable_task_events(enable_task_events) {}
+        enable_task_events(enable_task_events),
+        labels(labels) {}

   /// The name of this task.
   std::string name;
@@ -95,6 +97,7 @@ struct TaskOptions {
   /// True if task events (worker::TaskEvent) from this task should be reported, default
   /// to true.
   bool enable_task_events = kDefaultTaskEventEnabled;
+  std::unordered_map<std::string, std::string> labels;
 };

 /// Options for actor creation tasks.
@@ -115,7 +118,8 @@ struct ActorCreationOptions {
       const std::vector<ConcurrencyGroup> &concurrency_groups = {},
       bool execute_out_of_order = false,
       int32_t max_pending_calls = -1,
-      bool enable_task_events = kDefaultTaskEventEnabled)
+      bool enable_task_events = kDefaultTaskEventEnabled,
+      const std::unordered_map<std::string, std::string> &labels = {})
       : max_restarts(max_restarts),
         max_task_retries(max_task_retries),
         max_concurrency(max_concurrency),
@@ -132,7 +136,8 @@ struct ActorCreationOptions {
         execute_out_of_order(execute_out_of_order),
         max_pending_calls(max_pending_calls),
         scheduling_strategy(scheduling_strategy),
-        enable_task_events(enable_task_events) {
+        enable_task_events(enable_task_events),
+        labels(labels) {
     // Check that resources is a subset of placement resources.
     for (auto &resource : resources) {
       auto it = this->placement_resources.find(resource.first);
@@ -187,6 +192,7 @@ struct ActorCreationOptions {
   /// True if task events (worker::TaskEvent) from this creation task should be reported
   /// default to true.
   const bool enable_task_events = kDefaultTaskEventEnabled;
+  const std::unordered_map<std::string, std::string> labels;
 };

 using PlacementStrategy = rpc::PlacementStrategy;
@@ -285,11 +291,11 @@ template <>
 struct hash<ray::rpc::LineageReconstructionTask> {
   size_t operator()(const ray::rpc::LineageReconstructionTask &task) const {
     size_t hash = std::hash<std::string>()(task.name());
-    for (const auto &resource : task.resources()) {
-      hash ^= std::hash<std::string>()(resource.first);
-      hash ^= std::hash<double>()(resource.second);
-    }
     hash ^= std::hash<size_t>()(task.status());
+    for (const auto &label : task.labels()) {
+      hash ^= std::hash<std::string>()(label.first);
+      hash ^= std::hash<std::string>()(label.second);
+    }
     return hash;
   }
 };
diff --git a/src/ray/core_worker/core_worker.cc b/src/ray/core_worker/core_worker.cc
index 9a15e8702892..9e8880d3aba3 100644
--- a/src/ray/core_worker/core_worker.cc
+++ b/src/ray/core_worker/core_worker.cc
@@ -2115,7 +2115,8 @@ void CoreWorker::BuildCommonTaskSpec(
     const std::string &concurrency_group_name,
     bool include_job_config,
     int64_t generator_backpressure_num_objects,
-    bool enable_task_events) {
+    bool enable_task_events,
+    const std::unordered_map<std::string, std::string> &labels) {
   // Build common task spec.
   auto override_runtime_env_info =
       OverrideTaskOrActorRuntimeEnvInfo(serialized_runtime_env_info);
@@ -2161,7 +2162,8 @@ void CoreWorker::BuildCommonTaskSpec(
       main_thread_current_task_id,
       override_runtime_env_info,
       concurrency_group_name,
-      enable_task_events);
+      enable_task_events,
+      labels);
   // Set task arguments.
   for (const auto &arg : args) {
     builder.AddArg(*arg);
   }
@@ -2217,7 +2219,8 @@ std::vector<rpc::ObjectReference> CoreWorker::SubmitTask(
       /*include_job_config*/ true,
       /*generator_backpressure_num_objects*/
       task_options.generator_backpressure_num_objects,
-      /*enable_task_event*/ task_options.enable_task_events);
+      /*enable_task_event*/ task_options.enable_task_events,
+      task_options.labels);
   ActorID root_detached_actor_id;
   if (!worker_context_.GetRootDetachedActorID().IsNil()) {
     root_detached_actor_id = worker_context_.GetRootDetachedActorID();
   }
@@ -2308,7 +2311,8 @@ Status CoreWorker::CreateActor(const RayFunction &function,
       /*concurrency_group_name*/ "",
       /*include_job_config*/ true,
      /*generator_backpressure_num_objects*/ -1,
-      /*enable_task_events*/ actor_creation_options.enable_task_events);
+      /*enable_task_events*/ actor_creation_options.enable_task_events,
+      actor_creation_options.labels);
   // If the namespace is not specified, get it from the job.
 const auto ray_namespace = (actor_creation_options.ray_namespace.empty()
@@ -2328,7 +2332,8 @@ Status CoreWorker::CreateActor(const RayFunction &function,
       ray_namespace,
       actor_creation_options.max_pending_calls,
       actor_creation_options.execute_out_of_order,
-      actor_creation_options.enable_task_events);
+      actor_creation_options.enable_task_events,
+      actor_creation_options.labels);
   std::string serialized_actor_handle;
   actor_handle->Serialize(&serialized_actor_handle);
   ActorID root_detached_actor_id;
@@ -3939,7 +3944,7 @@ void CoreWorker::ProcessSubscribeObjectLocations(

 std::unordered_map<rpc::LineageReconstructionTask, uint64_t>
 CoreWorker::GetLocalOngoingLineageReconstructionTasks() const {
-  return task_manager_->GetOngoingLineageReconstructionTasks();
+  return task_manager_->GetOngoingLineageReconstructionTasks(*actor_manager_);
 }

 Status CoreWorker::GetLocalObjectLocations(
diff --git a/src/ray/core_worker/core_worker.h b/src/ray/core_worker/core_worker.h
index 2d135a5983f8..c7f07800c700 100644
--- a/src/ray/core_worker/core_worker.h
+++ b/src/ray/core_worker/core_worker.h
@@ -1467,7 +1467,8 @@ class CoreWorker : public rpc::CoreWorkerServiceHandler {
       const std::string &concurrency_group_name = "",
       bool include_job_config = false,
       int64_t generator_backpressure_num_objects = -1,
-      bool enable_task_events = true);
+      bool enable_task_events = true,
+      const std::unordered_map<std::string, std::string> &labels = {});

   void SetCurrentTaskId(const TaskID &task_id,
                         uint64_t attempt_number,
                         const std::string &task_name);
diff --git a/src/ray/core_worker/task_manager.cc b/src/ray/core_worker/task_manager.cc
index bc5a78c7862e..cb175bfd2ebb 100644
--- a/src/ray/core_worker/task_manager.cc
+++ b/src/ray/core_worker/task_manager.cc
@@ -16,6 +16,7 @@

 #include "ray/common/buffer.h"
 #include "ray/common/common_protocol.h"
+#include "ray/core_worker/actor_manager.h"
 #include "ray/gcs/pb_util.h"
 #include "ray/util/exponential_backoff.h"
 #include "ray/util/util.h"
@@ -1475,7 +1476,8 @@ void TaskManager::SetTaskStatus(
 }

 std::unordered_map<rpc::LineageReconstructionTask, uint64_t>
-TaskManager::GetOngoingLineageReconstructionTasks() const {
+TaskManager::GetOngoingLineageReconstructionTasks(
+    const ActorManager &actor_manager) const {
   absl::MutexLock lock(&mu_);
   std::unordered_map<rpc::LineageReconstructionTask, uint64_t> result;
   for (const auto &task_it : submissible_tasks_) {
@@ -1491,9 +1493,16 @@

     rpc::LineageReconstructionTask task;
     task.set_name(task_entry.spec.GetName());
-    auto resources = task_entry.spec.GetRequiredResources().GetResourceUnorderedMap();
-    task.mutable_resources()->insert(resources.begin(), resources.end());
     task.set_status(task_entry.GetStatus());
+    if (task_entry.spec.IsNormalTask()) {
+      task.mutable_labels()->insert(task_entry.spec.GetMessage().labels().begin(),
+                                    task_entry.spec.GetMessage().labels().end());
+    } else if (task_entry.spec.IsActorTask()) {
+      auto actor_handle = actor_manager.GetActorHandle(task_entry.spec.ActorId());
+      RAY_CHECK(actor_handle) << "Actor task must be submitted via actor handle";
+      const auto &labels = actor_handle->GetLabels();
+      task.mutable_labels()->insert(labels.begin(), labels.end());
+    }

     if (result.find(task) != result.end()) {
       result[task] += 1;
diff --git a/src/ray/core_worker/task_manager.h b/src/ray/core_worker/task_manager.h
index 196e18beb277..49188e9a630f 100644
--- a/src/ray/core_worker/task_manager.h
+++ b/src/ray/core_worker/task_manager.h
@@ -30,6 +30,8 @@
 namespace ray {
 namespace core {

+class ActorManager;
+
 class TaskFinisherInterface {
  public:
   virtual void CompletePendingTask(const TaskID &task_id,
@@ -603,7 +605,7 @@
class TaskManager : public TaskFinisherInterface, public TaskResubmissionInterface {
   /// Key is the lineage reconstruction task info.
   /// Value is the number of ongoing lineage reconstruction tasks of this type.
   std::unordered_map<rpc::LineageReconstructionTask, uint64_t>
-  GetOngoingLineageReconstructionTasks() const;
+  GetOngoingLineageReconstructionTasks(const ActorManager &actor_manager) const;

   /// Returns the generator ID that contains the dynamically allocated
   /// ObjectRefs, if the task is dynamic. Else, returns Nil.
diff --git a/src/ray/protobuf/common.proto b/src/ray/protobuf/common.proto
index 604c92d0d4db..f18175a12f2c 100644
--- a/src/ray/protobuf/common.proto
+++ b/src/ray/protobuf/common.proto
@@ -556,6 +556,8 @@ message TaskSpec {
   // this field contains the detached actor id.
   // Otherwise it's empty and is originated from a driver.
   bytes root_detached_actor_id = 40;
+  // The key-value labels for task and actor.
+  map<string, string> labels = 41;
 }

 message TaskInfoEntry {
@@ -986,6 +988,10 @@ message NamedActorInfo {

 message LineageReconstructionTask {
   string name = 1;
-  map<string, double> resources = 2;
-  TaskStatus status = 3;
+  TaskStatus status = 2;
+  // If the task is a normal task,
+  // this has the labels of the normal task.
+  // If the task is an actor task,
+  // this has the labels of the corresponding actor.
+  map<string, string> labels = 3;
 }
diff --git a/src/ray/protobuf/core_worker.proto b/src/ray/protobuf/core_worker.proto
index caa869ce18e9..9b6dad5191dd 100644
--- a/src/ray/protobuf/core_worker.proto
+++ b/src/ray/protobuf/core_worker.proto
@@ -70,6 +70,9 @@ message ActorHandle {

   // Whether task events will be reported from this actor.
   bool enable_task_events = 14;
+
+  // The key-value labels for actor.
+  map<string, string> labels = 15;
 }

 message PushTaskRequest {

From 4165b1ab80a8b269aa254ddda42f8450424f3980 Mon Sep 17 00:00:00 2001
From: Sven Mika
Date: Tue, 19 Nov 2024 18:11:52 +0100
Subject: [PATCH 016/107] [RLlib] APPO enhancements (new API stack) vol 02:
 Cleanup loss function, add GAE-lambda to vtrace, make rho-clip configurable.
 (#48800)

---
 rllib/algorithms/appo/appo.py | 10 +-
 rllib/algorithms/appo/appo_rl_module.py | 4 +-
 .../appo/torch/appo_torch_learner.py | 126 ++++++++++++------
 rllib/algorithms/impala/vtrace_torch.py | 1 +
 4 files changed, 94 insertions(+), 47 deletions(-)

diff --git a/rllib/algorithms/appo/appo.py b/rllib/algorithms/appo/appo.py
index 37b8fd863c66..1640cf4b5338 100644
--- a/rllib/algorithms/appo/appo.py
+++ b/rllib/algorithms/appo/appo.py
@@ -32,8 +32,7 @@
 LEARNER_RESULTS_KL_KEY = "mean_kl_loss"
 LEARNER_RESULTS_CURR_KL_COEFF_KEY = "curr_kl_coeff"

-OLD_ACTION_DIST_KEY = "old_action_dist"
-OLD_ACTION_DIST_LOGITS_KEY = "old_action_dist_logits"
+TARGET_ACTION_DIST_LOGITS_KEY = "target_action_dist_logits"


 class APPOConfig(IMPALAConfig):
@@ -108,6 +107,7 @@ def __init__(self, algo_class=None):
         self.use_kl_loss = False
         self.kl_coeff = 1.0
         self.kl_target = 0.01
+        self.target_worker_clipping = 2.0
         # TODO (sven): Activate once v-trace sequences in non-RNN batch are solved.
         # If we switch this on right now, the shuffling would destroy the rollout
         # sequences (non-zero-padded!) needed in the batch for v-trace.
@@ -163,6 +163,7 @@ def training(
         kl_target: Optional[float] = NotProvided,
         tau: Optional[float] = NotProvided,
         target_network_update_freq: Optional[int] = NotProvided,
+        target_worker_clipping: Optional[float] = NotProvided,
         # Deprecated keys.
         target_update_frequency=DEPRECATED_VALUE,
         **kwargs,
@@ -193,6 +194,9 @@ def training(
                 on before updating the target networks and tune the kl loss
                 coefficients.
                 NOTE: This parameter is only applicable when using the Learner API
                 (enable_rl_module_and_learner=True).
NOTE: This parameter is only applicable when using the Learner API (enable_rl_module_and_learner=True). + target_worker_clipping: The maximum value for the target-worker-clipping + used for computing the IS ratio, described in [1] + IS = min(π(i) / π(target), ρ) * (π / π(i)) Returns: This updated AlgorithmConfig object. @@ -227,6 +231,8 @@ def training( self.tau = tau if target_network_update_freq is not NotProvided: self.target_network_update_freq = target_network_update_freq + if target_worker_clipping is not NotProvided: + self.target_worker_clipping = target_worker_clipping return self diff --git a/rllib/algorithms/appo/appo_rl_module.py b/rllib/algorithms/appo/appo_rl_module.py index a3a34bb37735..178f3d0951fb 100644 --- a/rllib/algorithms/appo/appo_rl_module.py +++ b/rllib/algorithms/appo/appo_rl_module.py @@ -2,7 +2,7 @@ from typing import Any, Dict, List, Tuple from ray.rllib.algorithms.ppo.ppo_rl_module import PPORLModule -from ray.rllib.algorithms.appo.appo import OLD_ACTION_DIST_LOGITS_KEY +from ray.rllib.algorithms.appo.appo import TARGET_ACTION_DIST_LOGITS_KEY from ray.rllib.core.learner.utils import make_target_network from ray.rllib.core.models.base import ACTOR from ray.rllib.core.models.tf.encoder import ENCODER_OUT @@ -32,7 +32,7 @@ def get_target_network_pairs(self) -> List[Tuple[NetworkType, NetworkType]]: def forward_target(self, batch: Dict[str, Any]) -> Dict[str, Any]: old_pi_inputs_encoded = self._old_encoder(batch)[ENCODER_OUT][ACTOR] old_action_dist_logits = self._old_pi(old_pi_inputs_encoded) - return {OLD_ACTION_DIST_LOGITS_KEY: old_action_dist_logits} + return {TARGET_ACTION_DIST_LOGITS_KEY: old_action_dist_logits} @OverrideToImplementCustomLogic_CallToSuperRecommended @override(PPORLModule) diff --git a/rllib/algorithms/appo/torch/appo_torch_learner.py b/rllib/algorithms/appo/torch/appo_torch_learner.py index d53815989e09..67d585424343 100644 --- a/rllib/algorithms/appo/torch/appo_torch_learner.py +++ b/rllib/algorithms/appo/torch/appo_torch_learner.py @@ -1,10 +1,21 @@ +"""Asynchronous Proximal Policy Optimization (APPO) + +The algorithm is described in [1] (under the name of "IMPACT"): + +Detailed documentation: +https://docs.ray.io/en/master/rllib-algorithms.html#appo + +[1] IMPACT: Importance Weighted Asynchronous Architectures with Clipped Target Networks. +Luo et al. 2020 +https://arxiv.org/pdf/1912.00167 +""" from typing import Dict from ray.rllib.algorithms.appo.appo import ( APPOConfig, LEARNER_RESULTS_CURR_KL_COEFF_KEY, LEARNER_RESULTS_KL_KEY, - OLD_ACTION_DIST_LOGITS_KEY, + TARGET_ACTION_DIST_LOGITS_KEY, ) from ray.rllib.algorithms.appo.appo_learner import APPOLearner from ray.rllib.algorithms.impala.torch.impala_torch_learner import IMPALATorchLearner @@ -60,45 +71,49 @@ def compute_loss_for_module( ) action_dist_cls_train = module.get_train_action_dist_cls() - target_policy_dist = action_dist_cls_train.from_logits( - fwd_out[Columns.ACTION_DIST_INPUTS] - ) - old_target_policy_dist = action_dist_cls_train.from_logits( - module.forward_target(batch)[OLD_ACTION_DIST_LOGITS_KEY] - ) - old_target_policy_actions_logp = old_target_policy_dist.logp( - batch[Columns.ACTIONS] + # Policy being trained (current). 
+ current_action_dist = action_dist_cls_train.from_logits( + fwd_out[Columns.ACTION_DIST_INPUTS] ) - behaviour_actions_logp = batch[Columns.ACTION_LOGP] - target_actions_logp = target_policy_dist.logp(batch[Columns.ACTIONS]) - - behaviour_actions_logp_time_major = make_time_major( - behaviour_actions_logp, + current_actions_logp = current_action_dist.logp(batch[Columns.ACTIONS]) + current_actions_logp_time_major = make_time_major( + current_actions_logp, trajectory_len=rollout_frag_or_episode_len, recurrent_seq_len=recurrent_seq_len, ) + + # Target policy. + target_action_dist = action_dist_cls_train.from_logits( + module.forward_target(batch)[TARGET_ACTION_DIST_LOGITS_KEY] + ) + target_actions_logp = target_action_dist.logp(batch[Columns.ACTIONS]) target_actions_logp_time_major = make_time_major( target_actions_logp, trajectory_len=rollout_frag_or_episode_len, recurrent_seq_len=recurrent_seq_len, ) - old_actions_logp_time_major = make_time_major( - old_target_policy_actions_logp, + + # EnvRunner's policy (behavior). + behavior_actions_logp = batch[Columns.ACTION_LOGP] + behavior_actions_logp_time_major = make_time_major( + behavior_actions_logp, trajectory_len=rollout_frag_or_episode_len, recurrent_seq_len=recurrent_seq_len, ) + rewards_time_major = make_time_major( batch[Columns.REWARDS], trajectory_len=rollout_frag_or_episode_len, recurrent_seq_len=recurrent_seq_len, ) + + assert Columns.VALUES_BOOTSTRAPPED not in batch values_time_major = make_time_major( values, trajectory_len=rollout_frag_or_episode_len, recurrent_seq_len=recurrent_seq_len, ) - assert Columns.VALUES_BOOTSTRAPPED not in batch # Use as bootstrap values the vf-preds in the next "batch row", except # for the very last row (which doesn't have a next row), for which the # bootstrap value does not matter b/c it has a +1ts value at its end @@ -112,61 +127,86 @@ def compute_loss_for_module( dim=0, ) - # The discount factor that is used should be gamma except for timesteps where - # the episode is terminated. In that case, the discount factor should be 0. + # The discount factor that is used should be `gamma * lambda_`, except for + # termination timesteps, in which case the discount factor should be 0. discounts_time_major = ( - 1.0 - - make_time_major( - batch[Columns.TERMINATEDS], - trajectory_len=rollout_frag_or_episode_len, - recurrent_seq_len=recurrent_seq_len, - ).float() - ) * config.gamma + ( + 1.0 + - make_time_major( + batch[Columns.TERMINATEDS], + trajectory_len=rollout_frag_or_episode_len, + recurrent_seq_len=recurrent_seq_len, + ).float() + # See [1] 3.1: Discounts must contain the GAE lambda_ parameter as well. + ) + * config.gamma + * config.lambda_ + ) # Note that vtrace will compute the main loop on the CPU for better performance. vtrace_adjusted_target_values, pg_advantages = vtrace_torch( - target_action_log_probs=old_actions_logp_time_major, - behaviour_action_log_probs=behaviour_actions_logp_time_major, + # See [1] 3.1: For AˆV-GAE, the ratios used are: min(c¯, π(target)/π(i)) + # π(target) + target_action_log_probs=target_actions_logp_time_major, + # π(i) + behaviour_action_log_probs=behavior_actions_logp_time_major, + # See [1] 3.1: Discounts must contain the GAE lambda_ parameter as well. 
discounts=discounts_time_major,
             rewards=rewards_time_major,
             values=values_time_major,
             bootstrap_values=bootstrap_values,
-            clip_pg_rho_threshold=config.vtrace_clip_pg_rho_threshold,
+            # c¯
             clip_rho_threshold=config.vtrace_clip_rho_threshold,
+            # c¯ (but we allow users to distinguish between c¯ used for
+            # value estimates and c¯ used for the advantages).
+            clip_pg_rho_threshold=config.vtrace_clip_pg_rho_threshold,
         )
         pg_advantages = pg_advantages * loss_mask_time_major

-        # The policy gradients loss.
-        is_ratio = torch.clip(
-            torch.exp(behaviour_actions_logp_time_major - old_actions_logp_time_major),
+        # The policy gradient loss.
+        # As described in [1], use a logp-ratio of:
+        # min(π(i) / π(target), ρ) * (π / π(i)), where ..
+        # - π are the action probs from the current (learner) policy
+        # - π(i) are the action probs from the ith EnvRunner
+        # - π(target) are the action probs from the target network
+        # - ρ is the "target-worker clipping" (2.0 in the paper)
+        target_worker_is_ratio = torch.clip(
+            torch.exp(
+                behavior_actions_logp_time_major - target_actions_logp_time_major
+            ),
             0.0,
-            2.0,
+            config.target_worker_clipping,
         )
-        logp_ratio = is_ratio * torch.exp(
-            target_actions_logp_time_major - behaviour_actions_logp_time_major
+        target_worker_logp_ratio = target_worker_is_ratio * torch.exp(
+            current_actions_logp_time_major - behavior_actions_logp_time_major
         )
-
         surrogate_loss = torch.minimum(
-            pg_advantages * logp_ratio,
+            pg_advantages * target_worker_logp_ratio,
             pg_advantages
-            * torch.clip(logp_ratio, 1 - config.clip_param, 1 + config.clip_param),
+            * torch.clip(
+                target_worker_logp_ratio,
+                1 - config.clip_param,
+                1 + config.clip_param,
+            ),
         )
+        mean_pi_loss = -(torch.sum(surrogate_loss) / size_loss_mask)

+        # Compute KL-loss (if required): KL divergence between current action dist.
+        # and target action dist.
         if config.use_kl_loss:
-            action_kl = old_target_policy_dist.kl(target_policy_dist) * loss_mask
+            action_kl = target_action_dist.kl(current_action_dist) * loss_mask
             mean_kl_loss = torch.sum(action_kl) / size_loss_mask
         else:
             mean_kl_loss = 0.0
-        mean_pi_loss = -(torch.sum(surrogate_loss) / size_loss_mask)

-        # The baseline loss.
+        # Compute value function loss.
         delta = values_time_major - vtrace_adjusted_target_values
         vf_loss = 0.5 * torch.sum(torch.pow(delta, 2.0) * loss_mask_time_major)
         mean_vf_loss = vf_loss / size_loss_mask

-        # The entropy loss.
+        # Compute entropy loss.
         mean_entropy_loss = (
-            -torch.sum(target_policy_dist.entropy() * loss_mask) / size_loss_mask
+            -torch.sum(current_action_dist.entropy() * loss_mask) / size_loss_mask
         )

         # The summed weighted loss.
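For intuition, the clipped target-worker IS term above can be reproduced in isolation. The following is a minimal standalone sketch (made-up log-prob tensors, not RLlib code) of the ratio min(π(i) / π(target), ρ) * (π / π(i)) described in [1]; `rho` stands in for `config.target_worker_clipping`:

```python
import torch

# Hypothetical per-timestep action log-probs, for illustration only.
behavior_logp = torch.tensor([-1.0, -2.0, -0.5])  # pi(i): EnvRunner policy
target_logp = torch.tensor([-1.2, -1.0, -0.5])    # pi(target): target network
current_logp = torch.tensor([-0.9, -1.8, -0.6])   # pi: policy being trained

rho = 2.0  # target-worker clipping (config.target_worker_clipping)

# min(pi(i) / pi(target), rho), computed via exp of the log-prob difference.
is_ratio = torch.clip(torch.exp(behavior_logp - target_logp), 0.0, rho)
# Full ratio used in the surrogate loss: min(pi(i)/pi(target), rho) * (pi/pi(i)).
logp_ratio = is_ratio * torch.exp(current_logp - behavior_logp)
print(logp_ratio)  # tensor([1.3499, 0.4493, 0.9048])
```

Clipping only the behavior-to-target factor bounds the variance introduced by stale EnvRunner policies while still letting the current policy's gradient flow through the second factor.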
diff --git a/rllib/algorithms/impala/vtrace_torch.py b/rllib/algorithms/impala/vtrace_torch.py index 35d88822ca89..b63a5181c7ac 100644 --- a/rllib/algorithms/impala/vtrace_torch.py +++ b/rllib/algorithms/impala/vtrace_torch.py @@ -228,6 +228,7 @@ def multi_from_logits( behaviour_action_log_probs, device="cpu" ) behaviour_action_log_probs = force_list(behaviour_action_log_probs) + # log_rhos = target_logp - behavior_logp log_rhos = get_log_rhos(target_action_log_probs, behaviour_action_log_probs) vtrace_returns = from_importance_weights( From eb3a4b70c951b3d4aaba8555d25f1eedb244d893 Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Tue, 19 Nov 2024 09:14:52 -0800 Subject: [PATCH 017/107] [deps] upgrade cryptography and pyopenssl (#48766) package versions were too old Signed-off-by: Lonnie Liu --- python/requirements/test-requirements.txt | 6 ++---- python/requirements_compiled.txt | 4 ++-- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/python/requirements/test-requirements.txt b/python/requirements/test-requirements.txt index b73f554ec524..175affd3e7f7 100644 --- a/python/requirements/test-requirements.txt +++ b/python/requirements/test-requirements.txt @@ -14,8 +14,7 @@ beautifulsoup4==4.11.1 boto3==1.26.76 # Todo: investigate if we can get rid of this and exchange for ray.cloudpickle cloudpickle==2.2.0 -# Keep in sync with `ci/build/upload_build_info.sh` -cryptography==38.0.1 +cryptography==42.0.5 cython==0.29.37 fastapi==0.109.2 feather-format==0.4.1 @@ -45,8 +44,7 @@ Pillow==10.3.0; platform_system != "Windows" proxy.py==2.4.3 pydantic==2.5.0 pydot==1.4.2 -# Keep in sync with `ci/build/upload_build_info.sh` -PyOpenSSL==23.0.0 +pyopenssl==24.2.1 pygame==2.5.2 Pygments==2.18.0 pymongo==4.3.2 diff --git a/python/requirements_compiled.txt b/python/requirements_compiled.txt index 97216e3e74e2..9dd3caa3ecab 100644 --- a/python/requirements_compiled.txt +++ b/python/requirements_compiled.txt @@ -334,7 +334,7 @@ crc32c==2.3 # via -r /ray/ci/../python/requirements/ml/data-requirements.txt crcmod==1.7 # via gsutil -cryptography==38.0.1 +cryptography==42.0.5 # via # -r /ray/ci/../python/requirements/test-requirements.txt # adal @@ -1605,7 +1605,7 @@ pyopengl==3.1.7 # via # dm-control # mujoco -pyopenssl==23.0.0 +pyopenssl==24.2.1 # via # -r /ray/ci/../python/requirements.txt # -r /ray/ci/../python/requirements/anyscale-requirements.txt From 69862281fefb6b044b20cfc46ada629422b785f7 Mon Sep 17 00:00:00 2001 From: Kai-Hsun Chen Date: Tue, 19 Nov 2024 10:28:20 -0800 Subject: [PATCH 018/107] [CLI] Remove monkey-patching of `logging.Logger.makeRecord` (#48777) Signed-off-by: kaihsun --- python/ray/autoscaler/_private/cli_logger.py | 33 -------------------- python/ray/tests/test_cli_logger.py | 10 ++++++ 2 files changed, 10 insertions(+), 33 deletions(-) diff --git a/python/ray/autoscaler/_private/cli_logger.py b/python/ray/autoscaler/_private/cli_logger.py index 01083be23eff..5172891d3119 100644 --- a/python/ray/autoscaler/_private/cli_logger.py +++ b/python/ray/autoscaler/_private/cli_logger.py @@ -113,39 +113,6 @@ def __getattr__(self, name): colorama.init(strip=False) -def _patched_makeRecord( - self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None, sinfo=None -): - """Monkey-patched version of logging.Logger.makeRecord - We have to patch default loggers so they use the proper frame for - line numbers and function names (otherwise everything shows up as - e.g. cli_logger:info() instead of as where it was called from). 
- - In Python 3.8 we could just use stacklevel=2, but we have to support - Python 3.6 and 3.7 as well. - - The solution is this Python magic superhack. - - The default makeRecord will deliberately check that we don't override - any existing property on the LogRecord using `extra`, - so we remove that check. - - This patched version is otherwise identical to the one in the standard - library. - - TODO: Remove this magic superhack. Find a more responsible workaround. - """ - rv = logging._logRecordFactory( - name, level, fn, lno, msg, args, exc_info, func, sinfo - ) - if extra is not None: - rv.__dict__.update(extra) - return rv - - -logging.Logger.makeRecord = _patched_makeRecord - - def _external_caller_info(): """Get the info from the caller frame. diff --git a/python/ray/tests/test_cli_logger.py b/python/ray/tests/test_cli_logger.py index b164f5dc3725..bced27abee01 100644 --- a/python/ray/tests/test_cli_logger.py +++ b/python/ray/tests/test_cli_logger.py @@ -1,4 +1,6 @@ from ray.autoscaler._private import cli_logger +import io +from unittest.mock import patch import pytest @@ -14,6 +16,14 @@ def test_colorful_mock_random_function(): assert cm.bold("abc") == "abc" +def test_pathname(): + # Ensure that the `pathname` of the `LogRecord` points to the + # caller of `cli_logger`, not `cli_logger` itself. + with patch("sys.stdout", new=io.StringIO()) as mock_stdout: + cli_logger.cli_logger.info("123") + assert "test_cli_logger.py" in mock_stdout.getvalue() + + if __name__ == "__main__": import os import sys From d4476ace81f594286065a45bc40bbd8640c5d3eb Mon Sep 17 00:00:00 2001 From: Rui Qiao <161574667+ruisearch42@users.noreply.github.com> Date: Tue, 19 Nov 2024 10:49:20 -0800 Subject: [PATCH 019/107] [core][compiled graphs] Fix flaky test CI command (#48761) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Why are these changes needed? Seeing the following errors for ":ray: core: flaky gpu tests" target: ``` [2024-11-15T17:50:08Z] ________ test_torch_tensor_nccl_overlap_timed[ray_start_regular1-True] _________ --   | [2024-11-15T17:50:08Z]   | [2024-11-15T17:50:08Z] ray_start_regular = RayContext(dashboard_url='127.0.0.1:8265', python_version='3.9.20', ray_version='3.0.0.dev0', ray_commit='{{RAY_COMMIT_SHA}}')   | [2024-11-15T17:50:08Z] overlap_gpu_communication = True   | [2024-11-15T17:50:08Z]   | [2024-11-15T17:50:08Z] @pytest.mark.parametrize(   | [2024-11-15T17:50:08Z] "ray_start_regular, overlap_gpu_communication",   | [2024-11-15T17:50:08Z] [({"num_cpus": 4}, False), ({"num_cpus": 4}, True)],   | [2024-11-15T17:50:08Z] indirect=["ray_start_regular"],   | [2024-11-15T17:50:08Z] )   | [2024-11-15T17:50:08Z] def test_torch_tensor_nccl_overlap_timed(ray_start_regular, overlap_gpu_communication):   | [2024-11-15T17:50:08Z] if not USE_GPU:   | [2024-11-15T17:50:08Z] pytest.skip("NCCL tests require GPUs")   | [2024-11-15T17:50:08Z]   | [2024-11-15T17:50:08Z] > assert (   | [2024-11-15T17:50:08Z] sum(node["Resources"].get("GPU", 0) for node in ray.nodes()) >= 4   | [2024-11-15T17:50:08Z] ), "This test requires at least 4 GPUs"   | [2024-11-15T17:50:08Z] E AssertionError: This test requires at least 4 GPUs   | [2024-11-15T17:50:08Z] E assert 2.0 >= 4   | [2024-11-15T17:50:08Z] E + where 2.0 = sum(. at 0x7f6c8799e200>) ``` This PR makes the config consistent with ":ray: core: multi gpu tests". ## Related issue number ## Checks - [ ] I've signed off every commit(by using the -s flag, i.e., `git commit -s`) in this PR. 
- [ ] I've run `scripts/format.sh` to lint the changes in this PR. - [ ] I've included any doc changes needed for https://docs.ray.io/en/master/. - [ ] I've added any new APIs to the API Reference. For example, if I added a method in Tune, I've added it in `doc/source/tune/api/` under the corresponding `.rst` file. - [ ] I've made sure the tests are passing. Note that there might be a few flaky tests, see the recent failures at https://flakey-tests.ray.io/ - Testing Strategy - [ ] Unit tests - [ ] Release tests - [ ] This PR is not tested :( Signed-off-by: Rui Qiao --- .buildkite/core.rayci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/core.rayci.yml b/.buildkite/core.rayci.yml index 09005593c319..9c474bb66276 100644 --- a/.buildkite/core.rayci.yml +++ b/.buildkite/core.rayci.yml @@ -321,7 +321,7 @@ steps: commands: - bazel run //ci/ray_ci:test_in_docker -- //... core --run-flaky-tests --build-type clang - --parallelism-per-worker 2 --gpus 2 + --gpus 4 --build-name coregpubuild --only-tags multi_gpu depends_on: coregpubuild From b73cf25fef04405628e7b7309b40a9cd3022f1b2 Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Tue, 19 Nov 2024 11:09:36 -0800 Subject: [PATCH 020/107] [dashboard] add actor pid to dashboard (#48791) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add actor PID to ray dashboard, as requested by @rynewang Test: - CI - https://console.anyscale-staging.com/v2/cld_kvedZWag2qA8i5BjxUevf5i7/prj_qC3ZfndQWYYjx2cz8KWGNUL4/jobs/prodjob_t4ypxualxjk73pryekjrh433i6?job-logs-section-tabs=application_logs&job-tab=ray-dashboard Screenshot 2024-11-19 at 10 08 25 AM --------- Signed-off-by: can Signed-off-by: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Co-authored-by: Alan Guo --- .../ray/dashboard/client/src/pages/actor/ActorDetail.tsx | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/python/ray/dashboard/client/src/pages/actor/ActorDetail.tsx b/python/ray/dashboard/client/src/pages/actor/ActorDetail.tsx index 60e8fc8ec8ac..22cb1e030351 100644 --- a/python/ray/dashboard/client/src/pages/actor/ActorDetail.tsx +++ b/python/ray/dashboard/client/src/pages/actor/ActorDetail.tsx @@ -133,6 +133,15 @@ const ActorDetailPage = () => { } : { value: "-" }, }, + { + label: "PID", + content: actorDetail.pid + ? { + value: `${actorDetail.pid}`, + copyableValue: `${actorDetail.pid}`, + } + : { value: "-" }, + }, { label: "Started at", content: { From c4c5a12c2a7842a639dffd9b9533e570c76373d2 Mon Sep 17 00:00:00 2001 From: Rui Qiao <161574667+ruisearch42@users.noreply.github.com> Date: Tue, 19 Nov 2024 13:01:41 -0800 Subject: [PATCH 021/107] [core][compiled graphs] Fix test_accelerated_dag.py (#48795) The test is flaky: test_dag_exception_multi_output segfaults. I noticed the teardown() of test_dag_exception_chained does not finish unlike other tests: --- python/ray/dag/tests/experimental/test_accelerated_dag.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/python/ray/dag/tests/experimental/test_accelerated_dag.py b/python/ray/dag/tests/experimental/test_accelerated_dag.py index d6176fd57dc6..cbec80a871c4 100644 --- a/python/ray/dag/tests/experimental/test_accelerated_dag.py +++ b/python/ray/dag/tests/experimental/test_accelerated_dag.py @@ -1078,6 +1078,12 @@ def test_dag_exception_chained(ray_start_regular, capsys): # Can use the DAG after exceptions are thrown. 
assert ray.get(compiled_dag.execute(1)) == 2 + # Note: somehow the auto triggered teardown() from ray.shutdown() + # does not finish in time for this test, leading to a segfault + # of the following test (likely due to a dangling monitor thread + # upon the new Ray init). + compiled_dag.teardown() + @pytest.mark.parametrize("single_fetch", [True, False]) def test_dag_exception_multi_output(ray_start_regular, single_fetch, capsys): From dbd9c95bb57197f3f7ab6416b4c86ceb55a99d83 Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Tue, 19 Nov 2024 14:43:11 -0800 Subject: [PATCH 022/107] [ci] fix misc group dependency (#48785) so that dependency recompiling can work without the dependency on `oss-ci-base_build`, which requires a valid, working dependency constraint file. Signed-off-by: Lonnie Liu --- .buildkite/others.rayci.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/.buildkite/others.rayci.yml b/.buildkite/others.rayci.yml index 16508b6f7864..790266fbf4d2 100644 --- a/.buildkite/others.rayci.yml +++ b/.buildkite/others.rayci.yml @@ -1,12 +1,8 @@ group: others depends_on: - forge - - oss-ci-base_build steps: - #build - - name: doctestbuild - wanda: ci/docker/doctest.build.wanda.yaml - + # dependencies - label: ":tapioca: build: pip-compile dependencies" key: pip_compile_dependencies instance_type: small @@ -19,10 +15,13 @@ steps: - cp -f ./python/requirements_compiled.txt /artifact-mount/ soft_fail: true job_env: oss-ci-base_test-py3.11 - depends_on: - - oss-ci-base_test-multipy + depends_on: oss-ci-base_test-multipy + + # docs + - name: doctestbuild + wanda: ci/docker/doctest.build.wanda.yaml + depends_on: oss-ci-base_build - # test - label: doc tests instance_type: large commands: @@ -40,6 +39,7 @@ steps: --skip-ray-installation depends_on: doctestbuild + # java - label: ":java: java tests" tags: java instance_type: medium @@ -48,7 +48,7 @@ steps: - docker run -i --rm --volume /tmp/artifacts:/artifact-mount --shm-size=2.5gb "$${RAYCI_WORK_REPO}":"$${RAYCI_BUILD_ID}"-corebuild /bin/bash -iecuo pipefail "./java/test.sh" - depends_on: [ "corebuild", "forge" ] + depends_on: corebuild # bot - label: ":robot_face: CI weekly green metric" From c6d26fd987b4f89e93341878b66d15cb94e7806a Mon Sep 17 00:00:00 2001 From: Josh Karpel Date: Tue, 19 Nov 2024 17:22:33 -0600 Subject: [PATCH 023/107] [Serve] Handle multiple changed objects per `LongPollHost.listen_for_change` RPC (#48803) ## Why are these changes needed? Currently, in the `LongPollHost`/`LongPollClient`, if multiple objects are updated that a `listen_for_change` request is waiting for *before the async task in the host can run again*, only one of those updated objects will be returned. This is inefficient because the `LongPollClient` will immediately do a `listen_for_change` RPC again, and that will see outdated snapshot IDs for the updates that weren't returned and get all of the missed updates. This is because of an asymmetry between https://github.com/ray-project/ray/blob/b75cb793e437aa617d61dcb13e5f5d2fcc83ee68/python/ray/serve/_private/long_poll.py#L252-L272 , which looks for *all* outdated keys, and https://github.com/ray-project/ray/blob/b75cb793e437aa617d61dcb13e5f5d2fcc83ee68/python/ray/serve/_private/long_poll.py#L309 , which only looks at a single complete `Event`, even if multiple events completed during the [`wait`](https://github.com/ray-project/ray/blob/b75cb793e437aa617d61dcb13e5f5d2fcc83ee68/python/ray/serve/_private/long_poll.py#L289-L293). 
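The host-side fix is to build the response from *every* completed wait task instead of popping just one. A simplified, self-contained sketch of that pattern (hypothetical helper, not the actual Serve host code):

```python
import asyncio


async def listen_for_all_changes(events: dict) -> set:
    # One wait task per watched key; report *all* keys whose events
    # completed during the wait, not just a single popped one.
    tasks = {asyncio.create_task(ev.wait()): key for key, ev in events.items()}
    done, pending = await asyncio.wait(
        tasks.keys(), return_when=asyncio.FIRST_COMPLETED
    )
    for task in pending:
        task.cancel()
    return {tasks[task] for task in done}


async def main():
    events = {"key_1": asyncio.Event(), "key_2": asyncio.Event()}
    events["key_1"].set()
    events["key_2"].set()
    print(sorted(await listen_for_all_changes(events)))  # ['key_1', 'key_2']


asyncio.run(main())
```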
To prove that the `wait` can indeed see multiple completed `Event`s, see this example: ```python from asyncio import wait, Event, run, create_task, FIRST_COMPLETED async def main(): a = Event() b = Event() wait_for_a = create_task(a.wait()) wait_for_b = create_task(b.wait()) a.set() b.set() done, pending = await wait([wait_for_a, wait_for_b], return_when=FIRST_COMPLETED) print(f"{len(done)=}") print(f"{len(pending)=}") run(main()) # len(done)=2 # len(pending)=0 ``` Generally this won't be a big issue because most `listen_for_change` requests in the current Serve setup are asking for a very small number of keys and are likely to only get one key update anyway. But, as I've been discussing with @edoakes and @zcin on Slack, I'd like to group up the `DeploymentHandle` `listen_for_change` RPCs under a single `LongPollClient`, which will be requesting many keys and is therefore more likely to hit this situation. To complement this change, I also changed `LongPollHost.notify_changed` so that it takes multiple updates at the same time. ## Related issue number ## Checks - [x] I've signed off every commit(by using the -s flag, i.e., `git commit -s`) in this PR. - [x] I've run `scripts/format.sh` to lint the changes in this PR. - [ ] I've included any doc changes needed for https://docs.ray.io/en/master/. - [ ] I've added any new APIs to the API Reference. For example, if I added a method in Tune, I've added it in `doc/source/tune/api/` under the corresponding `.rst` file. - [x] I've made sure the tests are passing. Note that there might be a few flaky tests, see the recent failures at https://flakey-tests.ray.io/ - Testing Strategy - [x] Unit tests - [ ] Release tests - [ ] This PR is not tested :( --------- Signed-off-by: Josh Karpel --- python/ray/serve/_private/controller.py | 3 +- python/ray/serve/_private/deployment_state.py | 24 ++++----- python/ray/serve/_private/endpoint_state.py | 2 +- python/ray/serve/_private/long_poll.py | 54 ++++++++++--------- python/ray/serve/tests/test_long_poll.py | 34 ++++++------ python/ray/serve/tests/test_metrics.py | 6 +-- 6 files changed, 62 insertions(+), 61 deletions(-) diff --git a/python/ray/serve/_private/controller.py b/python/ray/serve/_private/controller.py index 8eff4c80315a..4aa6906b241f 100644 --- a/python/ray/serve/_private/controller.py +++ b/python/ray/serve/_private/controller.py @@ -226,8 +226,7 @@ def reconfigure_global_logging_config(self, global_logging_config: LoggingConfig self.global_logging_config = global_logging_config self.long_poll_host.notify_changed( - LongPollNamespace.GLOBAL_LOGGING_CONFIG, - global_logging_config, + {LongPollNamespace.GLOBAL_LOGGING_CONFIG: global_logging_config} ) configure_component_logger( component_name="controller", diff --git a/python/ray/serve/_private/deployment_state.py b/python/ray/serve/_private/deployment_state.py index ca0fb2d446c6..562fd62f62c6 100644 --- a/python/ray/serve/_private/deployment_state.py +++ b/python/ray/serve/_private/deployment_state.py @@ -1448,16 +1448,17 @@ def broadcast_running_replicas_if_changed(self) -> None: return self._long_poll_host.notify_changed( - (LongPollNamespace.RUNNING_REPLICAS, self._id), - running_replica_infos, - ) - # NOTE(zcin): notify changed for Java routers. Since Java only - # supports 1.x API, there is no concept of applications in Java, - # so the key should remain a string describing the deployment - # name. If there are no Java routers, this is a no-op. 
- self._long_poll_host.notify_changed( - (LongPollNamespace.RUNNING_REPLICAS, self._id.name), - running_replica_infos, + { + (LongPollNamespace.RUNNING_REPLICAS, self._id): running_replica_infos, + # NOTE(zcin): notify changed for Java routers. Since Java only + # supports 1.x API, there is no concept of applications in Java, + # so the key should remain a string describing the deployment + # name. If there are no Java routers, this is a no-op. + ( + LongPollNamespace.RUNNING_REPLICAS, + self._id.name, + ): running_replica_infos, + } ) self._last_broadcasted_running_replica_infos = running_replica_infos self._multiplexed_model_ids_updated = False @@ -1473,8 +1474,7 @@ def broadcast_deployment_config_if_changed(self) -> None: return self._long_poll_host.notify_changed( - (LongPollNamespace.DEPLOYMENT_CONFIG, self._id), - current_deployment_config, + {(LongPollNamespace.DEPLOYMENT_CONFIG, self._id): current_deployment_config} ) self._last_broadcasted_deployment_config = current_deployment_config diff --git a/python/ray/serve/_private/endpoint_state.py b/python/ray/serve/_private/endpoint_state.py index abc4c0615ad6..fd2074fd6669 100644 --- a/python/ray/serve/_private/endpoint_state.py +++ b/python/ray/serve/_private/endpoint_state.py @@ -46,7 +46,7 @@ def _checkpoint(self): def _notify_route_table_changed(self): self._long_poll_host.notify_changed( - LongPollNamespace.ROUTE_TABLE, self._endpoints + {LongPollNamespace.ROUTE_TABLE: self._endpoints} ) def _get_endpoint_for_route(self, route: str) -> Optional[DeploymentID]: diff --git a/python/ray/serve/_private/long_poll.py b/python/ray/serve/_private/long_poll.py index f3538913b76b..d6fb52e72310 100644 --- a/python/ray/serve/_private/long_poll.py +++ b/python/ray/serve/_private/long_poll.py @@ -4,6 +4,7 @@ import random from asyncio.events import AbstractEventLoop from collections import defaultdict +from collections.abc import Mapping from dataclasses import dataclass from enum import Enum, auto from typing import Any, Callable, DefaultDict, Dict, Optional, Set, Tuple, Union @@ -179,12 +180,12 @@ class LongPollHost: The desired use case is to embed this in an Ray actor. Client will be expected to call actor.listen_for_change.remote(...). On the host side, - you can call host.notify_changed(key, object) to update the state and + you can call host.notify_changed({key: object}) to update the state and potentially notify whoever is polling for these values. Internally, we use snapshot_ids for each object to identify client with outdated object and immediately return the result. If the client has the - up-to-date verison, then the listen_for_change call will only return when + up-to-date version, then the listen_for_change call will only return when the object is updated. 
""" @@ -306,15 +307,15 @@ async def listen_for_change( self._count_send(LongPollState.TIME_OUT) return LongPollState.TIME_OUT else: - updated_object_key: str = async_task_to_watched_keys[done.pop()] - updated_object = { - updated_object_key: UpdatedObject( + updated_objects = {} + for task in done: + updated_object_key = async_task_to_watched_keys[task] + updated_objects[updated_object_key] = UpdatedObject( self.object_snapshots[updated_object_key], self.snapshot_ids[updated_object_key], ) - } - self._count_send(updated_object) - return updated_object + self._count_send(updated_objects) + return updated_objects async def listen_for_change_java( self, @@ -403,21 +404,22 @@ def _listen_result_to_proto_bytes( proto = LongPollResult(**data) return proto.SerializeToString() - def notify_changed( - self, - object_key: KeyType, - updated_object: Any, - ): - try: - self.snapshot_ids[object_key] += 1 - except KeyError: - # Initial snapshot id must be >= 0, so that the long poll client - # can send a negative initial snapshot id to get a fast update. - # They should also be randomized; - # see https://github.com/ray-project/ray/pull/45881#discussion_r1645243485 - self.snapshot_ids[object_key] = random.randint(0, 1_000_000) - self.object_snapshots[object_key] = updated_object - logger.debug(f"LongPollHost: Notify change for key {object_key}.") - - for event in self.notifier_events.pop(object_key, set()): - event.set() + def notify_changed(self, updates: Mapping[KeyType, Any]) -> None: + """ + Update the current snapshot of some objects + and notify any long poll clients. + """ + for object_key, updated_object in updates.items(): + try: + self.snapshot_ids[object_key] += 1 + except KeyError: + # Initial snapshot id must be >= 0, so that the long poll client + # can send a negative initial snapshot id to get a fast update. + # They should also be randomized; see + # https://github.com/ray-project/ray/pull/45881#discussion_r1645243485 + self.snapshot_ids[object_key] = random.randint(0, 1_000_000) + self.object_snapshots[object_key] = updated_object + logger.debug(f"LongPollHost: Notify change for key {object_key}.") + + for event in self.notifier_events.pop(object_key, set()): + event.set() diff --git a/python/ray/serve/tests/test_long_poll.py b/python/ray/serve/tests/test_long_poll.py index 86bf03880e33..2ba31d414e05 100644 --- a/python/ray/serve/tests/test_long_poll.py +++ b/python/ray/serve/tests/test_long_poll.py @@ -38,7 +38,7 @@ def test_notifier_events_cleared_without_update(serve_instance): host = ray.remote(LongPollHost).remote( listen_for_change_request_timeout_s=(0.1, 0.1) ) - ray.get(host.notify_changed.remote("key_1", 999)) + ray.get(host.notify_changed.remote({"key_1": 999})) # Get an initial object snapshot for the key. 
object_ref = host.listen_for_change.remote({"key_1": -1}) @@ -60,8 +60,8 @@ def test_host_standalone(serve_instance): host = ray.remote(LongPollHost).remote() # Write two values - ray.get(host.notify_changed.remote("key_1", 999)) - ray.get(host.notify_changed.remote("key_2", 999)) + ray.get(host.notify_changed.remote({"key_1": 999})) + ray.get(host.notify_changed.remote({"key_2": 999})) object_ref = host.listen_for_change.remote({"key_1": -1, "key_2": -1}) # We should be able to get the result immediately @@ -77,7 +77,7 @@ def test_host_standalone(serve_instance): assert len(not_done) == 1 # Now update the value, we should immediately get updated value - ray.get(host.notify_changed.remote("key_2", 999)) + ray.get(host.notify_changed.remote({"key_2": 999})) result = ray.get(object_ref) assert len(result) == 1 assert "key_2" in result @@ -88,13 +88,13 @@ def test_long_poll_wait_for_keys(serve_instance): # are set. host = ray.remote(LongPollHost).remote() object_ref = host.listen_for_change.remote({"key_1": -1, "key_2": -1}) - ray.get(host.notify_changed.remote("key_1", 999)) - ray.get(host.notify_changed.remote("key_2", 999)) - # We should be able to get the one of the result immediately + ray.get(host.notify_changed.remote({"key_1": 123, "key_2": 456})) + + # We should be able to get the both results immediately result: Dict[str, UpdatedObject] = ray.get(object_ref) - assert set(result.keys()).issubset({"key_1", "key_2"}) - assert {v.object_snapshot for v in result.values()} == {999} + assert result.keys() == {"key_1", "key_2"} + assert {v.object_snapshot for v in result.values()} == {123, 456} def test_long_poll_restarts(serve_instance): @@ -106,7 +106,7 @@ class RestartableLongPollHost: def __init__(self) -> None: print("actor started") self.host = LongPollHost() - self.host.notify_changed("timer", time.time()) + self.host.notify_changed({"timer": time.time()}) self.should_exit = False async def listen_for_change(self, key_to_ids): @@ -142,8 +142,8 @@ async def test_client_callbacks(serve_instance): host = ray.remote(LongPollHost).remote() # Write two values - ray.get(host.notify_changed.remote("key_1", 100)) - ray.get(host.notify_changed.remote("key_2", 999)) + ray.get(host.notify_changed.remote({"key_1": 100})) + ray.get(host.notify_changed.remote({"key_2": 999})) callback_results = dict() @@ -167,7 +167,7 @@ def key_2_callback(result): timeout=1, ) - ray.get(host.notify_changed.remote("key_2", 1999)) + ray.get(host.notify_changed.remote({"key_2": 1999})) await async_wait_for_condition( lambda: callback_results == {"key_1": 100, "key_2": 999}, @@ -178,7 +178,7 @@ def key_2_callback(result): @pytest.mark.asyncio async def test_client_threadsafe(serve_instance): host = ray.remote(LongPollHost).remote() - ray.get(host.notify_changed.remote("key_1", 100)) + ray.get(host.notify_changed.remote({"key_1": 100})) e = asyncio.Event() @@ -198,7 +198,7 @@ def key_1_callback(_): def test_listen_for_change_java(serve_instance): host = ray.remote(LongPollHost).remote() - ray.get(host.notify_changed.remote("key_1", 999)) + ray.get(host.notify_changed.remote({"key_1": 999})) request_1 = {"keys_to_snapshot_ids": {"key_1": -1}} object_ref = host.listen_for_change_java.remote( LongPollRequest(**request_1).SerializeToString() @@ -211,7 +211,7 @@ def test_listen_for_change_java(serve_instance): endpoints: Dict[DeploymentID, EndpointInfo] = dict() endpoints["deployment_name"] = EndpointInfo(route="/test/xlang/poll") endpoints["deployment_name1"] = EndpointInfo(route="/test/xlang/poll1") - 
ray.get(host.notify_changed.remote(LongPollNamespace.ROUTE_TABLE, endpoints)) + ray.get(host.notify_changed.remote({LongPollNamespace.ROUTE_TABLE: endpoints})) object_ref_2 = host.listen_for_change_java.remote( LongPollRequest(**request_2).SerializeToString() ) @@ -240,7 +240,7 @@ def test_listen_for_change_java(serve_instance): ] ray.get( host.notify_changed.remote( - (LongPollNamespace.RUNNING_REPLICAS, "deployment_name"), replicas + {(LongPollNamespace.RUNNING_REPLICAS, "deployment_name"): replicas} ) ) object_ref_3 = host.listen_for_change_java.remote( diff --git a/python/ray/serve/tests/test_metrics.py b/python/ray/serve/tests/test_metrics.py index f93e37661394..6b3e674e7907 100644 --- a/python/ray/serve/tests/test_metrics.py +++ b/python/ray/serve/tests/test_metrics.py @@ -1581,7 +1581,7 @@ def test_long_poll_host_sends_counted(serve_instance): ) # Write a value. - ray.get(host.notify_changed.remote("key_1", 999)) + ray.get(host.notify_changed.remote({"key_1": 999})) object_ref = host.listen_for_change.remote({"key_1": -1}) # Check that the result's size is reported. @@ -1595,8 +1595,8 @@ def test_long_poll_host_sends_counted(serve_instance): ) # Write two new values. - ray.get(host.notify_changed.remote("key_1", 1000)) - ray.get(host.notify_changed.remote("key_2", 1000)) + ray.get(host.notify_changed.remote({"key_1": 1000})) + ray.get(host.notify_changed.remote({"key_2": 1000})) object_ref = host.listen_for_change.remote( {"key_1": result_1["key_1"].snapshot_id, "key_2": -1} ) From d50104cd535d8c2d368a766c4ba63de12279eb3d Mon Sep 17 00:00:00 2001 From: dentiny Date: Tue, 19 Nov 2024 15:56:31 -0800 Subject: [PATCH 024/107] [core] Performance improvement for runtime env serialization (#48749) Signed-off-by: dentiny --- python/ray/_private/utils.py | 3 +- python/ray/remote_function.py | 59 ++++++++++++++++++++++++++-------- python/ray/tests/test_basic.py | 4 +-- src/ray/raylet/raylet.h | 8 ++--- 4 files changed, 52 insertions(+), 22 deletions(-) diff --git a/python/ray/_private/utils.py b/python/ray/_private/utils.py index 0eb5bf09c997..bd23131bebdf 100644 --- a/python/ray/_private/utils.py +++ b/python/ray/_private/utils.py @@ -1608,7 +1608,8 @@ def get_runtime_env_info( In the user interface, the argument `runtime_env` contains some fields which not contained in `ProtoRuntimeEnv` but in `ProtoRuntimeEnvInfo`, such as `eager_install`. This function will extract those fields from - `RuntimeEnv` and create a new `ProtoRuntimeEnvInfo`, and serialize it. + `RuntimeEnv` and create a new `ProtoRuntimeEnvInfo`, and serialize it + into json format. """ from ray.runtime_env import RuntimeEnvConfig diff --git a/python/ray/remote_function.py b/python/ray/remote_function.py index 311ba1a5f8c2..b44eae3d84ce 100644 --- a/python/ray/remote_function.py +++ b/python/ray/remote_function.py @@ -4,6 +4,7 @@ import uuid from functools import wraps from threading import Lock +from typing import Optional import ray._private.signature from ray import Language, cross_language @@ -120,6 +121,22 @@ def __init__( if "runtime_env" in self._default_options: self._default_options["runtime_env"] = self._runtime_env + # Pre-calculate runtime env info, to avoid re-calculation at `remote` + # invocation. When `remote` call has specified extra `option` field, + # runtime env will be overwritten and re-serialized. + # + # Caveat: To support dynamic runtime envs in + # `func.option(runtime_env={...}).remote()`, we recalculate the serialized + # runtime env info in the `option` call. 
But it's acceptable since + # pre-calculation here only happens once at `RemoteFunction` initialization. + self._serialized_base_runtime_env_info = "" + if self._runtime_env: + self._serialized_base_runtime_env_info = get_runtime_env_info( + self._runtime_env, + is_job_runtime_env=False, + serialize=True, + ) + self._language = language self._is_generator = inspect.isgeneratorfunction(function) self._function = function @@ -136,7 +153,12 @@ def __init__( # Override task.remote's signature and docstring @wraps(function) def _remote_proxy(*args, **kwargs): - return self._remote(args=args, kwargs=kwargs, **self._default_options) + return self._remote( + serialized_runtime_env_info=self._serialized_base_runtime_env_info, + args=args, + kwargs=kwargs, + **self._default_options, + ) self.remote = _remote_proxy @@ -240,15 +262,29 @@ def f(): updated_options = ray_option_utils.update_options(default_options, task_options) ray_option_utils.validate_task_options(updated_options, in_options=True) - # only update runtime_env when ".options()" specifies new runtime_env + # Only update runtime_env and re-calculate serialized runtime env info when + # ".options()" specifies new runtime_env. + serialized_runtime_env_info = self._serialized_base_runtime_env_info if "runtime_env" in task_options: updated_options["runtime_env"] = parse_runtime_env( updated_options["runtime_env"] ) + # Re-calculate runtime env info based on updated runtime env. + if updated_options["runtime_env"]: + serialized_runtime_env_info = get_runtime_env_info( + updated_options["runtime_env"], + is_job_runtime_env=False, + serialize=True, + ) class FuncWrapper: def remote(self, *args, **kwargs): - return func_cls._remote(args=args, kwargs=kwargs, **updated_options) + return func_cls._remote( + args=args, + kwargs=kwargs, + serialized_runtime_env_info=serialized_runtime_env_info, + **updated_options, + ) @DeveloperAPI def bind(self, *args, **kwargs): @@ -264,7 +300,13 @@ class or functions. @wrap_auto_init @_tracing_task_invocation - def _remote(self, args=None, kwargs=None, **task_options): + def _remote( + self, + args=None, + kwargs=None, + serialized_runtime_env_info: Optional[str] = None, + **task_options, + ): """Submit the remote function for execution.""" # We pop the "max_calls" coming from "@ray.remote" here. We no longer need # it in "_remote()". @@ -330,7 +372,6 @@ def _remote(self, args=None, kwargs=None, **task_options): # TODO(suquark): cleanup these fields name = task_options["name"] - runtime_env = parse_runtime_env(task_options["runtime_env"]) placement_group = task_options["placement_group"] placement_group_bundle_index = task_options["placement_group_bundle_index"] placement_group_capture_child_tasks = task_options[ @@ -405,14 +446,6 @@ def _remote(self, args=None, kwargs=None, **task_options): else: scheduling_strategy = "DEFAULT" - serialized_runtime_env_info = None - if runtime_env is not None: - serialized_runtime_env_info = get_runtime_env_info( - runtime_env, - is_job_runtime_env=False, - serialize=True, - ) - if _task_launch_hook: _task_launch_hook(self._function_descriptor, resources, scheduling_strategy) diff --git a/python/ray/tests/test_basic.py b/python/ray/tests/test_basic.py index 001f38393390..55b6ec73f21d 100644 --- a/python/ray/tests/test_basic.py +++ b/python/ray/tests/test_basic.py @@ -581,7 +581,7 @@ def foo(): # TODO(suquark): The current implementation of `.options()` is so bad that we # cannot even access its options from outside. Here we hack the closures to # achieve our goal. 
Need futher efforts to clean up the tech debt. - assert f2.remote.__closure__[1].cell_contents == { + assert f2.remote.__closure__[2].cell_contents == { "_metadata": {"namespace": {"a": 11, "b": 2, "c": 3}}, "num_cpus": 1, "num_gpus": 1, @@ -593,7 +593,7 @@ def __init__(self, **options): f3 = foo.options(num_cpus=1, num_gpus=1, **mock_options2(a=11, c=3)) - assert f3.remote.__closure__[1].cell_contents == { + assert f3.remote.__closure__[2].cell_contents == { "_metadata": {"namespace": {"a": 1, "b": 2}, "namespace2": {"a": 11, "c": 3}}, "num_cpus": 1, "num_gpus": 1, diff --git a/src/ray/raylet/raylet.h b/src/ray/raylet/raylet.h index c220e2bc2fab..619fcd7b69ec 100644 --- a/src/ray/raylet/raylet.h +++ b/src/ray/raylet/raylet.h @@ -23,9 +23,7 @@ #include "ray/common/asio/instrumented_io_context.h" // clang-format on -namespace ray { - -namespace raylet { +namespace ray::raylet { using rpc::GcsNodeInfo; using rpc::NodeSnapshot; @@ -109,6 +107,4 @@ class Raylet { local_stream_socket socket_; }; -} // namespace raylet - -} // namespace ray +} // namespace ray::raylet From 03cba6bfb1f0fc5727c190754340590653506983 Mon Sep 17 00:00:00 2001 From: dentiny Date: Tue, 19 Nov 2024 21:22:00 -0800 Subject: [PATCH 025/107] [core] C++ improvement for `Task` class (#48721) Signed-off-by: dentiny --- src/ray/common/BUILD | 1 + src/ray/common/task/task.cc | 6 ++---- src/ray/common/task/task.h | 6 ++---- src/ray/common/task/task_spec.cc | 8 ++++---- src/ray/common/task/task_spec.h | 6 +++--- src/ray/raylet/local_task_manager.cc | 28 ++++++++++++++-------------- 6 files changed, 26 insertions(+), 29 deletions(-) diff --git a/src/ray/common/BUILD b/src/ray/common/BUILD index 416dd9659983..563c53007ecc 100644 --- a/src/ray/common/BUILD +++ b/src/ray/common/BUILD @@ -181,6 +181,7 @@ ray_cc_library( "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:flat_hash_set", "@com_google_absl//absl/strings", + "@com_google_absl//absl/strings:str_format", "@com_google_absl//absl/synchronization", ], ) diff --git a/src/ray/common/task/task.cc b/src/ray/common/task/task.cc index e2ac8571c4e5..812c0598cc35 100644 --- a/src/ray/common/task/task.cc +++ b/src/ray/common/task/task.cc @@ -14,7 +14,7 @@ #include "ray/common/task/task.h" -#include +#include "absl/strings/str_format.h" namespace ray { @@ -42,9 +42,7 @@ const std::string &RayTask::GetPreferredNodeID() const { return preferred_node_i void RayTask::ComputeDependencies() { dependencies_ = task_spec_.GetDependencies(); } std::string RayTask::DebugString() const { - std::ostringstream stream; - stream << "task_spec={" << task_spec_.DebugString() << "}"; - return stream.str(); + return absl::StrFormat("task_spec={%s}", task_spec_.DebugString()); } } // namespace ray diff --git a/src/ray/common/task/task.h b/src/ray/common/task/task.h index 5a4a9e323de5..52165665da2c 100644 --- a/src/ray/common/task/task.h +++ b/src/ray/common/task/task.h @@ -21,8 +21,6 @@ namespace ray { -typedef std::function CancelTaskCallback; - /// \class RayTask /// /// A RayTask represents a Ray task and a specification of its execution (e.g., @@ -33,7 +31,7 @@ class RayTask { public: /// Construct an empty task. This should only be used to pass a task /// as an out parameter to a function or method. - RayTask() {} + RayTask() = default; /// Construct a `RayTask` object from a protobuf message. /// @@ -41,7 +39,7 @@ class RayTask { explicit RayTask(const rpc::Task &message); /// Construct a `RayTask` object from a `TaskSpecification`. 
- RayTask(TaskSpecification task_spec); + explicit RayTask(TaskSpecification task_spec); RayTask(TaskSpecification task_spec, std::string preferred_node_id); diff --git a/src/ray/common/task/task_spec.cc b/src/ray/common/task/task_spec.cc index a1b3c04f80b3..6ffc13f5a8e6 100644 --- a/src/ray/common/task/task_spec.cc +++ b/src/ray/common/task/task_spec.cc @@ -181,15 +181,15 @@ ray::FunctionDescriptor TaskSpecification::FunctionDescriptor() const { return ray::FunctionDescriptorBuilder::FromProto(message_->function_descriptor()); } -rpc::RuntimeEnvInfo TaskSpecification::RuntimeEnvInfo() const { +const rpc::RuntimeEnvInfo &TaskSpecification::RuntimeEnvInfo() const { return message_->runtime_env_info(); } -std::string TaskSpecification::SerializedRuntimeEnv() const { +const std::string &TaskSpecification::SerializedRuntimeEnv() const { return message_->runtime_env_info().serialized_runtime_env(); } -rpc::RuntimeEnvConfig TaskSpecification::RuntimeEnvConfig() const { +const rpc::RuntimeEnvConfig &TaskSpecification::RuntimeEnvConfig() const { return message_->runtime_env_info().runtime_env_config(); } @@ -208,7 +208,7 @@ int TaskSpecification::GetRuntimeEnvHash() const { return runtime_env_hash_; } const SchedulingClass TaskSpecification::GetSchedulingClass() const { if (!IsActorTask()) { // Actor task doesn't have scheudling id, so we don't need to check this. - RAY_CHECK(sched_cls_id_ > 0); + RAY_CHECK_GT(sched_cls_id_, 0); } return sched_cls_id_; } diff --git a/src/ray/common/task/task_spec.h b/src/ray/common/task/task_spec.h index 019835062d31..ac4a38c92cee 100644 --- a/src/ray/common/task/task_spec.h +++ b/src/ray/common/task/task_spec.h @@ -306,11 +306,11 @@ class TaskSpecification : public MessageWrapper { ray::FunctionDescriptor FunctionDescriptor() const; - [[nodiscard]] rpc::RuntimeEnvInfo RuntimeEnvInfo() const; + [[nodiscard]] const rpc::RuntimeEnvInfo &RuntimeEnvInfo() const; - std::string SerializedRuntimeEnv() const; + const std::string &SerializedRuntimeEnv() const; - rpc::RuntimeEnvConfig RuntimeEnvConfig() const; + const rpc::RuntimeEnvConfig &RuntimeEnvConfig() const; bool HasRuntimeEnv() const; diff --git a/src/ray/raylet/local_task_manager.cc b/src/ray/raylet/local_task_manager.cc index f53fca5a365a..aae37371e29f 100644 --- a/src/ray/raylet/local_task_manager.cc +++ b/src/ray/raylet/local_task_manager.cc @@ -252,7 +252,7 @@ void LocalTaskManager::DispatchScheduledTasksToWorkers() { for (auto work_it = dispatch_queue.begin(); work_it != dispatch_queue.end();) { auto &work = *work_it; const auto &task = work->task; - const auto spec = task.GetTaskSpecification(); + const auto &spec = task.GetTaskSpecification(); TaskID task_id = spec.TaskId(); if (work->GetState() == internal::WorkStatus::WAITING_FOR_WORKER) { work_it++; @@ -436,7 +436,8 @@ void LocalTaskManager::SpillWaitingTasks() { while (it != waiting_task_queue_.begin()) { it--; const auto &task = (*it)->task; - const auto &task_id = task.GetTaskSpecification().TaskId(); + const auto &spec = task.GetTaskSpecification(); + const auto &task_id = spec.TaskId(); // Check whether this task's dependencies are blocked (not being actively // pulled). If this is true, then we should force the task onto a remote @@ -452,9 +453,9 @@ void LocalTaskManager::SpillWaitingTasks() { // object store memory availability. Ideally, we should pick the node with // the most memory availability. 
scheduling::NodeID scheduling_node_id; - if (!task.GetTaskSpecification().IsSpreadSchedulingStrategy()) { + if (!spec.IsSpreadSchedulingStrategy()) { scheduling_node_id = cluster_resource_scheduler_->GetBestSchedulableNode( - task.GetTaskSpecification(), + spec, /*preferred_node_id*/ self_node_id_.Binary(), /*exclude_local_node*/ task_dependencies_blocked, /*requires_object_store_memory*/ true, @@ -470,9 +471,8 @@ void LocalTaskManager::SpillWaitingTasks() { scheduling_node_id.Binary() != self_node_id_.Binary()) { NodeID node_id = NodeID::FromBinary(scheduling_node_id.Binary()); Spillback(node_id, *it); - if (!task.GetTaskSpecification().GetDependencies().empty()) { - task_dependency_manager_.RemoveTaskDependencies( - task.GetTaskSpecification().TaskId()); + if (!spec.GetDependencies().empty()) { + task_dependency_manager_.RemoveTaskDependencies(spec.TaskId()); } num_waiting_task_spilled_++; waiting_tasks_index_.erase(task_id); @@ -495,14 +495,15 @@ void LocalTaskManager::SpillWaitingTasks() { bool LocalTaskManager::TrySpillback(const std::shared_ptr &work, bool &is_infeasible) { + const auto &spec = work->task.GetTaskSpecification(); auto scheduling_node_id = cluster_resource_scheduler_->GetBestSchedulableNode( - work->task.GetTaskSpecification(), + spec, // We should prefer to stay local if possible // to avoid unnecessary spillback // since this node is already selected by the cluster scheduler. - /*preferred_node_id*/ self_node_id_.Binary(), - /*exclude_local_node*/ false, - /*requires_object_store_memory*/ false, + /*preferred_node_id=*/self_node_id_.Binary(), + /*exclude_local_node=*/false, + /*requires_object_store_memory=*/false, &is_infeasible); if (is_infeasible || scheduling_node_id.IsNil() || @@ -513,9 +514,8 @@ bool LocalTaskManager::TrySpillback(const std::shared_ptr &work, NodeID node_id = NodeID::FromBinary(scheduling_node_id.Binary()); Spillback(node_id, work); num_unschedulable_task_spilled_++; - if (!work->task.GetTaskSpecification().GetDependencies().empty()) { - task_dependency_manager_.RemoveTaskDependencies( - work->task.GetTaskSpecification().TaskId()); + if (!spec.GetDependencies().empty()) { + task_dependency_manager_.RemoveTaskDependencies(spec.TaskId()); } return true; } From c455472f67e5235b88f9186e32d768db9a4684aa Mon Sep 17 00:00:00 2001 From: Sumanth R Hegde <39546518+SumanthRH@users.noreply.github.com> Date: Tue, 19 Nov 2024 22:22:03 -0800 Subject: [PATCH 026/107] [Core][Telemetry] Fix `total_num_cpus` and `total_num_gpus` with `autoscaler_v2` (#48814) Signed-off-by: SumanthRH --- python/ray/_private/usage/usage_lib.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/ray/_private/usage/usage_lib.py b/python/ray/_private/usage/usage_lib.py index e980703ed3eb..558f56c602ef 100644 --- a/python/ray/_private/usage/usage_lib.py +++ b/python/ray/_private/usage/usage_lib.py @@ -634,8 +634,8 @@ def _get_cluster_status_to_report_v2(gcs_client) -> ClusterStatusToReport: try: cluster_status = get_cluster_status(gcs_client.address) total_resources = cluster_status.total_resources() - result.total_num_cpus = total_resources.get("CPU", 0) - result.total_num_gpus = total_resources.get("GPU", 0) + result.total_num_cpus = int(total_resources.get("CPU", 0)) + result.total_num_gpus = int(total_resources.get("GPU", 0)) to_GiB = 1 / 2**30 result.total_memory_gb = total_resources.get("memory", 0) * to_GiB From 487679e988ee3109b539fd4c8f77e93aa282b710 Mon Sep 17 00:00:00 2001 From: matthewdeng Date: Wed, 20 Nov 2024 01:50:26 -0800 Subject: [PATCH 
027/107] [release] set golden_notebook_torch_tune_serve_test frequency to manual (#48811) Current test is failing due to spot instance unavailability. Converting this test to manual right now. Signed-off-by: Matthew Deng --- release/release_tests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release/release_tests.yaml b/release/release_tests.yaml index 9f27a1a71d93..91cb2e9331ea 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -1469,7 +1469,7 @@ group: Golden Notebook tests working_dir: golden_notebook_tests - frequency: nightly-3x + frequency: manual team: ml cluster: From 032b83bc6f5d974cb6bc2d3e80ea40c5c71a319e Mon Sep 17 00:00:00 2001 From: Superskyyy Date: Wed, 20 Nov 2024 12:19:35 -0500 Subject: [PATCH 028/107] [Serve] Add more nuanced checks for http proxy status errors (#47896) Closes: https://github.com/ray-project/ray/issues/47895 --------- Signed-off-by: Superskyyy Co-authored-by: Edward Oakes --- python/ray/serve/_private/proxy.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/python/ray/serve/_private/proxy.py b/python/ray/serve/_private/proxy.py index 614a8dc39508..7b850b6ccf1c 100644 --- a/python/ray/serve/_private/proxy.py +++ b/python/ray/serve/_private/proxy.py @@ -988,8 +988,7 @@ async def send_request_to_replica( status_code = str(asgi_message["status"]) status = ResponseStatus( code=status_code, - # TODO(edoakes): we need a more nuanced check than this. - is_error=status_code != "200", + is_error=not status_code.startswith("2"), ) expecting_trailers = asgi_message.get("trailers", False) elif asgi_message["type"] == "websocket.accept": @@ -1011,11 +1010,17 @@ async def send_request_to_replica( if not asgi_message.get("more_trailers", False): response_generator.stop_checking_for_disconnect() elif asgi_message["type"] == "websocket.disconnect": - status = ResponseStatus( - code=str(asgi_message["code"]), - # TODO(edoakes): we need a more nuanced check than this. - is_error=False, - ) + status_code = str(asgi_message["code"]) + + # Check based on standard WebSocket status codes + if status_code in ["1000", "1001"]: + # Normal closure or going away, no error + is_error = False + else: + # Other 1xxx codes are specified as errors + is_error = status_code.startswith("1") + + status = ResponseStatus(code=status_code, is_error=is_error) response_generator.stop_checking_for_disconnect() yield asgi_message From 8512c9ed48ed6c5b7d2006861b935cdc845deb24 Mon Sep 17 00:00:00 2001 From: KepingYan Date: Thu, 21 Nov 2024 01:59:42 +0800 Subject: [PATCH 029/107] [doc] Add hpu resource description in ray serve docs (#48796) HPU resource is already supported in Ray, and there are many examples to guide users to use HPU device in Ray, so this PR adds some instructions for HPU device to the Ray Serve related documents. --------- Signed-off-by: KepingYan --- doc/source/serve/getting_started.md | 1 + doc/source/serve/resource-allocation.md | 14 +++++++++++--- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/doc/source/serve/getting_started.md b/doc/source/serve/getting_started.md index ff2620cc8052..0bbe4084f3e5 100644 --- a/doc/source/serve/getting_started.md +++ b/doc/source/serve/getting_started.md @@ -101,6 +101,7 @@ parameters in the `@serve.deployment` decorator. The example configures a few co * `ray_actor_options`: a dictionary containing configuration options for each replica. * `num_cpus`: a float representing the logical number of CPUs each replica should reserve. 
You can make this a fraction to pack multiple replicas together on a machine with fewer CPUs than replicas.
   * `num_gpus`: a float representing the logical number of GPUs each replica should reserve. You can make this a fraction to pack multiple replicas together on a machine with fewer GPUs than replicas.
+  * `resources`: a dictionary containing other resource requirements for the replica, such as non-GPU accelerators like HPUs or TPUs.

 All these parameters are optional, so feel free to omit them:

diff --git a/doc/source/serve/resource-allocation.md b/doc/source/serve/resource-allocation.md
index 57f580f2c370..18df5a8181a4 100644
--- a/doc/source/serve/resource-allocation.md
+++ b/doc/source/serve/resource-allocation.md
@@ -6,14 +6,14 @@ This guide helps you configure Ray Serve to:

 - Scale your deployments horizontally by specifying a number of replicas
 - Scale up and down automatically to react to changing traffic
-- Allocate hardware resources (CPUs, GPUs, etc) for each deployment
+- Allocate hardware resources (CPUs, GPUs, other accelerators, etc) for each deployment

 (serve-cpus-gpus)=

-## Resource management (CPUs, GPUs)
+## Resource management (CPUs, GPUs, accelerators)

-You may want to specify a deployment's resource requirements to reserve cluster resources like GPUs. To assign hardware resources per replica, you can pass resource requirements to
+You may want to specify a deployment's resource requirements to reserve cluster resources like GPUs or other accelerators. To assign hardware resources per replica, you can pass resource requirements to
 `ray_actor_options`. By default, each replica reserves one CPU.
 To learn about options to pass in, take a look at the [Resources with Actors guide](actor-resource-guide).

@@ -27,6 +27,14 @@ def func(*args):
     return do_something_with_my_gpu()
 ```

+Or if you want to create a deployment where each replica uses another type of accelerator such as an HPU, follow the example below:
+
+```python
+@serve.deployment(ray_actor_options={"resources": {"HPU": 1}})
+def func(*args):
+    return do_something_with_my_hpu()
+```
+
 (serve-fractional-resources-guide)=

 ### Fractional CPUs and fractional GPUs
From eea0057e17ad14374c5e747908dcd9d559596a97 Mon Sep 17 00:00:00 2001
From: Stephanie Wang
Date: Wed, 20 Nov 2024 10:18:49 -0800
Subject: [PATCH 030/107] [core][compiled graphs] Wait for monitor thread to
 join before shutting down (#48808)

Each compiled graph starts a monitor thread to tear down the DAG upon
detecting an error in one of the workers' task loops. Currently, during
driver shutdown, this thread can live past the lifetime of the C++
CoreWorker. This causes a silent process exit when the thread later
tries to call into the CoreWorker after it has already been destructed.
To prevent this from happening, this fix joins the monitor thread
*before* destructing the CoreWorker.

## Related issue number

Closes #48288.
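For reference, the shutdown-ordering pattern described above boils down to joining the thread before releasing the resources it still uses. A minimal standalone sketch (illustrative only; the monitor loop is a stand-in, and the timeout mirrors the `RAY_DAG_teardown_timeout` default of 30 seconds introduced in the diff below):

```python
import threading


def monitor_loop(stop: threading.Event) -> None:
    # Stand-in for the compiled graph's monitor thread: poll until told to stop.
    while not stop.wait(timeout=0.1):
        pass  # ... watch worker task loops, tear down the DAG on error ...


stop = threading.Event()
monitor = threading.Thread(target=monitor_loop, args=(stop,), daemon=True)
monitor.start()

# On shutdown: signal the monitor and join it *before* destroying anything it
# may still touch (the C++ CoreWorker, in the real fix).
stop.set()
monitor.join(timeout=30)
if monitor.is_alive():
    print("monitor thread did not exit within the teardown timeout")
```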
---------

Signed-off-by: Stephanie Wang
---
 python/ray/dag/compiled_dag_node.py | 9 ++++++++-
 python/ray/dag/context.py           | 4 ++++
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/python/ray/dag/compiled_dag_node.py b/python/ray/dag/compiled_dag_node.py
index c964b890f3d4..7c6160d8937d 100644
--- a/python/ray/dag/compiled_dag_node.py
+++ b/python/ray/dag/compiled_dag_node.py
@@ -1880,7 +1880,7 @@ def wait_teardown(self, kill_actors: bool = False):
             from ray.dag import DAGContext

             ctx = DAGContext.get_current()
-            teardown_timeout = ctx.retrieval_timeout
+            teardown_timeout = ctx.teardown_timeout

             for actor, ref in outer.worker_task_refs.items():
                 timeout = False
@@ -2443,7 +2443,14 @@ def teardown(self, kill_actors: bool = False):
             monitor = getattr(self, "_monitor", None)
             if monitor is not None:
+                from ray.dag import DAGContext
+
+                ctx = DAGContext.get_current()
                 monitor.teardown(kill_actors=kill_actors)
+                monitor.join(timeout=ctx.teardown_timeout)
+                # We do not log a warning here if the thread is still alive because
+                # wait_teardown already logs upon teardown_timeout.
+
             self._is_teardown = True

     def __del__(self):

diff --git a/python/ray/dag/context.py b/python/ray/dag/context.py
index 29e1d5bf2c78..531785c50262 100644
--- a/python/ray/dag/context.py
+++ b/python/ray/dag/context.py
@@ -10,6 +10,7 @@

 DEFAULT_EXECUTION_TIMEOUT_S = int(os.environ.get("RAY_DAG_execution_timeout", 10))
 DEFAULT_RETRIEVAL_TIMEOUT_S = int(os.environ.get("RAY_DAG_retrieval_timeout", 10))
+DEFAULT_TEARDOWN_TIMEOUT_S = int(os.environ.get("RAY_DAG_teardown_timeout", 30))
 # Default buffer size is 1MB.
 DEFAULT_BUFFER_SIZE_BYTES = int(os.environ.get("RAY_DAG_buffer_size_bytes", 1e6))
 # Default asyncio_max_queue_size is 0, which means no limit.
@@ -51,6 +52,8 @@ class DAGContext:
             calls.
         retrieval_timeout: The maximum time in seconds to wait to retrieve
             a result from the DAG.
+        teardown_timeout: The maximum time in seconds to wait for the DAG to
+            cleanly shut down.
         buffer_size_bytes: The maximum size of messages that can be passed
             between tasks in the DAG.
         asyncio_max_queue_size: The max queue size for the async execution.
@@ -72,6 +75,7 @@ class DAGContext:

     execution_timeout: int = DEFAULT_EXECUTION_TIMEOUT_S
     retrieval_timeout: int = DEFAULT_RETRIEVAL_TIMEOUT_S
+    teardown_timeout: int = DEFAULT_TEARDOWN_TIMEOUT_S
     buffer_size_bytes: int = DEFAULT_BUFFER_SIZE_BYTES
     asyncio_max_queue_size: int = DEFAULT_ASYNCIO_MAX_QUEUE_SIZE
     max_buffered_results: int = DEFAULT_MAX_BUFFERED_RESULTS

From 9a19f790f107040df9ecc1b2c03c270948125a75 Mon Sep 17 00:00:00 2001
From: Mimi Liao <44743062+mimiliaogo@users.noreply.github.com>
Date: Wed, 20 Nov 2024 12:21:56 -0600
Subject: [PATCH 031/107] Pass logging_config to controller global logger
 (#48790)

## Why are these changes needed?

Currently in serve.run the logging_config is not passed to the controller.
This PR adds this argument to the function call so the logging_config can be
correctly specified for system-level logging.
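The same configuration can also be passed as a schema object instead of a dict. A minimal sketch, assuming `ray.serve.schema.LoggingConfig` with `log_level` and `logs_dir` fields:

```python
from ray import serve
from ray.serve.schema import LoggingConfig

handle = serve.run(
    app,
    logging_config=LoggingConfig(log_level="DEBUG", logs_dir="./mimi_debug"),
)
```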
## Related issue number

Closes #48652

### Example
```
logging_config = {"log_level": "DEBUG", "logs_dir": "./mimi_debug"}
handle: DeploymentHandle = serve.run(app, logging_config=logging_config)
```

### Before
Controller logs aren't saved in the specified logs_dir.

### After
Controller logs are correctly configured.

Signed-off-by: Mimi Liao
---
 python/ray/serve/api.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/python/ray/serve/api.py b/python/ray/serve/api.py
index 182795889d47..13b92c7fcaae 100644
--- a/python/ray/serve/api.py
+++ b/python/ray/serve/api.py
@@ -474,6 +474,7 @@ def _run(
     else:
         client = _private_api.serve_start(
             http_options={"location": "EveryNode"},
+            global_logging_config=logging_config,
         )
     # Record after Ray has been started.
     ServeUsageTag.API_VERSION.record("v2")

From 52b07685e744e5d72a6c0691e015a74aebb9bca9 Mon Sep 17 00:00:00 2001
From: kanwang
Date: Wed, 20 Nov 2024 14:32:19 -0500
Subject: [PATCH 032/107] [Core] GCS FT with redis sentinel (#47335)

Signed-off-by: Kan Wang
---
 .../kubernetes/user-guides/kuberay-gcs-ft.md |   2 +-
 python/ray/_private/test_utils.py            |  63 ++++++++
 python/ray/tests/conftest.py                 |  54 ++++++-
 python/ray/tests/test_gcs_fault_tolerance.py | 115 ++++++++++++++
 src/ray/gcs/redis_context.cc                 | 146 ++++++++++++++----
 5 files changed, 344 insertions(+), 36 deletions(-)

diff --git a/doc/source/cluster/kubernetes/user-guides/kuberay-gcs-ft.md b/doc/source/cluster/kubernetes/user-guides/kuberay-gcs-ft.md
index dd0000049140..a54161faf82c 100644
--- a/doc/source/cluster/kubernetes/user-guides/kuberay-gcs-ft.md
+++ b/doc/source/cluster/kubernetes/user-guides/kuberay-gcs-ft.md
@@ -27,7 +27,7 @@ See {ref}`Ray Serve end-to-end fault tolerance documentation

    print(">>> Waiting gcs server to exit", gcs_server_pid)
    wait_for_pid_to_exit(gcs_server_pid, 1000)
    print("GCS killed")

    wait_for_condition(lambda: current_leader != get_sentinel_nodes()[0])

    # Kill Counter actor. It should restart after GCS is back
    c_process.kill()
    # Cleanup the in-memory data and then start gcs
    cluster.head_node.kill_gcs_server(False)

    print("Start gcs")
    sleep(2)
    cluster.head_node.start_gcs_server()

    assert len(ray.nodes()) == 1
    assert ray.nodes()[0]["alive"]

    driver_script = f"""
import ray
ray.init('{cluster.address}')

@ray.remote
def f():
    return 10

assert ray.get(f.remote()) == 10

c = ray.get_actor("c", namespace="test")
v = ray.get(c.r.remote(10))
assert v == 10
print("DONE")
"""

    # Make sure the cluster is usable
    wait_for_condition(lambda: "DONE" in run_string_as_driver(driver_script))


 @pytest.mark.parametrize(
     "ray_start_regular",
     [

diff --git a/src/ray/gcs/redis_context.cc b/src/ray/gcs/redis_context.cc
index 6de20bfe34af..f26333299b11 100644
--- a/src/ray/gcs/redis_context.cc
+++ b/src/ray/gcs/redis_context.cc
@@ -431,6 +431,114 @@ void ValidateRedisDB(RedisContext &context) {
   }
 }

+bool isRedisSentinel(RedisContext &context) {
+  auto reply = context.RunArgvSync(std::vector<std::string>{"INFO", "SENTINEL"});
+  if (reply->IsNil() || reply->IsError() || reply->ReadAsString().length() == 0) {
+    return false;
+  } else {
+    return true;
+  }
+}
+
+Status ConnectRedisCluster(RedisContext &context,
+                           const std::string &password,
+                           bool enable_ssl,
+                           const std::string &redis_address) {
+  RAY_LOG(INFO) << "Connect to Redis Cluster";
+  // Ray has some restrictions for RedisDB. Validate it here.
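+  // The dummy DEL issued below also doubles as a leader probe: a non-leader
+  // Redis Cluster node answers it with a MOVED error that embeds the leader's
+  // ip:port, which ParseIffMovedError extracts so we can reconnect.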
+  ValidateRedisDB(context);
+
+  // Find the true leader
+  std::vector<const char *> argv;
+  std::vector<size_t> argc;
+  std::vector<std::string> cmds = {"DEL", "DUMMY"};
+  for (const auto &arg : cmds) {
+    argv.push_back(arg.data());
+    argc.push_back(arg.size());
+  }
+
+  auto redis_reply = reinterpret_cast<redisReply *>(
+      ::redisCommandArgv(context.sync_context(), cmds.size(), argv.data(), argc.data()));
+
+  if (redis_reply->type == REDIS_REPLY_ERROR) {
+    // This should be a MOVED error
+    // MOVED 14946 10.xx.xx.xx:7001
+    std::string error_msg(redis_reply->str, redis_reply->len);
+    freeReplyObject(redis_reply);
+    auto maybe_ip_port = ParseIffMovedError(error_msg);
+    RAY_CHECK(maybe_ip_port.has_value())
+        << "Setup Redis cluster failed in the dummy deletion: " << error_msg;
+    context.Disconnect();
+    const auto &[ip, port] = maybe_ip_port.value();
+    // Connect to the true leader.
+    RAY_LOG(INFO) << "Redis cluster leader is " << ip << ":" << port
+                  << ". Reconnect to it.";
+    return context.Connect(ip, port, password, enable_ssl);
+  } else {
+    RAY_LOG(INFO) << "Redis cluster leader is " << redis_address;
+    freeReplyObject(redis_reply);
+  }
+
+  return Status::OK();
+}
+
+Status ConnectRedisSentinel(RedisContext &context,
+                            const std::string &password,
+                            bool enable_ssl) {
+  RAY_LOG(INFO) << "Connect to Redis sentinel";
+
+  std::vector<const char *> argv;
+  std::vector<size_t> argc;
+  std::vector<std::string> cmds = {"SENTINEL", "MASTERS"};
+  for (const auto &arg : cmds) {
+    argv.push_back(arg.data());
+    argc.push_back(arg.size());
+  }
+
+  // use raw redis context since we need to parse a complex reply.
+  // sample reply (array of arrays):
+  // 1) 1) "name"
+  //    2) "redis-ha"
+  //    3) "ip"
+  //    4) "10.112.202.115"
+  //    5) "port"
+  //    6) "6379"
+  //    7) "runid"
+  //    8) "18a76cedbf445bd25bbd412c92e237137b5c7d4d"
+  auto redis_reply = reinterpret_cast<redisReply *>(
+      ::redisCommandArgv(context.sync_context(), cmds.size(), argv.data(), argc.data()));
+
+  RAY_CHECK(redis_reply) << "Failed to get redis sentinel masters info";
+  RAY_CHECK_EQ(redis_reply->type, REDIS_REPLY_ARRAY)
+      << "Redis sentinel master info should be REDIS_REPLY_ARRAY but got "
+      << redis_reply->type;
+  RAY_CHECK_EQ(redis_reply->elements, 1UL)
+      << "There should be only one primary behind the Redis sentinel";
+  auto primary = redis_reply->element[0];
+  std::string actual_ip, actual_port;
+  for (size_t i = 0; i < primary->elements; i += 2) {
+    std::string key = primary->element[i]->str;        // Key (e.g., "name", "ip")
+    std::string value = primary->element[i + 1]->str;  // Value corresponding to the key
+    if ("ip" == key) {
+      actual_ip = value;
+    } else if ("port" == key) {
+      actual_port = value;
+    }
+  }
+  freeReplyObject(redis_reply);
+  if (actual_ip.empty() || actual_port.empty()) {
+    RAY_LOG(ERROR)
+        << "Failed to get the ip and port of the primary node from Redis sentinel";
+    return Status::RedisError(
+        "Failed to get the ip and port of the primary node from Redis sentinel");
+  } else {
+    RAY_LOG(INFO) << "Connecting to the Redis primary node behind sentinel: " << actual_ip
+                  << ":" << actual_port;
+    context.Disconnect();
+    return context.Connect(actual_ip, std::stoi(actual_port), password, enable_ssl);
+  }
+}
+
 std::vector<std::string> ResolveDNS(const std::string &address, int port) {
   using namespace boost::asio;
   io_context ctx;
@@ -503,41 +611,13 @@ Status RedisContext::Connect(const std::string &address,

   redis_async_context_.reset(new RedisAsyncContext(std::move(async_context)));
   SetDisconnectCallback(redis_async_context_.get());

-  // Ray has some restrictions for RedisDB. Validate it here.
-  ValidateRedisDB(*this);
-
-  // Find the true leader
-  std::vector<const char *> argv;
-  std::vector<size_t> argc;
-  std::vector<std::string> cmds = {"DEL", "DUMMY"};
-  for (const auto &arg : cmds) {
-    argv.push_back(arg.data());
-    argc.push_back(arg.size());
-  }
-
-  auto redis_reply = reinterpret_cast<redisReply *>(
-      ::redisCommandArgv(context_.get(), cmds.size(), argv.data(), argc.data()));
-
-  if (redis_reply->type == REDIS_REPLY_ERROR) {
-    // This should be a MOVED error
-    // MOVED 14946 10.xx.xx.xx:7001
-    std::string error_msg(redis_reply->str, redis_reply->len);
-    freeReplyObject(redis_reply);
-    auto maybe_ip_port = ParseIffMovedError(error_msg);
-    RAY_CHECK(maybe_ip_port.has_value())
-        << "Setup Redis cluster failed in the dummy deletion: " << error_msg;
-    Disconnect();
-    const auto &[ip, port] = maybe_ip_port.value();
-    // Connect to the true leader.
-    RAY_LOG(INFO) << "Redis cluster leader is " << ip << ":" << port
-                  << ". Reconnect to it.";
-    return Connect(ip, port, password, enable_ssl);
+  // handle validation and primary connection for different types of redis
+  if (isRedisSentinel(*this)) {
+    return ConnectRedisSentinel(*this, password, enable_ssl);
   } else {
-    RAY_LOG(INFO) << "Redis cluster leader is " << ip_addresses[0] << ":" << port;
-    freeReplyObject(redis_reply);
+    return ConnectRedisCluster(
+        *this, password, enable_ssl, ip_addresses[0] + ":" + std::to_string(port));
   }
-
-  return Status::OK();
 }

 std::unique_ptr<CallbackReply> RedisContext::RunArgvSync(

From 895f15bc135aef3999d1694479b645d2e4860efc Mon Sep 17 00:00:00 2001
From: dentiny
Date: Wed, 20 Nov 2024 11:47:17 -0800
Subject: [PATCH 033/107] [core] Avoid parsing env exception (#48816)

A small change to use `absl::SimpleAtoi` so that integer parsing cannot
throw an exception; it also avoids a double map lookup and ignores all
invalid values (i.e., negative values).

Signed-off-by: dentiny
---
 src/ray/util/logging.cc | 43 ++++++++++++++++++++++++-----------------
 1 file changed, 25 insertions(+), 18 deletions(-)

diff --git a/src/ray/util/logging.cc b/src/ray/util/logging.cc
index 12737eabed7e..7562b3e2ecce 100644
--- a/src/ray/util/logging.cc
+++ b/src/ray/util/logging.cc
@@ -27,14 +27,17 @@
 #endif

 #include
+#include <array>
 #include
 #include
 #include
 #include
+#include <string_view>

 #include "absl/debugging/failure_signal_handler.h"
 #include "absl/debugging/stacktrace.h"
 #include "absl/debugging/symbolize.h"
+#include "absl/strings/numbers.h"
 #include "absl/strings/str_format.h"
 #include "nlohmann/json.hpp"
 #include "ray/util/event_label.h"
@@ -301,10 +304,8 @@ void RayLog::InitLogFormat() {
   log_format_json_ = false;
   log_format_pattern_ = kLogFormatTextPattern;

-  const char *var_value = std::getenv("RAY_BACKEND_LOG_JSON");
-  if (var_value != nullptr) {
-    std::string data = var_value;
-    if (data == "1") {
+  if (const char *var_value = std::getenv("RAY_BACKEND_LOG_JSON"); var_value != nullptr) {
+    if (std::string_view{var_value} == std::string_view{"1"}) {
       log_format_json_ = true;
       log_format_pattern_ = kLogFormatJsonPattern;
     }
@@ -321,7 +322,9 @@ void RayLog::StartRayLog(const std::string &app_name,
   log_dir_ = log_dir;

   // All the logging sinks to add.
-  std::vector<spdlog::sink_ptr> sinks;
+  // One for file/stdout, another for stderr.
+  std::array<spdlog::sink_ptr, 2> sinks;  // Intentionally no initialization.
+
   auto level = GetMappedSeverity(severity_threshold_);
   std::string app_name_without_path = app_name;
   if (app_name.empty()) {
@@ -343,17 +346,20 @@
 #endif
   // Reset log pattern and level and we assume a log file can be rotated with
   // 10 files in max size 512M by default.
-  if (std::getenv("RAY_ROTATION_MAX_BYTES")) {
-    long max_size = std::atol(std::getenv("RAY_ROTATION_MAX_BYTES"));
-    // 0 means no log rotation in python, but not in spdlog. We just use the default
-    // value here.
-    if (max_size != 0) {
+  if (const char *ray_rotation_max_bytes = std::getenv("RAY_ROTATION_MAX_BYTES");
+      ray_rotation_max_bytes != nullptr) {
+    long max_size = 0;
+    if (absl::SimpleAtoi(ray_rotation_max_bytes, &max_size) && max_size > 0) {
+      // 0 means no log rotation in python, but not in spdlog. We just use the default
+      // value here.
       log_rotation_max_size_ = max_size;
     }
   }
-  if (std::getenv("RAY_ROTATION_BACKUP_COUNT")) {
-    long file_num = std::atol(std::getenv("RAY_ROTATION_BACKUP_COUNT"));
-    if (file_num != 0) {
+
+  if (const char *ray_rotation_backup_count = std::getenv("RAY_ROTATION_BACKUP_COUNT");
+      ray_rotation_backup_count != nullptr) {
+    long file_num = 0;
+    if (absl::SimpleAtoi(ray_rotation_backup_count, &file_num) && file_num > 0) {
       log_rotation_file_num_ = file_num;
     }
   }
@@ -370,23 +376,24 @@
                                                            log_rotation_max_size_,
                                                            log_rotation_file_num_);
     file_sink->set_level(level);
-    sinks.push_back(file_sink);
+    sinks[0] = std::move(file_sink);
   } else {
     component_name_ = app_name_without_path;
     auto console_sink = std::make_shared<spdlog::sinks::stdout_color_sink_mt>();
     console_sink->set_level(level);
-    sinks.push_back(console_sink);
+    sinks[0] = std::move(console_sink);
   }

   // In all cases, log errors to the console log so they are in driver logs.
   // https://github.com/ray-project/ray/issues/12893
   auto err_sink = std::make_shared<spdlog::sinks::stderr_color_sink_mt>();
   err_sink->set_level(spdlog::level::err);
-  sinks.push_back(err_sink);
+  sinks[1] = std::move(err_sink);

   // Set the combined logger.
-  auto logger = std::make_shared<spdlog::logger>(
-      RayLog::GetLoggerName(), sinks.begin(), sinks.end());
+  auto logger = std::make_shared<spdlog::logger>(RayLog::GetLoggerName(),
+                                                 std::make_move_iterator(sinks.begin()),
+                                                 std::make_move_iterator(sinks.end()));
   logger->set_level(level);
   // Set the pattern of all sinks.
  logger->set_pattern(log_format_pattern_);

From 6e828d137239acc41e630be01394749a0211cf4a Mon Sep 17 00:00:00 2001
From: dentiny
Date: Wed, 20 Nov 2024 11:50:04 -0800
Subject: [PATCH 034/107] [core] Ray log if error (#48752)

Two benefits of the util macro:
- Better branch prediction, and thus better performance
- Keeps the implementation focused on the happy path

Signed-off-by: dentiny
---
 src/ray/raylet_client/raylet_client.cc | 26 ++++++++++----------------
 src/ray/util/logging.h                 |  5 +++++
 2 files changed, 15 insertions(+), 16 deletions(-)

diff --git a/src/ray/raylet_client/raylet_client.cc b/src/ray/raylet_client/raylet_client.cc
index 7911a3ce0a86..5eff4538f837 100644
--- a/src/ray/raylet_client/raylet_client.cc
+++ b/src/ray/raylet_client/raylet_client.cc
@@ -370,10 +370,10 @@ void raylet::RayletClient::ReportWorkerBacklog(
   request.set_worker_id(worker_id.Binary());
   request.mutable_backlog_reports()->Add(backlog_reports.begin(), backlog_reports.end());
   grpc_client_->ReportWorkerBacklog(
-      request, [](const Status &status, rpc::ReportWorkerBacklogReply &&reply) {
-        if (!status.ok()) {
-          RAY_LOG(INFO) << "Error reporting task backlog information: " << status;
-        }
+      request,
+      [](const Status &status, rpc::ReportWorkerBacklogReply &&reply /*unused*/) {
+        RAY_LOG_IF_ERROR(INFO, status)
+            << "Error reporting task backlog information: " << status;
       });
 }

@@ -389,12 +389,10 @@ Status raylet::RayletClient::ReturnWorker(
   request.set_disconnect_worker(disconnect_worker);
   request.set_disconnect_worker_error_detail(disconnect_worker_error_detail);
   request.set_worker_exiting(worker_exiting);
-  grpc_client_->ReturnWorker(request,
-                             [](const Status &status, rpc::ReturnWorkerReply &&reply) {
-                               if (!status.ok()) {
-                                 RAY_LOG(INFO) << "Error returning worker: " << status;
-                               }
-                             });
+  grpc_client_->ReturnWorker(
+      request, [](const Status &status, rpc::ReturnWorkerReply &&reply /*unused*/) {
+        RAY_LOG_IF_ERROR(INFO, status) << "Error returning worker: " << status;
+      });
   return Status::OK();
 }

@@ -405,9 +403,7 @@ void raylet::RayletClient::GetTaskFailureCause(
   request.set_task_id(task_id.Binary());
   grpc_client_->GetTaskFailureCause(
       request, [callback](const Status &status, rpc::GetTaskFailureCauseReply &&reply) {
-        if (!status.ok()) {
-          RAY_LOG(INFO) << "Error getting task result: " << status;
-        }
+        RAY_LOG_IF_ERROR(INFO, status) << "Error getting task result: " << status;
         callback(status, std::move(reply));
       });
 }

@@ -459,9 +455,7 @@ void raylet::RayletClient::PushMutableObject(
   // TODO: Add failure recovery, retries, and timeout.
   grpc_client_->PushMutableObject(
       request, [callback](const Status &status, rpc::PushMutableObjectReply &&reply) {
-        if (!status.ok()) {
-          RAY_LOG(ERROR) << "Error pushing mutable object: " << status;
-        }
+        RAY_LOG_IF_ERROR(ERROR, status) << "Error pushing mutable object: " << status;
         if (reply.done()) {
           // The callback is only executed once the receiver node receives all chunks
           // for the mutable object write.

diff --git a/src/ray/util/logging.h b/src/ray/util/logging.h
index bea9c0b5de44..b5e06f4b083a 100644
--- a/src/ray/util/logging.h
+++ b/src/ray/util/logging.h
@@ -129,6 +129,11 @@ enum class RayLogLevel {
   if (ray::RayLog::IsLevelEnabled(ray::RayLogLevel::level)) \
     RAY_LOG_INTERNAL(ray::RayLogLevel::level)

+// `cond` is a `Status`-like class: it can be `ray::Status` or a third-party
+// status such as `grpc::Status`.
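+// Example usage (mirroring the raylet client call sites above):
+//   RAY_LOG_IF_ERROR(INFO, status) << "Error returning worker: " << status;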
+#define RAY_LOG_IF_ERROR(level, cond) \ + if (RAY_PREDICT_FALSE(!(cond).ok())) RAY_LOG(level) + #define RAY_IGNORE_EXPR(expr) ((void)(expr)) #define RAY_CHECK(condition) \ From 05915c1b389ab0bada23217a3cb2768311d1184b Mon Sep 17 00:00:00 2001 From: Sven Mika Date: Wed, 20 Nov 2024 20:52:38 +0100 Subject: [PATCH 035/107] [RLlib] APPO enhancements (new API stack) vol 01: Add circular buffer (#48798) --- rllib/BUILD | 39 +++++----- rllib/algorithms/appo/appo.py | 80 +++++++++++++++---- rllib/algorithms/appo/appo_learner.py | 6 ++ rllib/algorithms/appo/utils.py | 89 ++++++++++++++++++++- rllib/algorithms/impala/impala_learner.py | 91 ++++++++++++---------- rllib/core/learner/learner.py | 9 --- rllib/tuned_examples/appo/cartpole_appo.py | 1 + 7 files changed, 231 insertions(+), 84 deletions(-) diff --git a/rllib/BUILD b/rllib/BUILD index e2ec7386ae0a..5886d865a052 100644 --- a/rllib/BUILD +++ b/rllib/BUILD @@ -164,23 +164,24 @@ py_test( tags = ["team:rllib", "exclusive", "learning_tests", "learning_tests_discrete", "torch_only"], size = "large", srcs = ["tuned_examples/appo/cartpole_appo.py"], - args = ["--as-test", "--enable-new-api-stack", "--num-learners=1"] -) -py_test( - name = "learning_tests_cartpole_appo_gpu", - main = "tuned_examples/appo/cartpole_appo.py", - tags = ["team:rllib", "exclusive", "learning_tests", "torch_only", "learning_tests_discrete", "learning_tests_pytorch_use_all_core", "gpu"], - size = "large", - srcs = ["tuned_examples/appo/cartpole_appo.py"], - args = ["--as-test", "--enable-new-api-stack", "--num-learners=0", "--num-gpus-per-learner=1"] + args = ["--as-test", "--num-learners=1", "--num-cpus=8", "--num-env-runners=6"] ) +# TODO (sven): For some weird reason, this test runs extremely slow on the CI (not on cluster, not locally) -> taking this out for now ... 
+# py_test( +# name = "learning_tests_cartpole_appo_gpu", +# main = "tuned_examples/appo/cartpole_appo.py", +# tags = ["team:rllib", "exclusive", "learning_tests", "torch_only", "learning_tests_discrete", "learning_tests_pytorch_use_all_core", "gpu"], +# size = "large", +# srcs = ["tuned_examples/appo/cartpole_appo.py"], +# args = ["--as-test", "--num-learners=0", "--num-gpus-per-learner=1", "--num-cpus=7", "--num-env-runners=6"] +# ) py_test( name = "learning_tests_cartpole_appo_multi_cpu", main = "tuned_examples/appo/cartpole_appo.py", tags = ["team:rllib", "exclusive", "learning_tests", "torch_only", "learning_tests_discrete", "learning_tests_pytorch_use_all_core"], size = "large", srcs = ["tuned_examples/appo/cartpole_appo.py"], - args = ["--as-test", "--enable-new-api-stack", "--num-learners=2"] + args = ["--as-test", "--num-learners=2", "--num-cpus=9", "--num-env-runners=6"] ) py_test( name = "learning_tests_cartpole_appo_multi_gpu", @@ -188,7 +189,7 @@ py_test( tags = ["team:rllib", "exclusive", "learning_tests", "torch_only", "learning_tests_discrete", "learning_tests_pytorch_use_all_core", "multi_gpu"], size = "large", srcs = ["tuned_examples/appo/cartpole_appo.py"], - args = ["--as-test", "--enable-new-api-stack", "--num-learners=2", "--num-gpus-per-learner=1"] + args = ["--as-test", "--num-learners=2", "--num-gpus-per-learner=1", "--num-cpus=7", "--num-env-runners=6"] ) # MultiAgentCartPole py_test( @@ -197,7 +198,7 @@ py_test( tags = ["team:rllib", "exclusive", "learning_tests", "learning_tests_discrete", "torch_only"], size = "large", srcs = ["tuned_examples/appo/multi_agent_cartpole_appo.py"], - args = ["--as-test", "--enable-new-api-stack", "--num-agents=2", "--num-learners=1"] + args = ["--as-test", "--num-agents=2", "--num-learners=1", "--num-cpus=8", "--num-env-runners=6"] ) py_test( name = "learning_tests_multi_agent_cartpole_appo_gpu", @@ -205,7 +206,7 @@ py_test( tags = ["team:rllib", "exclusive", "learning_tests", "torch_only", "learning_tests_discrete", "learning_tests_pytorch_use_all_core", "gpu"], size = "large", srcs = ["tuned_examples/appo/multi_agent_cartpole_appo.py"], - args = ["--as-test", "--enable-new-api-stack", "--num-agents=2", "--num-learners=0", "--num-gpus-per-learner=1", "--num-cpus=6"] + args = ["--as-test", "--num-agents=2", "--num-learners=0", "--num-gpus-per-learner=1", "--num-cpus=7", "--num-env-runners=6"] ) py_test( name = "learning_tests_multi_agent_cartpole_appo_multi_cpu", @@ -213,7 +214,7 @@ py_test( tags = ["team:rllib", "exclusive", "learning_tests", "torch_only", "learning_tests_discrete", "learning_tests_pytorch_use_all_core"], size = "large", srcs = ["tuned_examples/appo/multi_agent_cartpole_appo.py"], - args = ["--as-test", "--enable-new-api-stack", "--num-agents=2", "--num-learners=2", "--num-cpus=7"] + args = ["--as-test", "--num-agents=2", "--num-learners=2", "--num-cpus=9", "--num-env-runners=6"] ) py_test( name = "learning_tests_multi_agent_cartpole_appo_multi_gpu", @@ -221,7 +222,7 @@ py_test( tags = ["team:rllib", "exclusive", "learning_tests", "torch_only", "learning_tests_discrete", "learning_tests_pytorch_use_all_core", "multi_gpu"], size = "large", srcs = ["tuned_examples/appo/multi_agent_cartpole_appo.py"], - args = ["--as-test", "--enable-new-api-stack", "--num-agents=2", "--num-learners=2", "--num-gpus-per-learner=1", "--num-cpus=7"] + args = ["--as-test", "--num-agents=2", "--num-learners=2", "--num-gpus-per-learner=1", "--num-cpus=7", "--num-env-runners=6"] ) # StatelessCartPole py_test( @@ -230,7 +231,7 @@ py_test( 
 tags = ["team:rllib", "exclusive", "learning_tests", "torch_only", "learning_tests_discrete", "learning_tests_pytorch_use_all_core"],
    size = "large",
    srcs = ["tuned_examples/appo/stateless_cartpole_appo.py"],
-    args = ["--as-test", "--enable-new-api-stack", "--num-learners=1"]
+    args = ["--as-test", "--num-learners=1", "--num-cpus=8", "--num-env-runners=6"]
 )
 py_test(
     name = "learning_tests_stateless_cartpole_appo_gpu",
     main = "tuned_examples/appo/stateless_cartpole_appo.py",
     tags = ["team:rllib", "exclusive", "learning_tests", "torch_only", "learning_tests_discrete", "learning_tests_pytorch_use_all_core", "gpu"],
     size = "large",
     srcs = ["tuned_examples/appo/stateless_cartpole_appo.py"],
-    args = ["--as-test", "--enable-new-api-stack", "--num-agents=2", "--num-learners=0", "--num-gpus-per-learner=1"]
+    args = ["--as-test", "--num-agents=2", "--num-learners=0", "--num-gpus-per-learner=1", "--num-cpus=7", "--num-env-runners=6"]
 )
 py_test(
     name = "learning_tests_stateless_cartpole_appo_multi_cpu",
     main = "tuned_examples/appo/stateless_cartpole_appo.py",
     tags = ["team:rllib", "exclusive", "learning_tests", "torch_only", "learning_tests_discrete", "learning_tests_pytorch_use_all_core"],
     size = "large",
     srcs = ["tuned_examples/appo/stateless_cartpole_appo.py"],
-    args = ["--as-test", "--enable-new-api-stack", "--num-learners=2"]
+    args = ["--as-test", "--num-learners=2", "--num-cpus=9", "--num-env-runners=6"]
 )
 py_test(
     name = "learning_tests_stateless_cartpole_appo_multi_gpu",
     main = "tuned_examples/appo/stateless_cartpole_appo.py",
     tags = ["team:rllib", "exclusive", "learning_tests", "torch_only", "learning_tests_discrete", "learning_tests_pytorch_use_all_core", "multi_gpu"],
     size = "large",
     srcs = ["tuned_examples/appo/stateless_cartpole_appo.py"],
-    args = ["--as-test", "--enable-new-api-stack", "--num-learners=2", "--num-gpus-per-learner=1"]
+    args = ["--as-test", "--num-learners=2", "--num-gpus-per-learner=1", "--num-cpus=7", "--num-env-runners=6"]
 )
 # MultiAgentStatelessCartPole
 # py_test(

diff --git a/rllib/algorithms/appo/appo.py b/rllib/algorithms/appo/appo.py
index 1640cf4b5338..3632ffab954b 100644
--- a/rllib/algorithms/appo/appo.py
+++ b/rllib/algorithms/appo/appo.py
@@ -1,13 +1,13 @@
-"""
-Asynchronous Proximal Policy Optimization (APPO)
-================================================
+"""Asynchronous Proximal Policy Optimization (APPO)

-This file defines the distributed Algorithm class for the asynchronous version
-of proximal policy optimization (APPO).
-See `appo_[tf|torch]_policy.py` for the definition of the policy loss.
+The algorithm is described in [1] (under the name of "IMPACT"):

 Detailed documentation:
 https://docs.ray.io/en/master/rllib-algorithms.html#appo
+
+[1] IMPACT: Importance Weighted Asynchronous Architectures with Clipped Target Networks.
+Luo et al. 2020
+https://arxiv.org/pdf/1912.00167
 """

 from typing import Optional, Type
@@ -108,18 +108,19 @@ def __init__(self, algo_class=None):
         self.kl_coeff = 1.0
         self.kl_target = 0.01
         self.target_worker_clipping = 2.0
-        # TODO (sven): Activate once v-trace sequences in non-RNN batch are solved.
-        # If we switch this on right now, the shuffling would destroy the rollout
-        # sequences (non-zero-padded!) needed in the batch for v-trace.
-        # self.shuffle_batch_per_epoch = True
+
+        # Circular replay buffer settings.
+        # Used in [1] for discrete action tasks:
+        # `circular_buffer_num_batches=4` and `circular_buffer_iterations_per_batch=2`
+        # For cont. action tasks:
+        # `circular_buffer_num_batches=16` and `circular_buffer_iterations_per_batch=20`
+        self.circular_buffer_num_batches = 4
+        self.circular_buffer_iterations_per_batch = 2

         # Override some of IMPALAConfig's default values with APPO-specific values.
         self.num_env_runners = 2
         self.min_time_s_per_iteration = 10
         self.target_network_update_freq = 1
-        self.learner_queue_size = 16
-        self.learner_queue_timeout = 300
-        self.max_sample_requests_in_flight_per_worker = 2
         self.broadcast_interval = 1
         self.grad_clip = 40.0
         # Note: Only when using enable_rl_module_and_learner=True can the clipping mode
@@ -145,6 +146,8 @@ def __init__(self, algo_class=None):
         self.minibatch_buffer_size = 1  # @OldAPIStack
         self.replay_proportion = 0.0  # @OldAPIStack
         self.replay_buffer_num_slots = 100  # @OldAPIStack
+        self.learner_queue_size = 16  # @OldAPIStack
+        self.learner_queue_timeout = 300  # @OldAPIStack

         # Deprecated keys.
         self.target_update_frequency = DEPRECATED_VALUE
@@ -164,6 +167,8 @@ def training(
         tau: Optional[float] = NotProvided,
         target_network_update_freq: Optional[int] = NotProvided,
         target_worker_clipping: Optional[float] = NotProvided,
+        circular_buffer_num_batches: Optional[int] = NotProvided,
+        circular_buffer_iterations_per_batch: Optional[int] = NotProvided,
         # Deprecated keys.
         target_update_frequency=DEPRECATED_VALUE,
         **kwargs,
@@ -197,6 +202,14 @@ def training(
             target_worker_clipping: The maximum value for the target-worker-clipping
                 used for computing the IS ratio, described in [1]
                 IS = min(π(i) / π(target), ρ) * (π / π(i))
+            circular_buffer_num_batches: The number of train batches that fit
+                into the circular buffer. Each such train batch can be sampled for
+                training at most `circular_buffer_iterations_per_batch` times.
+            circular_buffer_iterations_per_batch: The number of times any train
+                batch in the circular buffer can be sampled for training. A batch gets
+                evicted from the buffer either if it's the oldest batch in the buffer
+                and a new batch is added OR if it reaches this maximum number of
+                samplings.

         Returns:
             This updated AlgorithmConfig object.
@@ -233,9 +246,50 @@ def training(
             self.target_network_update_freq = target_network_update_freq
         if target_worker_clipping is not NotProvided:
             self.target_worker_clipping = target_worker_clipping
+        if circular_buffer_num_batches is not NotProvided:
+            self.circular_buffer_num_batches = circular_buffer_num_batches
+        if circular_buffer_iterations_per_batch is not NotProvided:
+            self.circular_buffer_iterations_per_batch = (
+                circular_buffer_iterations_per_batch
+            )

         return self

+    @override(IMPALAConfig)
+    def validate(self) -> None:
+        super().validate()
+
+        # On new API stack, circular buffer should be used, not `minibatch_buffer_size`.
+        if self.enable_rl_module_and_learner:
+            if self.minibatch_buffer_size != 1 or self.replay_proportion != 0.0:
+                raise ValueError(
+                    "`minibatch_buffer_size/replay_proportion` not valid on new API "
+                    "stack with APPO! "
+                    "Use `circular_buffer_num_batches` for the number of train batches "
+                    "in the circular buffer. To change the maximum number of times "
+                    "any batch may be sampled, set "
+                    "`circular_buffer_iterations_per_batch`."
+                )
+            if self.num_multi_gpu_tower_stacks != 1:
+                raise ValueError(
+                    "`num_multi_gpu_tower_stacks` not supported on new API stack with "
+                    "APPO! In order to train on multi-GPU, use "
+                    "`config.learners(num_learners=[number of GPUs], "
+                    "num_gpus_per_learner=1)`. To scale the throughput of batch-to-GPU-"
+                    "pre-loading on each of your `Learners`, set "
+                    "`num_gpu_loader_threads` to a higher number (recommended values: "
+                    "1-8)."
+                )
+            if self.learner_queue_size != 16:
+                raise ValueError(
+                    "`learner_queue_size` not supported on new API stack with "
+                    "APPO! In order to set the size of the circular buffer (which acts "
+                    "as a 'learner queue'), use "
+                    "`config.training(circular_buffer_num_batches=..)`. To change the "
+                    "maximum number of times any batch may be sampled, set "
+                    "`config.training(circular_buffer_iterations_per_batch=..)`."
+                )
+
     @override(IMPALAConfig)
     def get_default_learner_class(self):
         if self.framework_str == "torch":

diff --git a/rllib/algorithms/appo/appo_learner.py b/rllib/algorithms/appo/appo_learner.py
index 7b4cf2b14d8f..920d7b7ea992 100644
--- a/rllib/algorithms/appo/appo_learner.py
+++ b/rllib/algorithms/appo/appo_learner.py
@@ -2,6 +2,7 @@
 from typing import Any, Dict, Optional

 from ray.rllib.algorithms.appo.appo import APPOConfig
+from ray.rllib.algorithms.appo.utils import CircularBuffer
 from ray.rllib.algorithms.impala.impala_learner import IMPALALearner
 from ray.rllib.core.learner.learner import Learner
 from ray.rllib.core.learner.utils import update_target_network
@@ -28,6 +29,11 @@ class APPOLearner(IMPALALearner):

     @override(IMPALALearner)
     def build(self):
+        self._learner_thread_in_queue = CircularBuffer(
+            num_batches=self.config.circular_buffer_num_batches,
+            iterations_per_batch=self.config.circular_buffer_iterations_per_batch,
+        )
+
         super().build()

         # Make target networks.

diff --git a/rllib/algorithms/appo/utils.py b/rllib/algorithms/appo/utils.py
index cbd2efe82161..9a4f1e66d0a9 100644
--- a/rllib/algorithms/appo/utils.py
+++ b/rllib/algorithms/appo/utils.py
@@ -1,12 +1,99 @@
+"""
+[1] IMPACT: Importance Weighted Asynchronous Architectures with Clipped Target Networks.
+Luo et al. 2020
+https://arxiv.org/pdf/1912.00167
+"""
+from collections import deque
+import random
+import threading
+import time
+
 from ray.rllib.models.catalog import ModelCatalog
 from ray.rllib.models.modelv2 import ModelV2
+from ray.rllib.utils.annotations import OldAPIStack


 POLICY_SCOPE = "func"
 TARGET_POLICY_SCOPE = "target_func"


-# TODO (sven): Deprecate once APPO and IMPALA fully on RLModules/Learner APIs.
+class CircularBuffer:
+    """A circular batch-wise buffer as described in [1] for APPO.
+
+    The buffer holds at most N batches, which are sampled at random (uniformly).
+    If full and a new batch is added, the oldest batch is discarded. Also, each batch
+    currently in the buffer can be sampled at most K times (after which it is also
+    discarded).
+    """
+
+    def __init__(self, num_batches: int, iterations_per_batch: int):
+        # N from the paper (buffer size).
+        self.num_batches = num_batches
+        # K ("replay coefficient") from the paper.
+        self.iterations_per_batch = iterations_per_batch
+
+        self._buffer = deque(maxlen=self.num_batches)
+        self._lock = threading.Lock()
+
+        # The number of valid (not expired) entries in this buffer.
+        self._num_valid_batches = 0
+
+    def add(self, batch):
+        dropped_entry = None
+        dropped_ts = 0
+
+        # Add buffer and k=0 information to the deque.
+        with self._lock:
+            len_ = len(self._buffer)
+            if len_ == self.num_batches:
+                dropped_entry = self._buffer[0]
+            self._buffer.append([batch, 0])
+            self._num_valid_batches += 1
+
+        # A valid entry (w/ a batch whose k has not reached K yet) was dropped.
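+        # Each dropped batch still had (K - k) training passes left; count
+        # the corresponding env steps as dropped (for metrics) below.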
+        if dropped_entry is not None and dropped_entry[0] is not None:
+            dropped_ts += dropped_entry[0].env_steps() * (
+                self.iterations_per_batch - dropped_entry[1]
+            )
+            self._num_valid_batches -= 1
+
+        return dropped_ts
+
+    def sample(self):
+        k = entry = batch = None
+
+        while True:
+            # Only initially, the buffer may be empty -> Just wait for some time.
+            if len(self) == 0:
+                time.sleep(0.001)
+                continue
+            # Sample a random buffer index.
+            with self._lock:
+                entry = self._buffer[random.randint(0, len(self._buffer) - 1)]
+                batch, k = entry
+            # Ignore batches that have already been invalidated.
+            if batch is not None:
+                break
+
+        # Increment k for this batch.
+        assert k is not None
+        entry[1] += 1
+
+        # This batch has been exhausted (k == K) -> Invalidate it in the buffer.
+        if k == self.iterations_per_batch - 1:
+            entry[0] = None
+            entry[1] = None
+            self._num_valid_batches -= 1
+
+        # Return the sampled batch.
+        return batch
+
+    def __len__(self) -> int:
+        """Returns the number of actually valid (non-expired) batches in the buffer."""
+        return self._num_valid_batches
+
+
+@OldAPIStack
 def make_appo_models(policy) -> ModelV2:
     """Builds model and target model for APPO.

diff --git a/rllib/algorithms/impala/impala_learner.py b/rllib/algorithms/impala/impala_learner.py
index c38315d543b7..1929f9f010d6 100644
--- a/rllib/algorithms/impala/impala_learner.py
+++ b/rllib/algorithms/impala/impala_learner.py
@@ -3,11 +3,12 @@
 import queue
 import threading
 import time
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Union

 import tree  # pip install dm_tree

 import ray
+from ray.rllib.algorithms.appo.utils import CircularBuffer
 from ray.rllib.algorithms.impala.impala import LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY
 from ray.rllib.core.columns import Columns
 from ray.rllib.core.learner.learner import Learner
@@ -71,7 +72,7 @@ def build(self) -> None:
         ):
             self._learner_connector.prepend(AddOneTsToEpisodesAndTruncate())
             # Leave all batches on the CPU (they'll be moved to the GPU, if applicable,
-            # by the n GPU loader threads.
+            # by the n GPU loader threads).
             numpy_to_tensor_connector = self._learner_connector[NumpyToTensor][0]
             numpy_to_tensor_connector._device = "cpu"  # TODO (sven): Provide API?

@@ -80,7 +81,9 @@
         # on the "update queue" for the actual RLModule forward pass and loss
         # computations.
         self._gpu_loader_in_queue = queue.Queue()
-        self._learner_thread_in_queue = deque(maxlen=self.config.learner_queue_size)
+        # Default is to have a learner thread.
+        if not hasattr(self, "_learner_thread_in_queue"):
+            self._learner_thread_in_queue = deque(maxlen=self.config.learner_queue_size)
         self._learner_thread_out_queue = queue.Queue()

         # Create and start the GPU loader thread(s).
@@ -103,9 +106,6 @@
             in_queue=self._learner_thread_in_queue,
             out_queue=self._learner_thread_out_queue,
             metrics_logger=self.metrics,
-            num_epochs=self.config.num_epochs,
-            minibatch_size=self.config.minibatch_size,
-            shuffle_batch_per_epoch=self.config.shuffle_batch_per_epoch,
         )
         self._learner_thread.start()

@@ -115,13 +115,6 @@ def update_from_episodes(
         episodes: List[EpisodeType],
         *,
         timesteps: Dict[str, Any],
-        # TODO (sven): Deprecate these in favor of config attributes for only those
-        # algos that actually need (and know how) to do minibatching.
-        minibatch_size: Optional[int] = None,
-        num_epochs: int = 1,
-        shuffle_batch_per_epoch: bool = False,
-        num_total_minibatches: int = 0,
-        reduce_fn=None,  # Deprecated args.
**kwargs, ) -> ResultDict: self.metrics.set_value( @@ -175,15 +168,25 @@ def update_from_episodes( self._gpu_loader_in_queue.qsize(), ) else: - # Enqueue to Learner thread's in-queue. - _LearnerThread.enqueue( - self._learner_thread_in_queue, - MultiAgentBatch( - {mid: SampleBatch(b) for mid, b in batch.items()}, - env_steps=env_steps, - ), - self.metrics, + ma_batch = MultiAgentBatch( + {mid: SampleBatch(b) for mid, b in batch.items()}, + env_steps=env_steps, ) + # Add the batch directly to the circular buffer. + if isinstance(self._learner_thread_in_queue, CircularBuffer): + ts_dropped = self._learner_thread_in_queue.add(ma_batch) + self.metrics.log_value( + (ALL_MODULES, LEARNER_THREAD_ENV_STEPS_DROPPED), + ts_dropped, + reduce="sum", + ) + else: + # Enqueue to Learner thread's in-queue. + _LearnerThread.enqueue( + self._learner_thread_in_queue, + ma_batch, + self.metrics, + ) # Return all queued result dicts thus far (after reducing over them). results = {} @@ -263,8 +266,17 @@ def _step(self) -> None: policy_batches={mid: SampleBatch(b) for mid, b in batch_on_gpu.items()}, env_steps=env_steps, ) - # Enqueue to Learner thread's in-queue. - _LearnerThread.enqueue(self._out_queue, ma_batch_on_gpu, self.metrics) + + if isinstance(self._out_queue, CircularBuffer): + ts_dropped = self._out_queue.add(ma_batch_on_gpu) + self.metrics.log_value( + (ALL_MODULES, LEARNER_THREAD_ENV_STEPS_DROPPED), + ts_dropped, + reduce="sum", + ) + else: + # Enqueue to Learner thread's in-queue. + _LearnerThread.enqueue(self._out_queue, ma_batch_on_gpu, self.metrics) class _LearnerThread(threading.Thread): @@ -275,9 +287,6 @@ def __init__( in_queue: deque, out_queue: queue.Queue, metrics_logger, - num_epochs, - minibatch_size, - shuffle_batch_per_epoch, ): super().__init__() self.daemon = True @@ -285,13 +294,9 @@ def __init__( self.stopped = False self._update_method = update_method - self._in_queue: deque = in_queue + self._in_queue: Union[deque, CircularBuffer] = in_queue self._out_queue: queue.Queue = out_queue - self._num_epochs = num_epochs - self._minibatch_size = minibatch_size - self._shuffle_batch_per_epoch = shuffle_batch_per_epoch - def run(self) -> None: while not self.stopped: self.step() @@ -299,14 +304,19 @@ def run(self) -> None: def step(self): # Get a new batch from the GPU-data (deque.pop -> newest item first). with self.metrics.log_time((ALL_MODULES, LEARNER_THREAD_IN_QUEUE_WAIT_TIMER)): - if not self._in_queue: - time.sleep(0.001) - return - # Consume from the left (oldest batches first). - # If we consumed from the right, we would run into the danger of learning - # from newer batches (left side) most times, BUT sometimes grabbing a - # really old batches (right area of deque). - ma_batch_on_gpu = self._in_queue.popleft() + # Get a new batch from the GPU-data (learner queue OR circular buffer). + if isinstance(self._in_queue, CircularBuffer): + ma_batch_on_gpu = self._in_queue.sample() + else: + # Queue is empty: Sleep a tiny bit to avoid CPU-thrashing. + if not self._in_queue: + time.sleep(0.001) + return + # Consume from the left (oldest batches first). + # If we consumed from the right, we would run into the danger of + # learning from newer batches (left side) most times, BUT sometimes + # grabbing older batches (right area of deque). + ma_batch_on_gpu = self._in_queue.popleft() # Call the update method on the batch. 
        with self.metrics.log_time((ALL_MODULES, LEARNER_THREAD_UPDATE_TIMER)):
@@ -321,9 +331,6 @@ def step(self):
                         (ALL_MODULES, NUM_ENV_STEPS_SAMPLED_LIFETIME), default=0
                     )
                 },
-                num_epochs=self._num_epochs,
-                minibatch_size=self._minibatch_size,
-                shuffle_batch_per_epoch=self._shuffle_batch_per_epoch,
             )
         # We have to deepcopy the results dict, b/c we must avoid having a returned
         # Stats object sit in the queue and getting a new (possibly even tensor)

diff --git a/rllib/core/learner/learner.py b/rllib/core/learner/learner.py
index c26cd6a22a94..2b58743a52df 100644
--- a/rllib/core/learner/learner.py
+++ b/rllib/core/learner/learner.py
@@ -1350,15 +1350,6 @@ def _update_from_batch_or_episodes(
                 {next(iter(self.module.keys())): batch}, env_steps=len(batch)
             )

-        # TODO (sven): Remove this leftover hack here for the situation in which we
-        # did not go through the learner connector.
-        # Options:
-        # a) Either also pass given batches through the learner connector (even if
-        # episodes is None). (preferred solution)
-        # b) Get rid of the option to pass in a batch altogether.
-        # if episodes is None:
-        #    batch = self._convert_batch_type(batch)
-
         # Check the MultiAgentBatch, whether our RLModule contains all ModuleIDs
         # found in this batch. If not, throw an error.
         unknown_module_ids = set(batch.policy_batches.keys()) - set(self.module.keys())

diff --git a/rllib/tuned_examples/appo/cartpole_appo.py b/rllib/tuned_examples/appo/cartpole_appo.py
index 0af651b6c607..a85a9120ba2a 100644
--- a/rllib/tuned_examples/appo/cartpole_appo.py
+++ b/rllib/tuned_examples/appo/cartpole_appo.py
@@ -16,6 +16,7 @@
     APPOConfig()
     .environment("CartPole-v1")
     .training(
+        circular_buffer_iterations_per_batch=2,
         vf_loss_coeff=0.05,
         entropy_coeff=0.0,
     )

From 1297b3533c6cd65b8c99f8538fb7fd3bf2399488 Mon Sep 17 00:00:00 2001
From: dentiny
Date: Wed, 20 Nov 2024 11:57:28 -0800
Subject: [PATCH 036/107] [core] C++ update for core worker (#48772)

Apply some C++ updates and optimizations to core worker.

Signed-off-by: dentiny
---
 src/ray/core_worker/core_worker.cc | 219 ++++++++++++++++++++++-------
 src/ray/core_worker/core_worker.h  | 148 +++----------------
 src/ray/util/util.cc               |   8 +-
 src/ray/util/util.h                |  24 ++--
 4 files changed, 204 insertions(+), 195 deletions(-)

diff --git a/src/ray/core_worker/core_worker.cc b/src/ray/core_worker/core_worker.cc
index 9e8880d3aba3..622e6be11a84 100644
--- a/src/ray/core_worker/core_worker.cc
+++ b/src/ray/core_worker/core_worker.cc
@@ -126,8 +126,129 @@ std::optional TryGetLocalObjectLocation(

 }  // namespace

-CoreWorker::CoreWorker(const CoreWorkerOptions &options, const WorkerID &worker_id)
-    : options_(options),
+TaskCounter::TaskCounter() {
+  counter_.SetOnChangeCallback(
+      [this](const std::tuple<std::string, TaskStatusType, bool>
+                 &key) ABSL_EXCLUSIVE_LOCKS_REQUIRED(&mu_) mutable {
+        if (std::get<1>(key) != TaskStatusType::kRunning) {
+          return;
+        }
+        const auto &func_name = std::get<0>(key);
+        const auto is_retry = std::get<2>(key);
+        const int64_t running_total = counter_.Get(key);
+        const int64_t num_in_get = running_in_get_counter_.Get({func_name, is_retry});
+        const int64_t num_in_wait = running_in_wait_counter_.Get({func_name, is_retry});
+        const auto is_retry_label = is_retry ? "1" : "0";
+        // RUNNING_IN_RAY_GET/WAIT are sub-states of RUNNING, so we need to subtract
+        // them out to avoid double-counting.
+ ray::stats::STATS_tasks.Record( + running_total - num_in_get - num_in_wait, + {{"State", rpc::TaskStatus_Name(rpc::TaskStatus::RUNNING)}, + {"Name", func_name}, + {"IsRetry", is_retry_label}, + {"JobId", job_id_}, + {"Source", "executor"}}); + // Negate the metrics recorded from the submitter process for these tasks. + ray::stats::STATS_tasks.Record( + -running_total, + {{"State", rpc::TaskStatus_Name(rpc::TaskStatus::SUBMITTED_TO_WORKER)}, + {"Name", func_name}, + {"IsRetry", is_retry_label}, + {"JobId", job_id_}, + {"Source", "executor"}}); + // Record sub-state for get. + ray::stats::STATS_tasks.Record( + num_in_get, + {{"State", rpc::TaskStatus_Name(rpc::TaskStatus::RUNNING_IN_RAY_GET)}, + {"Name", func_name}, + {"IsRetry", is_retry_label}, + {"JobId", job_id_}, + {"Source", "executor"}}); + // Record sub-state for wait. + ray::stats::STATS_tasks.Record( + num_in_wait, + {{"State", rpc::TaskStatus_Name(rpc::TaskStatus::RUNNING_IN_RAY_WAIT)}, + {"Name", func_name}, + {"IsRetry", is_retry_label}, + {"JobId", job_id_}, + {"Source", "executor"}}); + }); +} + +void TaskCounter::RecordMetrics() { + absl::MutexLock l(&mu_); + counter_.FlushOnChangeCallbacks(); + if (IsActor()) { + float running = 0.0; + float in_get = 0.0; + float in_wait = 0.0; + float idle = 0.0; + if (running_in_wait_counter_.Total() > 0) { + in_wait = 1.0; + } else if (running_in_get_counter_.Total() > 0) { + in_get = 1.0; + } else if (num_tasks_running_ > 0) { + running = 1.0; + } else { + idle = 1.0; + } + ray::stats::STATS_actors.Record(idle, + {{"State", "IDLE"}, + {"Name", actor_name_}, + {"Source", "executor"}, + {"JobId", job_id_}}); + ray::stats::STATS_actors.Record(running, + {{"State", "RUNNING_TASK"}, + {"Name", actor_name_}, + {"Source", "executor"}, + {"JobId", job_id_}}); + ray::stats::STATS_actors.Record(in_get, + {{"State", "RUNNING_IN_RAY_GET"}, + {"Name", actor_name_}, + {"Source", "executor"}, + {"JobId", job_id_}}); + ray::stats::STATS_actors.Record(in_wait, + {{"State", "RUNNING_IN_RAY_WAIT"}, + {"Name", actor_name_}, + {"Source", "executor"}, + {"JobId", job_id_}}); + } +} + +void TaskCounter::SetMetricStatus(const std::string &func_name, + rpc::TaskStatus status, + bool is_retry) { + absl::MutexLock l(&mu_); + // Add a no-op increment to counter_ so that + // it will invoke a callback upon RecordMetrics. + counter_.Increment({func_name, TaskStatusType::kRunning, is_retry}, 0); + if (status == rpc::TaskStatus::RUNNING_IN_RAY_GET) { + running_in_get_counter_.Increment({func_name, is_retry}); + } else if (status == rpc::TaskStatus::RUNNING_IN_RAY_WAIT) { + running_in_wait_counter_.Increment({func_name, is_retry}); + } else { + RAY_CHECK(false) << "Unexpected status " << rpc::TaskStatus_Name(status); + } +} + +void TaskCounter::UnsetMetricStatus(const std::string &func_name, + rpc::TaskStatus status, + bool is_retry) { + absl::MutexLock l(&mu_); + // Add a no-op decrement to counter_ so that + // it will invoke a callback upon RecordMetrics. 
+  counter_.Decrement({func_name, TaskStatusType::kRunning, is_retry}, 0);
+  if (status == rpc::TaskStatus::RUNNING_IN_RAY_GET) {
+    running_in_get_counter_.Decrement({func_name, is_retry});
+  } else if (status == rpc::TaskStatus::RUNNING_IN_RAY_WAIT) {
+    running_in_wait_counter_.Decrement({func_name, is_retry});
+  } else {
+    RAY_LOG(FATAL) << "Unexpected status " << rpc::TaskStatus_Name(status);
+  }
+}
+
+CoreWorker::CoreWorker(CoreWorkerOptions options, const WorkerID &worker_id)
+    : options_(std::move(options)),
       get_call_site_(RayConfig::instance().record_ref_creation_sites()
                          ? options_.get_lang_stack
                          : nullptr),
@@ -339,7 +460,7 @@ CoreWorker::CoreWorker(const CoreWorkerOptions &options, const WorkerID &worker_
         << "max_pending_lease_requests_per_scheduling_category can't be 0";
     lease_request_rate_limiter_ =
        std::make_shared<ClusterSizeBasedLeaseRequestRateLimiter>(
-            /*kMinConcurrentLeaseCap*/ 10);
+            /*min_concurrent_lease_cap_*/ 10);
  }

  // Register a callback to monitor add/removed nodes.
@@ -516,14 +637,13 @@ CoreWorker::CoreWorker(const CoreWorkerOptions &options, const WorkerID &worker_

  actor_creator_ = std::make_shared<DefaultActorCreator>(gcs_client_);

-  actor_task_submitter_ = std::shared_ptr<ActorTaskSubmitter>(
-      new ActorTaskSubmitter(*core_worker_client_pool_,
-                             *memory_store_,
-                             *task_manager_,
-                             *actor_creator_,
-                             on_excess_queueing,
-                             io_service_,
-                             reference_counter_));
+  actor_task_submitter_ = std::make_shared<ActorTaskSubmitter>(*core_worker_client_pool_,
+                                                               *memory_store_,
+                                                               *task_manager_,
+                                                               *actor_creator_,
+                                                               on_excess_queueing,
+                                                               io_service_,
+                                                               reference_counter_);

  auto node_addr_factory = [this](const NodeID &node_id) {
    absl::optional<rpc::Address> addr;
@@ -564,15 +684,16 @@ CoreWorker::CoreWorker(const CoreWorkerOptions &options, const WorkerID &worker_
                                   uint64_t object_size) {
    reference_counter_->ReportLocalityData(object_id, locations, object_size);
  };
-  future_resolver_.reset(new FutureResolver(memory_store_,
-                                            reference_counter_,
-                                            std::move(report_locality_data_callback),
-                                            core_worker_client_pool_,
-                                            rpc_address_));
+  future_resolver_ =
+      std::make_unique<FutureResolver>(memory_store_,
+                                       reference_counter_,
+                                       std::move(report_locality_data_callback),
+                                       core_worker_client_pool_,
+                                       rpc_address_);

  // Unfortunately the raylet client has to be constructed after the receivers.
  if (task_receiver_ != nullptr) {
-    task_argument_waiter_.reset(new DependencyWaiterImpl(*local_raylet_client_));
+    task_argument_waiter_ = std::make_unique<DependencyWaiterImpl>(*local_raylet_client_);
    task_receiver_->Init(core_worker_client_pool_, rpc_address_, task_argument_waiter_);
  }

@@ -592,13 +713,13 @@ CoreWorker::CoreWorker(const CoreWorkerOptions &options, const WorkerID &worker_
        for (const auto &node_id : object_locations.value()) {
          absl::optional<rpc::Address> addr = node_addr_factory(node_id);
          if (addr.has_value()) {
-            locations.push_back(addr.value());
-          } else {
-            // We're getting potentially stale locations directly from the reference
-            // counter, so the location might be a dead node.
-            RAY_LOG(DEBUG).WithField(object_id).WithField(node_id)
-                << "Object location is dead, not using it in the recovery of object";
+            locations.emplace_back(std::move(addr.value()));
+            continue;
          }
+          // We're getting potentially stale locations directly from the reference
+          // counter, so the location might be a dead node.
+          RAY_LOG(DEBUG).WithField(object_id).WithField(node_id)
+              << "Object location is dead, not using it in the recovery of object";
        }
      }
      callback(object_id, locations);
@@ -1014,7 +1135,7 @@ void CoreWorker::RegisterToGcs(int64_t worker_launch_time_ms,
                           std::chrono::system_clock::now().time_since_epoch())
                           .count();
    worker_info.emplace("driver_id", worker_id.Binary());
-    worker_info.emplace("start_time", std::to_string(start_time));
+    worker_info.emplace("start_time", absl::StrFormat("%d", start_time));
    if (!options_.driver_name.empty()) {
      worker_info.emplace("name", options_.driver_name);
    }
@@ -1033,7 +1154,8 @@
  worker_data->mutable_worker_address()->set_port(rpc_address_.port());
  worker_data->mutable_worker_address()->set_worker_id(worker_id.Binary());
  worker_data->set_worker_type(options_.worker_type);
-  worker_data->mutable_worker_info()->insert(worker_info.begin(), worker_info.end());
+  worker_data->mutable_worker_info()->insert(std::make_move_iterator(worker_info.begin()),
+                                             std::make_move_iterator(worker_info.end()));
  worker_data->set_is_alive(true);
  worker_data->set_pid(pid_);
@@ -1067,7 +1189,7 @@ void CoreWorker::InternalHeartbeat() {
    absl::MutexLock lock(&mutex_);
    while (!to_resubmit_.empty() &&
          current_time_ms() > to_resubmit_.top().execution_time_ms) {
-      tasks_to_resubmit.push_back(std::move(to_resubmit_.top()));
+      tasks_to_resubmit.emplace_back(to_resubmit_.top());
      to_resubmit_.pop();
    }
  }
@@ -1168,15 +1290,16 @@ Status CoreWorker::GetOwnerAddress(const ObjectID &object_id,

 std::vector<rpc::ObjectReference> CoreWorker::GetObjectRefs(
     const std::vector<ObjectID> &object_ids) const {
  std::vector<rpc::ObjectReference> refs;
+  refs.reserve(object_ids.size());
  for (const auto &object_id : object_ids) {
    rpc::ObjectReference ref;
    ref.set_object_id(object_id.Binary());
    rpc::Address owner_address;
    if (reference_counter_->GetOwner(object_id, &owner_address)) {
      // NOTE(swang): Detached actors do not have an owner address set.
-      ref.mutable_owner_address()->CopyFrom(owner_address);
+      *ref.mutable_owner_address() = std::move(owner_address);
    }
-    refs.push_back(std::move(ref));
+    refs.emplace_back(std::move(ref));
  }
  return refs;
 }

@@ -2746,7 +2869,7 @@ std::optional CoreWorker::GetLocalActorState(

 ActorID CoreWorker::DeserializeAndRegisterActorHandle(const std::string &serialized,
                                                       const ObjectID &outer_object_id,
                                                       bool add_local_ref) {
-  std::unique_ptr<ActorHandle> actor_handle(new ActorHandle(serialized));
+  auto actor_handle = std::make_unique<ActorHandle>(serialized);
  return actor_manager_->RegisterActorHandle(std::move(actor_handle),
                                             outer_object_id,
                                             CurrentCallSite(),
@@ -2801,20 +2924,16 @@ CoreWorker::ListNamedActors(bool all_namespaces) {
    stream << "There was timeout in getting the list of named actors, "
              "probably because the GCS server is dead or under high load .";
    return std::make_pair(std::move(actors), Status::TimedOut(stream.str()));
-  } else if (!status.ok()) {
-    return std::make_pair(std::move(actors), status);
-  } else {
-    return std::make_pair(std::move(actors), status);
  }
+  return std::make_pair(std::move(actors), std::move(status));
 }

 std::pair<std::shared_ptr<ActorHandle>, Status>
 CoreWorker::GetNamedActorHandleLocalMode(const std::string &name) {
  auto it = local_mode_named_actor_registry_.find(name);
  if (it == local_mode_named_actor_registry_.end()) {
-    std::ostringstream stream;
-    stream << "Failed to look up actor with name '" << name;
-    return std::make_pair(nullptr, Status::NotFound(stream.str()));
+    std::string err_msg = absl::StrFormat("Failed to look up actor with name %s", name);
+    return std::make_pair(nullptr, Status::NotFound(std::move(err_msg)));
  }

  return std::make_pair(GetActorHandle(it->second), Status::OK());
@@ -2823,12 +2942,13 @@ CoreWorker::GetNamedActorHandleLocalMode(const std::string &name) {
 std::pair<std::vector<std::pair<std::string, std::string>>, Status>
 CoreWorker::ListNamedActorsLocalMode() {
  std::vector<std::pair<std::string, std::string>> actors;
+  actors.reserve(local_mode_named_actor_registry_.size());
  for (auto it = local_mode_named_actor_registry_.begin();
       it != local_mode_named_actor_registry_.end();
       it++) {
-    actors.push_back(std::make_pair(/*namespace=*/"", it->first));
+    actors.emplace_back(/*namespace=*/"", it->first);
  }
-  return std::make_pair(actors, Status::OK());
+  return std::make_pair(std::move(actors), Status::OK());
 }

 const std::string CoreWorker::GetActorName() const {
@@ -2858,7 +2978,8 @@ void CoreWorker::RunTaskExecutionLoop() {
      Exit(rpc::WorkerExitType::INTENDED_USER_EXIT,
          absl::StrCat("Worker exits by a signal. ", status.message()),
          nullptr);
-    } else if (status.IsUnexpectedSystemExit()) {
+    }
+    if (status.IsUnexpectedSystemExit()) {
      Exit(
          rpc::WorkerExitType::SYSTEM_ERROR,
          absl::StrCat("Worker exits unexpectedly by a signal. ", status.message()),
@@ -3014,14 +3135,14 @@ Status CoreWorker::ExecuteTask(
  Status status;
  TaskType task_type = TaskType::NORMAL_TASK;
  if (task_spec.IsActorCreationTask()) {
-    RAY_CHECK(return_objects->size() > 0);
+    RAY_CHECK_GT(return_objects->size(), static_cast<size_t>(0));
    return_objects->pop_back();
    task_type = TaskType::ACTOR_CREATION_TASK;
    SetActorId(task_spec.ActorCreationId());
    task_counter_.BecomeActor(task_spec.FunctionDescriptor()->ClassName());
    {
-      std::unique_ptr<ActorHandle> self_actor_handle(
-          new ActorHandle(task_spec.GetSerializedActorHandle()));
+      auto self_actor_handle =
+          std::make_unique<ActorHandle>(task_spec.GetSerializedActorHandle());
      // Register the handle to the current actor itself.
      actor_manager_->RegisterActorHandle(std::move(self_actor_handle),
                                          ObjectID::Nil(),
@@ -3063,9 +3184,9 @@ Status CoreWorker::ExecuteTask(
        defined_concurrency_groups,
        name_of_concurrency_group_to_execute,
        /*is_reattempt=*/task_spec.AttemptNumber() > 0,
-        /*is_streaming_generator*/ task_spec.IsStreamingGenerator(),
-        /*retry_exception*/ task_spec.ShouldRetryExceptions(),
-        /*generator_backpressure_num_objects*/
+        /*is_streaming_generator=*/task_spec.IsStreamingGenerator(),
+        /*retry_exception=*/task_spec.ShouldRetryExceptions(),
+        /*generator_backpressure_num_objects=*/
        task_spec.GeneratorBackpressureNumObjects());

  // Get the reference counts for any IDs that we borrowed during this task,
@@ -3078,7 +3199,7 @@ Status CoreWorker::ExecuteTask(
  if (!borrowed_ids.empty()) {
    reference_counter_->PopAndClearLocalBorrowers(borrowed_ids, borrowed_refs, &deleted);
  }
-  if (dynamic_return_objects != NULL) {
+  if (dynamic_return_objects != nullptr) {
    for (const auto &dynamic_return : *dynamic_return_objects) {
      reference_counter_->PopAndClearLocalBorrowers(
          {dynamic_return.first}, borrowed_refs, &deleted);
@@ -3097,7 +3218,7 @@ Status CoreWorker::ExecuteTask(
  }

  if (!options_.is_local_mode) {
-    SetCurrentTaskId(TaskID::Nil(), /*attempt_number=*/0, "");
+    SetCurrentTaskId(TaskID::Nil(), /*attempt_number=*/0, /*task_name=*/"");
    worker_context_.ResetCurrentTask();
  }
  {
@@ -4736,11 +4857,11 @@ void CoreWorker::UpdateTaskIsDebuggerPaused(const TaskID &task_id,

 ClusterSizeBasedLeaseRequestRateLimiter::ClusterSizeBasedLeaseRequestRateLimiter(
     size_t min_concurrent_lease_limit)
-    : kMinConcurrentLeaseCap(min_concurrent_lease_limit), num_alive_nodes_(0) {}
+    : min_concurrent_lease_cap_(min_concurrent_lease_limit), num_alive_nodes_(0) {}

 size_t ClusterSizeBasedLeaseRequestRateLimiter::
     GetMaxPendingLeaseRequestsPerSchedulingCategory() {
-  return std::max(kMinConcurrentLeaseCap, num_alive_nodes_.load());
+  return std::max(min_concurrent_lease_cap_, num_alive_nodes_.load());
 }

 void ClusterSizeBasedLeaseRequestRateLimiter::OnNodeChanges(

diff --git a/src/ray/core_worker/core_worker.h b/src/ray/core_worker/core_worker.h
index c7f07800c700..bdf7c13f15d5 100644
--- a/src/ray/core_worker/core_worker.h
+++ b/src/ray/core_worker/core_worker.h
@@ -59,8 +59,7 @@
 /// "RPC_SERVICE_HANDLER(CoreWorkerService, ExampleCall, 1)"
 /// 4) Add a method to the CoreWorker class below: "CoreWorker::HandleExampleCall"

-namespace ray {
-namespace core {
+namespace ray::core {

 JobID GetProcessJobID(const CoreWorkerOptions &options);

 class TaskCounter {
  /// A task can only be one of the following state. Received state in particular
  /// covers from the point of RPC call to beginning execution.
-  enum TaskStatusType { kPending, kRunning, kFinished };
+  enum class TaskStatusType { kPending, kRunning, kFinished };

 public:
-  TaskCounter() {
-    counter_.SetOnChangeCallback(
-        [this](const std::tuple<std::string, TaskStatusType, bool> &key)
-            ABSL_EXCLUSIVE_LOCKS_REQUIRED(&mu_) mutable {
-              if (std::get<1>(key) != kRunning) {
-                return;
-              }
-              auto func_name = std::get<0>(key);
-              auto is_retry = std::get<2>(key);
-              int64_t running_total = counter_.Get(key);
-              int64_t num_in_get = running_in_get_counter_.Get({func_name, is_retry});
-              int64_t num_in_wait = running_in_wait_counter_.Get({func_name, is_retry});
-              auto is_retry_label = is_retry ? "1" : "0";
-              // RUNNING_IN_RAY_GET/WAIT are sub-states of RUNNING, so we need to subtract
-              // them out to avoid double-counting.
- ray::stats::STATS_tasks.Record( - running_total - num_in_get - num_in_wait, - {{"State", rpc::TaskStatus_Name(rpc::TaskStatus::RUNNING)}, - {"Name", func_name}, - {"IsRetry", is_retry_label}, - {"JobId", job_id_}, - {"Source", "executor"}}); - // Negate the metrics recorded from the submitter process for these tasks. - ray::stats::STATS_tasks.Record( - -running_total, - {{"State", rpc::TaskStatus_Name(rpc::TaskStatus::SUBMITTED_TO_WORKER)}, - {"Name", func_name}, - {"IsRetry", is_retry_label}, - {"JobId", job_id_}, - {"Source", "executor"}}); - // Record sub-state for get. - ray::stats::STATS_tasks.Record( - num_in_get, - {{"State", rpc::TaskStatus_Name(rpc::TaskStatus::RUNNING_IN_RAY_GET)}, - {"Name", func_name}, - {"IsRetry", is_retry_label}, - {"JobId", job_id_}, - {"Source", "executor"}}); - // Record sub-state for wait. - ray::stats::STATS_tasks.Record( - num_in_wait, - {{"State", rpc::TaskStatus_Name(rpc::TaskStatus::RUNNING_IN_RAY_WAIT)}, - {"Name", func_name}, - {"IsRetry", is_retry_label}, - {"JobId", job_id_}, - {"Source", "executor"}}); - }); - } + TaskCounter(); void BecomeActor(const std::string &actor_name) { absl::MutexLock l(&mu_); @@ -133,95 +85,35 @@ class TaskCounter { bool IsActor() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&mu_) { return actor_name_.size() > 0; } - void RecordMetrics() { - absl::MutexLock l(&mu_); - counter_.FlushOnChangeCallbacks(); - if (IsActor()) { - float running = 0.0; - float in_get = 0.0; - float in_wait = 0.0; - float idle = 0.0; - if (running_in_wait_counter_.Total() > 0) { - in_wait = 1.0; - } else if (running_in_get_counter_.Total() > 0) { - in_get = 1.0; - } else if (num_tasks_running_ > 0) { - running = 1.0; - } else { - idle = 1.0; - } - ray::stats::STATS_actors.Record(idle, - {{"State", "IDLE"}, - {"Name", actor_name_}, - {"Source", "executor"}, - {"JobId", job_id_}}); - ray::stats::STATS_actors.Record(running, - {{"State", "RUNNING_TASK"}, - {"Name", actor_name_}, - {"Source", "executor"}, - {"JobId", job_id_}}); - ray::stats::STATS_actors.Record(in_get, - {{"State", "RUNNING_IN_RAY_GET"}, - {"Name", actor_name_}, - {"Source", "executor"}, - {"JobId", job_id_}}); - ray::stats::STATS_actors.Record(in_wait, - {{"State", "RUNNING_IN_RAY_WAIT"}, - {"Name", actor_name_}, - {"Source", "executor"}, - {"JobId", job_id_}}); - } - } + void RecordMetrics(); void IncPending(const std::string &func_name, bool is_retry) { absl::MutexLock l(&mu_); - counter_.Increment({func_name, kPending, is_retry}); + counter_.Increment({func_name, TaskStatusType::kPending, is_retry}); } void MovePendingToRunning(const std::string &func_name, bool is_retry) { absl::MutexLock l(&mu_); - counter_.Swap({func_name, kPending, is_retry}, {func_name, kRunning, is_retry}); + counter_.Swap({func_name, TaskStatusType::kPending, is_retry}, + {func_name, TaskStatusType::kRunning, is_retry}); num_tasks_running_++; } void MoveRunningToFinished(const std::string &func_name, bool is_retry) { absl::MutexLock l(&mu_); - counter_.Swap({func_name, kRunning, is_retry}, {func_name, kFinished, is_retry}); + counter_.Swap({func_name, TaskStatusType::kRunning, is_retry}, + {func_name, TaskStatusType::kFinished, is_retry}); num_tasks_running_--; - RAY_CHECK(num_tasks_running_ >= 0); + RAY_CHECK_GE(num_tasks_running_, 0); } void SetMetricStatus(const std::string &func_name, rpc::TaskStatus status, - bool is_retry) { - absl::MutexLock l(&mu_); - // Add a no-op increment to counter_ so that - // it will invoke a callback upon RecordMetrics. 
-    counter_.Increment({func_name, TaskStatusType::kRunning, is_retry}, 0);
-    if (status == rpc::TaskStatus::RUNNING_IN_RAY_GET) {
-      running_in_get_counter_.Increment({func_name, is_retry});
-    } else if (status == rpc::TaskStatus::RUNNING_IN_RAY_WAIT) {
-      running_in_wait_counter_.Increment({func_name, is_retry});
-    } else {
-      RAY_CHECK(false) << "Unexpected status " << rpc::TaskStatus_Name(status);
-    }
-  }
+                       bool is_retry);

   void UnsetMetricStatus(const std::string &func_name,
                          rpc::TaskStatus status,
-                         bool is_retry) {
-    absl::MutexLock l(&mu_);
-    // Add a no-op decrement to counter_ so that
-    // it will invoke a callback upon RecordMetrics.
-    counter_.Decrement({func_name, TaskStatusType::kRunning, is_retry}, 0);
-    if (status == rpc::TaskStatus::RUNNING_IN_RAY_GET) {
-      running_in_get_counter_.Decrement({func_name, is_retry});
-    } else if (status == rpc::TaskStatus::RUNNING_IN_RAY_WAIT) {
-      running_in_wait_counter_.Decrement({func_name, is_retry});
-    } else {
-      RAY_CHECK(false) << "Unexpected status " << rpc::TaskStatus_Name(status);
-    }
-  }
+                         bool is_retry);

 private:
   mutable absl::Mutex mu_;
@@ -274,7 +166,7 @@ class CoreWorker : public rpc::CoreWorkerServiceHandler {
   ///
   /// \param[in] options The various initialization options.
   /// \param[in] worker_id ID of this worker.
-  CoreWorker(const CoreWorkerOptions &options, const WorkerID &worker_id);
+  CoreWorker(CoreWorkerOptions options, const WorkerID &worker_id);

   CoreWorker(CoreWorker const &) = delete;

@@ -402,11 +294,10 @@ class CoreWorker : public rpc::CoreWorkerServiceHandler {
   }

   bool GetCurrentTaskRetryExceptions() const {
-    if (!options_.is_local_mode) {
-      return worker_context_.GetCurrentTask()->ShouldRetryExceptions();
-    } else {
+    if (options_.is_local_mode) {
       return false;
     }
+    return worker_context_.GetCurrentTask()->ShouldRetryExceptions();
   }

   void SetWebuiDisplay(const std::string &key, const std::string &message);
@@ -1100,9 +991,7 @@ class CoreWorker : public rpc::CoreWorkerServiceHandler {
   const ResourceMappingType GetResourceIDs() const;

   /// Create a profile event and push it to the TaskEventBuffer when the event is destructed.
-  std::unique_ptr<worker::ProfileEvent> CreateProfileEvent(
-
-      const std::string &event_name);
+  std::unique_ptr<worker::ProfileEvent> CreateProfileEvent(const std::string &event_name);

   int64_t GetNumTasksSubmitted() const {
     return normal_task_submitter_->GetNumTasksSubmitted();
@@ -1971,8 +1860,7 @@ class ClusterSizeBasedLeaseRequestRateLimiter : public LeaseRequestRateLimiter {
   void OnNodeChanges(const rpc::GcsNodeInfo &data);

 private:
-  const size_t kMinConcurrentLeaseCap;
+  const size_t min_concurrent_lease_cap_;
   std::atomic<size_t> num_alive_nodes_;
 };

-}  // namespace core
-}  // namespace ray
+}  // namespace ray::core
diff --git a/src/ray/util/util.cc b/src/ray/util/util.cc
index 478fec48d157..35b2d7c51144 100644
--- a/src/ray/util/util.cc
+++ b/src/ray/util/util.cc
@@ -35,11 +35,12 @@
 #include "ray/util/filesystem.h"
 #include "ray/util/logging.h"
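The ScanToken helper documented just below consumes a scanf-style token from
the front of a string and advances the caller's iterator past whatever
matched. As a loose illustration (Python, with a regex standing in for the
scanf format and an invented name; not code from this patch):

```python
import re

def scan_token(s: str, pos: int, pattern: str) -> tuple[str, int]:
    # Return the matched prefix (possibly empty) and the advanced position;
    # as with the C++ helper, a failed match leaves the position unchanged.
    m = re.match(pattern, s[pos:])
    return (m.group(0), pos + m.end()) if m else ("", pos)

prefix, pos = scan_token("tcp://127.0.0.1", 0, r"[a-z]+://")  # ("tcp://", 6)
```

+namespace {
 /// Uses sscanf() to read a token matching from the string, advancing the iterator.
 /// \param c_str A string iterator that is dereferenceable. (i.e.: c_str < string::end())
 /// \param format The pattern. It must not produce any output. (e.g., use %*d, not %d.)
 /// \return The scanned prefix of the string, if any.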
-static std::string ScanToken(std::string::const_iterator &c_str, std::string format) {
+std::string ScanToken(std::string::const_iterator &c_str, std::string format) {
   int i = 0;
   std::string result;
   format += "%n";
@@ -49,6 +50,7 @@ static std::string ScanToken(std::string::const_iterator &c_str, std::string for
   }
   return result;
 }
+}  // namespace

 std::string EndpointToUrl(
     const boost::asio::generic::basic_endpoint<boost::asio::generic::stream_protocol> &ep,
@@ -58,7 +60,7 @@ std::string EndpointToUrl(
   case AF_INET: {
     scheme = "tcp://";
     boost::asio::ip::tcp::endpoint e(boost::asio::ip::tcp::v4(), 0);
-    RAY_CHECK(e.size() == ep.size());
+    RAY_CHECK_EQ(e.size(), ep.size());
     const sockaddr *src = ep.data();
     sockaddr *dst = e.data();
     *reinterpret_cast<sockaddr_in *>(dst) = *reinterpret_cast<const sockaddr_in *>(src);
@@ -70,7 +72,7 @@ std::string EndpointToUrl(
   case AF_INET6: {
     scheme = "tcp://";
     boost::asio::ip::tcp::endpoint e(boost::asio::ip::tcp::v6(), 0);
-    RAY_CHECK(e.size() == ep.size());
+    RAY_CHECK_EQ(e.size(), ep.size());
     const sockaddr *src = ep.data();
     sockaddr *dst = e.data();
     *reinterpret_cast<sockaddr_in6 *>(dst) = *reinterpret_cast<const sockaddr_in6 *>(src);
diff --git a/src/ray/util/util.h b/src/ray/util/util.h
index e7f7a1d96781..05ce88c7ee47 100644
--- a/src/ray/util/util.h
+++ b/src/ray/util/util.h
@@ -62,22 +62,13 @@
 #endif

 // Boost forward-declarations (to avoid forcing slow header inclusions)
-namespace boost {
-
-namespace asio {
-
-namespace generic {
+namespace boost::asio::generic {

 template <class Protocol>
 class basic_endpoint;
-
 class stream_protocol;

-}  // namespace generic
-
-}  // namespace asio
-
-}  // namespace boost
+}  // namespace boost::asio::generic

 enum class CommandLineSyntax { System, POSIX, Windows };
@@ -302,12 +293,19 @@ inline void unsetEnv(const std::string &name) {
   RAY_CHECK_EQ(ret, 0) << "Failed to unset env var " << name;
 }

+// Set [thread_name] for the current thread; if it fails, an error is logged.
+// NOTICE: It only works on macOS and Linux.
 inline void SetThreadName(const std::string &thread_name) {
+  int ret = 0;
 #if defined(__APPLE__)
-  pthread_setname_np(thread_name.c_str());
+  ret = pthread_setname_np(thread_name.c_str());
 #elif defined(__linux__)
-  pthread_setname_np(pthread_self(), thread_name.substr(0, 15).c_str());
+  ret = pthread_setname_np(pthread_self(), thread_name.substr(0, 15).c_str());
 #endif
+  if (ret != 0) {
+    RAY_LOG(ERROR) << "Failed to set thread name to " << thread_name << " since "
+                   << strerror(ret);
+  }
 }

 inline std::string GetThreadName() {

From d585b8ab27c45be5f58a96c0c829952e4f92660a Mon Sep 17 00:00:00 2001
From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com>
Date: Wed, 20 Nov 2024 12:36:00 -0800
Subject: [PATCH 037/107] [dashboard] properly encode query string (#48823)

for tests against `/test/http_get`

Signed-off-by: Lonnie Liu
---
 python/ray/dashboard/tests/test_dashboard.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/python/ray/dashboard/tests/test_dashboard.py b/python/ray/dashboard/tests/test_dashboard.py
index 4b61accc53c3..19571d9f76b7 100644
--- a/python/ray/dashboard/tests/test_dashboard.py
+++ b/python/ray/dashboard/tests/test_dashboard.py
@@ -11,6 +11,7 @@ import time
 import warnings
 from unittest.mock import MagicMock
+from urllib.parse import quote_plus

 import pytest
 import requests
@@ -370,7 +371,9 @@ def test_http_get(enable_test_module, ray_start_with_dashboard):
     while True:
         time.sleep(3)
         try:
-            response = requests.get(webui_url + "/test/http_get?url=" + target_url)
+            response = requests.get(
+                webui_url + "/test/http_get?url=" + quote_plus(target_url)
+            )
             response.raise_for_status()
             try:
                 dump_info = response.json()
@@ -385,7 +388,8 @@ def test_http_get(enable_test_module, ray_start_with_dashboard):
             http_port, grpc_port = ports
             response = requests.get(
-                f"http://{ip}:{http_port}" f"/test/http_get_from_agent?url={target_url}"
+                f"http://{ip}:{http_port}"
+                f"/test/http_get_from_agent?url={quote_plus(target_url)}"
             )
             response.raise_for_status()
             try:

From 3a604406e65b4890fab392a72f398a0b3464c44d Mon Sep 17 00:00:00 2001
From: Edward Oakes
Date: Wed, 20 Nov 2024 15:41:24 -0600
Subject: [PATCH 038/107] [serve] Add tests for HTTP status code `is_error`
 logic (#48822)

Adds tests for: https://github.com/ray-project/ray/pull/47896 (and some
stylistic refactoring).

---------

Signed-off-by: Edward Oakes
---
 python/ray/serve/_private/proxy.py     |  21 ++--
 python/ray/serve/tests/test_metrics.py | 161 ++++++++++++++++++++++++-
 2 files changed, 170 insertions(+), 12 deletions(-)

diff --git a/python/ray/serve/_private/proxy.py b/python/ray/serve/_private/proxy.py
index 7b850b6ccf1c..54f6908072c6 100644
--- a/python/ray/serve/_private/proxy.py
+++ b/python/ray/serve/_private/proxy.py
@@ -1009,18 +1009,17 @@ async def send_request_to_replica(
                 # the trailers message has been sent.
if not asgi_message.get("more_trailers", False): response_generator.stop_checking_for_disconnect() - elif asgi_message["type"] == "websocket.disconnect": + elif asgi_message["type"] in [ + "websocket.close", + "websocket.disconnect", + ]: status_code = str(asgi_message["code"]) - - # Check based on standard WebSocket status codes - if status_code in ["1000", "1001"]: - # Normal closure or going away, no error - is_error = False - else: - # Other 1xxx codes are specified as errors - is_error = status_code.startswith("1") - - status = ResponseStatus(code=status_code, is_error=is_error) + status = ResponseStatus( + code=status_code, + # All status codes are considered errors aside from: + # 1000 (CLOSE_NORMAL), 1001 (CLOSE_GOING_AWAY). + is_error=status_code not in ["1000", "1001"], + ) response_generator.stop_checking_for_disconnect() yield asgi_message diff --git a/python/ray/serve/tests/test_metrics.py b/python/ray/serve/tests/test_metrics.py index 6b3e674e7907..6f64666a96ba 100644 --- a/python/ray/serve/tests/test_metrics.py +++ b/python/ray/serve/tests/test_metrics.py @@ -6,7 +6,11 @@ import grpc import pytest import requests -from fastapi import FastAPI +from fastapi import FastAPI, WebSocket +from starlette.requests import Request +from starlette.responses import PlainTextResponse +from websockets.exceptions import ConnectionClosed +from websockets.sync.client import connect import ray import ray.util.state as state_api @@ -583,6 +587,161 @@ def f(*args): print("serve_grpc_request_latency_ms_sum working as expected.") +def test_proxy_metrics_http_status_code_is_error(serve_start_shutdown): + """Verify that 2xx status codes aren't errors, others are.""" + + def check_request_count_metrics( + expected_error_count: int, + expected_success_count: int, + ): + resp = requests.get("http://127.0.0.1:9999").text + error_count = 0 + success_count = 0 + for line in resp.split("\n"): + if line.startswith("ray_serve_num_http_error_requests_total"): + error_count += int(float(line.split(" ")[-1])) + if line.startswith("ray_serve_num_http_requests_total"): + success_count += int(float(line.split(" ")[-1])) + + assert error_count == expected_error_count + assert success_count == expected_success_count + return True + + @serve.deployment + async def return_status_code(request: Request): + code = int((await request.body()).decode("utf-8")) + return PlainTextResponse("", status_code=code) + + serve.run(return_status_code.bind()) + + # 200 is not an error. + r = requests.get("http://127.0.0.1:8000/", data=b"200") + assert r.status_code == 200 + wait_for_condition( + check_request_count_metrics, + expected_error_count=0, + expected_success_count=1, + ) + + # 2xx is not an error. + r = requests.get("http://127.0.0.1:8000/", data=b"250") + assert r.status_code == 250 + wait_for_condition( + check_request_count_metrics, + expected_error_count=0, + expected_success_count=2, + ) + + # 3xx is an error. + r = requests.get("http://127.0.0.1:8000/", data=b"300") + assert r.status_code == 300 + wait_for_condition( + check_request_count_metrics, + expected_error_count=1, + expected_success_count=3, + ) + + # 4xx is an error. + r = requests.get("http://127.0.0.1:8000/", data=b"400") + assert r.status_code == 400 + wait_for_condition( + check_request_count_metrics, + expected_error_count=2, + expected_success_count=4, + ) + + # 5xx is an error. 
+    r = requests.get("http://127.0.0.1:8000/", data=b"500")
+    assert r.status_code == 500
+    wait_for_condition(
+        check_request_count_metrics,
+        expected_error_count=3,
+        expected_success_count=5,
+    )
+
+
+def test_proxy_metrics_websocket_status_code_is_error(serve_start_shutdown):
+    """Verify that status codes aside from 1000 or 1001 are errors."""
+
+    def check_request_count_metrics(
+        expected_error_count: int,
+        expected_success_count: int,
+    ):
+        resp = requests.get("http://127.0.0.1:9999").text
+        error_count = 0
+        success_count = 0
+        for line in resp.split("\n"):
+            if line.startswith("ray_serve_num_http_error_requests_total"):
+                error_count += int(float(line.split(" ")[-1]))
+            if line.startswith("ray_serve_num_http_requests_total"):
+                success_count += int(float(line.split(" ")[-1]))
+
+        assert error_count == expected_error_count
+        assert success_count == expected_success_count
+        return True
+
+    fastapi_app = FastAPI()
+
+    @serve.deployment
+    @serve.ingress(fastapi_app)
+    class WebSocketServer:
+        @fastapi_app.websocket("/")
+        async def accept_then_close(self, ws: WebSocket):
+            await ws.accept()
+            code = int(await ws.receive_text())
+            await ws.close(code=code)
+
+    serve.run(WebSocketServer.bind())
+
+    # Regular disconnect (1000) is not an error.
+    with connect("ws://localhost:8000/") as ws:
+        with pytest.raises(ConnectionClosed):
+            ws.send("1000")
+            ws.recv()
+
+    wait_for_condition(
+        check_request_count_metrics,
+        expected_error_count=0,
+        expected_success_count=1,
+    )
+
+    # Goaway disconnect (1001) is not an error.
+    with connect("ws://localhost:8000/") as ws:
+        with pytest.raises(ConnectionClosed):
+            ws.send("1001")
+            ws.recv()
+
+    wait_for_condition(
+        check_request_count_metrics,
+        expected_error_count=0,
+        expected_success_count=2,
+    )
+
+    # Other codes are errors.
+    with connect("ws://localhost:8000/") as ws:
+        with pytest.raises(ConnectionClosed):
+            ws.send("1011")
+            ws.recv()
+
+    wait_for_condition(
+        check_request_count_metrics,
+        expected_error_count=1,
+        expected_success_count=3,
+    )
+
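The assertions above and below exercise the close-code rule introduced in
proxy.py earlier in this patch. Distilled into a standalone sketch (with a
hypothetical helper name, not code from the patch):

```python
def close_code_is_error(code: int) -> bool:
    # 1000 (CLOSE_NORMAL) and 1001 (CLOSE_GOING_AWAY) end a WebSocket
    # session cleanly; every other close code counts as an error.
    return code not in (1000, 1001)

assert not close_code_is_error(1000)  # normal closure
assert not close_code_is_error(1001)  # going away
assert close_code_is_error(1011)      # internal error
assert close_code_is_error(3000)      # registered/custom code
```

+    # Other codes are errors.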
+ with connect("ws://localhost:8000/") as ws: + with pytest.raises(ConnectionClosed): + ws.send("3000") + ws.recv() + + wait_for_condition( + check_request_count_metrics, + expected_error_count=2, + expected_success_count=4, + ) + + def test_replica_metrics_fields(serve_start_shutdown): """Test replica metrics fields""" From 97a50f060d95efac749c509e27a03b837f97ba16 Mon Sep 17 00:00:00 2001 From: Jiajun Yao Date: Wed, 20 Nov 2024 14:39:25 -0800 Subject: [PATCH 039/107] [Release] Rename aws to advanced_configurations_json in compute config (#48809) Signed-off-by: Jiajun Yao --- .../compute_configs/aws_70b_v1.yaml | 4 ++-- .../compute_configs/aws_70b_v2.yaml | 4 ++-- .../compute_configs/aws_7b_or_13b.yaml | 2 +- .../aws_70b_v1.yaml | 4 ++-- .../aws_70b_v2.yaml | 4 ++-- .../04_finetuning_llms_with_deepspeed/aws_7b.yaml | 2 +- .../templates/testing/compute_configs/cpu/aws.yaml | 2 +- .../templates/testing/compute_configs/gpu/aws.yaml | 2 +- python/ray/autoscaler/aws/tests/aws_compute.yaml | 2 +- .../autoscaler/gcp/tests/single_node_32_cpu_gce.yaml | 2 +- .../dolly_v2_fsdp_compute_aws.yaml | 2 +- .../gptj_deepspeed_compute_aws.yaml | 2 +- .../gptj_deepspeed_compute_gce.yaml | 2 +- .../30b_deepspeed_compute.yaml | 2 +- .../vicuna_13b_deepspeed_compute_aws.yaml | 2 +- .../air_tests/air_benchmarks/compute_gpu_1_aws.yaml | 2 +- .../air_benchmarks/compute_gpu_4x4_aws.yaml | 2 +- .../air_benchmarks/mlperf-train/compute_cpu_16.yaml | 2 +- release/air_tests/horovod/compute_tpl_aws.yaml | 2 +- .../only_head_node_1gpu_64cpu.yaml | 2 +- .../distributed/many_nodes_tests/compute_config.yaml | 2 +- release/benchmarks/single_node.yaml | 2 +- release/dashboard/agent_stress_compute.yaml | 2 +- release/jobs_tests/compute_tpl_4_xlarge.yaml | 2 +- release/jobs_tests/compute_tpl_gce_4_xlarge.yaml | 4 ++-- release/jobs_tests/compute_tpl_gpu_node.yaml | 2 +- release/jobs_tests/compute_tpl_gpu_worker.yaml | 2 +- release/k8s_tests/compute_tpl.yaml | 2 +- .../long_running_distributed_tests/compute_tpl.yaml | 2 +- release/long_running_tests/many_ppo.yaml | 2 +- release/long_running_tests/many_ppo_gce.yaml | 2 +- release/long_running_tests/tpl_cpu_1.yaml | 2 +- release/long_running_tests/tpl_cpu_1_c5.yaml | 2 +- release/long_running_tests/tpl_cpu_1_c5_gce.yaml | 2 +- release/long_running_tests/tpl_cpu_1_gce.yaml | 2 +- release/long_running_tests/tpl_cpu_1_large.yaml | 2 +- release/long_running_tests/tpl_cpu_1_large_gce.yaml | 2 +- release/long_running_tests/tpl_cpu_2.yaml | 2 +- release/long_running_tests/tpl_cpu_3.yaml | 2 +- release/long_running_tests/tpl_cpu_3_gce.yaml | 2 +- release/long_running_tests/tpl_cpu_4.yaml | 2 +- release/long_running_tests/tpl_cpu_4_gce.yaml | 2 +- release/ml_user_tests/horovod/compute_tpl_aws.yaml | 2 +- release/ml_user_tests/horovod/compute_tpl_gce.yaml | 2 +- .../ml_user_tests/tune_rllib/compute_tpl_aws.yaml | 2 +- .../nightly_tests/chaos_test/compute_template.yaml | 2 +- .../nightly_tests/dask_on_ray/1tb_sort_compute.yaml | 2 +- .../chaos_dask_on_ray_stress_compute.yaml | 2 +- .../dask_on_ray_sort_compute_template.yaml | 2 +- .../dask_on_ray/dask_on_ray_stress_compute.yaml | 2 +- .../dask_on_ray/dask_on_ray_stress_compute_k8s.yaml | 2 +- .../large_scale_dask_on_ray_compute_template.yaml | 2 +- .../nightly_tests/dataset/compute_gpu_4x4_aws.yaml | 2 +- .../dataset/pipelined_ingestion_compute.yaml | 2 +- release/nightly_tests/dataset/shuffle_compute.yaml | 2 +- .../decision_tree/autoscaling_compute.yaml | 2 +- .../decision_tree/autoscaling_compute_gce.yaml | 4 ++-- 
.../nightly_tests/placement_group_tests/compute.yaml | 4 ++-- .../long_running_test_compute.yaml | 2 +- .../placement_group_tests/pg_perf_test_compute.yaml | 2 +- .../nightly_tests/shuffle/100tb_shuffle_compute.yaml | 2 +- ...datasets_large_scale_compute_small_instances.yaml | 2 +- .../shuffle/shuffle_compute_autoscaling.yaml | 2 +- .../shuffle/shuffle_compute_autoscaling_gce.yaml | 4 ++-- .../shuffle/shuffle_compute_large_scale.yaml | 2 +- .../nightly_tests/shuffle/shuffle_compute_multi.yaml | 2 +- .../shuffle/shuffle_compute_multi_gce.yaml | 4 ++-- .../shuffle/shuffle_compute_single.yaml | 2 +- .../stress_tests/placement_group_tests_compute.yaml | 2 +- .../placement_group_tests_compute_gce.yaml | 6 +++--- .../stress_tests/smoke_test_compute.yaml | 2 +- .../stress_test_threaded_actor_compute.yaml | 2 +- .../stress_tests/stress_tests_compute.yaml | 2 +- .../stress_tests/stress_tests_compute_large.yaml | 2 +- .../ray_release/cluster_manager/cluster_manager.py | 9 +++++++-- release/ray_release/tests/test_cluster_manager.py | 12 +++++++----- release/rllib_tests/1gpu_16cpus.yaml | 2 +- release/rllib_tests/1gpu_16cpus_gce.yaml | 2 +- release/rllib_tests/1gpu_24cpus.yaml | 2 +- release/rllib_tests/1gpu_24cpus_gce.yaml | 2 +- release/rllib_tests/1gpu_32cpus.yaml | 2 +- release/rllib_tests/1gpu_4cpus.yaml | 2 +- release/rllib_tests/1gpu_4cpus_gce.yaml | 2 +- release/rllib_tests/2gpus_32cpus.yaml | 2 +- release/rllib_tests/2gpus_32cpus_gce.yaml | 2 +- release/rllib_tests/2gpus_64cpus.yaml | 2 +- release/rllib_tests/2gpus_64cpus_gce.yaml | 2 +- release/rllib_tests/32cpus.yaml | 2 +- release/rllib_tests/32cpus_gce.yaml | 2 +- release/rllib_tests/4gpus_512_cpus_gce.yaml | 2 +- release/rllib_tests/4gpus_544_cpus.yaml | 2 +- release/rllib_tests/4gpus_64cpus.yaml | 2 +- release/rllib_tests/4gpus_64cpus_gce.yaml | 2 +- release/rllib_tests/4gpus_96cpus.yaml | 2 +- release/rllib_tests/4gpus_96cpus_gce.yaml | 2 +- release/rllib_tests/8gpus_96cpus.yaml | 2 +- .../multi_node_checkpointing_compute_config.yaml | 2 +- release/serve_tests/compute_tpl_32_cpu.yaml | 2 +- .../serve_tests/compute_tpl_32_cpu_autoscaling.yaml | 2 +- .../serve_tests/compute_tpl_8_cpu_autoscaling.yaml | 2 +- release/serve_tests/compute_tpl_gpu_node.yaml | 2 +- release/serve_tests/compute_tpl_single_node.yaml | 2 +- .../serve_tests/compute_tpl_single_node_32_cpu.yaml | 2 +- release/serve_tests/compute_tpl_single_node_k8s.yaml | 2 +- .../train_tests/colocate_trainer/compute_aws.yaml | 2 +- release/train_tests/horovod/compute_tpl_aws.yaml | 2 +- release/train_tests/horovod/compute_tpl_gce.yaml | 2 +- .../multinode_persistence/compute_aws.yaml | 2 +- .../xgboost_lightgbm/compute_aws_10workers.yaml | 2 +- .../xgboost_lightgbm/compute_aws_1worker.yaml | 2 +- release/tune_tests/cloud_tests/tpl_aws_1x4.yaml | 2 +- .../fault_tolerance_tests/tpl_aws_16x1.yaml | 2 +- .../fault_tolerance_tests/tpl_gce_16x1.yaml | 2 +- .../tune_tests/scalability_tests/tpl_1x32_hd.yaml | 2 +- .../scalability_tests/tpl_gce_1x32_hd.yaml | 2 +- 115 files changed, 138 insertions(+), 131 deletions(-) diff --git a/doc/source/templates/04_finetuning_llms_with_deepspeed/compute_configs/aws_70b_v1.yaml b/doc/source/templates/04_finetuning_llms_with_deepspeed/compute_configs/aws_70b_v1.yaml index 4d553edf1500..d82552cb5c15 100644 --- a/doc/source/templates/04_finetuning_llms_with_deepspeed/compute_configs/aws_70b_v1.yaml +++ b/doc/source/templates/04_finetuning_llms_with_deepspeed/compute_configs/aws_70b_v1.yaml @@ -4,7 +4,7 @@ head_node_type: name: head_node_type instance_type: 
g5.48xlarge resources: - custom_resources: + custom_resources: large_cpu_mem: 1 worker_node_types: @@ -14,7 +14,7 @@ worker_node_types: max_workers: 3 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/doc/source/templates/04_finetuning_llms_with_deepspeed/compute_configs/aws_70b_v2.yaml b/doc/source/templates/04_finetuning_llms_with_deepspeed/compute_configs/aws_70b_v2.yaml index 975c81fe5f3a..985939a018d5 100644 --- a/doc/source/templates/04_finetuning_llms_with_deepspeed/compute_configs/aws_70b_v2.yaml +++ b/doc/source/templates/04_finetuning_llms_with_deepspeed/compute_configs/aws_70b_v2.yaml @@ -4,7 +4,7 @@ head_node_type: name: head_node_type instance_type: g5.48xlarge resources: - custom_resources: + custom_resources: large_cpu_mem: 1 worker_node_types: @@ -20,7 +20,7 @@ worker_node_types: max_workers: 2 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/doc/source/templates/04_finetuning_llms_with_deepspeed/compute_configs/aws_7b_or_13b.yaml b/doc/source/templates/04_finetuning_llms_with_deepspeed/compute_configs/aws_7b_or_13b.yaml index a8fee017a51d..0231e4aa53bc 100644 --- a/doc/source/templates/04_finetuning_llms_with_deepspeed/compute_configs/aws_7b_or_13b.yaml +++ b/doc/source/templates/04_finetuning_llms_with_deepspeed/compute_configs/aws_7b_or_13b.yaml @@ -12,7 +12,7 @@ worker_node_types: max_workers: 16 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/doc/source/templates/testing/compute_configs/04_finetuning_llms_with_deepspeed/aws_70b_v1.yaml b/doc/source/templates/testing/compute_configs/04_finetuning_llms_with_deepspeed/aws_70b_v1.yaml index 97441ceca4f7..57fa332f53c7 100644 --- a/doc/source/templates/testing/compute_configs/04_finetuning_llms_with_deepspeed/aws_70b_v1.yaml +++ b/doc/source/templates/testing/compute_configs/04_finetuning_llms_with_deepspeed/aws_70b_v1.yaml @@ -5,7 +5,7 @@ head_node_type: name: head_node_type instance_type: g5.48xlarge resources: - custom_resources: + custom_resources: large_cpu_mem: 1 worker_node_types: @@ -15,7 +15,7 @@ worker_node_types: max_workers: 3 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/doc/source/templates/testing/compute_configs/04_finetuning_llms_with_deepspeed/aws_70b_v2.yaml b/doc/source/templates/testing/compute_configs/04_finetuning_llms_with_deepspeed/aws_70b_v2.yaml index 9ff1c7d09aae..d176e25d9051 100644 --- a/doc/source/templates/testing/compute_configs/04_finetuning_llms_with_deepspeed/aws_70b_v2.yaml +++ b/doc/source/templates/testing/compute_configs/04_finetuning_llms_with_deepspeed/aws_70b_v2.yaml @@ -5,7 +5,7 @@ head_node_type: name: head_node_type instance_type: g5.48xlarge resources: - custom_resources: + custom_resources: large_cpu_mem: 1 worker_node_types: @@ -21,7 +21,7 @@ worker_node_types: max_workers: 2 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/doc/source/templates/testing/compute_configs/04_finetuning_llms_with_deepspeed/aws_7b.yaml b/doc/source/templates/testing/compute_configs/04_finetuning_llms_with_deepspeed/aws_7b.yaml index d8923e7ccad0..af1d8e1fa02c 100644 --- a/doc/source/templates/testing/compute_configs/04_finetuning_llms_with_deepspeed/aws_7b.yaml +++ b/doc/source/templates/testing/compute_configs/04_finetuning_llms_with_deepspeed/aws_7b.yaml 
@@ -13,7 +13,7 @@ worker_node_types: max_workers: 16 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/doc/source/templates/testing/compute_configs/cpu/aws.yaml b/doc/source/templates/testing/compute_configs/cpu/aws.yaml index 28b9115d2755..251368c99d42 100644 --- a/doc/source/templates/testing/compute_configs/cpu/aws.yaml +++ b/doc/source/templates/testing/compute_configs/cpu/aws.yaml @@ -13,7 +13,7 @@ worker_node_types: max_workers: 7 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/doc/source/templates/testing/compute_configs/gpu/aws.yaml b/doc/source/templates/testing/compute_configs/gpu/aws.yaml index 240dbea0e19e..d27020b4af2e 100644 --- a/doc/source/templates/testing/compute_configs/gpu/aws.yaml +++ b/doc/source/templates/testing/compute_configs/gpu/aws.yaml @@ -13,7 +13,7 @@ worker_node_types: max_workers: 3 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/python/ray/autoscaler/aws/tests/aws_compute.yaml b/python/ray/autoscaler/aws/tests/aws_compute.yaml index 1ef4e02ba1e8..8bf740d8eeed 100644 --- a/python/ray/autoscaler/aws/tests/aws_compute.yaml +++ b/python/ray/autoscaler/aws/tests/aws_compute.yaml @@ -1,7 +1,7 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} region: us-west-2 -aws: +advanced_configurations_json: IamInstanceProfile: {"Name": "ray-autoscaler-v1"} head_node_type: diff --git a/python/ray/autoscaler/gcp/tests/single_node_32_cpu_gce.yaml b/python/ray/autoscaler/gcp/tests/single_node_32_cpu_gce.yaml index c6d1a6729fa0..466d7fe8602c 100644 --- a/python/ray/autoscaler/gcp/tests/single_node_32_cpu_gce.yaml +++ b/python/ray/autoscaler/gcp/tests/single_node_32_cpu_gce.yaml @@ -19,7 +19,7 @@ gcp_advanced_configurations_json: initialize_params: disk_size_gb: 500 -#aws: +#advanced_configurations_json: # BlockDeviceMappings: # - DeviceName: /dev/sda1 # Ebs: diff --git a/release/air_examples/dolly_v2_lightning_fsdp_finetuning/dolly_v2_fsdp_compute_aws.yaml b/release/air_examples/dolly_v2_lightning_fsdp_finetuning/dolly_v2_fsdp_compute_aws.yaml index 7966578a31b1..9e6cabef573d 100644 --- a/release/air_examples/dolly_v2_lightning_fsdp_finetuning/dolly_v2_fsdp_compute_aws.yaml +++ b/release/air_examples/dolly_v2_lightning_fsdp_finetuning/dolly_v2_fsdp_compute_aws.yaml @@ -12,7 +12,7 @@ worker_node_types: max_workers: 15 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/air_examples/gptj_deepspeed_finetuning/gptj_deepspeed_compute_aws.yaml b/release/air_examples/gptj_deepspeed_finetuning/gptj_deepspeed_compute_aws.yaml index e315fc0b9f88..6ed2aa738ed9 100644 --- a/release/air_examples/gptj_deepspeed_finetuning/gptj_deepspeed_compute_aws.yaml +++ b/release/air_examples/gptj_deepspeed_finetuning/gptj_deepspeed_compute_aws.yaml @@ -12,7 +12,7 @@ worker_node_types: max_workers: 7 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/air_examples/gptj_deepspeed_finetuning/gptj_deepspeed_compute_gce.yaml b/release/air_examples/gptj_deepspeed_finetuning/gptj_deepspeed_compute_gce.yaml index 7be4f970f0b1..be93c6d0aac6 100644 --- a/release/air_examples/gptj_deepspeed_finetuning/gptj_deepspeed_compute_gce.yaml +++ b/release/air_examples/gptj_deepspeed_finetuning/gptj_deepspeed_compute_gce.yaml @@ -14,7 +14,7 @@ worker_node_types: max_workers: 7 
use_spot: false -#aws: +#advanced_configurations_json: # TagSpecifications: # - ResourceType: "instance" # Tags: diff --git a/release/air_examples/opt_deepspeed_batch_inference/30b_deepspeed_compute.yaml b/release/air_examples/opt_deepspeed_batch_inference/30b_deepspeed_compute.yaml index 1ac93d59eb91..a2b79c9cc489 100644 --- a/release/air_examples/opt_deepspeed_batch_inference/30b_deepspeed_compute.yaml +++ b/release/air_examples/opt_deepspeed_batch_inference/30b_deepspeed_compute.yaml @@ -7,7 +7,7 @@ head_node_type: worker_node_types: [] -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/air_examples/vicuna_13b_lightning_deepspeed_finetuning/vicuna_13b_deepspeed_compute_aws.yaml b/release/air_examples/vicuna_13b_lightning_deepspeed_finetuning/vicuna_13b_deepspeed_compute_aws.yaml index 17f69c81a906..3fe5ec7c083e 100644 --- a/release/air_examples/vicuna_13b_lightning_deepspeed_finetuning/vicuna_13b_deepspeed_compute_aws.yaml +++ b/release/air_examples/vicuna_13b_lightning_deepspeed_finetuning/vicuna_13b_deepspeed_compute_aws.yaml @@ -12,7 +12,7 @@ worker_node_types: max_workers: 15 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/air_tests/air_benchmarks/compute_gpu_1_aws.yaml b/release/air_tests/air_benchmarks/compute_gpu_1_aws.yaml index df7c2a8958a0..150990710680 100644 --- a/release/air_tests/air_benchmarks/compute_gpu_1_aws.yaml +++ b/release/air_tests/air_benchmarks/compute_gpu_1_aws.yaml @@ -9,7 +9,7 @@ head_node_type: worker_node_types: [] -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/air_tests/air_benchmarks/compute_gpu_4x4_aws.yaml b/release/air_tests/air_benchmarks/compute_gpu_4x4_aws.yaml index ee7d1436e7cf..c543315e24f3 100644 --- a/release/air_tests/air_benchmarks/compute_gpu_4x4_aws.yaml +++ b/release/air_tests/air_benchmarks/compute_gpu_4x4_aws.yaml @@ -14,7 +14,7 @@ worker_node_types: min_workers: 3 use_spot: false -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/air_tests/air_benchmarks/mlperf-train/compute_cpu_16.yaml b/release/air_tests/air_benchmarks/mlperf-train/compute_cpu_16.yaml index be1577e57401..b45a2c038d78 100644 --- a/release/air_tests/air_benchmarks/mlperf-train/compute_cpu_16.yaml +++ b/release/air_tests/air_benchmarks/mlperf-train/compute_cpu_16.yaml @@ -9,7 +9,7 @@ head_node_type: worker_node_types: [] -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/air_tests/horovod/compute_tpl_aws.yaml b/release/air_tests/horovod/compute_tpl_aws.yaml index d14997e25f0b..2ef09f059167 100644 --- a/release/air_tests/horovod/compute_tpl_aws.yaml +++ b/release/air_tests/horovod/compute_tpl_aws.yaml @@ -15,7 +15,7 @@ worker_node_types: min_workers: 1 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/benchmark-worker-startup/only_head_node_1gpu_64cpu.yaml b/release/benchmark-worker-startup/only_head_node_1gpu_64cpu.yaml index cddae87016f6..f377139d6f22 100644 --- a/release/benchmark-worker-startup/only_head_node_1gpu_64cpu.yaml +++ b/release/benchmark-worker-startup/only_head_node_1gpu_64cpu.yaml @@ -3,7 +3,7 @@ region: us-west-2 max_workers: 0 -aws: +advanced_configurations_json: # Fix the volume size so that IOPS is constant even if the default changes. 
BlockDeviceMappings: - DeviceName: /dev/sda1 diff --git a/release/benchmarks/distributed/many_nodes_tests/compute_config.yaml b/release/benchmarks/distributed/many_nodes_tests/compute_config.yaml index 47f435a873ac..7f1e59497146 100644 --- a/release/benchmarks/distributed/many_nodes_tests/compute_config.yaml +++ b/release/benchmarks/distributed/many_nodes_tests/compute_config.yaml @@ -4,7 +4,7 @@ region: us-west-2 # NFS needs to be disabled for this test, since the test spawns too many nodes # and may hit the limit on the # of clients. -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/benchmarks/single_node.yaml b/release/benchmarks/single_node.yaml index 94297cbfbb8e..d94ea397f073 100644 --- a/release/benchmarks/single_node.yaml +++ b/release/benchmarks/single_node.yaml @@ -3,7 +3,7 @@ region: us-west-2 max_workers: 0 -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/dashboard/agent_stress_compute.yaml b/release/dashboard/agent_stress_compute.yaml index 340b63778529..5e3859905a07 100644 --- a/release/dashboard/agent_stress_compute.yaml +++ b/release/dashboard/agent_stress_compute.yaml @@ -1,7 +1,7 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} region: us-west-2 -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/jobs_tests/compute_tpl_4_xlarge.yaml b/release/jobs_tests/compute_tpl_4_xlarge.yaml index 7da54ffb0d6c..5f21711662d7 100644 --- a/release/jobs_tests/compute_tpl_4_xlarge.yaml +++ b/release/jobs_tests/compute_tpl_4_xlarge.yaml @@ -16,7 +16,7 @@ worker_node_types: max_workers: 4 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/jobs_tests/compute_tpl_gce_4_xlarge.yaml b/release/jobs_tests/compute_tpl_gce_4_xlarge.yaml index b2c81c32c83c..176c282a50f5 100644 --- a/release/jobs_tests/compute_tpl_gce_4_xlarge.yaml +++ b/release/jobs_tests/compute_tpl_gce_4_xlarge.yaml @@ -1,6 +1,6 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} region: us-west1 -allowed_azs: +allowed_azs: - us-west1-c max_workers: 4 @@ -16,7 +16,7 @@ worker_node_types: max_workers: 4 use_spot: false -#aws: +#advanced_configurations_json: # TagSpecifications: # - ResourceType: "instance" # Tags: diff --git a/release/jobs_tests/compute_tpl_gpu_node.yaml b/release/jobs_tests/compute_tpl_gpu_node.yaml index 36a2a3e0ce8c..27700e794664 100644 --- a/release/jobs_tests/compute_tpl_gpu_node.yaml +++ b/release/jobs_tests/compute_tpl_gpu_node.yaml @@ -13,7 +13,7 @@ worker_node_types: max_workers: 1 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/jobs_tests/compute_tpl_gpu_worker.yaml b/release/jobs_tests/compute_tpl_gpu_worker.yaml index 15955339513f..a98edd4945dc 100644 --- a/release/jobs_tests/compute_tpl_gpu_worker.yaml +++ b/release/jobs_tests/compute_tpl_gpu_worker.yaml @@ -13,7 +13,7 @@ worker_node_types: max_workers: 1 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/k8s_tests/compute_tpl.yaml b/release/k8s_tests/compute_tpl.yaml index c29a684f9efd..a5a788a2eec7 100644 --- a/release/k8s_tests/compute_tpl.yaml +++ b/release/k8s_tests/compute_tpl.yaml @@ -9,7 +9,7 @@ head_node_type: worker_node_types: [] -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git 
a/release/long_running_distributed_tests/compute_tpl.yaml b/release/long_running_distributed_tests/compute_tpl.yaml index 1fe5db1b9f87..68c144d651d3 100644 --- a/release/long_running_distributed_tests/compute_tpl.yaml +++ b/release/long_running_distributed_tests/compute_tpl.yaml @@ -14,7 +14,7 @@ worker_node_types: max_workers: 2 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/long_running_tests/many_ppo.yaml b/release/long_running_tests/many_ppo.yaml index 63c8145d992e..941dd0fa091d 100644 --- a/release/long_running_tests/many_ppo.yaml +++ b/release/long_running_tests/many_ppo.yaml @@ -9,7 +9,7 @@ head_node_type: worker_node_types: [] -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/long_running_tests/many_ppo_gce.yaml b/release/long_running_tests/many_ppo_gce.yaml index 7be96bd65462..9981b725f770 100644 --- a/release/long_running_tests/many_ppo_gce.yaml +++ b/release/long_running_tests/many_ppo_gce.yaml @@ -11,7 +11,7 @@ head_node_type: worker_node_types: [] -#aws: +#advanced_configurations_json: # TagSpecifications: # - ResourceType: "instance" # Tags: diff --git a/release/long_running_tests/tpl_cpu_1.yaml b/release/long_running_tests/tpl_cpu_1.yaml index 0798e51be0d3..f09553f38347 100644 --- a/release/long_running_tests/tpl_cpu_1.yaml +++ b/release/long_running_tests/tpl_cpu_1.yaml @@ -9,7 +9,7 @@ head_node_type: worker_node_types: [] -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/long_running_tests/tpl_cpu_1_c5.yaml b/release/long_running_tests/tpl_cpu_1_c5.yaml index 6711e8ea7b66..d6cd546b8951 100644 --- a/release/long_running_tests/tpl_cpu_1_c5.yaml +++ b/release/long_running_tests/tpl_cpu_1_c5.yaml @@ -9,7 +9,7 @@ head_node_type: worker_node_types: [] -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/long_running_tests/tpl_cpu_1_c5_gce.yaml b/release/long_running_tests/tpl_cpu_1_c5_gce.yaml index 1f6a428ca7ec..52c35b2508e6 100644 --- a/release/long_running_tests/tpl_cpu_1_c5_gce.yaml +++ b/release/long_running_tests/tpl_cpu_1_c5_gce.yaml @@ -11,7 +11,7 @@ head_node_type: worker_node_types: [] -#aws: +#advanced_configurations_json: # TagSpecifications: # - ResourceType: "instance" # Tags: diff --git a/release/long_running_tests/tpl_cpu_1_gce.yaml b/release/long_running_tests/tpl_cpu_1_gce.yaml index 807ac26cac60..48eef743adfb 100644 --- a/release/long_running_tests/tpl_cpu_1_gce.yaml +++ b/release/long_running_tests/tpl_cpu_1_gce.yaml @@ -11,7 +11,7 @@ head_node_type: worker_node_types: [] -#aws: +#advanced_configurations_json: # TagSpecifications: # - ResourceType: "instance" # Tags: diff --git a/release/long_running_tests/tpl_cpu_1_large.yaml b/release/long_running_tests/tpl_cpu_1_large.yaml index ebe2058ab8b9..87e8548fc087 100644 --- a/release/long_running_tests/tpl_cpu_1_large.yaml +++ b/release/long_running_tests/tpl_cpu_1_large.yaml @@ -9,7 +9,7 @@ head_node_type: worker_node_types: [] -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/long_running_tests/tpl_cpu_1_large_gce.yaml b/release/long_running_tests/tpl_cpu_1_large_gce.yaml index f9392a87032a..93c00da4d01e 100644 --- a/release/long_running_tests/tpl_cpu_1_large_gce.yaml +++ b/release/long_running_tests/tpl_cpu_1_large_gce.yaml @@ -11,7 +11,7 @@ head_node_type: worker_node_types: [] -#aws: 
+#advanced_configurations_json: # TagSpecifications: # - ResourceType: "instance" # Tags: diff --git a/release/long_running_tests/tpl_cpu_2.yaml b/release/long_running_tests/tpl_cpu_2.yaml index 7e249f7ec82e..94b54c63b6d1 100644 --- a/release/long_running_tests/tpl_cpu_2.yaml +++ b/release/long_running_tests/tpl_cpu_2.yaml @@ -14,7 +14,7 @@ worker_node_types: max_workers: 1 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/long_running_tests/tpl_cpu_3.yaml b/release/long_running_tests/tpl_cpu_3.yaml index 54ac50b78e38..4821923fe71c 100644 --- a/release/long_running_tests/tpl_cpu_3.yaml +++ b/release/long_running_tests/tpl_cpu_3.yaml @@ -14,7 +14,7 @@ worker_node_types: max_workers: 2 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/long_running_tests/tpl_cpu_3_gce.yaml b/release/long_running_tests/tpl_cpu_3_gce.yaml index e08b73838512..c9d24ec1dd71 100644 --- a/release/long_running_tests/tpl_cpu_3_gce.yaml +++ b/release/long_running_tests/tpl_cpu_3_gce.yaml @@ -16,7 +16,7 @@ worker_node_types: max_workers: 2 use_spot: false -#aws: +#advanced_configurations_json: # TagSpecifications: # - ResourceType: "instance" # Tags: diff --git a/release/long_running_tests/tpl_cpu_4.yaml b/release/long_running_tests/tpl_cpu_4.yaml index c08501e94c67..43e2adbdad8a 100644 --- a/release/long_running_tests/tpl_cpu_4.yaml +++ b/release/long_running_tests/tpl_cpu_4.yaml @@ -14,7 +14,7 @@ worker_node_types: max_workers: 3 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/long_running_tests/tpl_cpu_4_gce.yaml b/release/long_running_tests/tpl_cpu_4_gce.yaml index 4525893848e1..3a56f551ccc3 100644 --- a/release/long_running_tests/tpl_cpu_4_gce.yaml +++ b/release/long_running_tests/tpl_cpu_4_gce.yaml @@ -16,7 +16,7 @@ worker_node_types: max_workers: 3 use_spot: false -#aws: +#advanced_configurations_json: # TagSpecifications: # - ResourceType: "instance" # Tags: diff --git a/release/ml_user_tests/horovod/compute_tpl_aws.yaml b/release/ml_user_tests/horovod/compute_tpl_aws.yaml index 6c518ba272c1..61999ce38e8c 100644 --- a/release/ml_user_tests/horovod/compute_tpl_aws.yaml +++ b/release/ml_user_tests/horovod/compute_tpl_aws.yaml @@ -14,7 +14,7 @@ worker_node_types: min_workers: 3 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/ml_user_tests/horovod/compute_tpl_gce.yaml b/release/ml_user_tests/horovod/compute_tpl_gce.yaml index 2cad8d220fba..d2d46997fd4f 100644 --- a/release/ml_user_tests/horovod/compute_tpl_gce.yaml +++ b/release/ml_user_tests/horovod/compute_tpl_gce.yaml @@ -16,7 +16,7 @@ worker_node_types: min_workers: 3 use_spot: false -#aws: +#advanced_configurations_json: # TagSpecifications: # - ResourceType: "instance" # Tags: diff --git a/release/ml_user_tests/tune_rllib/compute_tpl_aws.yaml b/release/ml_user_tests/tune_rllib/compute_tpl_aws.yaml index c4166af67959..376fd90539c7 100644 --- a/release/ml_user_tests/tune_rllib/compute_tpl_aws.yaml +++ b/release/ml_user_tests/tune_rllib/compute_tpl_aws.yaml @@ -20,7 +20,7 @@ worker_node_types: max_workers: 2 use_spot: false -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/nightly_tests/chaos_test/compute_template.yaml b/release/nightly_tests/chaos_test/compute_template.yaml index 
4421ed956e29..f91504fb6937 100644 --- a/release/nightly_tests/chaos_test/compute_template.yaml +++ b/release/nightly_tests/chaos_test/compute_template.yaml @@ -1,7 +1,7 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} region: us-west-2 -aws: +advanced_configurations_json: IamInstanceProfile: {"Name": "ray-autoscaler-v1"} head_node_type: diff --git a/release/nightly_tests/dask_on_ray/1tb_sort_compute.yaml b/release/nightly_tests/dask_on_ray/1tb_sort_compute.yaml index 80c5ea1325da..7bc19c0bf2f9 100644 --- a/release/nightly_tests/dask_on_ray/1tb_sort_compute.yaml +++ b/release/nightly_tests/dask_on_ray/1tb_sort_compute.yaml @@ -1,7 +1,7 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} region: us-west-2 -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/nightly_tests/dask_on_ray/chaos_dask_on_ray_stress_compute.yaml b/release/nightly_tests/dask_on_ray/chaos_dask_on_ray_stress_compute.yaml index e249486f0377..838abd890c33 100644 --- a/release/nightly_tests/dask_on_ray/chaos_dask_on_ray_stress_compute.yaml +++ b/release/nightly_tests/dask_on_ray/chaos_dask_on_ray_stress_compute.yaml @@ -1,7 +1,7 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} region: us-west-2 -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/nightly_tests/dask_on_ray/dask_on_ray_sort_compute_template.yaml b/release/nightly_tests/dask_on_ray/dask_on_ray_sort_compute_template.yaml index da67eec060c4..e87043b3d435 100644 --- a/release/nightly_tests/dask_on_ray/dask_on_ray_sort_compute_template.yaml +++ b/release/nightly_tests/dask_on_ray/dask_on_ray_sort_compute_template.yaml @@ -1,7 +1,7 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} region: us-west-2 -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/nightly_tests/dask_on_ray/dask_on_ray_stress_compute.yaml b/release/nightly_tests/dask_on_ray/dask_on_ray_stress_compute.yaml index e249486f0377..838abd890c33 100644 --- a/release/nightly_tests/dask_on_ray/dask_on_ray_stress_compute.yaml +++ b/release/nightly_tests/dask_on_ray/dask_on_ray_stress_compute.yaml @@ -1,7 +1,7 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} region: us-west-2 -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/nightly_tests/dask_on_ray/dask_on_ray_stress_compute_k8s.yaml b/release/nightly_tests/dask_on_ray/dask_on_ray_stress_compute_k8s.yaml index 387ce28c725a..6e891770737b 100644 --- a/release/nightly_tests/dask_on_ray/dask_on_ray_stress_compute_k8s.yaml +++ b/release/nightly_tests/dask_on_ray/dask_on_ray_stress_compute_k8s.yaml @@ -1,7 +1,7 @@ cloud_id: cld_HSrCZdMCYDe1NmMCJhYRgQ4p region: us-west-2 -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/nightly_tests/dask_on_ray/large_scale_dask_on_ray_compute_template.yaml b/release/nightly_tests/dask_on_ray/large_scale_dask_on_ray_compute_template.yaml index aea2d4f78b07..67838b838f5d 100644 --- a/release/nightly_tests/dask_on_ray/large_scale_dask_on_ray_compute_template.yaml +++ b/release/nightly_tests/dask_on_ray/large_scale_dask_on_ray_compute_template.yaml @@ -1,7 +1,7 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} region: us-west-2 -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/nightly_tests/dataset/compute_gpu_4x4_aws.yaml b/release/nightly_tests/dataset/compute_gpu_4x4_aws.yaml index df0eb98234ea..e56edf8bbf28 100644 --- 
a/release/nightly_tests/dataset/compute_gpu_4x4_aws.yaml +++ b/release/nightly_tests/dataset/compute_gpu_4x4_aws.yaml @@ -14,7 +14,7 @@ worker_node_types: min_workers: 3 use_spot: false -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/nightly_tests/dataset/pipelined_ingestion_compute.yaml b/release/nightly_tests/dataset/pipelined_ingestion_compute.yaml index b8b25b2def6c..0ed874893d1d 100644 --- a/release/nightly_tests/dataset/pipelined_ingestion_compute.yaml +++ b/release/nightly_tests/dataset/pipelined_ingestion_compute.yaml @@ -3,7 +3,7 @@ region: us-west-2 max_workers: 999 -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/nightly_tests/dataset/shuffle_compute.yaml b/release/nightly_tests/dataset/shuffle_compute.yaml index eb7aacc0b8e7..b776f7edfa4c 100644 --- a/release/nightly_tests/dataset/shuffle_compute.yaml +++ b/release/nightly_tests/dataset/shuffle_compute.yaml @@ -3,7 +3,7 @@ region: us-west-2 max_workers: 999 -aws: +advanced_configurations_json: IamInstanceProfile: {"Name": "ray-autoscaler-v1"} BlockDeviceMappings: - DeviceName: /dev/sda1 diff --git a/release/nightly_tests/decision_tree/autoscaling_compute.yaml b/release/nightly_tests/decision_tree/autoscaling_compute.yaml index 4eb361e1bcae..3031267a1bd8 100644 --- a/release/nightly_tests/decision_tree/autoscaling_compute.yaml +++ b/release/nightly_tests/decision_tree/autoscaling_compute.yaml @@ -3,7 +3,7 @@ region: us-west-2 max_workers: 10 -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/nightly_tests/decision_tree/autoscaling_compute_gce.yaml b/release/nightly_tests/decision_tree/autoscaling_compute_gce.yaml index b54d1d50f812..5b7072d5b30f 100644 --- a/release/nightly_tests/decision_tree/autoscaling_compute_gce.yaml +++ b/release/nightly_tests/decision_tree/autoscaling_compute_gce.yaml @@ -1,11 +1,11 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} region: us-west1 -allowed_azs: +allowed_azs: - us-west1-c max_workers: 10 -#aws: +#advanced_configurations_json: # BlockDeviceMappings: # - DeviceName: /dev/sda1 # Ebs: diff --git a/release/nightly_tests/placement_group_tests/compute.yaml b/release/nightly_tests/placement_group_tests/compute.yaml index d0fe68b4c17c..3baa53f9f36c 100644 --- a/release/nightly_tests/placement_group_tests/compute.yaml +++ b/release/nightly_tests/placement_group_tests/compute.yaml @@ -1,7 +1,7 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} region: us-west-2 -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: @@ -20,7 +20,7 @@ worker_node_types: use_spot: false - name: fake_gpu_node instance_type: m5.4xlarge - min_workers: 0 + min_workers: 0 max_workers: 2 use_spot: false resources: diff --git a/release/nightly_tests/placement_group_tests/long_running_test_compute.yaml b/release/nightly_tests/placement_group_tests/long_running_test_compute.yaml index cc3005c09c5b..d990178123f0 100644 --- a/release/nightly_tests/placement_group_tests/long_running_test_compute.yaml +++ b/release/nightly_tests/placement_group_tests/long_running_test_compute.yaml @@ -1,7 +1,7 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} region: us-west-2 -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/nightly_tests/placement_group_tests/pg_perf_test_compute.yaml b/release/nightly_tests/placement_group_tests/pg_perf_test_compute.yaml index 8764e0f6c4df..a3e1852cc568 100644 --- 
a/release/nightly_tests/placement_group_tests/pg_perf_test_compute.yaml +++ b/release/nightly_tests/placement_group_tests/pg_perf_test_compute.yaml @@ -1,7 +1,7 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} region: us-west-2 -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/nightly_tests/shuffle/100tb_shuffle_compute.yaml b/release/nightly_tests/shuffle/100tb_shuffle_compute.yaml index 14a02cfa030e..17ed94f5d623 100644 --- a/release/nightly_tests/shuffle/100tb_shuffle_compute.yaml +++ b/release/nightly_tests/shuffle/100tb_shuffle_compute.yaml @@ -1,7 +1,7 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} region: us-west-2 -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/nightly_tests/shuffle/datasets_large_scale_compute_small_instances.yaml b/release/nightly_tests/shuffle/datasets_large_scale_compute_small_instances.yaml index b6f95b050839..f6a658058a5e 100644 --- a/release/nightly_tests/shuffle/datasets_large_scale_compute_small_instances.yaml +++ b/release/nightly_tests/shuffle/datasets_large_scale_compute_small_instances.yaml @@ -1,7 +1,7 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} region: us-west-2 -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/nightly_tests/shuffle/shuffle_compute_autoscaling.yaml b/release/nightly_tests/shuffle/shuffle_compute_autoscaling.yaml index 38091a3f12b6..57653d15408a 100644 --- a/release/nightly_tests/shuffle/shuffle_compute_autoscaling.yaml +++ b/release/nightly_tests/shuffle/shuffle_compute_autoscaling.yaml @@ -1,7 +1,7 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} region: us-west-2 -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/nightly_tests/shuffle/shuffle_compute_autoscaling_gce.yaml b/release/nightly_tests/shuffle/shuffle_compute_autoscaling_gce.yaml index 4a7af41b667c..30d141e8b544 100644 --- a/release/nightly_tests/shuffle/shuffle_compute_autoscaling_gce.yaml +++ b/release/nightly_tests/shuffle/shuffle_compute_autoscaling_gce.yaml @@ -1,9 +1,9 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} region: us-west1 -allowed_azs: +allowed_azs: - us-west1-c -#aws: +#advanced_configurations_json: # BlockDeviceMappings: # - DeviceName: /dev/sda1 # Ebs: diff --git a/release/nightly_tests/shuffle/shuffle_compute_large_scale.yaml b/release/nightly_tests/shuffle/shuffle_compute_large_scale.yaml index 27268db12a0d..7b2a779f6cd0 100644 --- a/release/nightly_tests/shuffle/shuffle_compute_large_scale.yaml +++ b/release/nightly_tests/shuffle/shuffle_compute_large_scale.yaml @@ -1,7 +1,7 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} region: us-west-2 -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/nightly_tests/shuffle/shuffle_compute_multi.yaml b/release/nightly_tests/shuffle/shuffle_compute_multi.yaml index a726988aeda0..841d019c9545 100644 --- a/release/nightly_tests/shuffle/shuffle_compute_multi.yaml +++ b/release/nightly_tests/shuffle/shuffle_compute_multi.yaml @@ -3,7 +3,7 @@ region: us-west-2 max_workers: 3 -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/nightly_tests/shuffle/shuffle_compute_multi_gce.yaml b/release/nightly_tests/shuffle/shuffle_compute_multi_gce.yaml index 1332e3390e97..ff95d850892d 100644 --- a/release/nightly_tests/shuffle/shuffle_compute_multi_gce.yaml +++ b/release/nightly_tests/shuffle/shuffle_compute_multi_gce.yaml 
@@ -1,11 +1,11 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} region: us-west1 -allowed_azs: +allowed_azs: - us-west1-c max_workers: 3 -#aws: +#advanced_configurations_json: # BlockDeviceMappings: # - DeviceName: /dev/sda1 # Ebs: diff --git a/release/nightly_tests/shuffle/shuffle_compute_single.yaml b/release/nightly_tests/shuffle/shuffle_compute_single.yaml index df8d84edc81f..16b4bec73a91 100644 --- a/release/nightly_tests/shuffle/shuffle_compute_single.yaml +++ b/release/nightly_tests/shuffle/shuffle_compute_single.yaml @@ -3,7 +3,7 @@ region: us-west-2 max_workers: 0 -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/nightly_tests/stress_tests/placement_group_tests_compute.yaml b/release/nightly_tests/stress_tests/placement_group_tests_compute.yaml index 280587323949..9b5476d95624 100644 --- a/release/nightly_tests/stress_tests/placement_group_tests_compute.yaml +++ b/release/nightly_tests/stress_tests/placement_group_tests_compute.yaml @@ -3,7 +3,7 @@ region: us-west-2 max_workers: 5 -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/nightly_tests/stress_tests/placement_group_tests_compute_gce.yaml b/release/nightly_tests/stress_tests/placement_group_tests_compute_gce.yaml index 0e0285848708..b22a790f1b18 100644 --- a/release/nightly_tests/stress_tests/placement_group_tests_compute_gce.yaml +++ b/release/nightly_tests/stress_tests/placement_group_tests_compute_gce.yaml @@ -1,11 +1,11 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} region: us-west1 -allowed_azs: +allowed_azs: - us-west1-c max_workers: 5 -#aws: +#advanced_configurations_json: # BlockDeviceMappings: # - DeviceName: /dev/sda1 # Ebs: @@ -25,7 +25,7 @@ worker_node_types: max_workers: 5 use_spot: false resources: - cpu: 2 + cpu: 2 custom_resources: pg_custom: 666 diff --git a/release/nightly_tests/stress_tests/smoke_test_compute.yaml b/release/nightly_tests/stress_tests/smoke_test_compute.yaml index bc1844203b9f..9ae9ea54cfe4 100644 --- a/release/nightly_tests/stress_tests/smoke_test_compute.yaml +++ b/release/nightly_tests/stress_tests/smoke_test_compute.yaml @@ -3,7 +3,7 @@ region: us-west-2 max_workers: 4 -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/nightly_tests/stress_tests/stress_test_threaded_actor_compute.yaml b/release/nightly_tests/stress_tests/stress_test_threaded_actor_compute.yaml index 93a576e66333..88ba8049b087 100644 --- a/release/nightly_tests/stress_tests/stress_test_threaded_actor_compute.yaml +++ b/release/nightly_tests/stress_tests/stress_test_threaded_actor_compute.yaml @@ -1,7 +1,7 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} region: us-west-2 -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/nightly_tests/stress_tests/stress_tests_compute.yaml b/release/nightly_tests/stress_tests/stress_tests_compute.yaml index abede244ae28..9b85c0723e98 100644 --- a/release/nightly_tests/stress_tests/stress_tests_compute.yaml +++ b/release/nightly_tests/stress_tests/stress_tests_compute.yaml @@ -3,7 +3,7 @@ region: us-west-2 max_workers: 100 -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/nightly_tests/stress_tests/stress_tests_compute_large.yaml b/release/nightly_tests/stress_tests/stress_tests_compute_large.yaml index 4d8d2fdeef36..53aa4e77c3d6 100644 --- a/release/nightly_tests/stress_tests/stress_tests_compute_large.yaml +++ 
b/release/nightly_tests/stress_tests/stress_tests_compute_large.yaml @@ -3,7 +3,7 @@ region: us-west-2 max_workers: 6 -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/ray_release/cluster_manager/cluster_manager.py b/release/ray_release/cluster_manager/cluster_manager.py index fac34cc00eb6..3f42da467f91 100644 --- a/release/ray_release/cluster_manager/cluster_manager.py +++ b/release/ray_release/cluster_manager/cluster_manager.py @@ -108,8 +108,13 @@ def _annotate_cluster_compute( return cluster_compute cluster_compute = cluster_compute.copy() - aws = cluster_compute.get("aws", {}) - cluster_compute["aws"] = add_tags_to_aws_config( + if "aws" in cluster_compute: + raise ValueError( + "aws field is invalid in compute config, " + "use advanced_configurations_json instead" + ) + aws = cluster_compute.get("advanced_configurations_json", {}) + cluster_compute["advanced_configurations_json"] = add_tags_to_aws_config( aws, extra_tags, RELEASE_AWS_RESOURCE_TYPES_TO_TRACK_FOR_BILLING ) return cluster_compute diff --git a/release/ray_release/tests/test_cluster_manager.py b/release/ray_release/tests/test_cluster_manager.py index 170d9b71c1cb..1b652754a96f 100644 --- a/release/ray_release/tests/test_cluster_manager.py +++ b/release/ray_release/tests/test_cluster_manager.py @@ -275,19 +275,20 @@ def testClusterComputeExtraTags(self): # All ResourceTypes as in # ray_release.aws.RELEASE_AWS_RESOURCE_TYPES_TO_TRACK_FOR_BILLING target_cluster_compute = TEST_CLUSTER_COMPUTE.copy() - target_cluster_compute["aws"] = { + target_cluster_compute["advanced_configurations_json"] = { "TagSpecifications": [ {"ResourceType": "instance", "Tags": [{"Key": "foo", "Value": "bar"}]}, {"ResourceType": "volume", "Tags": [{"Key": "foo", "Value": "bar"}]}, ] } self.assertEqual( - self.cluster_manager.cluster_compute["aws"], target_cluster_compute["aws"] + self.cluster_manager.cluster_compute["advanced_configurations_json"], + target_cluster_compute["advanced_configurations_json"], ) # Test merging with already existing tags cluster_compute_with_tags = TEST_CLUSTER_COMPUTE.copy() - cluster_compute_with_tags["aws"] = { + cluster_compute_with_tags["advanced_configurations_json"] = { "TagSpecifications": [ {"ResourceType": "fake", "Tags": []}, {"ResourceType": "instance", "Tags": [{"Key": "key", "Value": "val"}]}, @@ -299,7 +300,7 @@ def testClusterComputeExtraTags(self): # All ResourceTypes as in RELEASE_AWS_RESOURCE_TYPES_TO_TRACK_FOR_BILLING target_cluster_compute = TEST_CLUSTER_COMPUTE.copy() - target_cluster_compute["aws"] = { + target_cluster_compute["advanced_configurations_json"] = { "TagSpecifications": [ {"ResourceType": "fake", "Tags": []}, { @@ -313,7 +314,8 @@ def testClusterComputeExtraTags(self): ] } self.assertEqual( - self.cluster_manager.cluster_compute["aws"], target_cluster_compute["aws"] + self.cluster_manager.cluster_compute["advanced_configurations_json"], + target_cluster_compute["advanced_configurations_json"], ) @patch("time.sleep", lambda *a, **kw: None) diff --git a/release/rllib_tests/1gpu_16cpus.yaml b/release/rllib_tests/1gpu_16cpus.yaml index 2a0cdea1c0b3..1b11511cdcb8 100644 --- a/release/rllib_tests/1gpu_16cpus.yaml +++ b/release/rllib_tests/1gpu_16cpus.yaml @@ -9,7 +9,7 @@ head_node_type: worker_node_types: [] -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/rllib_tests/1gpu_16cpus_gce.yaml b/release/rllib_tests/1gpu_16cpus_gce.yaml index f0ad9d505d4a..26b262ab8bed 100644 --- 
a/release/rllib_tests/1gpu_16cpus_gce.yaml +++ b/release/rllib_tests/1gpu_16cpus_gce.yaml @@ -19,7 +19,7 @@ gcp_advanced_configurations_json: initialize_params: disk_size_gb: 500 -#aws: +#advanced_configurations_json: # BlockDeviceMappings: # - DeviceName: /dev/sda1 # Ebs: diff --git a/release/rllib_tests/1gpu_24cpus.yaml b/release/rllib_tests/1gpu_24cpus.yaml index af4def71489d..d8d8cb4b866a 100644 --- a/release/rllib_tests/1gpu_24cpus.yaml +++ b/release/rllib_tests/1gpu_24cpus.yaml @@ -14,7 +14,7 @@ worker_node_types: max_workers: 1 use_spot: false -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/rllib_tests/1gpu_24cpus_gce.yaml b/release/rllib_tests/1gpu_24cpus_gce.yaml index ec79552e4984..11e5dc4283d6 100644 --- a/release/rllib_tests/1gpu_24cpus_gce.yaml +++ b/release/rllib_tests/1gpu_24cpus_gce.yaml @@ -24,7 +24,7 @@ gcp_advanced_configurations_json: initialize_params: disk_size_gb: 500 -#aws: +#advanced_configurations_json: # BlockDeviceMappings: # - DeviceName: /dev/sda1 # Ebs: diff --git a/release/rllib_tests/1gpu_32cpus.yaml b/release/rllib_tests/1gpu_32cpus.yaml index 660791a6cc2f..d1b1349f284f 100644 --- a/release/rllib_tests/1gpu_32cpus.yaml +++ b/release/rllib_tests/1gpu_32cpus.yaml @@ -9,7 +9,7 @@ head_node_type: worker_node_types: [] -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/rllib_tests/1gpu_4cpus.yaml b/release/rllib_tests/1gpu_4cpus.yaml index fa1b042a9260..dcc8baf66e9b 100644 --- a/release/rllib_tests/1gpu_4cpus.yaml +++ b/release/rllib_tests/1gpu_4cpus.yaml @@ -9,7 +9,7 @@ head_node_type: worker_node_types: [] -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/rllib_tests/1gpu_4cpus_gce.yaml b/release/rllib_tests/1gpu_4cpus_gce.yaml index b239d699f91c..7613f5062cfa 100644 --- a/release/rllib_tests/1gpu_4cpus_gce.yaml +++ b/release/rllib_tests/1gpu_4cpus_gce.yaml @@ -19,7 +19,7 @@ gcp_advanced_configurations_json: initialize_params: disk_size_gb: 500 -#aws: +#advanced_configurations_json: # BlockDeviceMappings: # - DeviceName: /dev/sda1 # Ebs: diff --git a/release/rllib_tests/2gpus_32cpus.yaml b/release/rllib_tests/2gpus_32cpus.yaml index 1c74596c4c8a..02065ef9dc8f 100644 --- a/release/rllib_tests/2gpus_32cpus.yaml +++ b/release/rllib_tests/2gpus_32cpus.yaml @@ -9,7 +9,7 @@ head_node_type: worker_node_types: [] -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/rllib_tests/2gpus_32cpus_gce.yaml b/release/rllib_tests/2gpus_32cpus_gce.yaml index 7086e8d82fbf..fe56a4b11161 100644 --- a/release/rllib_tests/2gpus_32cpus_gce.yaml +++ b/release/rllib_tests/2gpus_32cpus_gce.yaml @@ -24,7 +24,7 @@ gcp_advanced_configurations_json: initialize_params: disk_size_gb: 500 -#aws: +#advanced_configurations_json: # BlockDeviceMappings: # - DeviceName: /dev/sda1 # Ebs: diff --git a/release/rllib_tests/2gpus_64cpus.yaml b/release/rllib_tests/2gpus_64cpus.yaml index 67392db81700..bd7f534c1fdf 100644 --- a/release/rllib_tests/2gpus_64cpus.yaml +++ b/release/rllib_tests/2gpus_64cpus.yaml @@ -14,7 +14,7 @@ worker_node_types: max_workers: 1 use_spot: false -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/rllib_tests/2gpus_64cpus_gce.yaml b/release/rllib_tests/2gpus_64cpus_gce.yaml index 825124fabbad..484cbf999bd8 100644 --- a/release/rllib_tests/2gpus_64cpus_gce.yaml +++ 
b/release/rllib_tests/2gpus_64cpus_gce.yaml @@ -28,7 +28,7 @@ gcp_advanced_configurations_json: initialize_params: disk_size_gb: 500 -#aws: +#advanced_configurations_json: # BlockDeviceMappings: # - DeviceName: /dev/sda1 # Ebs: diff --git a/release/rllib_tests/32cpus.yaml b/release/rllib_tests/32cpus.yaml index f1e092047f78..d67b810f66e1 100644 --- a/release/rllib_tests/32cpus.yaml +++ b/release/rllib_tests/32cpus.yaml @@ -9,7 +9,7 @@ head_node_type: worker_node_types: [] -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/rllib_tests/32cpus_gce.yaml b/release/rllib_tests/32cpus_gce.yaml index c6d1a6729fa0..466d7fe8602c 100644 --- a/release/rllib_tests/32cpus_gce.yaml +++ b/release/rllib_tests/32cpus_gce.yaml @@ -19,7 +19,7 @@ gcp_advanced_configurations_json: initialize_params: disk_size_gb: 500 -#aws: +#advanced_configurations_json: # BlockDeviceMappings: # - DeviceName: /dev/sda1 # Ebs: diff --git a/release/rllib_tests/4gpus_512_cpus_gce.yaml b/release/rllib_tests/4gpus_512_cpus_gce.yaml index a42e4805795f..92ddfdde89e7 100644 --- a/release/rllib_tests/4gpus_512_cpus_gce.yaml +++ b/release/rllib_tests/4gpus_512_cpus_gce.yaml @@ -24,7 +24,7 @@ gcp_advanced_configurations_json: initialize_params: disk_size_gb: 500 -#aws: +#advanced_configurations_json: # BlockDeviceMappings: # - DeviceName: /dev/sda1 # Ebs: diff --git a/release/rllib_tests/4gpus_544_cpus.yaml b/release/rllib_tests/4gpus_544_cpus.yaml index dd30af32c19a..c4559737cc17 100644 --- a/release/rllib_tests/4gpus_544_cpus.yaml +++ b/release/rllib_tests/4gpus_544_cpus.yaml @@ -14,7 +14,7 @@ worker_node_types: max_workers: 5 use_spot: false -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/rllib_tests/4gpus_64cpus.yaml b/release/rllib_tests/4gpus_64cpus.yaml index c0f4c76ee300..60d03f122263 100644 --- a/release/rllib_tests/4gpus_64cpus.yaml +++ b/release/rllib_tests/4gpus_64cpus.yaml @@ -14,7 +14,7 @@ worker_node_types: max_workers: 1 use_spot: false -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/rllib_tests/4gpus_64cpus_gce.yaml b/release/rllib_tests/4gpus_64cpus_gce.yaml index 82b95a8b4fdc..a4453843a482 100644 --- a/release/rllib_tests/4gpus_64cpus_gce.yaml +++ b/release/rllib_tests/4gpus_64cpus_gce.yaml @@ -19,7 +19,7 @@ gcp_advanced_configurations_json: initialize_params: disk_size_gb: 500 -#aws: +#advanced_configurations_json: # BlockDeviceMappings: # - DeviceName: /dev/sda1 # Ebs: diff --git a/release/rllib_tests/4gpus_96cpus.yaml b/release/rllib_tests/4gpus_96cpus.yaml index 80916596a054..e699e2588b8b 100644 --- a/release/rllib_tests/4gpus_96cpus.yaml +++ b/release/rllib_tests/4gpus_96cpus.yaml @@ -9,7 +9,7 @@ head_node_type: worker_node_types: [] -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/rllib_tests/4gpus_96cpus_gce.yaml b/release/rllib_tests/4gpus_96cpus_gce.yaml index 5d1e5b00182c..df83eba5f0f7 100644 --- a/release/rllib_tests/4gpus_96cpus_gce.yaml +++ b/release/rllib_tests/4gpus_96cpus_gce.yaml @@ -19,7 +19,7 @@ gcp_advanced_configurations_json: initialize_params: disk_size_gb: 500 -#aws: +#advanced_configurations_json: # BlockDeviceMappings: # - DeviceName: /dev/sda1 # Ebs: diff --git a/release/rllib_tests/8gpus_96cpus.yaml b/release/rllib_tests/8gpus_96cpus.yaml index d9509c85dbda..614944b6d20c 100644 --- a/release/rllib_tests/8gpus_96cpus.yaml +++ 
b/release/rllib_tests/8gpus_96cpus.yaml @@ -9,7 +9,7 @@ head_node_type: worker_node_types: [] -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/rllib_tests/multi_node_checkpointing_compute_config.yaml b/release/rllib_tests/multi_node_checkpointing_compute_config.yaml index 60784554811e..36f37a8738c7 100644 --- a/release/rllib_tests/multi_node_checkpointing_compute_config.yaml +++ b/release/rllib_tests/multi_node_checkpointing_compute_config.yaml @@ -14,7 +14,7 @@ worker_node_types: max_workers: 2 use_spot: false -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/serve_tests/compute_tpl_32_cpu.yaml b/release/serve_tests/compute_tpl_32_cpu.yaml index adcb0ee49569..442ba4222534 100644 --- a/release/serve_tests/compute_tpl_32_cpu.yaml +++ b/release/serve_tests/compute_tpl_32_cpu.yaml @@ -20,7 +20,7 @@ worker_node_types: custom_resources: worker: 1 -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/serve_tests/compute_tpl_32_cpu_autoscaling.yaml b/release/serve_tests/compute_tpl_32_cpu_autoscaling.yaml index 001ff48fef96..c65682bf76b8 100644 --- a/release/serve_tests/compute_tpl_32_cpu_autoscaling.yaml +++ b/release/serve_tests/compute_tpl_32_cpu_autoscaling.yaml @@ -20,7 +20,7 @@ worker_node_types: max_workers: 35 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/serve_tests/compute_tpl_8_cpu_autoscaling.yaml b/release/serve_tests/compute_tpl_8_cpu_autoscaling.yaml index ab5b59006a93..851620488b20 100644 --- a/release/serve_tests/compute_tpl_8_cpu_autoscaling.yaml +++ b/release/serve_tests/compute_tpl_8_cpu_autoscaling.yaml @@ -26,7 +26,7 @@ worker_node_types: custom_resources: proxy: 1 -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/serve_tests/compute_tpl_gpu_node.yaml b/release/serve_tests/compute_tpl_gpu_node.yaml index c1450feb62d4..96f7dc057a45 100644 --- a/release/serve_tests/compute_tpl_gpu_node.yaml +++ b/release/serve_tests/compute_tpl_gpu_node.yaml @@ -13,7 +13,7 @@ worker_node_types: max_workers: 1 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/serve_tests/compute_tpl_single_node.yaml b/release/serve_tests/compute_tpl_single_node.yaml index c9f7f84179a0..5f46c956ab7d 100644 --- a/release/serve_tests/compute_tpl_single_node.yaml +++ b/release/serve_tests/compute_tpl_single_node.yaml @@ -10,7 +10,7 @@ head_node_type: worker_node_types: [] -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/serve_tests/compute_tpl_single_node_32_cpu.yaml b/release/serve_tests/compute_tpl_single_node_32_cpu.yaml index c9e1ec1d0f7f..d40e806ea617 100644 --- a/release/serve_tests/compute_tpl_single_node_32_cpu.yaml +++ b/release/serve_tests/compute_tpl_single_node_32_cpu.yaml @@ -10,7 +10,7 @@ head_node_type: worker_node_types: [] -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/serve_tests/compute_tpl_single_node_k8s.yaml b/release/serve_tests/compute_tpl_single_node_k8s.yaml index bbccdfb95a07..7dd79bb0adf1 100644 --- a/release/serve_tests/compute_tpl_single_node_k8s.yaml +++ b/release/serve_tests/compute_tpl_single_node_k8s.yaml @@ -12,7 +12,7 @@ head_node_type: worker_node_types: [] -aws: 
+advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/train_tests/colocate_trainer/compute_aws.yaml b/release/train_tests/colocate_trainer/compute_aws.yaml index e2542caf5000..abe241ebddfd 100644 --- a/release/train_tests/colocate_trainer/compute_aws.yaml +++ b/release/train_tests/colocate_trainer/compute_aws.yaml @@ -14,7 +14,7 @@ worker_node_types: min_workers: 3 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/train_tests/horovod/compute_tpl_aws.yaml b/release/train_tests/horovod/compute_tpl_aws.yaml index 8424f13fbf61..b7065014230c 100644 --- a/release/train_tests/horovod/compute_tpl_aws.yaml +++ b/release/train_tests/horovod/compute_tpl_aws.yaml @@ -15,7 +15,7 @@ worker_node_types: min_workers: 1 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/train_tests/horovod/compute_tpl_gce.yaml b/release/train_tests/horovod/compute_tpl_gce.yaml index 31730aac6e79..59e43741f983 100644 --- a/release/train_tests/horovod/compute_tpl_gce.yaml +++ b/release/train_tests/horovod/compute_tpl_gce.yaml @@ -17,7 +17,7 @@ worker_node_types: min_workers: 1 use_spot: false -#aws: +#advanced_configurations_json: # TagSpecifications: # - ResourceType: "instance" # Tags: diff --git a/release/train_tests/multinode_persistence/compute_aws.yaml b/release/train_tests/multinode_persistence/compute_aws.yaml index ad578f3feba0..a0e4116acafb 100644 --- a/release/train_tests/multinode_persistence/compute_aws.yaml +++ b/release/train_tests/multinode_persistence/compute_aws.yaml @@ -14,7 +14,7 @@ worker_node_types: min_workers: 3 use_spot: false -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/train_tests/xgboost_lightgbm/compute_aws_10workers.yaml b/release/train_tests/xgboost_lightgbm/compute_aws_10workers.yaml index 7888ba7cbec3..55fa05e163f0 100644 --- a/release/train_tests/xgboost_lightgbm/compute_aws_10workers.yaml +++ b/release/train_tests/xgboost_lightgbm/compute_aws_10workers.yaml @@ -17,7 +17,7 @@ worker_node_types: min_workers: 10 use_spot: false -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/train_tests/xgboost_lightgbm/compute_aws_1worker.yaml b/release/train_tests/xgboost_lightgbm/compute_aws_1worker.yaml index 28d8b4a66016..8796876a7de5 100644 --- a/release/train_tests/xgboost_lightgbm/compute_aws_1worker.yaml +++ b/release/train_tests/xgboost_lightgbm/compute_aws_1worker.yaml @@ -17,7 +17,7 @@ worker_node_types: min_workers: 1 use_spot: false -aws: +advanced_configurations_json: BlockDeviceMappings: - DeviceName: /dev/sda1 Ebs: diff --git a/release/tune_tests/cloud_tests/tpl_aws_1x4.yaml b/release/tune_tests/cloud_tests/tpl_aws_1x4.yaml index 1672eb48ba78..0307acc970a5 100644 --- a/release/tune_tests/cloud_tests/tpl_aws_1x4.yaml +++ b/release/tune_tests/cloud_tests/tpl_aws_1x4.yaml @@ -9,7 +9,7 @@ head_node_type: worker_node_types: [] -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/tune_tests/fault_tolerance_tests/tpl_aws_16x1.yaml b/release/tune_tests/fault_tolerance_tests/tpl_aws_16x1.yaml index abe801c950f1..302b975fa5a4 100644 --- a/release/tune_tests/fault_tolerance_tests/tpl_aws_16x1.yaml +++ b/release/tune_tests/fault_tolerance_tests/tpl_aws_16x1.yaml @@ -18,7 +18,7 @@ worker_node_types: use_spot: true # Required to 
allow nodes to terminate themselves. -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/tune_tests/fault_tolerance_tests/tpl_gce_16x1.yaml b/release/tune_tests/fault_tolerance_tests/tpl_gce_16x1.yaml index dbccfa496b2d..d99976a529e4 100644 --- a/release/tune_tests/fault_tolerance_tests/tpl_gce_16x1.yaml +++ b/release/tune_tests/fault_tolerance_tests/tpl_gce_16x1.yaml @@ -20,7 +20,7 @@ worker_node_types: use_spot: true ## Required to allow nodes to terminate themselves. -#aws: +#advanced_configurations_json: # TagSpecifications: # - ResourceType: "instance" # Tags: diff --git a/release/tune_tests/scalability_tests/tpl_1x32_hd.yaml b/release/tune_tests/scalability_tests/tpl_1x32_hd.yaml index 8fed22723702..de26a8b43484 100644 --- a/release/tune_tests/scalability_tests/tpl_1x32_hd.yaml +++ b/release/tune_tests/scalability_tests/tpl_1x32_hd.yaml @@ -9,7 +9,7 @@ head_node_type: worker_node_types: [] -aws: +advanced_configurations_json: TagSpecifications: - ResourceType: "instance" Tags: diff --git a/release/tune_tests/scalability_tests/tpl_gce_1x32_hd.yaml b/release/tune_tests/scalability_tests/tpl_gce_1x32_hd.yaml index 387f47a83437..60ade7f48b9c 100644 --- a/release/tune_tests/scalability_tests/tpl_gce_1x32_hd.yaml +++ b/release/tune_tests/scalability_tests/tpl_gce_1x32_hd.yaml @@ -11,7 +11,7 @@ head_node_type: worker_node_types: [] -#aws: +#advanced_configurations_json: # TagSpecifications: # - ResourceType: "instance" # Tags: From 422499baf2272001d26c6c567999176cef7f6013 Mon Sep 17 00:00:00 2001 From: dentiny Date: Wed, 20 Nov 2024 14:52:57 -0800 Subject: [PATCH 040/107] [core] Minor optimization and thread annotation for store client (#48504) Signed-off-by: hjiang --- doc/source/ray-overview/installation.rst | 6 +-- docker/base-deps/Dockerfile | 2 +- release/perf_metrics/metadata.json | 2 +- src/mock/ray/core_worker/core_worker.h | 26 ++-------- src/ray/core_worker/core_worker.cc | 9 ++-- .../store_client/in_memory_store_client.cc | 49 +++++++++---------- .../gcs/store_client/in_memory_store_client.h | 17 ++++--- 7 files changed, 45 insertions(+), 66 deletions(-) diff --git a/doc/source/ray-overview/installation.rst b/doc/source/ray-overview/installation.rst index 74fde96e48e6..97ff6a53e85a 100644 --- a/doc/source/ray-overview/installation.rst +++ b/doc/source/ray-overview/installation.rst @@ -441,8 +441,8 @@ Install Ray Java with Maven --------------------------- .. note:: - - All Ray Java APIs are experimental and only supported by the community. + + All Ray Java APIs are experimental and only supported by the community. Before installing Ray Java with Maven, you should install Ray Python with `pip install -U ray` . Note that the versions of Ray Java and Ray Python must match. Note that nightly Ray python wheels are also required if you want to install Ray Java snapshot version. @@ -506,7 +506,7 @@ Install Ray C++ .. note:: - All Ray C++ APIs are experimental and only supported by the community. + All Ray C++ APIs are experimental and only supported by the community. You can install and use Ray C++ API as follows. 
diff --git a/docker/base-deps/Dockerfile b/docker/base-deps/Dockerfile index 496d8a05e58a..ee7d7872dfb0 100644 --- a/docker/base-deps/Dockerfile +++ b/docker/base-deps/Dockerfile @@ -36,7 +36,7 @@ APT_PKGS=( libjemalloc-dev wget cmake - g++ + g++ zlib1g-dev ) if [[ "$AUTOSCALER" == "autoscaler" ]]; then diff --git a/release/perf_metrics/metadata.json b/release/perf_metrics/metadata.json index 2ef9c5cd543e..d4a423494576 100644 --- a/release/perf_metrics/metadata.json +++ b/release/perf_metrics/metadata.json @@ -1 +1 @@ -{"release_version": "2.39.0"} \ No newline at end of file +{"release_version": "2.39.0"} diff --git a/src/mock/ray/core_worker/core_worker.h b/src/mock/ray/core_worker/core_worker.h index 60817fb7af1c..ff1e1d7ab130 100644 --- a/src/mock/ray/core_worker/core_worker.h +++ b/src/mock/ray/core_worker/core_worker.h @@ -14,28 +14,11 @@ #pragma once #include "gmock/gmock.h" #include "mock/ray/gcs/gcs_client/gcs_client.h" -namespace ray { -namespace core { -class MockCoreWorkerOptions : public CoreWorkerOptions { - public: -}; - -} // namespace core -} // namespace ray - -namespace ray { -namespace core { - -class MockCoreWorkerProcess : public CoreWorkerProcess { - public: -}; - -} // namespace core -} // namespace ray +namespace ray::core { -namespace ray { -namespace core { +class MockCoreWorkerOptions : public CoreWorkerOptions {}; +class MockCoreWorkerProcess : public CoreWorkerProcess {}; class MockCoreWorker : public CoreWorker { public: @@ -179,5 +162,4 @@ class MockCoreWorker : public CoreWorker { (override)); }; -} // namespace core -} // namespace ray +} // namespace ray::core diff --git a/src/ray/core_worker/core_worker.cc b/src/ray/core_worker/core_worker.cc index 622e6be11a84..5dcd799b8936 100644 --- a/src/ray/core_worker/core_worker.cc +++ b/src/ray/core_worker/core_worker.cc @@ -2584,9 +2584,8 @@ Status CoreWorker::CreatePlacementGroup( << ". It is probably " "because GCS server is dead or there's a high load there."; return Status::TimedOut(stream.str()); - } else { - return status; } + return status; } Status CoreWorker::RemovePlacementGroup(const PlacementGroupID &placement_group_id) { @@ -2600,9 +2599,8 @@ Status CoreWorker::RemovePlacementGroup(const PlacementGroupID &placement_group_ << ". 
It is probably " "because GCS server is dead or there's a high load there."; return Status::TimedOut(stream.str()); - } else { - return status; } + return status; } Status CoreWorker::WaitPlacementGroupReady(const PlacementGroupID &placement_group_id, @@ -2614,9 +2612,8 @@ Status CoreWorker::WaitPlacementGroupReady(const PlacementGroupID &placement_gro stream << "There was timeout in waiting for placement group " << placement_group_id << " creation."; return Status::TimedOut(stream.str()); - } else { - return status; } + return status; } Status CoreWorker::SubmitActorTask( diff --git a/src/ray/gcs/store_client/in_memory_store_client.cc b/src/ray/gcs/store_client/in_memory_store_client.cc index 39306b1254c9..1d1cb3451350 100644 --- a/src/ray/gcs/store_client/in_memory_store_client.cc +++ b/src/ray/gcs/store_client/in_memory_store_client.cc @@ -14,9 +14,7 @@ #include "ray/gcs/store_client/in_memory_store_client.h" -namespace ray { - -namespace gcs { +namespace ray::gcs { Status InMemoryStoreClient::AsyncPut(const std::string &table_name, const std::string &key, @@ -35,9 +33,10 @@ Status InMemoryStoreClient::AsyncPut(const std::string &table_name, table->records_[key] = data; inserted = true; } - if (callback != nullptr) { - main_io_service_.post([callback, inserted]() { callback(inserted); }, - "GcsInMemoryStore.Put"); + if (callback) { + main_io_service_.post( + [callback = std::move(callback), inserted]() { callback(inserted); }, + "GcsInMemoryStore.Put"); } return Status::OK(); } @@ -45,7 +44,7 @@ Status InMemoryStoreClient::AsyncPut(const std::string &table_name, Status InMemoryStoreClient::AsyncGet(const std::string &table_name, const std::string &key, const OptionalItemCallback &callback) { - RAY_CHECK(callback != nullptr); + RAY_CHECK(callback); auto table = GetOrCreateTable(table_name); absl::MutexLock lock(&(table->mutex_)); auto iter = table->records_.find(key); @@ -66,9 +65,10 @@ Status InMemoryStoreClient::AsyncGetAll( const std::string &table_name, const MapCallback &callback) { RAY_CHECK(callback); + auto result = absl::flat_hash_map(); auto table = GetOrCreateTable(table_name); absl::MutexLock lock(&(table->mutex_)); - auto result = absl::flat_hash_map(); + result.reserve(table->records_.size()); result.insert(table->records_.begin(), table->records_.end()); main_io_service_.post( [result = std::move(result), callback]() mutable { callback(std::move(result)); }, @@ -81,10 +81,10 @@ Status InMemoryStoreClient::AsyncMultiGet( const std::vector &keys, const MapCallback &callback) { RAY_CHECK(callback); + auto result = absl::flat_hash_map(); auto table = GetOrCreateTable(table_name); absl::MutexLock lock(&(table->mutex_)); - auto result = absl::flat_hash_map(); - for (auto &key : keys) { + for (const auto &key : keys) { auto it = table->records_.find(key); if (it == table->records_.end()) { continue; @@ -104,7 +104,7 @@ Status InMemoryStoreClient::AsyncDelete(const std::string &table_name, absl::MutexLock lock(&(table->mutex_)); auto num = table->records_.erase(key); if (callback != nullptr) { - main_io_service_.post([callback, num]() { callback(num > 0); }, + main_io_service_.post([callback = std::move(callback), num]() { callback(num > 0); }, "GcsInMemoryStore.Delete"); } return Status::OK(); @@ -120,7 +120,7 @@ Status InMemoryStoreClient::AsyncBatchDelete(const std::string &table_name, num += table->records_.erase(key); } if (callback != nullptr) { - main_io_service_.post([callback, num]() { callback(num); }, + main_io_service_.post([callback = std::move(callback), num]() { 
callback(num); }, "GcsInMemoryStore.BatchDelete"); } return Status::OK(); @@ -138,11 +138,10 @@ std::shared_ptr InMemoryStoreClient::GetOrCr auto iter = tables_.find(table_name); if (iter != tables_.end()) { return iter->second; - } else { - auto table = std::make_shared(); - tables_[table_name] = table; - return table; } + auto table = std::make_shared(); + tables_[table_name] = table; + return table; } Status InMemoryStoreClient::AsyncGetKeys( @@ -150,16 +149,18 @@ Status InMemoryStoreClient::AsyncGetKeys( const std::string &prefix, std::function)> callback) { RAY_CHECK(callback); - auto table = GetOrCreateTable(table_name); std::vector result; + auto table = GetOrCreateTable(table_name); absl::MutexLock lock(&(table->mutex_)); - for (auto &pair : table->records_) { - if (pair.first.find(prefix) == 0) { - result.push_back(pair.first); + for (const auto &[key, _] : table->records_) { + if (key.find(prefix) == 0) { + result.emplace_back(key); } } main_io_service_.post( - [result = std::move(result), callback]() mutable { callback(std::move(result)); }, + [result = std::move(result), callback = std::move(callback)]() mutable { + callback(std::move(result)); + }, "GcsInMemoryStore.Keys"); return Status::OK(); } @@ -171,11 +172,9 @@ Status InMemoryStoreClient::AsyncExists(const std::string &table_name, auto table = GetOrCreateTable(table_name); absl::MutexLock lock(&(table->mutex_)); bool result = table->records_.contains(key); - main_io_service_.post([result, callback]() mutable { callback(result); }, + main_io_service_.post([result, callback = std::move(callback)]() { callback(result); }, "GcsInMemoryStore.Exists"); return Status::OK(); } -} // namespace gcs - -} // namespace ray +} // namespace ray::gcs diff --git a/src/ray/gcs/store_client/in_memory_store_client.h b/src/ray/gcs/store_client/in_memory_store_client.h index a4ea7bc47ac6..6590d608f833 100644 --- a/src/ray/gcs/store_client/in_memory_store_client.h +++ b/src/ray/gcs/store_client/in_memory_store_client.h @@ -14,15 +14,17 @@ #pragma once +#include +#include +#include + #include "absl/container/flat_hash_map.h" #include "absl/synchronization/mutex.h" #include "ray/common/asio/instrumented_io_context.h" #include "ray/gcs/store_client/store_client.h" #include "src/ray/protobuf/gcs.pb.h" -namespace ray { - -namespace gcs { +namespace ray::gcs { /// \class InMemoryStoreClient /// Please refer to StoreClient for API semantics. @@ -77,7 +79,7 @@ class InMemoryStoreClient : public StoreClient { }; std::shared_ptr GetOrCreateTable( - const std::string &table_name); + const std::string &table_name) ABSL_LOCKS_EXCLUDED(mutex_); /// Mutex to protect the tables_ field. absl::Mutex mutex_; @@ -88,9 +90,8 @@ class InMemoryStoreClient : public StoreClient { /// of the callback. instrumented_io_context &main_io_service_; - int job_id_ = 0; + /// Current job id, auto-increment when request next-id. + int job_id_ ABSL_GUARDED_BY(mutex_) = 0; }; -} // namespace gcs - -} // namespace ray +} // namespace ray::gcs From 0da70aa366c0c4ebdc21647763b06d11ebbad22d Mon Sep 17 00:00:00 2001 From: Cindy Zhang Date: Wed, 20 Nov 2024 15:05:17 -0800 Subject: [PATCH 041/107] [serve] refactor replica (#48744) ## Why are these changes needed? Pull out implementation of Serve replica into an implementation class `ReplicaBase` / `Replica`, and only keep public actor methods that need to be called from the controller in `ReplicaActor`. `ReplicaActor` calls into the replica implementation for most methods. 
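At a high level, the split looks like the following sketch (names simplified and bodies hypothetical; the real classes are in the `replica.py` diff below, and the bullet points that follow list the concrete changes):

```python
import asyncio
from abc import ABC, abstractmethod


class ReplicaBase(ABC):
    """Implementation interface: request handling, metrics, lifecycle hooks."""

    @abstractmethod
    async def handle_request(self, request_metadata, *args, **kwargs):
        ...


class Replica(ReplicaBase):
    async def handle_request(self, request_metadata, *args, **kwargs):
        # The real logic (wrapping user method calls, metrics, etc.) lives here.
        return f"handled {request_metadata!r}"


class ReplicaActor:
    """Public actor surface: deserializes inputs, then delegates to the impl."""

    def __init__(self):
        # In the PR this goes through create_replica_impl() for pluggability.
        self._impl: ReplicaBase = Replica()

    async def handle_request(self, request_metadata, *args, **kwargs):
        return await self._impl.handle_request(request_metadata, *args, **kwargs)


print(asyncio.run(ReplicaActor().handle_request("req-1")))  # handled 'req-1'
```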
- most serialization/deserialization is done in `ReplicaActor` - add private abstract methods `_on_request_cancelled`, `_on_request_failed`, `_on_initialized`. - make `_wrap_user_method_call` an abstract method that needs to be implemented. --------- Signed-off-by: Cindy Zhang --- python/ray/serve/_private/default_impl.py | 6 + python/ray/serve/_private/replica.py | 499 +++++++++++------- .../serve/tests/test_controller_recovery.py | 4 +- 3 files changed, 307 insertions(+), 202 deletions(-) diff --git a/python/ray/serve/_private/default_impl.py b/python/ray/serve/_private/default_impl.py index f47dfa85c178..489f0aaa25f9 100644 --- a/python/ray/serve/_private/default_impl.py +++ b/python/ray/serve/_private/default_impl.py @@ -56,6 +56,12 @@ def create_deployment_scheduler( ) +def create_replica_impl(**kwargs): + from ray.serve._private.replica import Replica + + return Replica(**kwargs) + + def create_dynamic_handle_options(**kwargs): return DynamicHandleOptions(**kwargs) diff --git a/python/ray/serve/_private/replica.py b/python/ray/serve/_private/replica.py index b90c837b6cc0..3f26136e7bbc 100644 --- a/python/ray/serve/_private/replica.py +++ b/python/ray/serve/_private/replica.py @@ -7,6 +7,7 @@ import threading import time import traceback +from abc import ABC, abstractmethod from contextlib import contextmanager from functools import wraps from importlib import import_module @@ -42,6 +43,7 @@ SERVE_LOGGER_NAME, SERVE_NAMESPACE, ) +from ray.serve._private.default_impl import create_replica_impl from ray.serve._private.http_util import ( ASGIAppReplicaWrapper, ASGIArgs, @@ -231,31 +233,25 @@ def _add_autoscaling_metrics_point(self) -> None: ) -class ReplicaActor: - """Actor definition for replicas of Ray Serve deployments. - - This class defines the interface that the controller and deployment handles - (i.e., from proxies and other replicas) use to interact with a replica. - +class ReplicaBase(ABC): + """ All interaction with the user-provided callable is done via the `UserCallableWrapper` class.
""" - async def __init__( + def __init__( self, replica_id: ReplicaID, - serialized_deployment_def: bytes, - serialized_init_args: bytes, - serialized_init_kwargs: bytes, - deployment_config_proto_bytes: bytes, + deployment_def: Callable, + init_args: Tuple, + init_kwargs: Dict, + deployment_config: DeploymentConfig, version: DeploymentVersion, ): self._version = version self._replica_id = replica_id self._deployment_id = replica_id.deployment_id - self._deployment_config = DeploymentConfig.from_proto_bytes( - deployment_config_proto_bytes - ) + self._deployment_config = deployment_config self._component_name = f"{self._deployment_id.name}" if self._deployment_id.app_name: self._component_name = ( @@ -266,14 +262,10 @@ async def __init__( self._configure_logger_and_profilers(self._deployment_config.logging_config) self._event_loop = get_or_create_event_loop() - deployment_def = cloudpickle.loads(serialized_deployment_def) - if isinstance(deployment_def, str): - deployment_def = _load_deployment_def_from_import_path(deployment_def) - self._user_callable_wrapper = UserCallableWrapper( deployment_def, - cloudpickle.loads(serialized_init_args), - cloudpickle.loads(serialized_init_kwargs), + init_args, + init_kwargs, deployment_id=self._deployment_id, ) @@ -308,6 +300,7 @@ def _set_internal_replica_context(self, *, servable_object: Callable = None): def _configure_logger_and_profilers( self, logging_config: Union[None, Dict, LoggingConfig] ): + if logging_config is None: logging_config = {} if isinstance(logging_config, dict): @@ -330,15 +323,7 @@ def _configure_logger_and_profilers( component_id=self._component_id, ) - def push_proxy_handle(self, handle: ActorHandle): - pass - - def get_num_ongoing_requests(self) -> int: - """Fetch the number of ongoing requests at this replica (queue length). - - This runs on a separate thread (using a Ray concurrency group) so it will - not be blocked by user code. - """ + def get_num_ongoing_requests(self): return self._metrics_manager.get_num_ongoing_requests() def _maybe_get_asgi_route( @@ -375,27 +360,7 @@ def _maybe_get_asgi_route( return route @contextmanager - def _wrap_user_method_call( - self, request_metadata: RequestMetadata, request_args: Tuple[Any] - ): - """Context manager that wraps user method calls. - - 1) Sets the request context var with appropriate metadata. - 2) Records the access log message (if not disabled). - 3) Records per-request metrics via the metrics manager. 
- """ - route = self._maybe_get_asgi_route(request_metadata, request_args) - ray.serve.context._serve_request_context.set( - ray.serve.context._RequestContext( - route=route, - request_id=request_metadata.request_id, - _internal_request_id=request_metadata.internal_request_id, - app_name=self._deployment_id.app_name, - multiplexed_model_id=request_metadata.multiplexed_model_id, - grpc_context=request_metadata.grpc_context, - ) - ) - + def _handle_errors_and_metrics(self, request_metadata): start_time = time.time() user_exception = None try: @@ -403,20 +368,11 @@ def _wrap_user_method_call( yield except asyncio.CancelledError as e: user_exception = e - - # Recursively cancel child requests - requests_pending_assignment = ( - ray.serve.context._get_requests_pending_assignment( - request_metadata.internal_request_id - ) - ) - for task in requests_pending_assignment.values(): - task.cancel() + self._on_request_cancelled(request_metadata, e) except Exception as e: user_exception = e logger.exception("Request failed.") - if ray.util.pdb._is_ray_debugger_post_mortem_enabled(): - ray.util.pdb._post_mortem() + self._on_request_failed(request_metadata, e) finally: self._metrics_manager.dec_num_ongoing_requests() @@ -437,7 +393,7 @@ def _wrap_user_method_call( extra={"serve_access_log": True}, ) self._metrics_manager.record_request_metrics( - route=route, + route=request_metadata.route, status_str=status_str, latency_ms=latency_ms, was_error=user_exception is not None, @@ -516,13 +472,8 @@ def _enqueue_thread_safe(item: Any): wait_for_message_task.cancel() async def handle_request( - self, - pickled_request_metadata: bytes, - *request_args, - **request_kwargs, + self, request_metadata, *request_args, **request_kwargs ) -> Tuple[bytes, Any]: - """Entrypoint for `stream=False` calls.""" - request_metadata = pickle.loads(pickled_request_metadata) with self._wrap_user_method_call(request_metadata, request_args): return await asyncio.wrap_future( self._user_callable_wrapper.call_user_method( @@ -531,13 +482,8 @@ async def handle_request( ) async def handle_request_streaming( - self, - pickled_request_metadata: bytes, - *request_args, - **request_kwargs, + self, request_metadata, *request_args, **request_kwargs ) -> AsyncGenerator[Any, None]: - """Generator that is the entrypoint for all `stream=True` handle calls.""" - request_metadata = pickle.loads(pickled_request_metadata) with self._wrap_user_method_call(request_metadata, request_args): async for result in self._call_user_generator( request_metadata, @@ -547,24 +493,8 @@ async def handle_request_streaming( yield result async def handle_request_with_rejection( - self, - pickled_request_metadata: bytes, - *request_args, - **request_kwargs, - ) -> AsyncGenerator[Any, None]: - """Entrypoint for all requests with strict max_ongoing_requests enforcement. - - The first response from this generator is always a system message indicating - if the request was accepted (the replica has capacity for the request) or - rejected (the replica is already at max_ongoing_requests). - - For non-streaming requests, there will only be one more message, the unary - result of the user request handler. - - For streaming requests, the subsequent messages will be the results of the - user request handler (which must be a generator). 
- """ - request_metadata = pickle.loads(pickled_request_metadata) + self, request_metadata, *request_args, **request_kwargs + ): limit = self._deployment_config.max_ongoing_requests num_ongoing_requests = self.get_num_ongoing_requests() if num_ongoing_requests >= limit: @@ -573,21 +503,17 @@ async def handle_request_with_rejection( f"rejecting request {request_metadata.request_id}.", extra={"log_to_stderr": False}, ) - yield pickle.dumps( - ReplicaQueueLengthInfo( - accepted=False, num_ongoing_requests=num_ongoing_requests - ) + yield ReplicaQueueLengthInfo( + accepted=False, num_ongoing_requests=num_ongoing_requests ) return with self._wrap_user_method_call(request_metadata, request_args): - yield pickle.dumps( - ReplicaQueueLengthInfo( - accepted=True, - # NOTE(edoakes): `_wrap_user_method_call` will increment the number - # of ongoing requests to include this one, so re-fetch the value. - num_ongoing_requests=self.get_num_ongoing_requests(), - ) + yield ReplicaQueueLengthInfo( + accepted=True, + # NOTE(edoakes): `_wrap_user_method_call` will increment the number + # of ongoing requests to include this one, so re-fetch the value. + num_ongoing_requests=self.get_num_ongoing_requests(), ) if request_metadata.is_streaming: @@ -604,80 +530,23 @@ async def handle_request_with_rejection( ) ) - async def handle_request_from_java( - self, - proto_request_metadata: bytes, - *request_args, - **request_kwargs, - ) -> Any: - from ray.serve.generated.serve_pb2 import ( - RequestMetadata as RequestMetadataProto, - ) - - proto = RequestMetadataProto.FromString(proto_request_metadata) - request_metadata: RequestMetadata = RequestMetadata( - request_id=proto.request_id, - internal_request_id=proto.internal_request_id, - call_method=proto.call_method, - multiplexed_model_id=proto.multiplexed_model_id, - route=proto.route, - ) - with self._wrap_user_method_call(request_metadata, request_args): - return await asyncio.wrap_future( - self._user_callable_wrapper.call_user_method( - request_metadata, request_args, request_kwargs - ) - ) + @abstractmethod + async def _on_initialized(self): + raise NotImplementedError - async def is_allocated(self) -> str: - """poke the replica to check whether it's alive. - - When calling this method on an ActorHandle, it will complete as - soon as the actor has started running. We use this mechanism to - detect when a replica has been allocated a worker slot. - At this time, the replica can transition from PENDING_ALLOCATION - to PENDING_INITIALIZATION startup state. - - Returns: - The PID, actor ID, node ID, node IP, and log filepath id of the replica. - """ - - return ( - os.getpid(), - ray.get_runtime_context().get_actor_id(), - ray.get_runtime_context().get_worker_id(), - ray.get_runtime_context().get_node_id(), - ray.util.get_node_ip_address(), - get_component_logger_file_path(), - ) - - async def initialize_and_get_metadata( - self, - deployment_config: DeploymentConfig = None, - _after: Optional[Any] = None, - ) -> Tuple[DeploymentConfig, DeploymentVersion, Optional[float], Optional[int]]: - """Handles initializing the replica. - - Returns: 3-tuple containing - 1. DeploymentConfig of the replica - 2. DeploymentVersion of the replica - 3. Initialization duration in seconds - """ - # Unused `_after` argument is for scheduling: passing an ObjectRef - # allows delaying this call until after the `_after` call has returned. + async def initialize(self, deployment_config): try: # Ensure that initialization is only performed once. 
# When controller restarts, it will call this method again. async with self._user_callable_initialized_lock: - initialization_start_time = time.time() + self._initialization_start_time = time.time() if not self._user_callable_initialized: self._user_callable_asgi_app = await asyncio.wrap_future( self._user_callable_wrapper.initialize_callable() ) + await self._on_initialized() self._user_callable_initialized = True - self._set_internal_replica_context( - servable_object=self._user_callable_wrapper.user_callable - ) + if deployment_config: await asyncio.wrap_future( self._user_callable_wrapper.call_reconfigure( @@ -689,20 +558,10 @@ async def initialize_and_get_metadata( # an initial health check. If an initial health check fails, # consider it an initialization failure. await self.check_health() - - # Save the initialization latency if the replica is initializing - # for the first time. - if self._initialization_latency is None: - self._initialization_latency = time.time() - initialization_start_time - - return self._get_metadata() except Exception: raise RuntimeError(traceback.format_exc()) from None - async def reconfigure( - self, - deployment_config: DeploymentConfig, - ) -> Tuple[DeploymentConfig, DeploymentVersion, Optional[float], Optional[int]]: + async def reconfigure(self, deployment_config: DeploymentConfig): try: user_config_changed = ( deployment_config.user_config != self._deployment_config.user_config @@ -734,12 +593,10 @@ async def reconfigure( self._set_internal_replica_context( servable_object=self._user_callable_wrapper.user_callable ) - - return self._get_metadata() except Exception: raise RuntimeError(traceback.format_exc()) from None - def _get_metadata( + def get_metadata( self, ) -> Tuple[DeploymentConfig, DeploymentVersion, Optional[float], Optional[int]]: return ( @@ -749,26 +606,22 @@ def _get_metadata( self._port, ) - def _save_cpu_profile_data(self) -> str: - """Saves CPU profiling data, if CPU profiling is enabled. - - Logs a warning if CPU profiling is disabled. - """ + @abstractmethod + def _on_request_cancelled( + self, request_metadata: RequestMetadata, e: asyncio.CancelledError + ): + pass - if self.cpu_profiler is not None: - import marshal + @abstractmethod + def _on_request_failed(self, request_metadata: RequestMetadata, e: Exception): + pass - self.cpu_profiler.snapshot_stats() - with open(self.cpu_profiler_log, "wb") as f: - marshal.dump(self.cpu_profiler.stats, f) - logger.info(f'Saved CPU profile data to file "{self.cpu_profiler_log}"') - return self.cpu_profiler_log - else: - logger.error( - "Attempted to save CPU profile data, but failed because no " - "CPU profiler was running! Enable CPU profiling by enabling " - "the RAY_SERVE_ENABLE_CPU_PROFILING env var." - ) + @abstractmethod + @contextmanager + def _wrap_user_method_call( + self, request_metadata: RequestMetadata, request_args: Tuple[Any] + ): + pass async def _drain_ongoing_requests(self): """Wait for any ongoing requests to finish. @@ -825,6 +678,252 @@ async def check_health(self): await asyncio.wrap_future(f) +class Replica(ReplicaBase): + async def _on_initialized(self): + self._set_internal_replica_context( + servable_object=self._user_callable_wrapper.user_callable + ) + + # Save the initialization latency if the replica is initializing + # for the first time. 
+ if self._initialization_latency is None: + self._initialization_latency = time.time() - self._initialization_start_time + + def _on_request_cancelled( + self, request_metadata: RequestMetadata, e: asyncio.CancelledError + ): + """Recursively cancels child requests.""" + requests_pending_assignment = ( + ray.serve.context._get_requests_pending_assignment( + request_metadata.internal_request_id + ) + ) + for task in requests_pending_assignment.values(): + task.cancel() + + def _on_request_failed(self, request_metadata: RequestMetadata, e: Exception): + if ray.util.pdb._is_ray_debugger_post_mortem_enabled(): + ray.util.pdb._post_mortem() + + @contextmanager + def _wrap_user_method_call( + self, request_metadata: RequestMetadata, request_args: Tuple[Any] + ): + """Context manager that wraps user method calls. + + 1) Sets the request context var with appropriate metadata. + 2) Records the access log message (if not disabled). + 3) Records per-request metrics via the metrics manager. + """ + route = self._maybe_get_asgi_route(request_metadata, request_args) + request_metadata.route = route + + ray.serve.context._serve_request_context.set( + ray.serve.context._RequestContext( + route=request_metadata.route, + request_id=request_metadata.request_id, + _internal_request_id=request_metadata.internal_request_id, + app_name=self._deployment_id.app_name, + multiplexed_model_id=request_metadata.multiplexed_model_id, + grpc_context=request_metadata.grpc_context, + ) + ) + + with self._handle_errors_and_metrics(request_metadata): + yield + + +class ReplicaActor: + """Actor definition for replicas of Ray Serve deployments. + + This class defines the interface that the controller and deployment handles + (i.e., from proxies and other replicas) use to interact with a replica. + + All interaction with the user-provided callable is done via the + `UserCallableWrapper` class. + """ + + async def __init__( + self, + replica_id: ReplicaID, + serialized_deployment_def: bytes, + serialized_init_args: bytes, + serialized_init_kwargs: bytes, + deployment_config_proto_bytes: bytes, + version: DeploymentVersion, + ): + deployment_config = DeploymentConfig.from_proto_bytes( + deployment_config_proto_bytes + ) + deployment_def = cloudpickle.loads(serialized_deployment_def) + if isinstance(deployment_def, str): + deployment_def = _load_deployment_def_from_import_path(deployment_def) + + self._replica_impl: ReplicaBase = create_replica_impl( + replica_id=replica_id, + deployment_def=deployment_def, + init_args=cloudpickle.loads(serialized_init_args), + init_kwargs=cloudpickle.loads(serialized_init_kwargs), + deployment_config=deployment_config, + version=version, + ) + + def push_proxy_handle(self, handle: ActorHandle): + pass + + def get_num_ongoing_requests(self) -> int: + """Fetch the number of ongoing requests at this replica (queue length). + + This runs on a separate thread (using a Ray concurrency group) so it will + not be blocked by user code. + """ + return self._replica_impl.get_num_ongoing_requests() + + async def is_allocated(self) -> str: + """poke the replica to check whether it's alive. + + When calling this method on an ActorHandle, it will complete as + soon as the actor has started running. We use this mechanism to + detect when a replica has been allocated a worker slot. + At this time, the replica can transition from PENDING_ALLOCATION + to PENDING_INITIALIZATION startup state. + + Returns: + The PID, actor ID, node ID, node IP, and log filepath id of the replica. 
+ """ + + return ( + os.getpid(), + ray.get_runtime_context().get_actor_id(), + ray.get_runtime_context().get_worker_id(), + ray.get_runtime_context().get_node_id(), + ray.util.get_node_ip_address(), + get_component_logger_file_path(), + ) + + async def initialize_and_get_metadata( + self, deployment_config: DeploymentConfig = None, _after: Optional[Any] = None + ): + """Handles initializing the replica. + + Returns: 3-tuple containing + 1. DeploymentConfig of the replica + 2. DeploymentVersion of the replica + 3. Initialization duration in seconds + """ + # Unused `_after` argument is for scheduling: passing an ObjectRef + # allows delaying this call until after the `_after` call has returned. + await self._replica_impl.initialize(deployment_config) + return self._replica_impl.get_metadata() + + async def check_health(self): + await self._replica_impl.check_health() + + async def reconfigure( + self, deployment_config + ) -> Tuple[DeploymentConfig, DeploymentVersion, Optional[float], Optional[int]]: + await self._replica_impl.reconfigure(deployment_config) + return self._replica_impl.get_metadata() + + async def handle_request( + self, + pickled_request_metadata: bytes, + *request_args, + **request_kwargs, + ) -> Tuple[bytes, Any]: + """Entrypoint for `stream=False` calls.""" + request_metadata = pickle.loads(pickled_request_metadata) + return await self._replica_impl.handle_request( + request_metadata, *request_args, **request_kwargs + ) + + async def handle_request_streaming( + self, + pickled_request_metadata: bytes, + *request_args, + **request_kwargs, + ) -> AsyncGenerator[Any, None]: + """Generator that is the entrypoint for all `stream=True` handle calls.""" + request_metadata = pickle.loads(pickled_request_metadata) + async for result in self._replica_impl.handle_request_streaming( + request_metadata, *request_args, **request_kwargs + ): + yield result + + async def handle_request_with_rejection( + self, + pickled_request_metadata: bytes, + *request_args, + **request_kwargs, + ) -> AsyncGenerator[Any, None]: + """Entrypoint for all requests with strict max_ongoing_requests enforcement. + + The first response from this generator is always a system message indicating + if the request was accepted (the replica has capacity for the request) or + rejected (the replica is already at max_ongoing_requests). + + For non-streaming requests, there will only be one more message, the unary + result of the user request handler. + + For streaming requests, the subsequent messages will be the results of the + user request handler (which must be a generator). 
+ """ + request_metadata = pickle.loads(pickled_request_metadata) + async for result in self._replica_impl.handle_request_with_rejection( + request_metadata, *request_args, **request_kwargs + ): + if isinstance(result, ReplicaQueueLengthInfo): + yield pickle.dumps(result) + else: + yield result + + async def handle_request_from_java( + self, + proto_request_metadata: bytes, + *request_args, + **request_kwargs, + ) -> Any: + from ray.serve.generated.serve_pb2 import ( + RequestMetadata as RequestMetadataProto, + ) + + proto = RequestMetadataProto.FromString(proto_request_metadata) + request_metadata: RequestMetadata = RequestMetadata( + request_id=proto.request_id, + internal_request_id=proto.internal_request_id, + call_method=proto.call_method, + multiplexed_model_id=proto.multiplexed_model_id, + route=proto.route, + ) + return await self._replica_impl.handle_request( + request_metadata, *request_args, **request_kwargs + ) + + async def perform_graceful_shutdown(self): + await self._replica_impl.perform_graceful_shutdown() + + def _save_cpu_profile_data(self) -> str: + """Saves CPU profiling data, if CPU profiling is enabled. + + Logs a warning if CPU profiling is disabled. + """ + + if self.cpu_profiler is not None: + import marshal + + self.cpu_profiler.snapshot_stats() + with open(self.cpu_profiler_log, "wb") as f: + marshal.dump(self.cpu_profiler.stats, f) + logger.info(f'Saved CPU profile data to file "{self.cpu_profiler_log}"') + return self.cpu_profiler_log + else: + logger.error( + "Attempted to save CPU profile data, but failed because no " + "CPU profiler was running! Enable CPU profiling by enabling " + "the RAY_SERVE_ENABLE_CPU_PROFILING env var." + ) + + class UserCallableWrapper: """Wraps a user-provided callable that is used to handle requests to a replica.""" @@ -997,7 +1096,7 @@ async def initialize_callable(self) -> Optional[ASGIApp]: def _raise_if_not_initialized(self, method_name: str): if self._callable is None: raise RuntimeError( - "`initialize_callable` must be called before `{method_name}`." + f"`initialize_callable` must be called before `{method_name}`." ) def call_user_health_check(self) -> Optional[concurrent.futures.Future]: diff --git a/python/ray/serve/tests/test_controller_recovery.py b/python/ray/serve/tests/test_controller_recovery.py index 0042323221b3..56674b9682dc 100644 --- a/python/ray/serve/tests/test_controller_recovery.py +++ b/python/ray/serve/tests/test_controller_recovery.py @@ -64,7 +64,7 @@ def __call__(self, *args): replica_version_hash = None for replica in deployment_dict[id]: - ref = replica.actor_handle._get_metadata.remote() + ref = replica.actor_handle.initialize_and_get_metadata.remote() _, version, _, _ = ray.get(ref) if replica_version_hash is None: replica_version_hash = hash(version) @@ -116,7 +116,7 @@ def __call__(self, *args): # Ensure recovered replica version has are the same for replica_name in recovered_replica_names: actor_handle = ray.get_actor(replica_name, namespace=SERVE_NAMESPACE) - ref = actor_handle._get_metadata.remote() + ref = actor_handle.initialize_and_get_metadata.remote() _, version, _, _ = ray.get(ref) assert replica_version_hash == hash( version From e73df11865d8973f2089cc6e285e3f3b4351dfb9 Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Wed, 20 Nov 2024 17:34:23 -0800 Subject: [PATCH 042/107] [core] install core prerelease deps together (#48826) don't install one by one; otherwise, they might run into unintended dependency conflicts between each other. 
Signed-off-by: Lonnie Liu --- ci/env/install-core-prerelease-dependencies.sh | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/ci/env/install-core-prerelease-dependencies.sh b/ci/env/install-core-prerelease-dependencies.sh index 498ecd024581..55ba3b1e55f9 100755 --- a/ci/env/install-core-prerelease-dependencies.sh +++ b/ci/env/install-core-prerelease-dependencies.sh @@ -5,7 +5,5 @@ set -e # install all unbounded dependencies in setup.py for ray core # TOOD(scv119) reenable grpcio once https://github.com/grpc/grpc/issues/31885 is fixed. # TOOD(scv119) reenable jsonschema once https://github.com/ray-project/ray/issues/33411 is fixed. -for dependency in aiosignal frozenlist requests protobuf -do - python -m pip install -U --pre --upgrade-strategy=eager $dependency -done +DEPS=(aiosignal frozenlist requests protobuf) +python -m pip install -U --pre --upgrade-strategy=eager "${DEPS[@]}" From 154915d3f3e2d65fd7dd7c840ad8f77959bf54b8 Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Wed, 20 Nov 2024 21:31:12 -0800 Subject: [PATCH 043/107] [gce launcher] change nightly and latest tests to manual (#48835) they are not testing against a release candidate. Signed-off-by: Lonnie Liu --- release/release_tests.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/release/release_tests.yaml b/release/release_tests.yaml index 91cb2e9331ea..d8aad5b4bf72 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -5067,7 +5067,7 @@ stable: true env: gce - frequency: nightly + frequency: manual team: clusters cluster: byod: {} @@ -5084,7 +5084,7 @@ stable: true env: gce - frequency: nightly + frequency: manual team: clusters cluster: byod: {} From 2dbd08a46f7f08ea614d8dd20fd0bca5682a3078 Mon Sep 17 00:00:00 2001 From: Alexey Kudinkin Date: Wed, 20 Nov 2024 23:02:26 -0800 Subject: [PATCH 044/107] [Arrow] Revisiting ChunkedArray combination protocol to avoid int32 offsets overflow (#48754) ## Why are these changes needed? > NOTE: This is a follow-up for https://github.com/ray-project/ray/pull/48487 This PR addresses challenges of handling batches larger than 2 GiB that are known to overflow standard Arrow types relying on int32 offsets.
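For intuition, here is a minimal sketch of the hazard and the workaround (a hypothetical helper, not the PR's actual `_try_combine_chunks_safe`; assumes only `pyarrow` is available):

```python
import pyarrow as pa


# Hypothetical helper: binary/string arrays index their values buffer with
# int32 offsets, so combining chunks whose total payload exceeds ~2 GiB fails
# with an "offset overflow while concatenating arrays" error. Casting to the
# large_* variants (int64 offsets) before combining avoids the overflow.
def combine_chunks_safe(chunked: pa.ChunkedArray) -> pa.Array:
    if chunked.nbytes > 2**31 - 1:
        if pa.types.is_binary(chunked.type):
            chunked = chunked.cast(pa.large_binary())
        elif pa.types.is_string(chunked.type):
            chunked = chunked.cast(pa.large_string())
    return chunked.combine_chunks()


# Small-scale usage (a real overflow needs > 2 GiB of values):
chunked = pa.chunked_array([pa.array([b"a", b"b"]), pa.array([b"c"])])
print(combine_chunks_safe(chunked))  # one contiguous Array of 3 binary values
```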
Changes --- - Unified all `ChunkedArray` combination attempts to call into `combine_chunked_array` - Introduced `_try_combine_chunks_safe`, properly handling the cases when a single PA `Array` would exceed the 2 GiB threshold - Added tests - Minor clean ups --------- Signed-off-by: Alexey Kudinkin --- python/ray/air/data_batch_type.py | 8 +- python/ray/air/util/data_batch_conversion.py | 51 ++-- .../ray/air/util/tensor_extensions/arrow.py | 22 +- python/ray/data/BUILD | 4 +- python/ray/data/_internal/arrow_block.py | 39 ++- .../_internal/arrow_ops/transform_pyarrow.py | 172 +++++++++++- python/ray/data/_internal/batcher.py | 7 +- python/ray/data/tests/test_arrow_block.py | 253 +++++++++++++++++- python/ray/data/tests/test_binary.py | 63 ----- 9 files changed, 473 insertions(+), 146 deletions(-) diff --git a/python/ray/air/data_batch_type.py b/python/ray/air/data_batch_type.py index a6fad4591d35..5d5d09b3218e 100644 --- a/python/ray/air/data_batch_type.py +++ b/python/ray/air/data_batch_type.py @@ -2,6 +2,10 @@ if TYPE_CHECKING: import numpy - import pandas + import pandas  # noqa: F401 + import pyarrow -DataBatchType = Union["numpy.ndarray", "pandas.DataFrame", Dict[str, "numpy.ndarray"]] +# TODO de-dup with ray.data.block.DataBatch +DataBatchType = Union[ + "numpy.ndarray", "pyarrow.Table", "pandas.DataFrame", Dict[str, "numpy.ndarray"] +] diff --git a/python/ray/air/util/data_batch_conversion.py b/python/ray/air/util/data_batch_conversion.py index 4fe7a8ab2ea9..1bf69b4b9398 100644 --- a/python/ray/air/util/data_batch_conversion.py +++ b/python/ray/air/util/data_batch_conversion.py @@ -6,9 +6,6 @@ from ray.air.constants import TENSOR_COLUMN_NAME from ray.air.data_batch_type import DataBatchType -from ray.air.util.tensor_extensions.arrow import ( - get_arrow_extension_fixed_shape_tensor_types, -) from ray.util.annotations import Deprecated, DeveloperAPI if TYPE_CHECKING: @@ -220,37 +217,31 @@ def _convert_batch_type_to_numpy( ) return data elif pyarrow is not None and isinstance(data, pyarrow.Table): - from ray.air.util.transform_pyarrow import ( - _concatenate_extension_column, - _is_column_extension_type, + from ray.air.util.tensor_extensions.arrow import ( + get_arrow_extension_fixed_shape_tensor_types, ) + from ray.data._internal.arrow_ops import transform_pyarrow - if data.column_names == [TENSOR_COLUMN_NAME] and ( - isinstance( - data.schema.types[0], get_arrow_extension_fixed_shape_tensor_types() + column_values_ndarrays = [] + + for col in data.columns: + # Combine columnar values arrays to make these contiguous + # (making them compatible with numpy format) + combined_array = transform_pyarrow.combine_chunked_array(col) + + column_values_ndarrays.append( + transform_pyarrow.to_numpy(combined_array, zero_copy_only=False) ) + + arrow_fixed_shape_tensor_types = get_arrow_extension_fixed_shape_tensor_types() + + # NOTE: This branch is here for backwards-compatibility + if data.column_names == [TENSOR_COLUMN_NAME] and ( + isinstance(data.schema.types[0], arrow_fixed_shape_tensor_types) ): - # If representing a tensor dataset, return as a single numpy array.
- # Example: ray.data.from_numpy(np.arange(12).reshape((3, 2, 2))) - # Arrow’s incorrect concatenation of extension arrays: - # https://issues.apache.org/jira/browse/ARROW-16503 - return _concatenate_extension_column(data[TENSOR_COLUMN_NAME]).to_numpy( - zero_copy_only=False - ) - else: - output_dict = {} - for col_name in data.column_names: - col = data[col_name] - if col.num_chunks == 0: - col = pyarrow.array([], type=col.type) - elif _is_column_extension_type(col): - # Arrow’s incorrect concatenation of extension arrays: - # https://issues.apache.org/jira/browse/ARROW-16503 - col = _concatenate_extension_column(col) - else: - col = col.combine_chunks() - output_dict[col_name] = col.to_numpy(zero_copy_only=False) - return output_dict + return column_values_ndarrays[0] + + return dict(zip(data.column_names, column_values_ndarrays)) elif isinstance(data, pd.DataFrame): return _convert_pandas_to_batch_type(data, BatchFormat.NUMPY) else: diff --git a/python/ray/air/util/tensor_extensions/arrow.py b/python/ray/air/util/tensor_extensions/arrow.py index 1dc1a4984826..ebe01c792458 100644 --- a/python/ray/air/util/tensor_extensions/arrow.py +++ b/python/ray/air/util/tensor_extensions/arrow.py @@ -29,9 +29,15 @@ # Minimum version of Arrow that supports subclassable ExtensionScalars. # TODO(Clark): Remove conditional definition once we only support Arrow 9.0.0+. MIN_PYARROW_VERSION_SCALAR_SUBCLASS = parse_version("9.0.0") +# Minimum version supporting `zero_copy_only` flag in `ChunkedArray.to_numpy` +MIN_PYARROW_VERSION_CHUNKED_ARRAY_TO_NUMPY_ZERO_COPY_ONLY = parse_version("13.0.0") NUM_BYTES_PER_UNICODE_CHAR = 4 +# NOTE: Overflow threshold in bytes for most Arrow types using int32 as +# its offsets +INT32_OVERFLOW_THRESHOLD = 2 * GiB + logger = logging.getLogger(__name__) @@ -212,7 +218,7 @@ def _infer_pyarrow_type(column_values: np.ndarray) -> Optional[pa.DataType]: inferred_pa_dtype = pa.infer_type(column_values) - def _len_gt_2gb(obj: Any) -> bool: + def _len_gt_overflow_threshold(obj: Any) -> bool: # NOTE: This utility could be seeing objects other than strings or bytes in # cases when column contains non-scalar non-homogeneous object types as # column values, therefore making Arrow unable to infer corresponding @@ -221,16 +227,16 @@ def _len_gt_2gb(obj: Any) -> bool: # # Check out test cases for this method for an additional context. if isinstance(obj, (str, bytes)): - return len(obj) > 2 * GiB + return len(obj) > INT32_OVERFLOW_THRESHOLD return False if pa.types.is_binary(inferred_pa_dtype) and any( - [_len_gt_2gb(v) for v in column_values] + [_len_gt_overflow_threshold(v) for v in column_values] ): return pa.large_binary() elif pa.types.is_string(inferred_pa_dtype) and any( - [_len_gt_2gb(v) for v in column_values] + [_len_gt_overflow_threshold(v) for v in column_values] ): return pa.large_string() @@ -569,7 +575,13 @@ def from_numpy( # Stack ndarrays and pass through to ndarray handling logic below. try: arr = np.stack(arr, axis=0) - except ValueError: + except ValueError as ve: + logger.warning( + f"Failed to stack lists due to: {ve}; " + f"falling back to using np.array(..., dtype=object)", + exc_info=ve, + ) + # ndarray stacking may fail if the arrays are heterogeneously-shaped. 
arr = np.array(arr, dtype=object) if not isinstance(arr, np.ndarray): diff --git a/python/ray/data/BUILD b/python/ray/data/BUILD index e5b7845d9454..d46db0940c6e 100644 --- a/python/ray/data/BUILD +++ b/python/ray/data/BUILD @@ -99,7 +99,7 @@ py_test( py_test( name = "test_arrow_block", - size = "small", + size = "medium", srcs = ["tests/test_arrow_block.py"], tags = ["team:data", "exclusive"], deps = ["//:ray_lib", ":conftest"], @@ -163,7 +163,7 @@ py_test( py_test( name = "test_binary", - size = "medium", + size = "small", srcs = ["tests/test_binary.py"], tags = ["team:data", "exclusive"], deps = ["//:ray_lib", ":conftest"], diff --git a/python/ray/data/_internal/arrow_block.py b/python/ray/data/_internal/arrow_block.py index 02acc2eb5769..1473b8fb6e3b 100644 --- a/python/ray/data/_internal/arrow_block.py +++ b/python/ray/data/_internal/arrow_block.py @@ -219,7 +219,7 @@ def _build_tensor_row( def slice(self, start: int, end: int, copy: bool = False) -> "pyarrow.Table": view = self._table.slice(start, end - start) if copy: - view = _copy_table(view) + view = transform_pyarrow.combine_chunks(view) return view def random_shuffle(self, random_seed: Optional[int]) -> "pyarrow.Table": @@ -245,11 +245,6 @@ def to_pandas(self) -> "pandas.DataFrame": def to_numpy( self, columns: Optional[Union[str, List[str]]] = None ) -> Union[np.ndarray, Dict[str, np.ndarray]]: - from ray.air.util.transform_pyarrow import ( - _concatenate_extension_column, - _is_column_extension_type, - ) - if columns is None: columns = self._table.column_names should_be_single_ndarray = False @@ -267,23 +262,24 @@ def to_numpy( f"{column_names_set}" ) - arrays = [] - for column in columns: - array = self._table[column] - if _is_column_extension_type(array): - array = _concatenate_extension_column(array) - elif array.num_chunks == 0: - array = pyarrow.array([], type=array.type) - else: - array = array.combine_chunks() - arrays.append(array.to_numpy(zero_copy_only=False)) + column_values_ndarrays = [] + + for col_name in columns: + col = self._table[col_name] + + # Combine columnar values arrays to make these contiguous + # (making them compatible with numpy format) + combined_array = transform_pyarrow.combine_chunked_array(col) + + column_values_ndarrays.append( + transform_pyarrow.to_numpy(combined_array, zero_copy_only=False) + ) if should_be_single_ndarray: assert len(columns) == 1 - arrays = arrays[0] + return column_values_ndarrays[0] else: - arrays = dict(zip(columns, arrays)) - return arrays + return dict(zip(columns, column_values_ndarrays)) def to_arrow(self) -> "pyarrow.Table": return self._table @@ -652,8 +648,3 @@ def gen(): def block_type(self) -> BlockType: return BlockType.ARROW - - -def _copy_table(table: "pyarrow.Table") -> "pyarrow.Table": - """Copy the provided Arrow table.""" - return transform_pyarrow.combine_chunks(table) diff --git a/python/ray/data/_internal/arrow_ops/transform_pyarrow.py b/python/ray/data/_internal/arrow_ops/transform_pyarrow.py index d51f9a990ad1..a71a1eae6f61 100644 --- a/python/ray/data/_internal/arrow_ops/transform_pyarrow.py +++ b/python/ray/data/_internal/arrow_ops/transform_pyarrow.py @@ -1,8 +1,14 @@ from typing import TYPE_CHECKING, List, Union +import numpy as np from packaging.version import parse as parse_version from ray._private.utils import _get_pyarrow_version +from ray.air.util.tensor_extensions.arrow import ( + INT32_OVERFLOW_THRESHOLD, + MIN_PYARROW_VERSION_CHUNKED_ARRAY_TO_NUMPY_ZERO_COPY_ONLY, + PYARROW_VERSION, +) try: import pyarrow @@ -328,24 +334,164 @@ def 
concat_and_sort( return take_table(ret, indices) +def to_numpy( + array: Union["pyarrow.Array", "pyarrow.ChunkedArray"], + *, + zero_copy_only: bool = True, +) -> np.ndarray: + """Wrapper for `Array`s and `ChunkedArray`s `to_numpy` API, + handling API divergence b/w Arrow versions""" + + import pyarrow as pa + + if isinstance(array, pa.Array): + return array.to_numpy(zero_copy_only=zero_copy_only) + elif isinstance(array, pa.ChunkedArray): + if PYARROW_VERSION >= MIN_PYARROW_VERSION_CHUNKED_ARRAY_TO_NUMPY_ZERO_COPY_ONLY: + return array.to_numpy(zero_copy_only=zero_copy_only) + else: + return array.to_numpy() + else: + raise ValueError( + f"Either of `Array` or `ChunkedArray` was expected, got {type(array)}" + ) + + def combine_chunks(table: "pyarrow.Table") -> "pyarrow.Table": - """This is pyarrow.Table.combine_chunks() - with support for extension types. + """This is counterpart for Pyarrow's `Table.combine_chunks` that's using + extended `ChunkedArray` combination protocol. - This will create a new table by combining the chunks the input table has. + For more details check out `combine_chunked_array` py-doc """ + + new_column_values_arrays = [] + + for col in table.columns: + new_column_values_arrays.append(combine_chunked_array(col)) + + return pyarrow.Table.from_arrays(new_column_values_arrays, schema=table.schema) + + +def combine_chunked_array( + array: "pyarrow.ChunkedArray", +) -> Union["pyarrow.Array", "pyarrow.ChunkedArray"]: + """This is counterpart for Pyarrow's `ChunkedArray.combine_chunks` that additionally + + 1. Handles `ExtensionType`s (like ArrowTensorType, ArrowTensorTypeV2, + ArrowPythonObjectType, etc) + + 2. Making sure `ChunkedArray`s comprising provided `Table` are combined + safely, ie avoiding overflows of Arrow's internal offsets (using int32 for + most of its native types, other than "large" kind). + + For more details check py-doc of `_try_combine_chunks_safe` method. + """ + + import pyarrow as pa + from ray.air.util.transform_pyarrow import ( _concatenate_extension_column, _is_column_extension_type, ) - cols = table.columns - new_cols = [] - for col in cols: - if _is_column_extension_type(col): - # Extension arrays don't support concatenation. - arr = _concatenate_extension_column(col) - else: - arr = col.combine_chunks() - new_cols.append(arr) - return pyarrow.Table.from_arrays(new_cols, schema=table.schema) + assert isinstance( + array, pa.ChunkedArray + ), f"Expected `ChunkedArray`, got {type(array)}" + + if _is_column_extension_type(array): + # Arrow `ExtensionArray`s can't be concatenated via `combine_chunks`, + # hence require manual concatenation + return _concatenate_extension_column(array) + elif len(array.chunks) == 0: + # NOTE: In case there's no chunks, we need to explicitly create + # an empty array since calling into `combine_chunks` would fail + # due to it expecting at least 1 chunk to be present + return pa.array([], type=array.type) + else: + return _try_combine_chunks_safe(array) + + +def _try_combine_chunks_safe( + array: "pyarrow.ChunkedArray", max_chunk_size=INT32_OVERFLOW_THRESHOLD +) -> Union["pyarrow.Array", "pyarrow.ChunkedArray"]: + """This method provides a safe way of combining `ChunkedArray`s exceeding 2 GiB + in size, which aren't using "large_*" types (and therefore relying on int32 + offsets). 
+ + When handling provided `ChunkedArray` this method will be either + + - Relying on PyArrow's default `combine_chunks` (therefore returning single + contiguous `Array`) in cases when + - Array's total size is < 2 GiB + - Array's underlying type is of "large" kind (ie using one of the + `large_*` type family) + - Safely combining subsets of tasks such that resulting `Array`s to not + exceed 2 GiB in size (therefore returning another `ChunkedArray` albeit + with potentially smaller number of chunks that have resulted from clumping + the original ones) + + Returns: + - pa.Array if it's possible to combine provided pa.ChunkedArray into single + contiguous array + - pa.ChunkedArray (albeit with chunks re-combined) if it's not possible to + produce single pa.Array + """ + + import pyarrow as pa + + from ray.air.util.transform_pyarrow import _is_column_extension_type + + assert not _is_column_extension_type( + array + ), f"Arrow `ExtensionType`s are not accepted (got {array.type})" + + int64_type_predicates = [ + pa.types.is_large_list, + pa.types.is_large_string, + pa.types.is_large_binary, + pa.types.is_large_unicode, + ] + + if array.nbytes < max_chunk_size or any( + p(array.type) for p in int64_type_predicates + ): + # It's safe to combine provided `ChunkedArray` in either of 2 cases: + # - It's cumulative size is < 2 GiB + # - It's of 'large' kind (ie one using int64 offsets internally) + return array.combine_chunks() + + # In this case it's actually *NOT* safe to try to directly combine + # Arrow's `ChunkedArray` and is impossible to produce single, contiguous + # `Array` since + # - It's estimated to hold > 2 GiB + # - Its type is not of the "large" kind (and hence is using int32 + # offsets internally, which would overflow) + # + # In this case instead of combining into single contiguous array, we + # instead just "clump" existing chunks into bigger ones, but no bigger + # than 2 GiB each. + # + # NOTE: This branch actually returns `ChunkedArray` and not an `Array` + + # To stay under 2 GiB limit we are slicing provided list of chunks into + # slices no larger than 2 GiB (as compared to just directly using `concat_arrays`) + slices = [] + + cur_slice_start = 0 + cur_slice_size_bytes = 0 + + for i, chunk in enumerate(array.chunks): + chunk_size = chunk.nbytes + + if cur_slice_size_bytes + chunk_size > max_chunk_size: + slices.append(array.chunks[cur_slice_start:i]) + + cur_slice_start = i + cur_slice_size_bytes = 0 + + cur_slice_size_bytes += chunk_size + + # Add remaining chunks as last slice + slices.append(array.chunks[cur_slice_start:]) + + return pa.chunked_array([pa.concat_arrays(s) for s in slices]) diff --git a/python/ray/data/_internal/batcher.py b/python/ray/data/_internal/batcher.py index 104e3c7ae51d..d27ed089f03f 100644 --- a/python/ray/data/_internal/batcher.py +++ b/python/ray/data/_internal/batcher.py @@ -11,7 +11,7 @@ # See https://github.com/ray-project/ray/issues/31108 for more details. # TODO(jjyao): remove this once # https://github.com/apache/arrow/issues/35126 is resolved. -MIN_NUM_CHUNKS_TO_TRIGGER_COMBINE_CHUNKS = 2 +MIN_NUM_CHUNKS_TO_TRIGGER_COMBINE_CHUNKS = 10 # Delay compaction until the shuffle buffer has reached this ratio over the min # shuffle buffer size. Setting this to 1 minimizes memory usage, at the cost of @@ -130,10 +130,7 @@ def next_batch(self) -> Block: # the leftovers. leftover.append(block) elif accessor.num_rows() <= needed: - # We need this entire block to fill out a batch. 
- # We need to call `accessor.slice()` to ensure - # the subsequent block's type are the same. - output.add_block(accessor.slice(0, accessor.num_rows(), copy=False)) + output.add_block(accessor.to_block()) needed -= accessor.num_rows() else: if ( diff --git a/python/ray/data/tests/test_arrow_block.py b/python/ray/data/tests/test_arrow_block.py index e72e6f2c4fc4..22fe9f7ef7a0 100644 --- a/python/ray/data/tests/test_arrow_block.py +++ b/python/ray/data/tests/test_arrow_block.py @@ -1,15 +1,266 @@ +import gc +import os +import sys import types +from tempfile import TemporaryDirectory +from typing import Union import numpy as np import pyarrow as pa import pytest +from pyarrow import parquet as pq import ray from ray._private.test_utils import run_string_as_driver +from ray.air.util.tensor_extensions.arrow import ArrowTensorArray +from ray.data import DataContext from ray.data._internal.arrow_block import ArrowBlockAccessor +from ray.data._internal.arrow_ops.transform_pyarrow import combine_chunked_array +from ray.data._internal.util import GiB, MiB from ray.data.extensions.object_extension import _object_extension_type_allowed +@pytest.fixture(scope="module") +def parquet_dataset_single_column_gt_2gb(): + chunk_size = 256 * MiB + num_chunks = 10 + + total_column_size = chunk_size * 10 # ~2.5 GiB + + with TemporaryDirectory() as tmp_dir: + dataset_path = f"{tmp_dir}/large_parquet_chunk_{chunk_size}" + + # Create directory + os.mkdir(dataset_path) + + for i in range(num_chunks): + chunk = b"a" * chunk_size + + d = {"id": [i], "bin": [chunk]} + t = pa.Table.from_pydict(d) + + print(f">>> Table schema: {t.schema} (size={sys.getsizeof(t)})") + + filepath = f"{dataset_path}/chunk_{i}.parquet" + pq.write_table(t, filepath) + + print(f">>> Created a chunk #{i}") + + print(f">>> Created dataset at {dataset_path}") + + yield dataset_path, num_chunks, total_column_size + + print(f">>> Cleaning up dataset at {dataset_path}") + + +@pytest.fixture(scope="module") +def binary_dataset_single_file_gt_2gb(): + total_size = int(2.1 * GiB) + chunk_size = 256 * MiB + num_chunks = total_size // chunk_size + remainder = total_size % chunk_size + + with TemporaryDirectory() as tmp_dir: + dataset_path = f"{tmp_dir}/binary_dataset_gt_2gb_single_file" + + # Create directory + os.mkdir(dataset_path) + + with open(f"{dataset_path}/chunk.bin", "wb") as f: + for i in range(num_chunks): + f.write(b"a" * chunk_size) + + print(f">>> Written chunk #{i}") + + if remainder: + f.write(b"a" * remainder) + + print(f">>> Wrote chunked dataset at: {dataset_path}") + + yield dataset_path, total_size + + print(f">>> Cleaning up dataset: {dataset_path}") + + +@pytest.mark.parametrize( + "col_name", + [ + "bytes", + # TODO fix numpy conversion + # "text", + ], +) +def test_single_row_gt_2gb( + ray_start_regular_shared, + restore_data_context, + binary_dataset_single_file_gt_2gb, + col_name, +): + # Disable (automatic) fallback to `ArrowPythonObjectType` extension type + DataContext.get_current().enable_fallback_to_arrow_object_ext_type = False + + dataset_path, target_binary_size = binary_dataset_single_file_gt_2gb + + def _id(row): + bs = row[col_name] + assert round(len(bs) / GiB, 1) == round(target_binary_size / GiB, 1) + return row + + if col_name == "text": + ds = ray.data.read_text(dataset_path) + elif col_name == "bytes": + ds = ray.data.read_binary_files(dataset_path) + + total = ds.map(_id).count() + + assert total == 1 + + +@pytest.mark.parametrize( + "op", + [ + "map", + "map_batches", + ], +) +def test_arrow_batch_gt_2gb( + 
ray_start_regular_shared, + parquet_dataset_single_column_gt_2gb, + restore_data_context, + op, +): + # Disable (automatic) fallback to `ArrowPythonObjectType` extension type + DataContext.get_current().enable_fallback_to_arrow_object_ext_type = False + + dataset_path, num_rows, total_column_size = parquet_dataset_single_column_gt_2gb + + def _id(x): + return x + + ds = ray.data.read_parquet(dataset_path) + + if op == "map": + ds = ds.map(_id) + elif op == "map_batches": + # Combine all rows into a single batch using `map_batches` coercing to + # numpy format + ds = ds.map_batches( + _id, + batch_format="numpy", + batch_size=num_rows, + zero_copy_batch=False, + ) + + batch = ds.take_batch() + + total_binary_column_size = sum([len(b) for b in batch["bin"]]) + + print( + f">>> Batch:\n" + f"------\n" + "Column: 'id'" + f"Values: {batch['id']}\n" + f"------\n" + "Column: 'bin'" + f"Total: {total_binary_column_size / GiB} GiB\n" + f"Values: {[str(v)[:3] + ' x ' + str(len(v)) for v in batch['bin']]}\n" + ) + + assert total_binary_column_size == total_column_size + + # Clean up refs + del batch + del ds + # Force GC to free up object store memory + gc.collect() + + +@pytest.mark.parametrize( + "input_,expected_output", + [ + # Empty chunked array + (pa.chunked_array([], type=pa.int8()), pa.array([], type=pa.int8())), + # Fixed-shape tensors + ( + pa.chunked_array( + [ + ArrowTensorArray.from_numpy(np.arange(3).reshape(3, 1)), + ArrowTensorArray.from_numpy(np.arange(3).reshape(3, 1)), + ] + ), + ArrowTensorArray.from_numpy( + np.concatenate( + [ + np.arange(3).reshape(3, 1), + np.arange(3).reshape(3, 1), + ] + ) + ), + ), + # Ragged (variable-shaped) tensors + ( + pa.chunked_array( + [ + ArrowTensorArray.from_numpy(np.arange(3).reshape(3, 1)), + ArrowTensorArray.from_numpy(np.arange(5).reshape(5, 1)), + ] + ), + ArrowTensorArray.from_numpy( + np.concatenate( + [ + np.arange(3).reshape(3, 1), + np.arange(5).reshape(5, 1), + ] + ) + ), + ), + # Small (< 2 GiB) arrays + ( + pa.chunked_array( + [ + pa.array([1, 2, 3], type=pa.int16()), + pa.array([4, 5, 6], type=pa.int16()), + ] + ), + pa.array([1, 2, 3, 4, 5, 6], type=pa.int16()), + ), + ], +) +def test_combine_chunked_array_small( + input_, expected_output: Union[pa.Array, pa.ChunkedArray] +): + result = combine_chunked_array(input_) + + expected_output.equals(result) + + +def test_combine_chunked_array_large(): + """Verifies `combine_chunked_array` on arrays > 2 GiB""" + + # 144 MiB + ones_1gb = np.ones(shape=(550, 128, 128, 4), dtype=np.int32()).ravel() + + # Total ~2.15 GiB + input_ = pa.chunked_array( + [ + pa.array(ones_1gb), + ] + * 16 + ) + + assert round(input_.nbytes / GiB, 2) == 2.15 + + result = combine_chunked_array(input_) + + assert isinstance(result, pa.ChunkedArray) + assert len(result.chunks) == 2 + + # Should re-combine first provided 14 chunks into 1 + assert result.chunks[0].nbytes == sum([c.nbytes for c in input_.chunks[:14]]) + # Remaining 2 go into the second one + assert result.chunks[1].nbytes == sum([c.nbytes for c in input_.chunks[14:]]) + + def test_append_column(ray_start_regular_shared): animals = ["Flamingo", "Centipede"] num_legs = [2, 100] @@ -81,6 +332,4 @@ def fn2(batch): if __name__ == "__main__": - import sys - sys.exit(pytest.main(["-v", __file__])) diff --git a/python/ray/data/tests/test_binary.py b/python/ray/data/tests/test_binary.py index ba0eb7b7a9ba..f1735da802f7 100644 --- a/python/ray/data/tests/test_binary.py +++ b/python/ray/data/tests/test_binary.py @@ -1,7 +1,5 @@ import os from io import BytesIO 
-from tempfile import TemporaryDirectory -from typing import Optional import pandas as pd import pyarrow as pa @@ -11,7 +9,6 @@ import ray from ray.data import Schema -from ray.data._internal.util import GiB, MiB from ray.data.datasource import ( BaseFileMetadataProvider, FastFileMetadataProvider, @@ -198,66 +195,6 @@ def skip_unpartitioned(kv_dict): ) -def _gen_chunked_binary( - dir_path: str, total_size: int, max_file_size: Optional[int] = None -): - # NOTE: This util is primed to be writing even single large binary files - # in chunks to reduce memory requirements while doing so - chunk_size = max_file_size or 256 * MiB - num_chunks = total_size // chunk_size - remainder = total_size % chunk_size - - if max_file_size is not None and max_file_size < total_size: - for i in range(num_chunks): - filename = f"part_{i}.bin" - with open(f"{dir_path}/{filename}", "wb") as f: - f.write(b"a" * chunk_size) - - print(f">>> Written file: {filename}") - - else: - with open(f"{dir_path}/chunk.bin", "wb") as f: - for i in range(num_chunks): - f.write(b"a" * chunk_size) - - print(f">>> Written chunk #{i}") - - if remainder: - f.write(b"a" * remainder) - - print(f">>> Wrote chunked dataset at: {dir_path}") - - -@pytest.mark.parametrize( - "col_name", - [ - "bytes", - # TODO fix numpy conversion - # "text", - ], -) -def test_single_row_gt_2gb(ray_start_regular_shared, col_name): - with TemporaryDirectory() as tmp_dir: - target_binary_size_gb = 2.1 - - # Write out single file > 2Gb - _gen_chunked_binary(tmp_dir, total_size=int(target_binary_size_gb * GiB)) - - def _id(row): - bs = row[col_name] - assert round(len(bs) / GiB, 1) == target_binary_size_gb - return row - - if col_name == "text": - ds = ray.data.read_text(tmp_dir) - elif col_name == "bytes": - ds = ray.data.read_binary_files(tmp_dir) - - total = ds.map(_id).count() - - assert total == 1 - - if __name__ == "__main__": import sys From e51cec6a6f7d2f9a2506aaa5543daad0d5ad35e9 Mon Sep 17 00:00:00 2001 From: Sven Mika Date: Thu, 21 Nov 2024 13:38:56 +0100 Subject: [PATCH 045/107] [RLlib] APPO enhancements (new API stack) vol 03: Fix target network update setting and logic. (#48802) --- rllib/algorithms/appo/appo.py | 39 ++++++++++++++++++--------- rllib/algorithms/appo/appo_learner.py | 33 +++++++++-------------- 2 files changed, 39 insertions(+), 33 deletions(-) diff --git a/rllib/algorithms/appo/appo.py b/rllib/algorithms/appo/appo.py index 3632ffab954b..b27e96b02d16 100644 --- a/rllib/algorithms/appo/appo.py +++ b/rllib/algorithms/appo/appo.py @@ -100,7 +100,6 @@ def __init__(self, algo_class=None): # __sphinx_doc_begin__ # APPO specific settings: self.vtrace = True - self.use_critic = True self.use_gae = True self.lambda_ = 1.0 self.clip_param = 0.4 @@ -120,7 +119,7 @@ def __init__(self, algo_class=None): # Override some of IMPALAConfig's default values with APPO-specific values. self.num_env_runners = 2 self.min_time_s_per_iteration = 10 - self.target_network_update_freq = 1 + self.target_network_update_freq = 2 self.broadcast_interval = 1 self.grad_clip = 40.0 # Note: Only when using enable_rl_module_and_learner=True can the clipping mode @@ -151,26 +150,27 @@ def __init__(self, algo_class=None): # Deprecated keys. 
self.target_update_frequency = DEPRECATED_VALUE + self.use_critic = DEPRECATED_VALUE @override(IMPALAConfig) def training( self, *, vtrace: Optional[bool] = NotProvided, - use_critic: Optional[bool] = NotProvided, use_gae: Optional[bool] = NotProvided, lambda_: Optional[float] = NotProvided, clip_param: Optional[float] = NotProvided, use_kl_loss: Optional[bool] = NotProvided, kl_coeff: Optional[float] = NotProvided, kl_target: Optional[float] = NotProvided, - tau: Optional[float] = NotProvided, target_network_update_freq: Optional[int] = NotProvided, + tau: Optional[float] = NotProvided, target_worker_clipping: Optional[float] = NotProvided, circular_buffer_num_batches: Optional[int] = NotProvided, circular_buffer_iterations_per_batch: Optional[int] = NotProvided, # Deprecated keys. target_update_frequency=DEPRECATED_VALUE, + use_critic=DEPRECATED_VALUE, **kwargs, ) -> "APPOConfig": """Sets the training related configuration. @@ -178,8 +178,6 @@ def training( Args: vtrace: Whether to use V-trace weighted advantages. If false, PPO GAE advantages will be used instead. - use_critic: Should use a critic as a baseline (otherwise don't use value - baseline; required for using GAE). Only applies if vtrace=False. use_gae: If true, use the Generalized Advantage Estimator (GAE) with a value function, see https://arxiv.org/pdf/1506.02438.pdf. Only applies if vtrace=False. @@ -189,9 +187,18 @@ def training( kl_coeff: Coefficient for weighting the KL-loss term. kl_target: Target term for the KL-term to reach (via adjusting the `kl_coeff` automatically). - tau: The factor by which to update the target policy network towards - the current policy network. Can range between 0 and 1. - e.g. updated_param = tau * current_param + (1 - tau) * target_param + target_network_update_freq: NOTE: This parameter is only applicable on + the new API stack. The frequency with which to update the target + policy network from the main trained policy network. The metric + used is `NUM_ENV_STEPS_TRAINED_LIFETIME` and the unit is `n` (see [1] + 4.1.1), where: `n = [circular_buffer_num_batches (N)] * + [circular_buffer_iterations_per_batch (K)] * [train batch size]` + For example, if you set `target_network_update_freq=2`, and N=4, K=2, + and `train_batch_size_per_learner=500`, then the target net is updated + every 2*4*2*500=8000 trained env steps (every 16 batch updates on each + learner). + The authors in [1] suggests that this setting is robust to a range of + choices (try values between 0.125 and 4). target_network_update_freq: The frequency to update the target policy and tune the kl loss coefficients that are used during training. After setting this parameter, the algorithm waits for at least @@ -199,6 +206,9 @@ def training( on before updating the target networks and tune the kl loss coefficients. NOTE: This parameter is only applicable when using the Learner API (enable_rl_module_and_learner=True). + tau: The factor by which to update the target policy network towards + the current policy network. Can range between 0 and 1. + e.g. updated_param = tau * current_param + (1 - tau) * target_param target_worker_clipping: The maximum value for the target-worker-clipping used for computing the IS ratio, described in [1] IS = min(π(i) / π(target), ρ) * (π / π(i)) @@ -220,14 +230,17 @@ def training( new="target_network_update_freq", error=True, ) + if use_critic != DEPRECATED_VALUE: + deprecation_warning( + old="use_critic", + error=True, + ) # Pass kwargs onto super's `training()` method. 
super().training(**kwargs) if vtrace is not NotProvided: self.vtrace = vtrace - if use_critic is not NotProvided: - self.use_critic = use_critic if use_gae is not NotProvided: self.use_gae = use_gae if lambda_ is not NotProvided: @@ -240,10 +253,10 @@ def training( self.kl_coeff = kl_coeff if kl_target is not NotProvided: self.kl_target = kl_target - if tau is not NotProvided: - self.tau = tau if target_network_update_freq is not NotProvided: self.target_network_update_freq = target_network_update_freq + if tau is not NotProvided: + self.tau = tau if target_worker_clipping is not NotProvided: self.target_worker_clipping = target_worker_clipping if circular_buffer_num_batches is not NotProvided: diff --git a/rllib/algorithms/appo/appo_learner.py b/rllib/algorithms/appo/appo_learner.py index 920d7b7ea992..431449893264 100644 --- a/rllib/algorithms/appo/appo_learner.py +++ b/rllib/algorithms/appo/appo_learner.py @@ -12,8 +12,9 @@ from ray.rllib.utils.annotations import override from ray.rllib.utils.lambda_defaultdict import LambdaDefaultDict from ray.rllib.utils.metrics import ( + ALL_MODULES, LAST_TARGET_UPDATE_TS, - NUM_ENV_STEPS_SAMPLED_LIFETIME, + NUM_ENV_STEPS_TRAINED_LIFETIME, NUM_MODULE_STEPS_TRAINED, NUM_TARGET_UPDATES, ) @@ -86,30 +87,22 @@ def after_gradient_based_update(self, *, timesteps: Dict[str, Any]) -> None: """Updates the target Q Networks.""" super().after_gradient_based_update(timesteps=timesteps) - timestep = timesteps.get(NUM_ENV_STEPS_SAMPLED_LIFETIME, 0) - # TODO (sven): Maybe we should have a `after_gradient_based_update` # method per module? + curr_timestep = self.metrics.peek((ALL_MODULES, NUM_ENV_STEPS_TRAINED_LIFETIME)) for module_id, module in self.module._rl_modules.items(): config = self.config.get_config_for_module(module_id) - # TODO (avnish) Using steps trained here instead of sampled ... I'm not sure - # why the other implementation uses sampled. - # The difference in steps sampled/trained is pretty - # much always going to be larger than self.config.num_epochs * - # self.config.minibatch_buffer_size unless the number of steps collected - # is really small. The thing is that the default rollout fragment length - # is 50, so the minibatch buffer size * num_epochs is going to be - # have to be 50 to even meet the threshold of having delayed target - # updates. - # We should instead have the target / kl threshold update be based off - # of the train_batch_size * some target update frequency * num_epochs. - last_update_ts_key = (module_id, LAST_TARGET_UPDATE_TS) - if timestep - self.metrics.peek( - last_update_ts_key, default=0 - ) >= config.target_network_update_freq and isinstance( - module.unwrapped(), TargetNetworkAPI + if isinstance(module.unwrapped(), TargetNetworkAPI) and ( + curr_timestep - self.metrics.peek(last_update_ts_key, default=0) + >= ( + config.target_network_update_freq + * config.circular_buffer_num_batches + * config.circular_buffer_iterations_per_batch + * config.total_train_batch_size + / (config.num_learners or 1) + ) ): for ( main_net, @@ -123,7 +116,7 @@ def after_gradient_based_update(self, *, timesteps: Dict[str, Any]) -> None: # Increase lifetime target network update counter by one. self.metrics.log_value((module_id, NUM_TARGET_UPDATES), 1, reduce="sum") # Update the (single-value -> window=1) last updated timestep metric. 
- self.metrics.log_value(last_update_ts_key, timestep, window=1) + self.metrics.log_value(last_update_ts_key, curr_timestep, window=1) if ( config.use_kl_loss From 569f7df9067c5654fb57ba7bc4792b3ba5aaa846 Mon Sep 17 00:00:00 2001 From: Edward Oakes Date: Thu, 21 Nov 2024 07:54:01 -0600 Subject: [PATCH 046/107] [serve] Improve access log messages (#48819) ## Why are these changes needed? Improves the Serve access log to include HTTP status information on the replica and better resemble standard log formats like `uvicorn`. I'm updated the log format to: - Include the HTTP method, route and status code for HTTP requests like: "GET / 200". - Use "CALL", the method name, and "OK"/"ERROR" for `DeploymentHandle` calls like: "CALL method_name OK". - Stop logging the `route` information on every message and isolate it to the access log message instead. This adds clutter and it can be tied to the access log message using the request ID. - Stop logging the filename and line number in every log message. This has been on my TODO list for awhile after internal discussion. ### Examples Calling a handle method: ``` INFO 2024-11-20 13:51:44,803 default_D lswbv98w d0cadb74-9fd8-4a68-9e9a-c9ed20b091f4 -- CALL method OK 1.3ms ``` Calling a basic HTTP route: ``` INFO 2024-11-20 13:53:00,197 default_A zosusx8c 26891433-a91a-4d27-b543-0936feb5f5c1 -- GET / 200 4.1ms ``` Calling a wildcard HTTP route (and non-200 status code): ``` INFO 2024-11-20 13:53:58,102 default_A iovmsu5e 85000f14-8e31-42a5-a1f4-0fa2d39c549a -- GET /{wildcard} 422 3.7ms ``` --------- Signed-off-by: Edward Oakes --- python/ray/serve/_private/constants.py | 3 +- python/ray/serve/_private/logging_utils.py | 6 +- python/ray/serve/_private/proxy.py | 2 + .../serve/_private/proxy_request_response.py | 3 +- python/ray/serve/_private/replica.py | 113 ++++++++++++++---- .../serve/tests/test_controller_recovery.py | 2 +- python/ray/serve/tests/test_logging.py | 113 ++++++++++++++++-- .../tests/unit/test_proxy_request_response.py | 2 +- 8 files changed, 197 insertions(+), 47 deletions(-) diff --git a/python/ray/serve/_private/constants.py b/python/ray/serve/_private/constants.py index 8fae61c95329..f4c2adac37d1 100644 --- a/python/ray/serve/_private/constants.py +++ b/python/ray/serve/_private/constants.py @@ -194,9 +194,8 @@ # Logging format with record key to format string dict SERVE_LOG_RECORD_FORMAT = { SERVE_LOG_REQUEST_ID: "%(request_id)s", - SERVE_LOG_ROUTE: "%(route)s", SERVE_LOG_APPLICATION: "%(application)s", - SERVE_LOG_MESSAGE: "%(filename)s:%(lineno)d - %(message)s", + SERVE_LOG_MESSAGE: "-- %(message)s", SERVE_LOG_LEVEL_NAME: "%(levelname)s", SERVE_LOG_TIME: "%(asctime)s", } diff --git a/python/ray/serve/_private/logging_utils.py b/python/ray/serve/_private/logging_utils.py index 207f0574c317..5081829670bc 100644 --- a/python/ray/serve/_private/logging_utils.py +++ b/python/ray/serve/_private/logging_utils.py @@ -141,8 +141,6 @@ def format(self, record: logging.LogRecord) -> str: record_formats_attrs = [] if SERVE_LOG_REQUEST_ID in record.__dict__: record_formats_attrs.append(SERVE_LOG_RECORD_FORMAT[SERVE_LOG_REQUEST_ID]) - if SERVE_LOG_ROUTE in record.__dict__: - record_formats_attrs.append(SERVE_LOG_RECORD_FORMAT[SERVE_LOG_ROUTE]) record_formats_attrs.append(SERVE_LOG_RECORD_FORMAT[SERVE_LOG_MESSAGE]) record_format += " ".join(record_formats_attrs) @@ -153,9 +151,9 @@ def format(self, record: logging.LogRecord) -> str: return formatter.format(record) -def access_log_msg(*, method: str, status: str, latency_ms: float): +def 
access_log_msg(*, method: str, route: str, status: str, latency_ms: float): """Returns a formatted message for an HTTP or ServeHandle access log.""" - return f"{method.upper()} {status.upper()} {latency_ms:.1f}ms" + return f"{method} {route} {status} {latency_ms:.1f}ms" def log_to_stderr_filter(record: logging.LogRecord) -> bool: diff --git a/python/ray/serve/_private/proxy.py b/python/ray/serve/_private/proxy.py index 54f6908072c6..2b5967a7a75f 100644 --- a/python/ray/serve/_private/proxy.py +++ b/python/ray/serve/_private/proxy.py @@ -454,9 +454,11 @@ async def proxy_request(self, proxy_request: ProxyRequest) -> ResponseGenerator: latency_ms = (time.time() - start_time) * 1000.0 if response_handler_info.should_record_access_log: + request_context = ray.serve.context._serve_request_context.get() logger.info( access_log_msg( method=proxy_request.method, + route=request_context.route, status=str(status.code), latency_ms=latency_ms, ), diff --git a/python/ray/serve/_private/proxy_request_response.py b/python/ray/serve/_private/proxy_request_response.py index 8050c4be215d..0ca2235fd3dd 100644 --- a/python/ray/serve/_private/proxy_request_response.py +++ b/python/ray/serve/_private/proxy_request_response.py @@ -58,7 +58,8 @@ def request_type(self) -> str: @property def method(self) -> str: - return self.scope.get("method", "websocket").upper() + # WebSocket messages don't have a 'method' field. + return self.scope.get("method", "WS").upper() @property def route_path(self) -> str: diff --git a/python/ray/serve/_private/replica.py b/python/ray/serve/_private/replica.py index 3f26136e7bbc..079e3039d5d0 100644 --- a/python/ray/serve/_private/replica.py +++ b/python/ray/serve/_private/replica.py @@ -11,7 +11,16 @@ from contextlib import contextmanager from functools import wraps from importlib import import_module -from typing import Any, AsyncGenerator, Callable, Dict, Optional, Tuple, Union +from typing import ( + Any, + AsyncGenerator, + Callable, + Dict, + Generator, + Optional, + Tuple, + Union, +) import starlette.responses from starlette.types import ASGIApp, Message @@ -233,12 +242,10 @@ def _add_autoscaling_metrics_point(self) -> None: ) -class ReplicaBase(ABC): - """ - All interaction with the user-provided callable is done via the - `UserCallableWrapper` class. - """ +StatusCodeCallback = Callable[[str], None] + +class ReplicaBase(ABC): def __init__( self, replica_id: ReplicaID, @@ -326,7 +333,7 @@ def _configure_logger_and_profilers( def get_num_ongoing_requests(self): return self._metrics_manager.get_num_ongoing_requests() - def _maybe_get_asgi_route( + def _maybe_get_http_route( self, request_metadata: RequestMetadata, request_args: Tuple[Any] ) -> Optional[str]: """Get the matched route string for ASGI apps to be used in logs & metrics. @@ -359,13 +366,36 @@ def _maybe_get_asgi_route( return route + def _maybe_get_http_method( + self, request_metadata: RequestMetadata, request_args: Tuple[Any] + ) -> Optional[str]: + """Get the HTTP method to be used in logs & metrics. + + If this is not an HTTP request, returns None. + """ + if request_metadata.is_http_request: + req: StreamingHTTPRequest = request_args[0] + # WebSocket messages don't have a 'method' field. 
+ return req.asgi_scope.get("method", "WS") + + return None + @contextmanager - def _handle_errors_and_metrics(self, request_metadata): + def _handle_errors_and_metrics( + self, request_metadata: RequestMetadata, request_args: Tuple[Any] + ) -> Generator[StatusCodeCallback, None, None]: start_time = time.time() user_exception = None + + status_code = None + + def _status_code_callback(s: str): + nonlocal status_code + status_code = s + try: self._metrics_manager.inc_num_ongoing_requests() - yield + yield _status_code_callback except asyncio.CancelledError as e: user_exception = e self._on_request_cancelled(request_metadata, e) @@ -384,16 +414,21 @@ def _handle_errors_and_metrics(self, request_metadata): else: status_str = "ERROR" + http_method = self._maybe_get_http_method(request_metadata, request_args) + http_route = request_metadata.route + # Set in _wrap_user_method_call. logger.info( access_log_msg( - method=request_metadata.call_method, - status=status_str, + method=http_method or "CALL", + route=http_route or request_metadata.call_method, + # Prefer the HTTP status code if it was populated. + status=status_code or status_str, latency_ms=latency_ms, ), extra={"serve_access_log": True}, ) self._metrics_manager.record_request_metrics( - route=request_metadata.route, + route=http_route, status_str=status_str, latency_ms=latency_ms, was_error=user_exception is not None, @@ -407,6 +442,7 @@ async def _call_user_generator( request_metadata: RequestMetadata, request_args: Tuple[Any], request_kwargs: Dict[str, Any], + status_code_callback: StatusCodeCallback, ) -> AsyncGenerator[Any, None]: """Calls a user method for a streaming call and yields its results. @@ -432,6 +468,7 @@ def _enqueue_thread_safe(item: Any): ) ) + first_message_peeked = False while True: wait_for_message_task = self._event_loop.create_task( result_queue.wait_for_message() @@ -448,6 +485,16 @@ def _enqueue_thread_safe(item: Any): # and use vanilla pickle (we know it's safe because these messages # only contain primitive Python types). if request_metadata.is_http_request: + # Peek the first ASGI message to determine the status code. + if not first_message_peeked: + msg = messages[0] + first_message_peeked = True + if msg["type"] == "http.response.start": + # HTTP responses begin with exactly one + # "http.response.start" message containing the "status" + # field. Other response types like WebSockets may not. 
+ status_code_callback(str(msg["status"])) + yield pickle.dumps(messages) else: for msg in messages: @@ -472,7 +519,7 @@ def _enqueue_thread_safe(item: Any): wait_for_message_task.cancel() async def handle_request( - self, request_metadata, *request_args, **request_kwargs + self, request_metadata: RequestMetadata, *request_args, **request_kwargs ) -> Tuple[bytes, Any]: with self._wrap_user_method_call(request_metadata, request_args): return await asyncio.wrap_future( @@ -482,18 +529,22 @@ async def handle_request( ) async def handle_request_streaming( - self, request_metadata, *request_args, **request_kwargs + self, request_metadata: RequestMetadata, *request_args, **request_kwargs ) -> AsyncGenerator[Any, None]: - with self._wrap_user_method_call(request_metadata, request_args): + """Generator that is the entrypoint for all `stream=True` handle calls.""" + with self._wrap_user_method_call( + request_metadata, request_args + ) as status_code_callback: async for result in self._call_user_generator( request_metadata, request_args, request_kwargs, + status_code_callback=status_code_callback, ): yield result async def handle_request_with_rejection( - self, request_metadata, *request_args, **request_kwargs + self, request_metadata: RequestMetadata, *request_args, **request_kwargs ): limit = self._deployment_config.max_ongoing_requests num_ongoing_requests = self.get_num_ongoing_requests() @@ -508,7 +559,9 @@ async def handle_request_with_rejection( ) return - with self._wrap_user_method_call(request_metadata, request_args): + with self._wrap_user_method_call( + request_metadata, request_args + ) as status_code_callback: yield ReplicaQueueLengthInfo( accepted=True, # NOTE(edoakes): `_wrap_user_method_call` will increment the number @@ -521,6 +574,7 @@ async def handle_request_with_rejection( request_metadata, request_args, request_kwargs, + status_code_callback=status_code_callback, ): yield result else: @@ -534,7 +588,7 @@ async def handle_request_with_rejection( async def _on_initialized(self): raise NotImplementedError - async def initialize(self, deployment_config): + async def initialize(self, deployment_config: DeploymentConfig): try: # Ensure that initialization is only performed once. # When controller restarts, it will call this method again. @@ -620,7 +674,7 @@ def _on_request_failed(self, request_metadata: RequestMetadata, e: Exception): @contextmanager def _wrap_user_method_call( self, request_metadata: RequestMetadata, request_args: Tuple[Any] - ): + ) -> Generator[StatusCodeCallback, None, None]: pass async def _drain_ongoing_requests(self): @@ -708,16 +762,16 @@ def _on_request_failed(self, request_metadata: RequestMetadata, e: Exception): @contextmanager def _wrap_user_method_call( self, request_metadata: RequestMetadata, request_args: Tuple[Any] - ): + ) -> Generator[StatusCodeCallback, None, None]: """Context manager that wraps user method calls. 1) Sets the request context var with appropriate metadata. 2) Records the access log message (if not disabled). 3) Records per-request metrics via the metrics manager. 
""" - route = self._maybe_get_asgi_route(request_metadata, request_args) - request_metadata.route = route - + request_metadata.route = self._maybe_get_http_route( + request_metadata, request_args + ) ray.serve.context._serve_request_context.set( ray.serve.context._RequestContext( route=request_metadata.route, @@ -729,8 +783,10 @@ def _wrap_user_method_call( ) ) - with self._handle_errors_and_metrics(request_metadata): - yield + with self._handle_errors_and_metrics( + request_metadata, request_args + ) as status_code_callback: + yield status_code_callback class ReplicaActor: @@ -1338,7 +1394,12 @@ async def call_user_method( ) except Exception: - if request_metadata.is_http_request and asgi_args is not None: + if ( + request_metadata.is_http_request + and asgi_args is not None + # If the callable is an ASGI app, it already sent a 500 status response. + and not is_asgi_app + ): await self._send_user_result_over_asgi( starlette.responses.Response( "Internal Server Error", status_code=500 diff --git a/python/ray/serve/tests/test_controller_recovery.py b/python/ray/serve/tests/test_controller_recovery.py index 56674b9682dc..51d641dbedfc 100644 --- a/python/ray/serve/tests/test_controller_recovery.py +++ b/python/ray/serve/tests/test_controller_recovery.py @@ -487,7 +487,7 @@ def check_proxy_handle_in_controller(): resp = requests.get("http://127.0.0.1:8000") assert resp.status_code == 200 wait_for_condition( - check_log_file, log_file=file_path, expected_regex=['.*"message":.*GET 200.*'] + check_log_file, log_file=file_path, expected_regex=['.*"message":.*GET / 200.*'] ) diff --git a/python/ray/serve/tests/test_logging.py b/python/ray/serve/tests/test_logging.py index b5b723a6d987..061dafd75fe0 100644 --- a/python/ray/serve/tests/test_logging.py +++ b/python/ray/serve/tests/test_logging.py @@ -14,13 +14,15 @@ import pytest import requests import starlette +from fastapi import FastAPI +from starlette.responses import PlainTextResponse import ray import ray.util.state as state_api from ray import serve from ray._private.ray_logging.formatters import JSONFormatter from ray._private.test_utils import wait_for_condition -from ray.serve._private.common import ReplicaID, ServeComponentType +from ray.serve._private.common import DeploymentID, ReplicaID, ServeComponentType from ray.serve._private.constants import SERVE_LOG_EXTRA_FIELDS, SERVE_LOGGER_NAME from ray.serve._private.logging_utils import ( ServeComponentFilter, @@ -97,6 +99,97 @@ def __call__(self): assert rotation_config["backup_count"] == backup_count +def test_http_access_log(serve_instance): + name = "deployment_name" + + fastapi_app = FastAPI() + + @serve.deployment(name=name) + @serve.ingress(fastapi_app) + class Handler: + def __init__(self): + self._replica_unique_id = serve.get_replica_context().replica_id.unique_id + + @fastapi_app.get("/") + def get_root(self): + return PlainTextResponse(self._replica_unique_id) + + @fastapi_app.post("/") + def post_root(self): + return PlainTextResponse(self._replica_unique_id) + + @fastapi_app.get("/{status}") + def template(self, status: str): + return PlainTextResponse(self._replica_unique_id, status_code=int(status)) + + @fastapi_app.put("/fail") + def fail(self): + raise RuntimeError("OOPS!") + + serve.run(Handler.bind()) + + f = io.StringIO() + with redirect_stderr(f): + + def check_log( + replica_id: ReplicaID, + method: str, + route: str, + status_code: str, + fail: bool = False, + ): + s = f.getvalue() + return all( + [ + name in s, + _get_expected_replica_log_content(replica_id) in s, + 
f"-- {method} {route} {status_code}" in s, + "ms" in s, + ("OOPS!" in s and "RuntimeError" in s) + if fail + else True, # Check for stacktrace. + ] + ) + + r = requests.get("http://localhost:8000/") + assert r.status_code == 200 + replica_id = ReplicaID(unique_id=r.text, deployment_id=DeploymentID(name=name)) + wait_for_condition( + check_log, replica_id=replica_id, method="GET", route="/", status_code="200" + ) + + r = requests.post("http://localhost:8000/") + assert r.status_code == 200 + wait_for_condition( + check_log, + replica_id=replica_id, + method="POST", + route="/", + status_code="200", + ) + + r = requests.get("http://localhost:8000/350") + assert r.status_code == 350 + wait_for_condition( + check_log, + replica_id=replica_id, + method="GET", + route="/{status}", + status_code="350", + ) + + r = requests.put("http://localhost:8000/fail") + assert r.status_code == 500 + wait_for_condition( + check_log, + replica_id=replica_id, + method="PUT", + route="/fail", + status_code="500", + fail=True, + ) + + def test_handle_access_log(serve_instance): name = "handler" @@ -122,7 +215,7 @@ def check_log(replica_id: ReplicaID, method_name: str, fail: bool = False): [ name in s, _get_expected_replica_log_content(replica_id) in s, - method_name.upper() in s, + method_name in s, ("ERROR" if fail else "OK") in s, "ms" in s, ("blah blah blah" in s and "RuntimeError" in s) @@ -288,15 +381,14 @@ def __call__(self, req: starlette.requests.Request): # Check the component log expected_log_infos = [ - f"{resp['request_id']} {resp['route']} replica.py", - f"{resp2['request_id']} {resp2['route']} replica.py", + f"{resp['request_id']} -- ", + f"{resp2['request_id']} -- ", ] # Check User log user_log_regexes = [ - f".*{resp['request_id']} {resp['route']}.* user func.*", - f".*{resp2['request_id']} {resp2['route']}.* user log " - "message from class method.*", + f".*{resp['request_id']} -- user func.*", + f".*{resp2['request_id']} -- user log.*" "message from class method.*", ] def check_log(): @@ -343,12 +435,9 @@ def check_log(): f'"component_name": "replica".*' ) else: - user_method_log_regex = ( - f".*{resp['request_id']} {resp['route']}.* user func.*" - ) + user_method_log_regex = f".*{resp['request_id']} -- user func.*" user_class_method_log_regex = ( - f".*{resp2['request_id']} {resp2['route']}.* " - "user log message from class method.*" + f".*{resp2['request_id']} -- .*" "user log message from class method.*" ) def check_log_file(log_file: str, expected_regex: list): diff --git a/python/ray/serve/tests/unit/test_proxy_request_response.py b/python/ray/serve/tests/unit/test_proxy_request_response.py index 70e2fdb2d581..7b2c4388b657 100644 --- a/python/ray/serve/tests/unit/test_proxy_request_response.py +++ b/python/ray/serve/tests/unit/test_proxy_request_response.py @@ -57,7 +57,7 @@ def test_method(self): """ proxy_request = self.create_asgi_proxy_request(scope={}) assert isinstance(proxy_request, ProxyRequest) - assert proxy_request.method == "WEBSOCKET" + assert proxy_request.method == "WS" method = "fake-method" proxy_request = self.create_asgi_proxy_request(scope={"method": method}) From a2a68b80d39ebe64d526ee1172e0be2a311273a6 Mon Sep 17 00:00:00 2001 From: Kai-Hsun Chen Date: Thu, 21 Nov 2024 11:35:53 -0800 Subject: [PATCH 047/107] [core][autoscaler] Make the head group name consistent with the head Pod's `ray.io/group` label. (#48840) ## Why are these changes needed? The value of the `ray.io/group` label in the head Pod is `headgroup`, whereas `KUBERAY_TYPE_HEAD` is `head-group`. 
image ## Related issue number ## Checks - [ ] I've signed off every commit(by using the -s flag, i.e., `git commit -s`) in this PR. - [ ] I've run `scripts/format.sh` to lint the changes in this PR. - [ ] I've included any doc changes needed for https://docs.ray.io/en/master/. - [ ] I've added any new APIs to the API Reference. For example, if I added a method in Tune, I've added it in `doc/source/tune/api/` under the corresponding `.rst` file. - [ ] I've made sure the tests are passing. Note that there might be a few flaky tests, see the recent failures at https://flakey-tests.ray.io/ - Testing Strategy - [ ] Unit tests - [ ] Release tests - [ ] This PR is not tested :( Signed-off-by: kaihsun --- .../kubernetes/configs/ray-cluster.gpu.yaml | 2 +- .../_private/kuberay/autoscaling_config.py | 2 +- .../autoscaler/_private/kuberay/node_provider.py | 14 ++++++-------- .../autoscaler/kuberay/ray-cluster.complete.yaml | 2 +- .../cloud_providers/kuberay/cloud_provider.py | 12 ++++-------- .../ray/autoscaler/v2/tests/test_node_provider.py | 9 +++------ .../ray/tests/kuberay/test_autoscaling_config.py | 4 ++-- .../tests/kuberay/test_kuberay_node_provider.py | 6 +++--- .../ray_v1alpha1_rayservice_template.yaml | 2 +- 9 files changed, 22 insertions(+), 31 deletions(-) diff --git a/doc/source/cluster/kubernetes/configs/ray-cluster.gpu.yaml b/doc/source/cluster/kubernetes/configs/ray-cluster.gpu.yaml index 5a2d01839e9b..b42a7cf10a06 100644 --- a/doc/source/cluster/kubernetes/configs/ray-cluster.gpu.yaml +++ b/doc/source/cluster/kubernetes/configs/ray-cluster.gpu.yaml @@ -12,7 +12,7 @@ spec: ######################headGroupSpec################################# # head group template and specs, (perhaps 'group' is not needed in the name) headGroupSpec: - # logical group name, for this called head-group, also can be functional + # logical group name, for this called headgroup, also can be functional # pod type head or worker # rayNodeType: head # Not needed since it is under the headgroup # the following params are used to complete the ray start: ray start --head --block ... diff --git a/python/ray/autoscaler/_private/kuberay/autoscaling_config.py b/python/ray/autoscaler/_private/kuberay/autoscaling_config.py index d74bb253560a..96aec03744b2 100644 --- a/python/ray/autoscaler/_private/kuberay/autoscaling_config.py +++ b/python/ray/autoscaler/_private/kuberay/autoscaling_config.py @@ -30,7 +30,7 @@ # Logical group name for the KubeRay head group. # Used as the name of the "head node type" by the autoscaler. -_HEAD_GROUP_NAME = "head-group" +_HEAD_GROUP_NAME = "headgroup" class AutoscalingConfigProducer: diff --git a/python/ray/autoscaler/_private/kuberay/node_provider.py b/python/ray/autoscaler/_private/kuberay/node_provider.py index 060e4794867d..5378347ba78a 100644 --- a/python/ray/autoscaler/_private/kuberay/node_provider.py +++ b/python/ray/autoscaler/_private/kuberay/node_provider.py @@ -38,8 +38,6 @@ # Kind label value indicating the pod is the worker. KUBERAY_KIND_WORKER = "worker" -# Group name (node type) to use for the head. -KUBERAY_TYPE_HEAD = "head-group" # KubeRay CRD version KUBERAY_CRD_VER = os.getenv("KUBERAY_CRD_VER", "v1alpha1") @@ -104,12 +102,12 @@ def kind_and_type(pod: Dict[str, Any]) -> Tuple[NodeKind, NodeType]: from a Ray pod's labels. 
""" labels = pod["metadata"]["labels"] - if labels[KUBERAY_LABEL_KEY_KIND] == KUBERAY_KIND_HEAD: - kind = NODE_KIND_HEAD - type = KUBERAY_TYPE_HEAD - else: - kind = NODE_KIND_WORKER - type = labels[KUBERAY_LABEL_KEY_TYPE] + kind = ( + NODE_KIND_HEAD + if labels[KUBERAY_LABEL_KEY_KIND] == KUBERAY_KIND_HEAD + else NODE_KIND_WORKER + ) + type = labels[KUBERAY_LABEL_KEY_TYPE] return kind, type diff --git a/python/ray/autoscaler/kuberay/ray-cluster.complete.yaml b/python/ray/autoscaler/kuberay/ray-cluster.complete.yaml index f9e1a6cef375..f51ac9d4c242 100644 --- a/python/ray/autoscaler/kuberay/ray-cluster.complete.yaml +++ b/python/ray/autoscaler/kuberay/ray-cluster.complete.yaml @@ -18,7 +18,7 @@ spec: serviceType: ClusterIP # the pod replicas in this group typed head (assuming there could be more than 1 in the future) replicas: 1 - # logical group name, for this called head-group, also can be functional + # logical group name, for this called headgroup, also can be functional # pod type head or worker # rayNodeType: head # Not needed since it is under the headgroup # the following params are used to complete the ray start: ray start --head --block --port=6379 ... diff --git a/python/ray/autoscaler/v2/instance_manager/cloud_providers/kuberay/cloud_provider.py b/python/ray/autoscaler/v2/instance_manager/cloud_providers/kuberay/cloud_provider.py index c685be58cf60..19e236cb4d19 100644 --- a/python/ray/autoscaler/v2/instance_manager/cloud_providers/kuberay/cloud_provider.py +++ b/python/ray/autoscaler/v2/instance_manager/cloud_providers/kuberay/cloud_provider.py @@ -55,13 +55,12 @@ def __init__( """ Args: cluster_name: The name of the RayCluster resource. - namespace: The namespace of the RayCluster resource. + provider_config: The namespace of the RayCluster. k8s_api_client: The client to the Kubernetes API server. This could be used to mock the Kubernetes API server for testing. """ self._cluster_name = cluster_name self._namespace = provider_config["namespace"] - self._head_node_type = provider_config["head_node_type"] self._k8s_api_client = k8s_api_client or KubernetesHttpApiClient( namespace=self._namespace @@ -478,26 +477,23 @@ def _fetch_instances(self) -> Dict[CloudInstanceId, CloudInstance]: # Ignore pods marked for termination. continue pod_name = pod["metadata"]["name"] - cloud_instance = self._cloud_instance_from_pod(pod, self._head_node_type) + cloud_instance = self._cloud_instance_from_pod(pod) if cloud_instance: cloud_instances[pod_name] = cloud_instance return cloud_instances @staticmethod - def _cloud_instance_from_pod( - pod: Dict[str, Any], head_node_type: NodeType - ) -> Optional[CloudInstance]: + def _cloud_instance_from_pod(pod: Dict[str, Any]) -> Optional[CloudInstance]: """ Convert a pod to a Ray CloudInstance. Args: pod: The pod resource dict. - head_node_type: The node type of the head node. 
""" labels = pod["metadata"]["labels"] if labels[KUBERAY_LABEL_KEY_KIND] == KUBERAY_KIND_HEAD: kind = NodeKind.HEAD - type = head_node_type + type = labels[KUBERAY_LABEL_KEY_TYPE] elif labels[KUBERAY_LABEL_KEY_KIND] == KUBERAY_KIND_WORKER: kind = NodeKind.WORKER type = labels[KUBERAY_LABEL_KEY_TYPE] diff --git a/python/ray/autoscaler/v2/tests/test_node_provider.py b/python/ray/autoscaler/v2/tests/test_node_provider.py index 5141891c0a36..02d84e376b8d 100644 --- a/python/ray/autoscaler/v2/tests/test_node_provider.py +++ b/python/ray/autoscaler/v2/tests/test_node_provider.py @@ -19,10 +19,7 @@ AUTOSCALER_MAX_LAUNCH_BATCH, ) from ray.autoscaler._private.fake_multi_node.node_provider import FakeMultiNodeProvider -from ray.autoscaler._private.kuberay.node_provider import ( - KUBERAY_TYPE_HEAD, - IKubernetesHttpApiClient, -) +from ray.autoscaler._private.kuberay.node_provider import IKubernetesHttpApiClient from ray.autoscaler.v2.instance_manager.cloud_providers.kuberay.cloud_provider import ( KubeRayProvider, ) @@ -372,7 +369,7 @@ def setUp(self): cluster_name="test", provider_config={ "namespace": "default", - "head_node_type": KUBERAY_TYPE_HEAD, + "head_node_type": "headgroup", }, k8s_api_client=self.mock_client, ) @@ -389,7 +386,7 @@ def test_get_nodes(self): "raycluster-autoscaler-head-8zsc8": CloudInstance( cloud_instance_id="raycluster-autoscaler-head-8zsc8", node_kind=NodeKind.HEAD, - node_type="head-group", + node_type="headgroup", is_running=True, ), # up-to-date status because the Ray container is in running status "raycluster-autoscaler-worker-small-group-dkz2r": CloudInstance( diff --git a/python/ray/tests/kuberay/test_autoscaling_config.py b/python/ray/tests/kuberay/test_autoscaling_config.py index 10fea2fb3b4e..6ae9ab7fc4fe 100644 --- a/python/ray/tests/kuberay/test_autoscaling_config.py +++ b/python/ray/tests/kuberay/test_autoscaling_config.py @@ -69,7 +69,7 @@ def _get_basic_autoscaling_config() -> dict: "type": "kuberay", }, "available_node_types": { - "head-group": { + "headgroup": { "max_workers": 0, "min_workers": 0, "node_config": {}, @@ -125,7 +125,7 @@ def _get_basic_autoscaling_config() -> dict: "cluster_synced_files": [], "file_mounts": {}, "file_mounts_sync_continuously": False, - "head_node_type": "head-group", + "head_node_type": "headgroup", "head_setup_commands": [], "head_start_ray_commands": [], "idle_timeout_minutes": 1.0, diff --git a/python/ray/tests/kuberay/test_kuberay_node_provider.py b/python/ray/tests/kuberay/test_kuberay_node_provider.py index 4d2f94c1d086..3d5ea52009cb 100644 --- a/python/ray/tests/kuberay/test_kuberay_node_provider.py +++ b/python/ray/tests/kuberay/test_kuberay_node_provider.py @@ -126,7 +126,7 @@ def test_create_node_cap_at_max( { "raycluster-autoscaler-head-8zsc8": NodeData( kind="head", - type="head-group", + type="headgroup", replica_index=None, ip="10.4.2.6", status="up-to-date", @@ -149,7 +149,7 @@ def test_create_node_cap_at_max( { "raycluster-autoscaler-head-8zsc8": NodeData( kind="head", - type="head-group", + type="headgroup", replica_index=None, ip="10.4.2.6", status="up-to-date", @@ -217,7 +217,7 @@ def mock_get(node_provider, path): { "raycluster-autoscaler-head-8zsc8": NodeData( kind="head", - type="head-group", + type="headgroup", replica_index=None, ip="10.4.2.6", status="up-to-date", diff --git a/release/k8s_tests/ray_v1alpha1_rayservice_template.yaml b/release/k8s_tests/ray_v1alpha1_rayservice_template.yaml index c47e9572ceb1..7bed4fc1066c 100644 --- a/release/k8s_tests/ray_v1alpha1_rayservice_template.yaml +++ 
b/release/k8s_tests/ray_v1alpha1_rayservice_template.yaml @@ -126,7 +126,7 @@ spec: serviceType: ClusterIP # the pod replicas in this group typed head (assuming there could be more than 1 in the future) replicas: 1 - # logical group name, for this called head-group, also can be functional + # logical group name, for this called headgroup, also can be functional # pod type head or worker # rayNodeType: head # Not needed since it is under the headgroup # the following params are used to complete the ray start: ray start --head --block --redis-port=6379 ... From bc8ae37e5f33491d368ebd09d02cee38a9d802bd Mon Sep 17 00:00:00 2001 From: Saihajpreet Singh Date: Thu, 21 Nov 2024 20:09:15 +0000 Subject: [PATCH 048/107] docs: fine tune llama with trainium (#48768) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduce a new Ray Train example for AWS Trainium. ![CleanShot 2024-11-16 at 12 48 57@2x](https://github.com/user-attachments/assets/8b7d12d8-846f-497f-ba25-fd8a613f9007) Marked it as a community example because it is a collaboration with the AWS Neuron team. ![CleanShot 2024-11-16 at 12 48 37@2x](https://github.com/user-attachments/assets/589d8ff3-fcb6-4b90-865d-006bcb4815a3) Docs screenshots Screenshot 2024-11-20 at 11 19 39 AM Screenshot 2024-11-20 at 11 19 47 AM Screenshot 2024-11-20 at 11 19 54 AM --------- Signed-off-by: Saihajpreet Singh Co-authored-by: Chris Zhang --- doc/source/custom_directives.py | 1 + doc/source/train/examples.yml | 12 +- .../train/examples/aws-trainium/llama3.rst | 103 ++++++++++++++++++ 3 files changed, 115 insertions(+), 1 deletion(-) create mode 100644 doc/source/train/examples/aws-trainium/llama3.rst diff --git a/doc/source/custom_directives.py b/doc/source/custom_directives.py index 6e81d401c833..2683160332d7 100644 --- a/doc/source/custom_directives.py +++ b/doc/source/custom_directives.py @@ -481,6 +481,7 @@ def key(cls: type) -> str: class Framework(ExampleEnum): """Framework type for example metadata.""" + AWSNEURON = "AWS Neuron" PYTORCH = "PyTorch" LIGHTNING = "Lightning" TRANSFORMERS = "Transformers" diff --git a/doc/source/train/examples.yml b/doc/source/train/examples.yml index 8b4f1c7cf8f2..0e7f6725e100 100644 --- a/doc/source/train/examples.yml +++ b/doc/source/train/examples.yml @@ -119,7 +119,17 @@ examples: contributor: community link: examples/intel_gaudi/llama_pretrain - - title: Fine-tune a Llama-2 text generation models with DeepSpeed and Hugging Face Accelerate + - title: Fine-tune Llama3.1 with AWS Trainium + frameworks: + - pytorch + - aws neuron + skill_level: advanced + use_cases: + - natural language processing + - large language models + contributor: community + link: examples/aws-trainium/llama3 + - title: Fine-tune a Llama-2 text generation model with DeepSpeed and Hugging Face Accelerate frameworks: - accelerate - deepspeed diff --git a/doc/source/train/examples/aws-trainium/llama3.rst b/doc/source/train/examples/aws-trainium/llama3.rst new file mode 100644 index 000000000000..ee7b89faf39e --- /dev/null +++ b/doc/source/train/examples/aws-trainium/llama3.rst @@ -0,0 +1,103 @@ +:orphan: + +Distributed fine-tuning of Llama 3.1 8B on AWS Trainium with Ray and PyTorch Lightning +====================================================================================== + + +This example demonstrates how to fine-tune the `Llama 3.1 8B `__ model on `AWS +Trainium `__ instances using Ray Train, PyTorch Lightning, and the AWS Neuron SDK.
+ +AWS Trainium is the machine learning (ML) chip that AWS built for deep +learning (DL) training of 100B+ parameter models. `AWS Neuron +SDK `__ helps +developers train models on Trainium accelerators. + +Prepare the environment +----------------------- + +See `Setup EKS cluster and tools `__ for setting up an Amazon EKS cluster leveraging AWS Trainium instances. + +Create a Docker image +--------------------- +When the EKS cluster is ready, create an Amazon ECR repository for building and uploading the Docker image containing artifacts for fine-tuning a Llama3.1 8B model: + +1. Clone the repo. + +:: + + git clone https://github.com/aws-neuron/aws-neuron-eks-samples.git + +2. Go to the ``llama3.1_8B_finetune_ray_ptl_neuron`` directory. + +:: + + cd aws-neuron-eks-samples/llama3.1_8B_finetune_ray_ptl_neuron + +3. Make the build script executable and run it. + +:: + + chmod +x 0-kuberay-trn1-llama3-finetune-build-image.sh + ./0-kuberay-trn1-llama3-finetune-build-image.sh + +4. Enter the region your cluster is running in, for example: us-east-2. + +5. Verify in the AWS console that the Amazon ECR service has the newly + created ``kuberay_trn1_llama3.1_pytorch2`` repository. + +6. Update the ECR image ARN in the manifest file used for creating the Ray cluster. + +Replace the ``<AWS_ACCOUNT_ID>`` and ``<REGION>`` placeholders with actual values in the ``1-llama3-finetune-trn1-create-raycluster.yaml`` file using the commands below to reflect the ECR image ARN created above: + +:: + + export AWS_ACCOUNT_ID= # for ex: 111222333444 + export REGION= # for ex: us-east-2 + sed -i "s/<AWS_ACCOUNT_ID>/$AWS_ACCOUNT_ID/g" 1-llama3-finetune-trn1-create-raycluster.yaml + sed -i "s/<REGION>/$REGION/g" 1-llama3-finetune-trn1-create-raycluster.yaml + +Configure the Ray cluster +------------------------- + +The ``llama3.1_8B_finetune_ray_ptl_neuron`` directory in the AWS Neuron samples repository simplifies the +Ray configuration. KubeRay provides a manifest that you can apply +to the cluster to set up the head and worker pods. + +Run the following command to set up the Ray cluster: + +:: + + kubectl apply -f 1-llama3-finetune-trn1-create-raycluster.yaml + + +Access the Ray dashboard +------------------------ +Forward the dashboard port from the cluster so you can +then view it on `http://localhost:8265 `__. +Run the port-forward in the background with the following command: + +:: + + kubectl port-forward service/kuberay-trn1-head-svc 8265:8265 & + +Launch Ray jobs +--------------- + +The Ray cluster is now ready to handle workloads. Initiate the data preparation and fine-tuning Ray jobs: + +1. Launch the Ray job for downloading the dolly-15k dataset and the Llama3.1 8B model artifacts: + +:: + + kubectl apply -f 2-llama3-finetune-trn1-rayjob-create-data.yaml + +2. When the job has executed successfully, run the following fine-tuning job: + +:: + + kubectl apply -f 3-llama3-finetune-trn1-rayjob-submit-finetuning-job.yaml + +3. Monitor the jobs via the Ray Dashboard. + + +For detailed information on each of the steps above, see the `AWS documentation link `__. \ No newline at end of file From 8a0f8109161382a94ee5d733bc392a521d9f4cba Mon Sep 17 00:00:00 2001 From: zhilong <121425509+Bye-legumes@users.noreply.github.com> Date: Thu, 21 Nov 2024 16:41:55 -0500 Subject: [PATCH 049/107] [Data] Fix pandas memory calculation. (#46939) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Why are these changes needed?
Closes https://github.com/ray-project/ray/issues/46785. Currently, the memory usage computed for pandas blocks is inaccurate when columns hold Python objects, so we now calculate the size of such columns by traversing nested objects. ## Related issue number closes https://github.com/ray-project/ray/issues/46785, closes https://github.com/ray-project/ray/issues/48506 ## Checks - [x] I've signed off every commit(by using the -s flag, i.e., `git commit -s`) in this PR. - [x] I've run `scripts/format.sh` to lint the changes in this PR. - [ ] I've included any doc changes needed for https://docs.ray.io/en/master/. - [ ] I've added any new APIs to the API Reference. For example, if I added a method in Tune, I've added it in `doc/source/tune/api/` under the corresponding `.rst` file. - [ ] I've made sure the tests are passing. Note that there might be a few flaky tests, see the recent failures at https://flakey-tests.ray.io/ - Testing Strategy - [x] Unit tests - [ ] Release tests - [ ] This PR is not tested :( --------- Signed-off-by: zhilong Signed-off-by: Richard Liaw Co-authored-by: Richard Liaw --- python/ray/data/_internal/pandas_block.py | 70 ++++++- python/ray/data/tests/test_pandas_block.py | 229 ++++++++++++++++++++- 2 files changed, 296 insertions(+), 3 deletions(-) diff --git a/python/ray/data/_internal/pandas_block.py b/python/ray/data/_internal/pandas_block.py index 119469b46c1b..ff1686c1b355 100644 --- a/python/ray/data/_internal/pandas_block.py +++ b/python/ray/data/_internal/pandas_block.py @@ -1,5 +1,6 @@ import collections import heapq +import sys from typing import ( TYPE_CHECKING, Any, @@ -294,7 +295,74 @@ def num_rows(self) -> int: return self._table.shape[0] def size_bytes(self) -> int: - return int(self._table.memory_usage(index=True, deep=True).sum()) + from pandas.api.types import is_object_dtype + + from ray.data.extensions import TensorArrayElement, TensorDtype + + pd = lazy_import_pandas() + + def get_deep_size(obj): + """Calculates the memory size of objects, + including nested objects, using an iterative approach.""" + seen = set() + total_size = 0 + objects = collections.deque([obj]) + while objects: + current = objects.pop() + + # Skip interning-eligible immutable objects + if isinstance(current, (str, bytes, int, float)): + size = sys.getsizeof(current) + total_size += size + continue + + # Check if the object has been seen before + if id(current) in seen: + continue + seen.add(id(current)) + + try: + size = sys.getsizeof(current) + except TypeError: + size = 0 + total_size += size + + # Handle specific cases + if isinstance(current, np.ndarray): + total_size += current.nbytes - size # Avoid double counting + elif isinstance(current, pd.DataFrame): + total_size += ( + current.memory_usage(index=True, deep=True).sum() - size + ) + elif isinstance(current, (list, tuple, set)): + objects.extend(current) + elif isinstance(current, dict): + objects.extend(current.keys()) + objects.extend(current.values()) + elif isinstance(current, TensorArrayElement): + objects.extend(current.to_numpy()) + return total_size + + # Get initial memory usage including deep introspection + memory_usage = self._table.memory_usage(index=True, deep=True) + + # TensorDtype is ray.air.util.tensor_extensions.pandas.TensorDtype + object_need_check = (TensorDtype,) + # Handle object columns separately + for column in self._table.columns: + # Check pandas object dtype and the extension dtype + if is_object_dtype(self._table[column].dtype) or isinstance( + self._table[column].dtype, object_need_check + ): + column_memory = 0 + for
element in self._table[column]: + column_memory += get_deep_size(element) + memory_usage[column] = column_memory + + # Sum up total memory usage + total_memory_usage = memory_usage.sum() + + return int(total_memory_usage) def _zip(self, acc: BlockAccessor) -> "pandas.DataFrame": r = self.to_pandas().copy(deep=False) diff --git a/python/ray/data/tests/test_pandas_block.py b/python/ray/data/tests/test_pandas_block.py index 4585d0e2a133..5e83abbf04d7 100644 --- a/python/ray/data/tests/test_pandas_block.py +++ b/python/ray/data/tests/test_pandas_block.py @@ -1,4 +1,10 @@ +import pickle +import random +import sys + +import numpy as np import pandas as pd +import pyarrow as pa import pytest import ray @@ -48,7 +54,226 @@ def fn2(batch): assert isinstance(block, pd.DataFrame) -if __name__ == "__main__": - import sys +class TestSizeBytes: + def test_small(ray_start_regular_shared): + animals = ["Flamingo", "Centipede"] + block = pd.DataFrame({"animals": animals}) + + block_accessor = PandasBlockAccessor.for_block(block) + bytes_size = block_accessor.size_bytes() + + # check that memory usage is within 10% of the size_bytes + # For strings, Pandas seems to be fairly accurate, so let's use that. + memory_usage = block.memory_usage(index=True, deep=True).sum() + assert bytes_size == pytest.approx(memory_usage, rel=0.1), ( + bytes_size, + memory_usage, + ) + + def test_large_str(ray_start_regular_shared): + animals = [ + random.choice(["alligator", "crocodile", "centipede", "flamingo"]) + for i in range(100_000) + ] + block = pd.DataFrame({"animals": animals}) + block["animals"] = block["animals"].astype("string") + + block_accessor = PandasBlockAccessor.for_block(block) + bytes_size = block_accessor.size_bytes() + + memory_usage = block.memory_usage(index=True, deep=True).sum() + assert bytes_size == pytest.approx(memory_usage, rel=0.1), ( + bytes_size, + memory_usage, + ) + + def test_large_str_object(ray_start_regular_shared): + """Note - this test breaks if you refactor/move the list of animals.""" + num = 100_000 + animals = [ + random.choice(["alligator", "crocodile", "centipede", "flamingo"]) + for i in range(num) + ] + block = pd.DataFrame({"animals": animals}) + + block_accessor = PandasBlockAccessor.for_block(block) + bytes_size = block_accessor.size_bytes() + + memory_usage = sum([sys.getsizeof(animal) for animal in animals]) + + assert bytes_size == pytest.approx(memory_usage, rel=0.1), ( + bytes_size, + memory_usage, + ) + + def test_large_floats(ray_start_regular_shared): + animals = [random.random() for i in range(100_000)] + block = pd.DataFrame({"animals": animals}) + + block_accessor = PandasBlockAccessor.for_block(block) + bytes_size = block_accessor.size_bytes() + + memory_usage = pickle.dumps(block).__sizeof__() + # check that memory usage is within 10% of the size_bytes + assert bytes_size == pytest.approx(memory_usage, rel=0.1), ( + bytes_size, + memory_usage, + ) + + def test_bytes_object(ray_start_regular_shared): + def generate_data(batch): + for _ in range(8): + yield {"data": [[b"\x00" * 128 * 1024 * 128]]} + + ds = ( + ray.data.range(1, override_num_blocks=1) + .map_batches(generate_data, batch_size=1) + .map_batches(lambda batch: batch, batch_format="pandas") + ) + + true_value = 128 * 1024 * 128 * 8 + for bundle in ds.iter_internal_ref_bundles(): + size = bundle.size_bytes() + # assert that true_value is within 10% of bundle.size_bytes() + assert size == pytest.approx(true_value, rel=0.1), ( + size, + true_value, + ) + + def test_nested_numpy(ray_start_regular_shared): + 
size = 1024 + rows = 1_000 + data = [ + np.random.randint(size=size, low=0, high=100, dtype=np.int8) + for _ in range(rows) + ] + df = pd.DataFrame({"data": data}) + + block_accessor = PandasBlockAccessor.for_block(df) + block_size = block_accessor.size_bytes() + true_value = rows * size + assert block_size == pytest.approx(true_value, rel=0.1), ( + block_size, + true_value, + ) + + def test_nested_objects(ray_start_regular_shared): + size = 10 + rows = 10_000 + lists = [[random.randint(0, 100) for _ in range(size)] for _ in range(rows)] + data = {"lists": lists} + block = pd.DataFrame(data) + + block_accessor = PandasBlockAccessor.for_block(block) + bytes_size = block_accessor.size_bytes() + # List overhead + 10 integers per list + true_size = rows * ( + sys.getsizeof([random.randint(0, 100) for _ in range(size)]) + size * 28 + ) + + assert bytes_size == pytest.approx(true_size, rel=0.1), ( + bytes_size, + true_size, + ) + + def test_mixed_types(ray_start_regular_shared): + rows = 10_000 + + data = { + "integers": [random.randint(0, 100) for _ in range(rows)], + "floats": [random.random() for _ in range(rows)], + "strings": [ + random.choice(["apple", "banana", "cherry"]) for _ in range(rows) + ], + "object": [b"\x00" * 128 for _ in range(rows)], + } + block = pd.DataFrame(data) + block_accessor = PandasBlockAccessor.for_block(block) + bytes_size = block_accessor.size_bytes() + + # Manually calculate the size + int_size = rows * 8 + float_size = rows * 8 + str_size = sum(sys.getsizeof(string) for string in data["strings"]) + object_size = rows * sys.getsizeof(b"\x00" * 128) + + true_size = int_size + float_size + str_size + object_size + assert bytes_size == pytest.approx(true_size, rel=0.1), (bytes_size, true_size) + + def test_nested_lists_strings(ray_start_regular_shared): + rows = 5_000 + nested_lists = ["a"] * 3 + ["bb"] * 4 + ["ccc"] * 3 + data = { + "nested_lists": [nested_lists for _ in range(rows)], + } + block = pd.DataFrame(data) + block_accessor = PandasBlockAccessor.for_block(block) + bytes_size = block_accessor.size_bytes() + + # Manually calculate the size + list_overhead = sys.getsizeof(block["nested_lists"].iloc[0]) + sum( + [sys.getsizeof(x) for x in nested_lists] + ) + true_size = rows * list_overhead + assert bytes_size == pytest.approx(true_size, rel=0.1), (bytes_size, true_size) + + @pytest.mark.parametrize("size", [10, 1024]) + def test_multi_level_nesting(ray_start_regular_shared, size): + rows = 1_000 + data = { + "complex": [ + {"list": [np.random.rand(size)], "value": {"key": "val"}} + for _ in range(rows) + ], + } + block = pd.DataFrame(data) + block_accessor = PandasBlockAccessor.for_block(block) + bytes_size = block_accessor.size_bytes() + + numpy_size = np.random.rand(size).nbytes + + values = ["list", "value", "key", "val"] + str_size = sum([sys.getsizeof(v) for v in values]) + + list_ref_overhead = sys.getsizeof([np.random.rand(size)]) + + dict_overhead1 = sys.getsizeof({"key": "val"}) + + dict_overhead3 = sys.getsizeof( + {"list": [np.random.rand(size)], "value": {"key": "val"}} + ) + + true_size = ( + numpy_size + str_size + list_ref_overhead + dict_overhead1 + dict_overhead3 + ) * rows + assert bytes_size == pytest.approx(true_size, rel=0.15), ( + bytes_size, + true_size, + ) + + def test_boolean(ray_start_regular_shared): + data = [random.choice([True, False, None]) for _ in range(100_000)] + block = pd.DataFrame({"flags": pd.Series(data, dtype="boolean")}) + block_accessor = PandasBlockAccessor.for_block(block) + bytes_size = 
block_accessor.size_bytes() + + # No object case + true_size = block.memory_usage(index=True, deep=True).sum() + assert bytes_size == pytest.approx(true_size, rel=0.1), (bytes_size, true_size) + + def test_arrow(ray_start_regular_shared): + data = [ + random.choice(["alligator", "crocodile", "flamingo"]) for _ in range(50_000) + ] + arrow_dtype = pd.ArrowDtype(pa.string()) + block = pd.DataFrame({"animals": pd.Series(data, dtype=arrow_dtype)}) + block_accessor = PandasBlockAccessor.for_block(block) + bytes_size = block_accessor.size_bytes() + + true_size = block.memory_usage(index=True, deep=True).sum() + assert bytes_size == pytest.approx(true_size, rel=0.1), (bytes_size, true_size) + + +if __name__ == "__main__": sys.exit(pytest.main(["-v", __file__])) From 9482343f845a484ed541846fe5c26e4082aef837 Mon Sep 17 00:00:00 2001 From: Rui Qiao <161574667+ruisearch42@users.noreply.github.com> Date: Thu, 21 Nov 2024 13:54:34 -0800 Subject: [PATCH 050/107] [core] Fix test_output::test_disable_driver_logs_breakpoint (#48820) Signed-off-by: Rui Qiao --- python/ray/tests/test_output.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/python/ray/tests/test_output.py b/python/ray/tests/test_output.py index 4b84b8285534..505a02e84226 100644 --- a/python/ray/tests/test_output.py +++ b/python/ray/tests/test_output.py @@ -575,26 +575,27 @@ def test_disable_driver_logs_breakpoint(): @ray.remote def f(): while True: - start_time = time.time() - while time.time() - start_time < 1: + start_time = time.monotonic() + while time.monotonic() - start_time < 1: time.sleep(0.1) + print(f"slept {time.monotonic() - start_time} seconds") print("hello there") sys.stdout.flush() def kill(): - start_time = time.time() - while time.time() - start_time < 5: + start_time = time.monotonic() + while time.monotonic() - start_time < 5: time.sleep(0.1) sys.stdout.flush() - start_time = time.time() - while time.time() - start_time < 1: + start_time = time.monotonic() + while time.monotonic() - start_time < 1: time.sleep(0.1) os._exit(0) t = threading.Thread(target=kill) t.start() x = f.remote() -time.sleep(2) # Enough time to print one hello. +time.sleep(3) # Enough time to print one hello. breakpoint() # This should disable worker logs. """ @@ -602,7 +603,7 @@ def kill(): out_str = proc.stdout.read().decode("ascii") num_hello = out_str.count("hello") assert num_hello >= 1, out_str - assert num_hello < 3, out_str + assert num_hello <= 3, out_str assert "Temporarily disabling Ray worker logs" in out_str, out_str # TODO(ekl) nice to test resuming logs too, but it's quite complicated From 8e6d1102746e8635cbc6edcc28dc4d95031c2808 Mon Sep 17 00:00:00 2001 From: Balaji Veeramani Date: Thu, 21 Nov 2024 15:58:41 -0600 Subject: [PATCH 051/107] [Data] Move release test config to separate file (#48852) Storing all of the Data release test configs in a separate file makes it easier to maintain the Data release tests. 
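For reference, here is a minimal usage sketch (not part of this diff) of how callers load the combined collection after this change; it assumes only the `RELEASE_TEST_CONFIG_FILES` constant and the `read_and_validate_release_test_collection` helper shown in the diff below:

```python
# Hypothetical usage sketch: load and validate release tests from both
# release_tests.yaml and release_data_tests.yaml.
from ray_release.config import (
    RELEASE_TEST_CONFIG_FILES,
    read_and_validate_release_test_collection,
)

tests = read_and_validate_release_test_collection(RELEASE_TEST_CONFIG_FILES)
print(f"Loaded {len(tests)} release tests from {len(RELEASE_TEST_CONFIG_FILES)} files")
```

This mirrors the fallback that `build_pipeline.py`, `ray_bisect.py`, and `run_release_test.py` use below when no explicit test collection file is passed.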
Signed-off-by: Balaji Veeramani --- doc/source/templates/README.md | 2 +- release/ray_release/config.py | 5 + release/ray_release/scripts/build_pipeline.py | 7 +- .../ray_release/scripts/get_test_summary.py | 7 +- release/ray_release/scripts/ray_bisect.py | 7 +- .../ray_release/scripts/run_release_test.py | 3 +- release/ray_release/tests/test_config.py | 1 + release/release_data_tests.yaml | 910 ++++++++++++++++++ release/release_tests.yaml | 910 ------------------ 9 files changed, 934 insertions(+), 918 deletions(-) create mode 100644 release/release_data_tests.yaml diff --git a/doc/source/templates/README.md b/doc/source/templates/README.md index 912d3174c75f..306b31bc3dc8 100644 --- a/doc/source/templates/README.md +++ b/doc/source/templates/README.md @@ -32,7 +32,7 @@ To add a template: Your template does not need to be a Jupyter notebook. It can also be presented as a Python script with `README` instructions of how to run. -2. Add a release test for the template in `release/release_tests.yaml` (for both AWS and GCE). +2. Add a release test for the template in `release/release_tests.yaml` (for both AWS and GCE). For Data tests, use `release/release_data_tests.yaml` instead. See the section on workspace templates for an example. Note that the cluster env and compute config are a little different for release tests. Use the files in the diff --git a/release/ray_release/config.py b/release/ray_release/config.py index 93b69e56601c..884292545f1d 100644 --- a/release/ray_release/config.py +++ b/release/ray_release/config.py @@ -38,6 +38,11 @@ RELEASE_TEST_SCHEMA_FILE = bazel_runfile("release/ray_release/schema.json") +RELEASE_TEST_CONFIG_FILES = [ + "release/release_tests.yaml", + "release/release_data_tests.yaml", +] + def read_and_validate_release_test_collection( config_files: List[str], diff --git a/release/ray_release/scripts/build_pipeline.py b/release/ray_release/scripts/build_pipeline.py index 735ddb2e8cdf..29e448d8f4fd 100644 --- a/release/ray_release/scripts/build_pipeline.py +++ b/release/ray_release/scripts/build_pipeline.py @@ -14,7 +14,10 @@ build_anyscale_base_byod_images, build_anyscale_custom_byod_image, ) -from ray_release.config import read_and_validate_release_test_collection +from ray_release.config import ( + read_and_validate_release_test_collection, + RELEASE_TEST_CONFIG_FILES, +) from ray_release.configs.global_config import init_global_config from ray_release.exception import ReleaseTestCLIError, ReleaseTestConfigError from ray_release.logger import logger @@ -92,7 +95,7 @@ def main( try: test_collection = read_and_validate_release_test_collection( - test_collection_file or ["release/release_tests.yaml"] + test_collection_file or RELEASE_TEST_CONFIG_FILES ) except ReleaseTestConfigError as e: raise ReleaseTestConfigError( diff --git a/release/ray_release/scripts/get_test_summary.py b/release/ray_release/scripts/get_test_summary.py index b4793a860d06..ccb5066713b2 100644 --- a/release/ray_release/scripts/get_test_summary.py +++ b/release/ray_release/scripts/get_test_summary.py @@ -5,7 +5,10 @@ import click from ray_release.buildkite.concurrency import get_test_resources -from ray_release.config import read_and_validate_release_test_collection +from ray_release.config import ( + read_and_validate_release_test_collection, + RELEASE_TEST_CONFIG_FILES, +) @click.command() @@ -26,7 +29,7 @@ def main(test_collection_file: Optional[str] = None, output: Optional[str] = Non output = output or os.path.join(os.path.dirname(__file__), "test_summary.csv") tests = 
read_and_validate_release_test_collection( - test_collection_file or ["release/release_tests.yaml"] + test_collection_file or RELEASE_TEST_CONFIG_FILES ) with open(output, "w") as f: diff --git a/release/ray_release/scripts/ray_bisect.py b/release/ray_release/scripts/ray_bisect.py index 46d3c521bb93..15ebd4c9d396 100644 --- a/release/ray_release/scripts/ray_bisect.py +++ b/release/ray_release/scripts/ray_bisect.py @@ -14,7 +14,10 @@ build_anyscale_base_byod_images, build_anyscale_custom_byod_image, ) -from ray_release.config import read_and_validate_release_test_collection +from ray_release.config import ( + read_and_validate_release_test_collection, + RELEASE_TEST_CONFIG_FILES, +) from ray_release.configs.global_config import init_global_config from ray_release.test import Test from ray_release.test_automation.release_state_machine import ReleaseTestStateMachine @@ -241,7 +244,7 @@ def _obtain_test_result( def _get_test(test_name: str, test_collection_file: Tuple[str]) -> Test: test_collection = read_and_validate_release_test_collection( - test_collection_file or ["release/release_tests.yaml"], + test_collection_file or RELEASE_TEST_CONFIG_FILES, ) return [test for test in test_collection if test["name"] == test_name][0] diff --git a/release/ray_release/scripts/run_release_test.py b/release/ray_release/scripts/run_release_test.py index 13830139554a..e03912b9f681 100644 --- a/release/ray_release/scripts/run_release_test.py +++ b/release/ray_release/scripts/run_release_test.py @@ -9,6 +9,7 @@ as_smoke_test, find_test, read_and_validate_release_test_collection, + RELEASE_TEST_CONFIG_FILES, ) from ray_release.configs.global_config import init_global_config from ray_release.env import DEFAULT_ENVIRONMENT, load_environment, populate_os_env @@ -114,7 +115,7 @@ def main( ) init_global_config(global_config_file) test_collection = read_and_validate_release_test_collection( - test_collection_file or ["release/release_tests.yaml"], + test_collection_file or RELEASE_TEST_CONFIG_FILES, test_definition_root, ) test = find_test(test_collection, test_name) diff --git a/release/ray_release/tests/test_config.py b/release/ray_release/tests/test_config.py index f922d6512d99..9b1f5adbb929 100644 --- a/release/ray_release/tests/test_config.py +++ b/release/ray_release/tests/test_config.py @@ -14,6 +14,7 @@ _TEST_COLLECTION_FILES = [ "release/release_tests.yaml", + "release/release_data_tests.yaml", "release/ray_release/tests/test_collection_data.yaml", ] diff --git a/release/release_data_tests.yaml b/release/release_data_tests.yaml new file mode 100644 index 000000000000..9b3c9d3bd7ac --- /dev/null +++ b/release/release_data_tests.yaml @@ -0,0 +1,910 @@ +############### +# Dataset tests +############### + +- name: parquet_metadata_resolution + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + + cluster: + byod: + type: gpu + cluster_compute: single_node_benchmark_compute.yaml + + run: + # Expect the test to finish around 40 seconds. 
+ timeout: 100 + script: python parquet_metadata_resolution.py --num-files 915 --cloud aws + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: single_node_benchmark_compute_gce.yaml + run: + script: python parquet_metadata_resolution.py --num-files 915 --cloud gcp + +- name: stable_diffusion_benchmark + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + + cluster: + byod: + type: gpu + post_build_script: byod_stable_diffusion.sh + cluster_compute: stable_diffusion_benchmark_compute.yaml + + run: + timeout: 1800 + script: python stable_diffusion_benchmark.py + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: stable_diffusion_benchmark_compute_gce.yaml + +- name: streaming_data_ingest_benchmark_1tb + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + + cluster: + byod: + type: gpu + cluster_compute: data_ingest_benchmark_compute.yaml + + run: + timeout: 300 + script: python data_ingest_benchmark.py --dataset-size-gb=1000 --num-workers=20 --streaming + wait_for_nodes: + num_nodes: 20 + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: data_ingest_benchmark_compute_gce.yaml + +- name: streaming_data_ingest_benchmark_100gb_gpu + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + + cluster: + byod: + type: gpu + cluster_compute: data_ingest_benchmark_compute_gpu.yaml + + run: + timeout: 300 + script: python data_ingest_benchmark.py --dataset-size-gb=100 --num-workers=4 --streaming --use-gpu + wait_for_nodes: + num_nodes: 3 + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: data_ingest_benchmark_compute_gpu_gce.yaml + +# This test case will early stop the data ingestion iteration on the GPU actors. +# This is a common usage in PyTorch Lightning +# (https://lightning.ai/docs/pytorch/stable/common/trainer.html#limit-train-batches). +# There was a bug in Ray Data that caused GPU memory leak (see #3.919). +# We add this test case to cover this scenario.
+- name: streaming_data_ingest_benchmark_100gb_gpu_early_stop + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + + cluster: + byod: + type: gpu + cluster_compute: data_ingest_benchmark_compute_gpu.yaml + + run: + timeout: 300 + script: python data_ingest_benchmark.py --dataset-size-gb=100 --num-workers=4 --streaming --use-gpu --early-stop + wait_for_nodes: + num_nodes: 3 + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: data_ingest_benchmark_compute_gpu_gce.yaml + +- name: aggregate_benchmark + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + + cluster: + byod: + type: gpu + cluster_compute: single_node_benchmark_compute.yaml + + run: + timeout: 1800 + script: python aggregate_benchmark.py + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: single_node_benchmark_compute_gce.yaml + +- name: read_parquet_benchmark_single_node + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + + cluster: + byod: + type: gpu + post_build_script: byod_install_mosaicml.sh + cluster_compute: single_node_benchmark_compute.yaml + + run: + # Expect the benchmark to finish in 400 seconds. + timeout: 400 + script: python read_parquet_benchmark.py + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: single_node_benchmark_compute_gce.yaml + +- name: read_images_benchmark_single_node + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + + cluster: + byod: + type: gpu + post_build_script: byod_install_mosaicml.sh + cluster_compute: single_node_benchmark_compute.yaml + + run: + timeout: 1800 + script: python read_images_benchmark.py --single-node + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: single_node_benchmark_compute_gce.yaml + +# TODO: Re-enable this test once we fix https://github.com/ray-project/ray/issues/40686. 
+# - name: read_images_benchmark_multi_node +# group: data-tests +# working_dir: nightly_tests/dataset + +# frequency: nightly-3x +# team: data + +# cluster: +# byod: +# type: gpu +# cluster_compute: multi_node_read_images_benchmark_compute.yaml + +# run: +# timeout: 28800 +# script: python read_images_benchmark.py --multi-node + +# variations: +# - __suffix__: aws +# - __suffix__: gce +# env: gce +# frequency: manual +# cluster: +# cluster_compute: multi_node_read_images_benchmark_compute_gce.yaml + +- name: read_images_comparison_microbenchmark_single_node + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + cluster: + byod: + type: gpu + post_build_script: byod_install_mosaicml.sh + cluster_compute: single_worker_node_0_head_node_benchmark_compute.yaml + + run: + timeout: 1800 + script: bash run_image_loader_microbenchmark.sh + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: single_node_benchmark_compute_gce.yaml + +- name: read_images_train_4_gpu + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + cluster: + byod: + type: gpu + post_build_script: byod_install_mosaicml.sh + cluster_compute: multi_node_train_4_workers.yaml + + run: + timeout: 18000 + script: python multi_node_train_benchmark.py --num-workers 4 --file-type image --use-gpu --num-epochs 2 + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: ../../air_tests/air_benchmarks/compute_gpu_2x2_gce.yaml + +- name: read_images_train_4_gpu_worker_chaos + group: data-tests + working_dir: nightly_tests + + frequency: nightly + team: data + cluster: + byod: + type: gpu + post_build_script: byod_install_mosaicml.sh + cluster_compute: dataset/multi_node_train_4_workers.yaml + + run: + timeout: 18000 + prepare: python setup_chaos.py --kill-workers --kill-interval 100 --max-to-kill 3 --task-names "ReadImage->Map(wnid_to_index)->Map(crop_and_flip_image)" + script: python dataset/multi_node_train_benchmark.py --num-workers 4 --file-type image --use-gpu --num-epochs 1 + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: ../air_tests/air_benchmarks/compute_gpu_2x2_gce.yaml + +- name: read_images_train_4_gpu_node_chaos + group: data-tests + working_dir: nightly_tests + + frequency: nightly + team: data + cluster: + byod: + type: gpu + post_build_script: byod_install_mosaicml.sh + cluster_compute: dataset/multi_node_train_4_workers.yaml + + run: + timeout: 18000 + prepare: python setup_chaos.py --kill-interval 200 --max-to-kill 1 --task-names "_RayTrainWorker__execute.get_next" + script: python dataset/multi_node_train_benchmark.py --num-workers 4 --file-type image --use-gpu --num-epochs 1 + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: ../air_tests/air_benchmarks/compute_gpu_2x2_gce.yaml + +- name: read_images_train_16_gpu + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + cluster: + byod: + type: gpu + post_build_script: byod_install_mosaicml.sh + cluster_compute: multi_node_train_16_workers.yaml + + run: + timeout: 18000 + script: python multi_node_train_benchmark.py --num-workers 16 --file-type image --use-gpu --num-epochs 2 + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: 
../../air_tests/air_benchmarks/compute_gpu_4x4_gce.yaml + +- name: read_images_train_16_gpu_preserve_order + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + cluster: + byod: + type: gpu + post_build_script: byod_install_mosaicml.sh + cluster_compute: multi_node_train_16_workers.yaml + + run: + timeout: 18000 + script: python multi_node_train_benchmark.py --num-workers 16 --file-type image --preserve-order --use-gpu --num-epochs 2 + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: ../../air_tests/air_benchmarks/compute_gpu_4x4_gce.yaml + +- name: read_parquet_train_4_gpu + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + cluster: + byod: + type: gpu + post_build_script: byod_install_mosaicml.sh + cluster_compute: multi_node_train_4_workers.yaml + + run: + timeout: 3600 + script: python multi_node_train_benchmark.py --num-workers 4 --file-type parquet --target-worker-gb 50 --use-gpu + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: ../../air_tests/air_benchmarks/compute_gpu_2x2_gce.yaml + +- name: read_parquet_train_16_gpu + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + cluster: + byod: + type: gpu + post_build_script: byod_install_mosaicml.sh + cluster_compute: multi_node_train_16_workers.yaml + + run: + timeout: 3600 + script: python multi_node_train_benchmark.py --num-workers 16 --file-type parquet --target-worker-gb 50 --use-gpu + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: ../../air_tests/air_benchmarks/compute_gpu_4x4_gce.yaml + +- name: read_images_train_1_gpu_5_cpu + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + cluster: + byod: + type: gpu + post_build_script: byod_install_mosaicml.sh + cluster_compute: multi_node_train_1g5c.yaml + + run: + timeout: 2400 + script: python multi_node_train_benchmark.py --num-workers 1 --file-type image --use-gpu --num-epochs 2 --skip-train-model --prefetch-batches 16 --batch-size -1 --disable-locality-with-output + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: compute_gpu_1g5c_gce.yaml + +- name: read_tfrecords_benchmark_single_node + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + + cluster: + byod: + type: gpu + post_build_script: byod_install_mosaicml.sh + cluster_compute: single_node_benchmark_compute.yaml + + run: + # Expect the benchmark to finish around 30 minutes. + timeout: 2700 + script: python read_tfrecords_benchmark.py + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: single_node_benchmark_compute_gce.yaml + +- name: map_batches_benchmark_single_node + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + + cluster: + byod: + type: gpu + cluster_compute: single_node_benchmark_compute.yaml + + run: + # Expect the benchmark to finish around 30 minutes. 
+ timeout: 2400 + script: python map_batches_benchmark.py + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: single_node_benchmark_compute_gce.yaml + +- name: iter_tensor_batches_benchmark_single_node + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + + cluster: + byod: + type: gpu + cluster_compute: single_node_benchmark_compute.yaml + + run: + # Expect the benchmark to finish around 30 minutes. + timeout: 2400 + script: python iter_tensor_batches_benchmark.py + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: single_node_benchmark_compute_gce.yaml + +- name: iter_tensor_batches_benchmark_multi_node + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + + cluster: + byod: + type: gpu + cluster_compute: multi_node_benchmark_compute.yaml + + run: + # Expect the benchmark to finish within 90 minutes. + timeout: 5400 + script: python iter_tensor_batches_benchmark.py --data-size-gb=10 + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: multi_node_benchmark_compute_gce.yaml + +- name: iter_batches_benchmark_single_node + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + + cluster: + byod: + type: gpu + cluster_compute: single_node_benchmark_compute.yaml + + run: + # Expect the benchmark to finish around 12 minutes. + timeout: 1080 + script: python iter_batches_benchmark.py + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: single_node_benchmark_compute_gce.yaml + +- name: dataset_shuffle_random_shuffle_1tb + group: data-tests + working_dir: nightly_tests + + frequency: nightly + team: data + + cluster: + byod: + runtime_env: + - RAY_worker_killing_policy=retriable_lifo + pip: + - ray[default] + cluster_compute: shuffle/datasets_large_scale_compute_small_instances.yaml + + run: + timeout: 7200 + script: python dataset/sort.py --num-partitions=1000 --partition-size=1e9 --shuffle + wait_for_nodes: + num_nodes: 20 + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: shuffle/datasets_large_scale_compute_small_instances_gce.yaml + +- name: dataset_shuffle_sort_1tb + group: data-tests + working_dir: nightly_tests + + frequency: nightly + team: data + stable: False + + cluster: + byod: + runtime_env: + - RAY_worker_killing_policy=retriable_lifo + pip: + - ray[default] + cluster_compute: shuffle/datasets_large_scale_compute_small_instances.yaml + + run: + timeout: 7200 + script: python dataset/sort.py --num-partitions=1000 --partition-size=1e9 + wait_for_nodes: + num_nodes: 20 + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: shuffle/datasets_large_scale_compute_small_instances_gce.yaml + +- name: dataset_shuffle_push_based_random_shuffle_1tb + group: data-tests + working_dir: nightly_tests + + stable: false + + frequency: nightly + team: data + + cluster: + byod: + runtime_env: + - RAY_worker_killing_policy=retriable_lifo + pip: + - ray[default] + cluster_compute: shuffle/datasets_large_scale_compute_small_instances.yaml + + run: + timeout: 7200 + script: RAY_DATA_PUSH_BASED_SHUFFLE=1 python dataset/sort.py --num-partitions=1000 --partition-size=1e9 --shuffle + wait_for_nodes: + num_nodes: 
20 + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: shuffle/datasets_large_scale_compute_small_instances_gce.yaml + +- name: dataset_shuffle_push_based_sort_1tb + group: data-tests + working_dir: nightly_tests + + frequency: nightly + team: data + stable: False + + cluster: + byod: + runtime_env: + - RAY_worker_killing_policy=retriable_lifo + pip: + - ray[default] + cluster_compute: shuffle/datasets_large_scale_compute_small_instances.yaml + + run: + timeout: 7200 + script: RAY_DATA_PUSH_BASED_SHUFFLE=1 python dataset/sort.py --num-partitions=1000 --partition-size=1e9 + wait_for_nodes: + num_nodes: 20 + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: shuffle/datasets_large_scale_compute_small_instances_gce.yaml + +- name: dataset_shuffle_push_based_random_shuffle_100tb + group: data-tests + working_dir: nightly_tests + stable: false + + frequency: weekly + team: data + cluster: + byod: + runtime_env: + - RAY_object_spilling_config={"type":"filesystem","params":{"directory_path":["/tmp/data0","/tmp/data1"]}} + post_build_script: byod_dataset_shuffle.sh + cluster_compute: shuffle/100tb_shuffle_compute.yaml + + run: + timeout: 28800 + script: RAY_DATA_PUSH_BASED_SHUFFLE=1 python dataset/sort.py --num-partitions=100000 --partition-size=1e9 --shuffle + wait_for_nodes: + num_nodes: 100 + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: shuffle/100tb_shuffle_compute_gce.yaml + run: + timeout: 28800 + script: RAY_DATA_PUSH_BASED_SHUFFLE=1 python dataset/sort.py --num-partitions=40000 --partition-size=1e9 --shuffle + wait_for_nodes: + num_nodes: 100 + +############################ +# Batch Inference Benchmarks +############################ + +# 10 GB image classification raw images with 1 GPU. +# 1 g4dn.4xlarge +- name: torch_batch_inference_1_gpu_10gb_raw + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + cluster: + byod: + type: gpu + cluster_compute: compute_gpu_1_cpu_16_aws.yaml + + run: + timeout: 500 + script: python gpu_batch_inference.py --data-directory=10G-image-data-synthetic-raw --data-format raw + + alert: default + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: compute_gpu_1_cpu_16_gce.yaml + +# 10 GB image classification parquet with 1 GPU. 
+# 1 g4dn.4xlarge +- name: torch_batch_inference_1_gpu_10gb_parquet + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + cluster: + byod: + type: gpu + cluster_compute: compute_gpu_1_cpu_16_aws.yaml + + run: + timeout: 500 + script: python gpu_batch_inference.py --data-directory=10G-image-data-synthetic-raw-parquet --data-format parquet + + alert: default + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: compute_gpu_1_cpu_16_gce.yaml + + +# 300 GB image classification raw images with 16 GPUs +# 4 g4dn.12xlarge +- name: torch_batch_inference_16_gpu_300gb_raw + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + cluster: + byod: + type: gpu + cluster_compute: compute_gpu_4x4_aws.yaml + + run: + timeout: 1000 + script: python gpu_batch_inference.py --data-directory 300G-image-data-synthetic-raw --data-format raw + + wait_for_nodes: + num_nodes: 4 + + alert: default + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: compute_gpu_4x4_gce.yaml + + +- name: chaos_torch_batch_inference_16_gpu_300gb_raw + group: data-tests + working_dir: nightly_tests + stable: false + + frequency: nightly + team: data + cluster: + byod: + type: gpu + cluster_compute: dataset/compute_gpu_4x4_aws.yaml + + run: + timeout: 1000 + prepare: python setup_chaos.py --max-to-kill 2 --kill-delay 30 + script: python dataset/gpu_batch_inference.py --data-directory 300G-image-data-synthetic-raw --data-format raw + + wait_for_nodes: + num_nodes: 4 + + alert: default + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: dataset/compute_gpu_4x4_gce.yaml + + +# 300 GB image classification parquet data with 16 GPUs +# 4 g4dn.12xlarge +- name: torch_batch_inference_16_gpu_300gb_parquet + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + + cluster: + byod: + type: gpu + cluster_compute: compute_gpu_4x4_aws.yaml + + run: + timeout: 1000 + script: python gpu_batch_inference.py --data-directory 300G-image-data-synthetic-raw-parquet --data-format parquet + + wait_for_nodes: + num_nodes: 4 + + alert: default + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: compute_gpu_4x4_gce.yaml + +# 10 TB image classification parquet data with heterogenous cluster +# 10 g4dn.12xlarge, 10 m5.16xlarge +- name: torch_batch_inference_hetero_10tb_parquet + group: data-tests + working_dir: nightly_tests/dataset + + frequency: weekly + team: data + + cluster: + byod: + type: gpu + cluster_compute: compute_hetero_10x10_aws.yaml + + run: + timeout: 2000 + script: python gpu_batch_inference.py --data-directory 10T-image-data-synthetic-raw-parquet --data-format parquet + + wait_for_nodes: + num_nodes: 20 + + alert: default diff --git a/release/release_tests.yaml b/release/release_tests.yaml index d8aad5b4bf72..4ffa492e6cde 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -106,181 +106,6 @@ alert: default -############################ -# Batch Inference Benchmarks -############################ - -# 10 GB image classification raw images with 1 GPU. 
-# 1 g4dn.4xlarge -- name: torch_batch_inference_1_gpu_10gb_raw - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - cluster: - byod: - type: gpu - cluster_compute: compute_gpu_1_cpu_16_aws.yaml - - run: - timeout: 500 - script: python gpu_batch_inference.py --data-directory=10G-image-data-synthetic-raw --data-format raw - - alert: default - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: compute_gpu_1_cpu_16_gce.yaml - -# 10 GB image classification parquet with 1 GPU. -# 1 g4dn.4xlarge -- name: torch_batch_inference_1_gpu_10gb_parquet - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - cluster: - byod: - type: gpu - cluster_compute: compute_gpu_1_cpu_16_aws.yaml - - run: - timeout: 500 - script: python gpu_batch_inference.py --data-directory=10G-image-data-synthetic-raw-parquet --data-format parquet - - alert: default - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: compute_gpu_1_cpu_16_gce.yaml - - -# 300 GB image classification raw images with 16 GPUs -# 4 g4dn.12xlarge -- name: torch_batch_inference_16_gpu_300gb_raw - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - cluster: - byod: - type: gpu - cluster_compute: compute_gpu_4x4_aws.yaml - - run: - timeout: 1000 - script: python gpu_batch_inference.py --data-directory 300G-image-data-synthetic-raw --data-format raw - - wait_for_nodes: - num_nodes: 4 - - alert: default - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: compute_gpu_4x4_gce.yaml - - -- name: chaos_torch_batch_inference_16_gpu_300gb_raw - group: data-tests - working_dir: nightly_tests - stable: false - - frequency: nightly - team: data - cluster: - byod: - type: gpu - cluster_compute: dataset/compute_gpu_4x4_aws.yaml - - run: - timeout: 1000 - prepare: python setup_chaos.py --max-to-kill 2 --kill-delay 30 - script: python dataset/gpu_batch_inference.py --data-directory 300G-image-data-synthetic-raw --data-format raw - - wait_for_nodes: - num_nodes: 4 - - alert: default - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: dataset/compute_gpu_4x4_gce.yaml - - -# 300 GB image classification parquet data with 16 GPUs -# 4 g4dn.12xlarge -- name: torch_batch_inference_16_gpu_300gb_parquet - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - - cluster: - byod: - type: gpu - cluster_compute: compute_gpu_4x4_aws.yaml - - run: - timeout: 1000 - script: python gpu_batch_inference.py --data-directory 300G-image-data-synthetic-raw-parquet --data-format parquet - - wait_for_nodes: - num_nodes: 4 - - alert: default - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: compute_gpu_4x4_gce.yaml - -# 10 TB image classification parquet data with heterogenous cluster -# 10 g4dn.12xlarge, 10 m5.16xlarge -- name: torch_batch_inference_hetero_10tb_parquet - group: data-tests - working_dir: nightly_tests/dataset - - frequency: weekly - team: data - - cluster: - byod: - type: gpu - cluster_compute: compute_hetero_10x10_aws.yaml - - run: - timeout: 2000 - script: python gpu_batch_inference.py --data-directory 10T-image-data-synthetic-raw-parquet --data-format parquet - - wait_for_nodes: - num_nodes: 20 - - 
alert: default - ######################### # AIR release tests @@ -3876,741 +3701,6 @@ # stable: false -############### -# Dataset tests -############### - -- name: parquet_metadata_resolution - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - - cluster: - byod: - type: gpu - cluster_compute: single_node_benchmark_compute.yaml - - run: - # Expect the test to finish around 40 seconds. - timeout: 100 - script: python parquet_metadata_resolution.py --num-files 915 --cloud aws - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: single_node_benchmark_compute_gce.yaml - run: - script: python parquet_metadata_resolution.py --num-files 915 --cloud gcp - -- name: stable_diffusion_benchmark - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - - cluster: - byod: - type: gpu - post_build_script: byod_stable_diffusion.sh - cluster_compute: stable_diffusion_benchmark_compute.yaml - - run: - timeout: 1800 - script: python stable_diffusion_benchmark.py - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: stable_diffusion_benchmark_compute_gce.yaml - -- name: streaming_data_ingest_benchmark_1tb - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - - cluster: - byod: - type: gpu - cluster_compute: data_ingest_benchmark_compute.yaml - - run: - timeout: 300 - script: python data_ingest_benchmark.py --dataset-size-gb=1000 --num-workers=20 --streaming - wait_for_nodes: - num_nodes: 20 - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: data_ingest_benchmark_compute_gce.yaml - -- name: streaming_data_ingest_benchmark_100gb_gpu - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - - cluster: - byod: - type: gpu - cluster_compute: data_ingest_benchmark_compute_gpu.yaml - - run: - timeout: 300 - script: python data_ingest_benchmark.py --dataset-size-gb=100 --num-workers=4 --streaming --use-gpu - wait_for_nodes: - num_nodes: 3 - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: data_ingest_benchmark_compute_gpu_gce.yaml - -# This test case will early stop the data ingestion iteration on the GPU actors. -# This is a common usage in PyTorch Lightning -# (https://lightning.ai/docs/pytorch/stable/common/trainer.html#limit-train-batches). -# There was a bug in Ray Data that caused GPU memoy leak (see #3.919). -# We add this test case to cover this scenario. 
-- name: streaming_data_ingest_benchmark_100gb_gpu_early_stop - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - - cluster: - byod: - type: gpu - cluster_compute: data_ingest_benchmark_compute_gpu.yaml - - run: - timeout: 300 - script: python data_ingest_benchmark.py --dataset-size-gb=100 --num-workers=4 --streaming --use-gpu --early-stop - wait_for_nodes: - num_nodes: 3 - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: data_ingest_benchmark_compute_gpu_gce.yaml - -- name: aggregate_benchmark - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - - cluster: - byod: - type: gpu - cluster_compute: single_node_benchmark_compute.yaml - - run: - timeout: 1800 - script: python aggregate_benchmark.py - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: single_node_benchmark_compute_gce.yaml - -- name: read_parquet_benchmark_single_node - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - - cluster: - byod: - type: gpu - post_build_script: byod_install_mosaicml.sh - cluster_compute: single_node_benchmark_compute.yaml - - run: - # Expect the benchmark to finish in 400 seconds. - timeout: 400 - script: python read_parquet_benchmark.py - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: single_node_benchmark_compute_gce.yaml - -- name: read_images_benchmark_single_node - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - - cluster: - byod: - type: gpu - post_build_script: byod_install_mosaicml.sh - cluster_compute: single_node_benchmark_compute.yaml - - run: - timeout: 1800 - script: python read_images_benchmark.py --single-node - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: single_node_benchmark_compute_gce.yaml - -# TODO: Re-enable this test once we fix https://github.com/ray-project/ray/issues/40686. 
-# - name: read_images_benchmark_multi_node -# group: data-tests -# working_dir: nightly_tests/dataset - -# frequency: nightly-3x -# team: data - -# cluster: -# byod: -# type: gpu -# cluster_compute: multi_node_read_images_benchmark_compute.yaml - -# run: -# timeout: 28800 -# script: python read_images_benchmark.py --multi-node - -# variations: -# - __suffix__: aws -# - __suffix__: gce -# env: gce -# frequency: manual -# cluster: -# cluster_compute: multi_node_read_images_benchmark_compute_gce.yaml - -- name: read_images_comparison_microbenchmark_single_node - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - cluster: - byod: - type: gpu - post_build_script: byod_install_mosaicml.sh - cluster_compute: single_worker_node_0_head_node_benchmark_compute.yaml - - run: - timeout: 1800 - script: bash run_image_loader_microbenchmark.sh - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: single_node_benchmark_compute_gce.yaml - -- name: read_images_train_4_gpu - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - cluster: - byod: - type: gpu - post_build_script: byod_install_mosaicml.sh - cluster_compute: multi_node_train_4_workers.yaml - - run: - timeout: 18000 - script: python multi_node_train_benchmark.py --num-workers 4 --file-type image --use-gpu --num-epochs 2 - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: ../../air_tests/air_benchmarks/compute_gpu_2x2_gce.yaml - -- name: read_images_train_4_gpu_worker_chaos - group: data-tests - working_dir: nightly_tests - - frequency: nightly - team: data - cluster: - byod: - type: gpu - post_build_script: byod_install_mosaicml.sh - cluster_compute: dataset/multi_node_train_4_workers.yaml - - run: - timeout: 18000 - prepare: python setup_chaos.py --kill-workers --kill-interval 100 --max-to-kill 3 --task-names "ReadImage->Map(wnid_to_index)->Map(crop_and_flip_image)" - script: python dataset/multi_node_train_benchmark.py --num-workers 4 --file-type image --use-gpu --num-epochs 1 - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: ../air_tests/air_benchmarks/compute_gpu_2x2_gce.yaml - -- name: read_images_train_4_gpu_node_chaos - group: data-tests - working_dir: nightly_tests - - frequency: nightly - team: data - cluster: - byod: - type: gpu - post_build_script: byod_install_mosaicml.sh - cluster_compute: dataset/multi_node_train_4_workers.yaml - - run: - timeout: 18000 - prepare: python setup_chaos.py --kill-interval 200 --max-to-kill 1 --task-names "_RayTrainWorker__execute.get_next" - script: python dataset/multi_node_train_benchmark.py --num-workers 4 --file-type image --use-gpu --num-epochs 1 - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: ../air_tests/air_benchmarks/compute_gpu_2x2_gce.yaml - -- name: read_images_train_16_gpu - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - cluster: - byod: - type: gpu - post_build_script: byod_install_mosaicml.sh - cluster_compute: multi_node_train_16_workers.yaml - - run: - timeout: 18000 - script: python multi_node_train_benchmark.py --num-workers 16 --file-type image --use-gpu --num-epochs 2 - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: 
../../air_tests/air_benchmarks/compute_gpu_4x4_gce.yaml - -- name: read_images_train_16_gpu_preserve_order - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - cluster: - byod: - type: gpu - post_build_script: byod_install_mosaicml.sh - cluster_compute: multi_node_train_16_workers.yaml - - run: - timeout: 18000 - script: python multi_node_train_benchmark.py --num-workers 16 --file-type image --preserve-order --use-gpu --num-epochs 2 - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: ../../air_tests/air_benchmarks/compute_gpu_4x4_gce.yaml - -- name: read_parquet_train_4_gpu - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - cluster: - byod: - type: gpu - post_build_script: byod_install_mosaicml.sh - cluster_compute: multi_node_train_4_workers.yaml - - run: - timeout: 3600 - script: python multi_node_train_benchmark.py --num-workers 4 --file-type parquet --target-worker-gb 50 --use-gpu - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: ../../air_tests/air_benchmarks/compute_gpu_2x2_gce.yaml - -- name: read_parquet_train_16_gpu - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - cluster: - byod: - type: gpu - post_build_script: byod_install_mosaicml.sh - cluster_compute: multi_node_train_16_workers.yaml - - run: - timeout: 3600 - script: python multi_node_train_benchmark.py --num-workers 16 --file-type parquet --target-worker-gb 50 --use-gpu - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: ../../air_tests/air_benchmarks/compute_gpu_4x4_gce.yaml - -- name: read_images_train_1_gpu_5_cpu - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - cluster: - byod: - type: gpu - post_build_script: byod_install_mosaicml.sh - cluster_compute: multi_node_train_1g5c.yaml - - run: - timeout: 2400 - script: python multi_node_train_benchmark.py --num-workers 1 --file-type image --use-gpu --num-epochs 2 --skip-train-model --prefetch-batches 16 --batch-size -1 --disable-locality-with-output - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: compute_gpu_1g5c_gce.yaml - -- name: read_tfrecords_benchmark_single_node - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - - cluster: - byod: - type: gpu - post_build_script: byod_install_mosaicml.sh - cluster_compute: single_node_benchmark_compute.yaml - - run: - # Expect the benchmark to finish around 30 minutes. - timeout: 2700 - script: python read_tfrecords_benchmark.py - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: single_node_benchmark_compute_gce.yaml - -- name: map_batches_benchmark_single_node - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - - cluster: - byod: - type: gpu - cluster_compute: single_node_benchmark_compute.yaml - - run: - # Expect the benchmark to finish around 30 minutes. 
- timeout: 2400 - script: python map_batches_benchmark.py - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: single_node_benchmark_compute_gce.yaml - -- name: iter_tensor_batches_benchmark_single_node - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - - cluster: - byod: - type: gpu - cluster_compute: single_node_benchmark_compute.yaml - - run: - # Expect the benchmark to finish around 30 minutes. - timeout: 2400 - script: python iter_tensor_batches_benchmark.py - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: single_node_benchmark_compute_gce.yaml - -- name: iter_tensor_batches_benchmark_multi_node - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - - cluster: - byod: - type: gpu - cluster_compute: multi_node_benchmark_compute.yaml - - run: - # Expect the benchmark to finish within 90 minutes. - timeout: 5400 - script: python iter_tensor_batches_benchmark.py --data-size-gb=10 - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: multi_node_benchmark_compute_gce.yaml - -- name: iter_batches_benchmark_single_node - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - - cluster: - byod: - type: gpu - cluster_compute: single_node_benchmark_compute.yaml - - run: - # Expect the benchmark to finish around 12 minutes. - timeout: 1080 - script: python iter_batches_benchmark.py - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: single_node_benchmark_compute_gce.yaml - -- name: dataset_shuffle_random_shuffle_1tb - group: data-tests - working_dir: nightly_tests - - frequency: nightly - team: data - - cluster: - byod: - runtime_env: - - RAY_worker_killing_policy=retriable_lifo - pip: - - ray[default] - cluster_compute: shuffle/datasets_large_scale_compute_small_instances.yaml - - run: - timeout: 7200 - script: python dataset/sort.py --num-partitions=1000 --partition-size=1e9 --shuffle - wait_for_nodes: - num_nodes: 20 - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: shuffle/datasets_large_scale_compute_small_instances_gce.yaml - -- name: dataset_shuffle_sort_1tb - group: data-tests - working_dir: nightly_tests - - frequency: nightly - team: data - stable: False - - cluster: - byod: - runtime_env: - - RAY_worker_killing_policy=retriable_lifo - pip: - - ray[default] - cluster_compute: shuffle/datasets_large_scale_compute_small_instances.yaml - - run: - timeout: 7200 - script: python dataset/sort.py --num-partitions=1000 --partition-size=1e9 - wait_for_nodes: - num_nodes: 20 - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: shuffle/datasets_large_scale_compute_small_instances_gce.yaml - -- name: dataset_shuffle_push_based_random_shuffle_1tb - group: data-tests - working_dir: nightly_tests - - stable: false - - frequency: nightly - team: data - - cluster: - byod: - runtime_env: - - RAY_worker_killing_policy=retriable_lifo - pip: - - ray[default] - cluster_compute: shuffle/datasets_large_scale_compute_small_instances.yaml - - run: - timeout: 7200 - script: RAY_DATA_PUSH_BASED_SHUFFLE=1 python dataset/sort.py --num-partitions=1000 --partition-size=1e9 --shuffle - wait_for_nodes: - num_nodes: 
20 - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: shuffle/datasets_large_scale_compute_small_instances_gce.yaml - -- name: dataset_shuffle_push_based_sort_1tb - group: data-tests - working_dir: nightly_tests - - frequency: nightly - team: data - stable: False - - cluster: - byod: - runtime_env: - - RAY_worker_killing_policy=retriable_lifo - pip: - - ray[default] - cluster_compute: shuffle/datasets_large_scale_compute_small_instances.yaml - - run: - timeout: 7200 - script: RAY_DATA_PUSH_BASED_SHUFFLE=1 python dataset/sort.py --num-partitions=1000 --partition-size=1e9 - wait_for_nodes: - num_nodes: 20 - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: shuffle/datasets_large_scale_compute_small_instances_gce.yaml - -- name: dataset_shuffle_push_based_random_shuffle_100tb - group: data-tests - working_dir: nightly_tests - stable: false - - frequency: weekly - team: data - cluster: - byod: - runtime_env: - - RAY_object_spilling_config={"type":"filesystem","params":{"directory_path":["/tmp/data0","/tmp/data1"]}} - post_build_script: byod_dataset_shuffle.sh - cluster_compute: shuffle/100tb_shuffle_compute.yaml - - run: - timeout: 28800 - script: RAY_DATA_PUSH_BASED_SHUFFLE=1 python dataset/sort.py --num-partitions=100000 --partition-size=1e9 --shuffle - wait_for_nodes: - num_nodes: 100 - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: shuffle/100tb_shuffle_compute_gce.yaml - run: - timeout: 28800 - script: RAY_DATA_PUSH_BASED_SHUFFLE=1 python dataset/sort.py --num-partitions=40000 --partition-size=1e9 --shuffle - wait_for_nodes: - num_nodes: 100 ################## # Core Chaos tests From 7d081ece584262d4f85438f667fe331a19332aff Mon Sep 17 00:00:00 2001 From: Rui Qiao <161574667+ruisearch42@users.noreply.github.com> Date: Thu, 21 Nov 2024 16:57:59 -0800 Subject: [PATCH 052/107] [core] Fix typos (#48849) Fix typos Signed-off-by: Rui Qiao --- python/ray/_private/state.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/python/ray/_private/state.py b/python/ray/_private/state.py index bebca03c0044..b8af96491b08 100644 --- a/python/ray/_private/state.py +++ b/python/ray/_private/state.py @@ -525,7 +525,7 @@ def chrome_tracing_object_transfer_dump(self, filename=None): """Return a list of transfer events that can viewed as a timeline. To view this information as a timeline, simply dump it as a json file - by passing in "filename" or using using json.dump, and then load go to + by passing in "filename" or using json.dump, and then load go to chrome://tracing in the Chrome web browser and load the dumped file. Make sure to enable "Flow events" in the "View Options" menu. 
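For context, the docstrings being touched here describe one workflow; a minimal sketch of it, assuming a running local Ray instance (the `timeline` helper in this file is exposed publicly as `ray.timeline`):

```python
import json

import ray

ray.init()
# Either dump the task events straight to a file that chrome://tracing can load...
ray.timeline(filename="/tmp/timeline.json")
# ...or fetch the event list and write it yourself with json.dump.
events = ray.timeline()
with open("/tmp/timeline.json", "w") as f:
    json.dump(events, f)
```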
@@ -748,7 +748,7 @@ def _live_node_ids(self): return set(self.total_resources_per_node().keys()) def available_resources_per_node(self): - """Returns a dictionary mapping node id to avaiable resources.""" + """Returns a dictionary mapping node id to available resources.""" self._check_connected() available_resources_by_id = {} @@ -952,7 +952,7 @@ def timeline(filename=None): variable prior to starting Ray, and set RAY_task_events_report_interval_ms=0 To view this information as a timeline, simply dump it as a json file by - passing in "filename" or using using json.dump, and then load go to + passing in "filename" or using json.dump, and then load go to chrome://tracing in the Chrome web browser and load the dumped file. Args: @@ -970,7 +970,7 @@ def object_transfer_timeline(filename=None): """Return a list of transfer events that can viewed as a timeline. To view this information as a timeline, simply dump it as a json file by - passing in "filename" or using using json.dump, and then load go to + passing in "filename" or using json.dump, and then load go to chrome://tracing in the Chrome web browser and load the dumped file. Make sure to enable "Flow events" in the "View Options" menu. From d4c2c8d796c09a2e9f1f1dfaf1fbc90b762c6971 Mon Sep 17 00:00:00 2001 From: akyang-anyscale Date: Thu, 21 Nov 2024 17:16:31 -0800 Subject: [PATCH 053/107] [Serve] Propagate replica constructor error to deployment status message and print num retries left (#48531) ## Why are these changes needed? This change will surface the replica constructor error as soon as the replica constructor fails for whatever reason. The exception will be populated in the deployment status so that it's viewable from the ray dashboard. Additionally, the number of replica constructor retries left will also be updated in the error message. This will help users more quickly debug a deployment that is failing to start. 
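To make the new behavior concrete, here is a minimal, self-contained sketch of the message construction, mirroring the `_failed_to_start_threshold` property and `record_replica_startup_failure` changes in the diff below (the retry-count constant is a placeholder for illustration, not Serve's actual default):

```python
MAX_DEPLOYMENT_CONSTRUCTOR_RETRY_COUNT = 20  # placeholder value, for illustration only


def startup_failure_message(
    retry_counter: int, target_num_replicas: int, error_msg: str
) -> str:
    # Mirrors `_failed_to_start_threshold`: allow at most 3 constructor
    # failures per target replica, capped by the global retry count.
    threshold = min(MAX_DEPLOYMENT_CONSTRUCTOR_RETRY_COUNT, target_num_replicas * 3)
    retrying_msg = "Retrying"
    if threshold != 0:
        retrying_msg += f" {threshold - retry_counter} more time(s)"
    return (
        f"A replica failed to start with exception. {retrying_msg}. Error:\n"
        f"{error_msg}"
    )


# Three failures with 3 target replicas leaves 9 - 3 = 6 retries.
assert "Retrying 6 more time(s)" in startup_failure_message(3, 3, "ZeroDivisionError")
```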
## Related issue number Closes https://github.com/ray-project/ray/issues/35604 Signed-off-by: akyang-anyscale --- python/ray/serve/_private/deployment_state.py | 44 ++++++++++++------- python/ray/serve/tests/test_cli.py | 28 ++++++++++++ .../test_config_files/deployment_fail_2.yaml | 3 ++ .../serve/tests/test_config_files/fail_2.py | 13 ++++++ .../serve/tests/unit/test_deployment_state.py | 10 ++--- 5 files changed, 76 insertions(+), 22 deletions(-) create mode 100644 python/ray/serve/tests/test_config_files/deployment_fail_2.yaml create mode 100644 python/ray/serve/tests/test_config_files/fail_2.py diff --git a/python/ray/serve/_private/deployment_state.py b/python/ray/serve/_private/deployment_state.py index 562fd62f62c6..09fa27876397 100644 --- a/python/ray/serve/_private/deployment_state.py +++ b/python/ray/serve/_private/deployment_state.py @@ -1384,6 +1384,13 @@ def deployment_name(self) -> str: def app_name(self) -> str: return self._id.app_name + @property + def _failed_to_start_threshold(self) -> int: + return min( + MAX_DEPLOYMENT_CONSTRUCTOR_RETRY_COUNT, + self._target_state.target_num_replicas * 3, + ) + def get_alive_replica_actor_ids(self) -> Set[str]: return {replica.actor_id for replica in self._replicas.get()} @@ -1845,11 +1852,10 @@ def scale_deployment_replicas( if to_add > 0: # Exponential backoff - failed_to_start_threshold = min( - MAX_DEPLOYMENT_CONSTRUCTOR_RETRY_COUNT, - self._target_state.target_num_replicas * 3, - ) - if self._replica_constructor_retry_counter >= failed_to_start_threshold: + if ( + self._replica_constructor_retry_counter + >= self._failed_to_start_threshold + ): # Wait 1, 2, 4, ... seconds before consecutive retries, with random # offset added to avoid synchronization if ( @@ -1909,17 +1915,13 @@ def check_curr_status(self) -> Tuple[bool, bool]: ) failed_to_start_count = self._replica_constructor_retry_counter - failed_to_start_threshold = min( - MAX_DEPLOYMENT_CONSTRUCTOR_RETRY_COUNT, - self._target_state.target_num_replicas * 3, - ) # Got to make a call to complete current deploy() goal after # start failure threshold reached, while we might still have # pending replicas in current goal. if ( - failed_to_start_count >= failed_to_start_threshold - and failed_to_start_threshold != 0 + failed_to_start_count >= self._failed_to_start_threshold + and self._failed_to_start_threshold != 0 ): if running_at_target_version_replica_cnt > 0: # At least one RUNNING replica at target state, partial @@ -2043,17 +2045,27 @@ def record_replica_startup_failure(self, error_msg: str): self._replica_constructor_retry_counter += 1 self._replica_constructor_error_msg = error_msg + retrying_msg = "Retrying" + if self._failed_to_start_threshold != 0: + remaining_retries = ( + self._failed_to_start_threshold + - self._replica_constructor_retry_counter + ) + retrying_msg += f" {remaining_retries} more time(s)" + + message = ( + f"A replica failed to start with exception. {retrying_msg}. Error:\n" + f"{error_msg}" + ) + self._curr_status_info = self._curr_status_info.update_message(message) + def update_replica_startup_backoff_time(self): """Updates the replica startup backoff time.""" # If replicas have failed enough times, execute exponential backoff # Wait 1, 2, 4, ... 
seconds before consecutive retries (or use a custom # backoff factor by setting EXPONENTIAL_BACKOFF_FACTOR) - failed_to_start_threshold = min( - MAX_DEPLOYMENT_CONSTRUCTOR_RETRY_COUNT, - self._target_state.target_num_replicas * 3, - ) - if self._replica_constructor_retry_counter > failed_to_start_threshold: + if self._replica_constructor_retry_counter > self._failed_to_start_threshold: self._backoff_time_s = min( EXPONENTIAL_BACKOFF_FACTOR * self._backoff_time_s, MAX_BACKOFF_TIME_S ) diff --git a/python/ray/serve/tests/test_cli.py b/python/ray/serve/tests/test_cli.py index 943024cfbe38..2b2c0ff279dd 100644 --- a/python/ray/serve/tests/test_cli.py +++ b/python/ray/serve/tests/test_cli.py @@ -606,6 +606,34 @@ def check_for_failed_deployment(): wait_for_condition(check_for_failed_deployment) +@pytest.mark.skipif(sys.platform == "win32", reason="File path incorrect on Windows.") +def test_status_constructor_retry_error(ray_start_stop): + """Deploys Serve deployment that errors out in constructor, checks that the + retry message is surfaced. + """ + + config_file_name = os.path.join( + os.path.dirname(__file__), "test_config_files", "deployment_fail_2.yaml" + ) + + subprocess.check_output(["serve", "deploy", config_file_name]) + + def check_for_failed_deployment(): + cli_output = subprocess.check_output( + ["serve", "status", "-a", "http://localhost:52365/"] + ) + status = yaml.safe_load(cli_output)["applications"][SERVE_DEFAULT_APP_NAME] + assert status["status"] == "DEPLOYING" + + deployment_status = status["deployments"]["A"] + assert deployment_status["status"] == "UPDATING" + assert deployment_status["status_trigger"] == "CONFIG_UPDATE_STARTED" + assert "ZeroDivisionError" in deployment_status["message"] + return True + + wait_for_condition(check_for_failed_deployment) + + @pytest.mark.skipif(sys.platform == "win32", reason="File path incorrect on Windows.") def test_status_package_unavailable_in_controller(ray_start_stop): """Test that exceptions raised from packages that are installed on deployment actors diff --git a/python/ray/serve/tests/test_config_files/deployment_fail_2.yaml b/python/ray/serve/tests/test_config_files/deployment_fail_2.yaml new file mode 100644 index 000000000000..38da015b0bb3 --- /dev/null +++ b/python/ray/serve/tests/test_config_files/deployment_fail_2.yaml @@ -0,0 +1,3 @@ +applications: + - name: default + import_path: ray.serve.tests.test_config_files.fail_2.node diff --git a/python/ray/serve/tests/test_config_files/fail_2.py b/python/ray/serve/tests/test_config_files/fail_2.py new file mode 100644 index 000000000000..2e95aa93d98f --- /dev/null +++ b/python/ray/serve/tests/test_config_files/fail_2.py @@ -0,0 +1,13 @@ +import time + +from ray import serve + + +@serve.deployment +class A: + def __init__(self): + time.sleep(5) + 1 / 0 + + +node = A.bind() diff --git a/python/ray/serve/tests/unit/test_deployment_state.py b/python/ray/serve/tests/unit/test_deployment_state.py index dfeb9fc7524c..42facaf40282 100644 --- a/python/ray/serve/tests/unit/test_deployment_state.py +++ b/python/ray/serve/tests/unit/test_deployment_state.py @@ -2492,9 +2492,7 @@ def create_deployment_state( check_counts(ds1, total=3, by_state=[(ReplicaState.STOPPING, 3, None)]) assert ds1._replica_constructor_retry_counter == 3 - # An error message should show up after - # 3 * num_replicas startup failures. - assert "" == ds1.curr_status_info.message + assert "Retrying 6 more time(s)" in ds1.curr_status_info.message # Set all of ds1's replicas to stopped. 
for replica in ds1._replicas.get(): @@ -2512,7 +2510,7 @@ def create_deployment_state( assert ds1.curr_status_info.status == DeploymentStatus.UPDATING check_counts(ds1, total=3, by_state=[(ReplicaState.STOPPING, 3, None)]) assert ds1._replica_constructor_retry_counter == 6 - assert "" == ds1.curr_status_info.message + assert "Retrying 3 more time(s)" in ds1.curr_status_info.message # Set all of ds1's replicas to stopped. for replica in ds1._replicas.get(): @@ -2527,7 +2525,7 @@ def create_deployment_state( assert ds1.curr_status_info.status == DeploymentStatus.UPDATING check_counts(ds1, total=3, by_state=[(ReplicaState.STOPPING, 3, None)]) assert ds1._replica_constructor_retry_counter == 9 - assert "" == ds1.curr_status_info.message + assert "Retrying 0 more time(s)" in ds1.curr_status_info.message # Set all of ds1's replicas to stopped. for replica in ds1._replicas.get(): @@ -2540,7 +2538,7 @@ def create_deployment_state( assert ds1.curr_status_info.status == DeploymentStatus.UNHEALTHY check_counts(ds1, total=0) assert ds1._replica_constructor_retry_counter == 9 - assert "Replica scheduling failed" in ds1.curr_status_info.message + assert "The deployment failed to start" in ds1.curr_status_info.message def test_deploy_with_transient_constructor_failure(mock_deployment_state_manager): From 335bd668924e9e5c67b17444b4848f9c3983fe69 Mon Sep 17 00:00:00 2001 From: Artur Niederfahrenhorst Date: Fri, 22 Nov 2024 02:28:25 +0100 Subject: [PATCH 054/107] [Data] Re-implement APIs like select_columns with PyArrow batch format (#48140) ## Related issue number Closes #48090 Prerequisite: https://github.com/ray-project/ray/issues/48575 --------- Signed-off-by: Artur Niederfahrenhorst Co-authored-by: Balaji Veeramani Co-authored-by: Alexey Kudinkin --- python/ray/data/block.py | 6 ++ python/ray/data/dataset.py | 100 ++++++++++++++++++++---- python/ray/data/iterator.py | 3 +- python/ray/data/tests/test_map.py | 97 +++++++++++++++++++++-- python/ray/data/tests/test_mongo.py | 53 ++++++------- python/ray/data/tests/test_object_gc.py | 3 +- 6 files changed, 208 insertions(+), 54 deletions(-) diff --git a/python/ray/data/block.py b/python/ray/data/block.py index 56a3c6dbc40d..fcab3feb67eb 100644 --- a/python/ray/data/block.py +++ b/python/ray/data/block.py @@ -72,6 +72,12 @@ class BlockType(Enum): # returned from batch UDFs. DataBatch = Union["pyarrow.Table", "pandas.DataFrame", Dict[str, np.ndarray]] +# User-facing data column type. This is the data type for data that is supplied to and +# returned from column UDFs. +DataBatchColumn = Union[ + "pyarrow.ChunkedArray", "pyarrow.Array", "pandas.Series", np.ndarray +] + # A class type that implements __call__. CallableClass = type diff --git a/python/ray/data/dataset.py b/python/ray/data/dataset.py index d576b8eb2ea7..779a5bd3295e 100644 --- a/python/ray/data/dataset.py +++ b/python/ray/data/dataset.py @@ -87,6 +87,7 @@ Block, BlockAccessor, DataBatch, + DataBatchColumn, T, U, UserDefinedFunction, @@ -529,7 +530,8 @@ def __call__(self, batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: compute: This argument is deprecated. Use ``concurrency`` argument. batch_format: If ``"default"`` or ``"numpy"``, batches are ``Dict[str, numpy.ndarray]``. If ``"pandas"``, batches are - ``pandas.DataFrame``. + ``pandas.DataFrame``. If ``"pyarrow"``, batches are + ``pyarrow.Table``. zero_copy_batch: Whether ``fn`` should be provided zero-copy, read-only batches. 
If this is ``True`` and no copy is required for the ``batch_format`` conversion, the batch is a zero-copy, read-only @@ -700,16 +702,21 @@ def _map_batches_without_batch_size_validation( def add_column( self, col: str, - fn: Callable[["pandas.DataFrame"], "pandas.Series"], + fn: Callable[ + [DataBatch], + DataBatchColumn, + ], *, + batch_format: Optional[str] = "pandas", compute: Optional[str] = None, concurrency: Optional[Union[int, Tuple[int, int]]] = None, **ray_remote_args, ) -> "Dataset": """Add the given column to the dataset. - A function generating the new column values given the batch in pandas - format must be specified. + A function generating the new column values given the batch in pyarrow or pandas + format must be specified. This function must operate on batches of + `batch_format`. Examples: @@ -729,11 +736,6 @@ def add_column( id int64 new_id int64 - Overwrite the existing values with zeros. - - >>> ds.add_column("id", lambda df: 0).take(3) - [{'id': 0}, {'id': 0}, {'id': 0}] - Time complexity: O(dataset size / parallelism) Args: @@ -741,6 +743,11 @@ def add_column( column is overwritten. fn: Map function generating the column values given a batch of records in pandas format. + batch_format: If ``"default"`` or ``"numpy"``, batches are + ``Dict[str, numpy.ndarray]``. If ``"pandas"``, batches are + ``pandas.DataFrame``. If ``"pyarrow"``, batches are + ``pyarrow.Table``. If ``"numpy"``, batches are + ``Dict[str, numpy.ndarray]``. compute: This argument is deprecated. Use ``concurrency`` argument. concurrency: The number of Ray workers to use concurrently. For a fixed-sized worker pool of size ``n``, specify ``concurrency=n``. For @@ -749,17 +756,72 @@ def add_column( ray_remote_args: Additional resource requirements to request from ray (e.g., num_gpus=1 to request GPUs for the map tasks). """ + # Check that batch_format + accepted_batch_formats = ["pandas", "pyarrow", "numpy"] + if batch_format not in accepted_batch_formats: + raise ValueError( + f"batch_format argument must be on of {accepted_batch_formats}, " + f"got: {batch_format}" + ) - def add_column(batch: "pandas.DataFrame") -> "pandas.DataFrame": - batch.loc[:, col] = fn(batch) - return batch + def add_column(batch: DataBatch) -> DataBatch: + column = fn(batch) + if batch_format == "pandas": + import pandas as pd + + assert isinstance(column, pd.Series), ( + f"For pandas batch format, the function must return a pandas " + f"Series, got: {type(column)}" + ) + if col in batch: + raise ValueError( + f"Trying to add an existing column with name" f" {col}" + ) + batch.loc[:, col] = column + return batch + elif batch_format == "pyarrow": + import pyarrow as pa + + assert isinstance(column, (pa.Array, pa.ChunkedArray)), ( + f"For pyarrow batch format, the function must return a pyarrow " + f"Array, got: {type(column)}" + ) + # Historically, this method was written for pandas batch format. + # To resolve https://github.com/ray-project/ray/issues/48090, + # we also allow pyarrow batch format which is preferred but would be + # a breaking change to enforce. 
+ + # For pyarrow, the index of the column will be -1 if it is missing in + # which case we'll want to append it + column_idx = batch.schema.get_field_index(col) + if column_idx == -1: + # Append the column to the table + return batch.append_column(col, column) + else: + raise ValueError( + f"Trying to add an existing column with name {col}" + ) + + else: + # batch format is assumed to be numpy since we checked at the + # beginning of the add_column function + assert isinstance(column, np.ndarray), ( + f"For numpy batch format, the function must return a " + f"numpy.ndarray, got: {type(column)}" + ) + if col in batch: + raise ValueError( + f"Trying to add an existing column with name" f" {col}" + ) + batch[col] = column + return batch if not callable(fn): raise ValueError("`fn` must be callable, got {}".format(fn)) return self.map_batches( add_column, - batch_format="pandas", # TODO(ekl) we should make this configurable. + batch_format=batch_format, compute=compute, concurrency=concurrency, zero_copy_batch=False, @@ -801,7 +863,7 @@ def drop_columns( Args: cols: Names of the columns to drop. If any name does not exist, - an exception is raised. + an exception is raised. Column names must be unique. compute: This argument is deprecated. Use ``concurrency`` argument. concurrency: The number of Ray workers to use concurrently. For a fixed-sized worker pool of size ``n``, specify ``concurrency=n``. For an autoscaling @@ -810,12 +872,15 @@ def drop_columns( ray (e.g., num_gpus=1 to request GPUs for the map tasks). """ # noqa: E501 + if len(cols) != len(set(cols)): + raise ValueError(f"drop_columns expects unique column names, got: {cols}") + def drop_columns(batch): - return batch.drop(columns=cols) + return batch.drop(cols) return self.map_batches( drop_columns, - batch_format="pandas", + batch_format="pyarrow", zero_copy_batch=True, compute=compute, concurrency=concurrency, @@ -4316,7 +4381,8 @@ def to_tf( If your model accepts additional metadata aside from features and label, specify a single additional column or a list of additional columns. A common use case is to include sample weights in the data samples and train a ``tf.keras.Model`` with ``tf.keras.Model.fit``. - >>> ds = ds.add_column("sample weights", lambda df: 1) + >>> import pandas as pd + >>> ds = ds.add_column("sample weights", lambda df: pd.Series([1] * len(df))) >>> ds.to_tf(feature_columns="features", label_columns="target", additional_columns="sample weights") <_OptionsDataset element_spec=(TensorSpec(shape=(None, 4), dtype=tf.float64, name='features'), TensorSpec(shape=(None,), dtype=tf.int64, name='target'), TensorSpec(shape=(None,), dtype=tf.int64, name='sample weights'))> diff --git a/python/ray/data/iterator.py b/python/ray/data/iterator.py index 58e9a1b7355e..2f19111af80f 100644 --- a/python/ray/data/iterator.py +++ b/python/ray/data/iterator.py @@ -734,7 +734,8 @@ def to_tf( If your model accepts additional metadata aside from features and label, specify a single additional column or a list of additional columns. A common use case is to include sample weights in the data samples and train a ``tf.keras.Model`` with ``tf.keras.Model.fit``. 
- >>> ds = ds.add_column("sample weights", lambda df: 1) + >>> import pandas as pd + >>> ds = ds.add_column("sample weights", lambda df: pd.Series([1] * len(df))) >>> it = ds.iterator() >>> it.to_tf(feature_columns="sepal length (cm)", label_columns="target", additional_columns="sample weights") <_OptionsDataset element_spec=(TensorSpec(shape=(None,), dtype=tf.float64, name='sepal length (cm)'), TensorSpec(shape=(None,), dtype=tf.int64, name='target'), TensorSpec(shape=(None,), dtype=tf.int64, name='sample weights'))> diff --git a/python/ray/data/tests/test_map.py b/python/ray/data/tests/test_map.py index 9b1a4f8d4575..d4e7e2c374de 100644 --- a/python/ray/data/tests/test_map.py +++ b/python/ray/data/tests/test_map.py @@ -9,6 +9,7 @@ import numpy as np import pandas as pd import pyarrow as pa +import pyarrow.compute as pc import pyarrow.parquet as pq import pytest @@ -330,18 +331,99 @@ def map_generator(item: dict) -> Iterator[int]: def test_add_column(ray_start_regular_shared): - ds = ray.data.range(5).add_column("foo", lambda x: 1) + """Tests the add column API.""" + + # Test with pyarrow batch format + ds = ray.data.range(5).add_column( + "foo", lambda x: pa.array([1] * x.num_rows), batch_format="pyarrow" + ) + assert ds.take(1) == [{"id": 0, "foo": 1}] + + # Test with chunked array batch format + ds = ray.data.range(5).add_column( + "foo", lambda x: pa.chunked_array([[1] * x.num_rows]), batch_format="pyarrow" + ) + assert ds.take(1) == [{"id": 0, "foo": 1}] + + ds = ray.data.range(5).add_column( + "foo", lambda x: pc.add(x["id"], 1), batch_format="pyarrow" + ) + assert ds.take(1) == [{"id": 0, "foo": 1}] + + # Adding a column that is already there should result in an error + with pytest.raises( + ray.exceptions.UserCodeException, + match="Trying to add an existing column with name id", + ): + ds = ray.data.range(5).add_column( + "id", lambda x: pc.add(x["id"], 1), batch_format="pyarrow" + ) + assert ds.take(2) == [{"id": 1}, {"id": 2}] + + # Adding a column in the wrong format should result in an error + with pytest.raises( + ray.exceptions.UserCodeException, match="For pyarrow batch " "format" + ): + ds = ray.data.range(5).add_column("id", lambda x: [1], batch_format="pyarrow") + assert ds.take(2) == [{"id": 1}, {"id": 2}] + + # Test with numpy batch format + ds = ray.data.range(5).add_column( + "foo", lambda x: np.array([1] * len(list(x.keys())[0])), batch_format="numpy" + ) + assert ds.take(1) == [{"id": 0, "foo": 1}] + + ds = ray.data.range(5).add_column( + "foo", lambda x: np.add(x["id"], 1), batch_format="numpy" + ) + assert ds.take(1) == [{"id": 0, "foo": 1}] + + # Adding a column that is already there should result in an error + with pytest.raises( + ray.exceptions.UserCodeException, + match="Trying to add an existing column with name id", + ): + ds = ray.data.range(5).add_column( + "id", lambda x: np.add(x["id"], 1), batch_format="numpy" + ) + assert ds.take(2) == [{"id": 1}, {"id": 2}] + + # Adding a column in the wrong format should result in an error + with pytest.raises( + ray.exceptions.UserCodeException, match="For numpy batch " "format" + ): + ds = ray.data.range(5).add_column("id", lambda x: [1], batch_format="numpy") + assert ds.take(2) == [{"id": 1}, {"id": 2}] + + # Test with pandas batch format + ds = ray.data.range(5).add_column("foo", lambda x: pd.Series([1] * x.shape[0])) assert ds.take(1) == [{"id": 0, "foo": 1}] ds = ray.data.range(5).add_column("foo", lambda x: x["id"] + 1) assert ds.take(1) == [{"id": 0, "foo": 1}] - ds = ray.data.range(5).add_column("id", 
lambda x: x["id"] + 1) - assert ds.take(2) == [{"id": 1}, {"id": 2}] + # Adding a column that is already there should result in an error + with pytest.raises( + ray.exceptions.UserCodeException, + match="Trying to add an existing column with name id", + ): + ds = ray.data.range(5).add_column("id", lambda x: x["id"] + 1) + assert ds.take(2) == [{"id": 1}, {"id": 2}] + + # Adding a column in the wrong format should result in an error + with pytest.raises( + ray.exceptions.UserCodeException, match="For pandas batch " "format" + ): + ds = ray.data.range(5).add_column("id", lambda x: [1], batch_format="pandas") + assert ds.take(2) == [{"id": 1}, {"id": 2}] with pytest.raises(ValueError): ds = ray.data.range(5).add_column("id", 0) + # Test that an invalid batch_format raises an error + with pytest.raises(ValueError): + ray.data.range(5).add_column("foo", lambda x: x["id"] + 1, batch_format="foo") + @pytest.mark.parametrize("names", (["foo", "bar"], {"spam": "foo", "ham": "bar"})) def test_rename_columns(ray_start_regular_shared, names): @@ -362,14 +444,15 @@ def test_drop_columns(ray_start_regular_shared, tmp_path): assert ds.drop_columns(["col2"]).take(1) == [{"col1": 1, "col3": 3}] assert ds.drop_columns(["col1", "col3"]).take(1) == [{"col2": 2}] assert ds.drop_columns([]).take(1) == [{"col1": 1, "col2": 2, "col3": 3}] - assert ds.drop_columns(["col1", "col2", "col3"]).take(1) == [{}] - assert ds.drop_columns(["col1", "col1", "col2", "col1"]).take(1) == [ - {"col3": 3} - ] + assert ds.drop_columns(["col1", "col2", "col3"]).take(1) == [] + assert ds.drop_columns(["col1", "col2"]).take(1) == [{"col3": 3}] # Test dropping non-existent column with pytest.raises((UserCodeException, KeyError)): ds.drop_columns(["dummy_col", "col1", "col2"]).materialize() + with pytest.raises(ValueError, match="drop_columns expects unique column names"): + ds1.drop_columns(["col1", "col2", "col2"]) + def test_select_columns(ray_start_regular_shared): # Test pandas and arrow diff --git a/python/ray/data/tests/test_mongo.py b/python/ray/data/tests/test_mongo.py index 97828aae6bea..eb03aab39f80 100644 --- a/python/ray/data/tests/test_mongo.py +++ b/python/ray/data/tests/test_mongo.py @@ -93,13 +93,13 @@ def test_read_write_mongo(ray_start_regular_shared, start_mongo): override_num_blocks=2, ) assert ds._block_num_rows() == [3, 2] - assert str(ds) == ( - "Dataset(\n" - " num_rows=5,\n" - " schema={_id: fixed_size_binary[12], float_field: double, " - "int_field: int32}\n" - ")" - ) + assert ds.count() == 5 + assert ds.schema().names == ["_id", "float_field", "int_field"] + # We are not testing the datatype of _id here, because it varies per platform + assert ds.schema().types[1:] == [ + pa.float64(), + pa.int32(), + ] assert df.equals(ds.drop_columns(["_id"]).to_pandas()) # Read a subset of the collection. @@ -111,13 +111,8 @@ def test_read_write_mongo(ray_start_regular_shared, start_mongo): override_num_blocks=2, ) assert ds._block_num_rows() == [2, 1] - assert str(ds) == ( - "Dataset(\n" - " num_rows=3,\n" - " schema={_id: fixed_size_binary[12], float_field: double, " - "int_field: int32}\n" - ")" - ) + assert ds.count() == 3 + assert ds.schema().names == ["_id", "float_field", "int_field"] df[df["int_field"] < 3].equals(ds.drop_columns(["_id"]).to_pandas()) # Read with auto-tuned parallelism. 
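As a reader aid, a minimal end-to-end sketch of the reworked `add_column` API exercised by the `test_map.py` changes above; it assumes a Ray build that includes this patch:

```python
import pyarrow.compute as pc

import ray

ds = ray.data.range(5)
# With batch_format="pyarrow", the UDF receives a pyarrow.Table and must
# return a pyarrow Array or ChunkedArray holding the new column's values.
ds = ds.add_column("id_plus_one", lambda t: pc.add(t["id"], 1), batch_format="pyarrow")
print(ds.take(2))  # [{'id': 0, 'id_plus_one': 1}, {'id': 1, 'id_plus_one': 2}]
```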
@@ -126,13 +121,14 @@ def test_read_write_mongo(ray_start_regular_shared, start_mongo): database=foo_db, collection=foo_collection, ) - assert str(ds) == ( - "Dataset(\n" - " num_rows=5,\n" - " schema={_id: fixed_size_binary[12], float_field: double, " - "int_field: int32}\n" - ")" - ) + + assert ds.count() == 5 + assert ds.schema().names == ["_id", "float_field", "int_field"] + # We are not testing the datatype of _id here, because it varies per platform + assert ds.schema().types[1:] == [ + pa.float64(), + pa.int32(), + ] assert df.equals(ds.drop_columns(["_id"]).to_pandas()) # Read with a parallelism larger than number of rows. @@ -142,13 +138,14 @@ def test_read_write_mongo(ray_start_regular_shared, start_mongo): collection=foo_collection, override_num_blocks=1000, ) - assert str(ds) == ( - "Dataset(\n" - " num_rows=5,\n" - " schema={_id: fixed_size_binary[12], float_field: double, " - "int_field: int32}\n" - ")" - ) + + assert ds.count() == 5 + assert ds.schema().names == ["_id", "float_field", "int_field"] + # We are not testing the datatype of _id here, because it varies per platform + assert ds.schema().types[1:] == [ + pa.float64(), + pa.int32(), + ] assert df.equals(ds.drop_columns(["_id"]).to_pandas()) # Add a column and then write back to MongoDB. diff --git a/python/ray/data/tests/test_object_gc.py b/python/ray/data/tests/test_object_gc.py index b56c4542618d..2b1947e0498d 100644 --- a/python/ray/data/tests/test_object_gc.py +++ b/python/ray/data/tests/test_object_gc.py @@ -1,6 +1,7 @@ import sys import threading +import pandas as pd import pytest import ray @@ -107,7 +108,7 @@ def test_tf_iteration(shutdown_only): # The size of dataset is 500*(80*80*4)*8B, about 100MB. ds = ray.data.range_tensor( 500, shape=(80, 80, 4), override_num_blocks=100 - ).add_column("label", lambda x: 1) + ).add_column("label", lambda df: pd.Series([1] * len(df))) # to_tf check_to_tf_no_spill(ctx, ds.map(lambda x: x)) From 8d358857a100c859dd3cf9f0236ba3c37a95594c Mon Sep 17 00:00:00 2001 From: dentiny Date: Thu, 21 Nov 2024 19:53:14 -0800 Subject: [PATCH 055/107] [core] C++ update on gcs health check (#48843) As titled. 
Signed-off-by: hjiang
---
 .../gcs_server/gcs_health_check_manager.cc    | 50 ++++++++++++-------
 .../gcs/gcs_server/gcs_health_check_manager.h | 28 ++++++++---
 src/ray/util/thread_checker.cc                |  2 +-
 src/ray/util/thread_checker.h                 |  4 +-
 4 files changed, 55 insertions(+), 29 deletions(-)

diff --git a/src/ray/gcs/gcs_server/gcs_health_check_manager.cc b/src/ray/gcs/gcs_server/gcs_health_check_manager.cc
index 2cefb37f6b7c..efa279daf56a 100644
--- a/src/ray/gcs/gcs_server/gcs_health_check_manager.cc
+++ b/src/ray/gcs/gcs_server/gcs_health_check_manager.cc
@@ -14,15 +14,17 @@
 #include "ray/gcs/gcs_server/gcs_health_check_manager.h"

+#include 
+
 #include "ray/stats/metric.h"
+
 DEFINE_stats(health_check_rpc_latency_ms,
              "Latency of rpc request for health check.",
              (),
              ({1, 10, 100, 1000, 10000}, ),
              ray::stats::HISTOGRAM);

-namespace ray {
-namespace gcs {
+namespace ray::gcs {

 GcsHealthCheckManager::GcsHealthCheckManager(
     instrumented_io_context &io_service,
@@ -38,17 +40,18 @@ GcsHealthCheckManager::GcsHealthCheckManager(
       period_ms_(period_ms),
       failure_threshold_(failure_threshold) {
   RAY_CHECK(on_node_death_callback != nullptr);
-  RAY_CHECK(initial_delay_ms >= 0);
-  RAY_CHECK(timeout_ms >= 0);
-  RAY_CHECK(period_ms >= 0);
-  RAY_CHECK(failure_threshold >= 0);
+  RAY_CHECK_GE(initial_delay_ms, 0);
+  RAY_CHECK_GE(timeout_ms, 0);
+  RAY_CHECK_GE(period_ms, 0);
+  RAY_CHECK_GE(failure_threshold, 0);
 }

-GcsHealthCheckManager::~GcsHealthCheckManager() {}
+GcsHealthCheckManager::~GcsHealthCheckManager() = default;

 void GcsHealthCheckManager::RemoveNode(const NodeID &node_id) {
   io_service_.dispatch(
       [this, node_id]() {
+        thread_checker_.IsOnSameThread();
         auto iter = health_check_contexts_.find(node_id);
         if (iter == health_check_contexts_.end()) {
           return;
         }
@@ -61,6 +64,7 @@
 void GcsHealthCheckManager::FailNode(const NodeID &node_id) {
   RAY_LOG(WARNING).WithField(node_id) << "Node is dead because the health check failed.";
+  thread_checker_.IsOnSameThread();
   auto iter = health_check_contexts_.find(node_id);
   if (iter != health_check_contexts_.end()) {
     on_node_death_callback_(node_id);
@@ -69,7 +73,9 @@
 }

 std::vector<NodeID> GcsHealthCheckManager::GetAllNodes() const {
+  thread_checker_.IsOnSameThread();
   std::vector<NodeID> nodes;
+  nodes.reserve(health_check_contexts_.size());
   for (const auto &[node_id, _] : health_check_contexts_) {
     nodes.emplace_back(node_id);
   }
@@ -84,24 +90,29 @@ void GcsHealthCheckManager::HealthCheckContext::StartHealthCheck() {
   new (&context_) grpc::ClientContext();
   response_.Clear();

-  auto deadline =
-      std::chrono::system_clock::now() + std::chrono::milliseconds(manager_->timeout_ms_);
+  const auto now = std::chrono::system_clock::now();
+  auto deadline = now + std::chrono::milliseconds(manager_->timeout_ms_);
   context_.set_deadline(deadline);
   stub_->async()->Check(
-      &context_, &request_, &response_, [this, now = absl::Now()](::grpc::Status status) {
+      &context_,
+      &request_,
+      &response_,
+      [this, start = absl::FromChrono(now)](::grpc::Status status) {
        // This callback is done in gRPC's thread pool.
        STATS_health_check_rpc_latency_ms.Record(
-            absl::ToInt64Milliseconds(absl::Now() - now));
+            absl::ToInt64Milliseconds(absl::Now() - start));
         manager_->io_service_.post(
             [this, status]() {
               if (stopped_) {
                 delete this;
                 return;
               }
-              RAY_LOG(DEBUG) << "Health check status: " << int(response_.status());
+              RAY_LOG(DEBUG) << "Health check status: "
+                             << HealthCheckResponse_ServingStatus_Name(
+                                    response_.status());

               if (status.ok() && response_.status() == HealthCheckResponse::SERVING) {
-                // Health check passed
+                // Health check passed.
                 health_check_remaining_ = manager_->failure_threshold_;
               } else {
                 --health_check_remaining_;
@@ -118,6 +129,9 @@
                 delete this;
               } else {
                 // Do another health check.
+                //
+                // TODO(hjiang): Able to reduce a few health checks based on known
+                // resource usage communication between GCS and raylet.
                 timer_.expires_from_now(
                     boost::posix_time::milliseconds(manager_->period_ms_));
                 timer_.async_wait([this](auto) { StartHealthCheck(); });
@@ -132,13 +146,13 @@
 void GcsHealthCheckManager::HealthCheckContext::Stop() { stopped_ = true; }

 void GcsHealthCheckManager::AddNode(const NodeID &node_id,
                                     std::shared_ptr<grpc::Channel> channel) {
   io_service_.dispatch(
-      [this, channel, node_id]() {
-        RAY_CHECK(health_check_contexts_.count(node_id) == 0);
+      [this, channel = std::move(channel), node_id]() {
+        thread_checker_.IsOnSameThread();
         auto context = new HealthCheckContext(this, channel, node_id);
-        health_check_contexts_.emplace(std::make_pair(node_id, context));
+        auto [_, is_new] = health_check_contexts_.emplace(node_id, context);
+        RAY_CHECK(is_new);
       },
       "GcsHealthCheckManager::AddNode");
 }

-}  // namespace gcs
-}  // namespace ray
+}  // namespace ray::gcs

diff --git a/src/ray/gcs/gcs_server/gcs_health_check_manager.h b/src/ray/gcs/gcs_server/gcs_health_check_manager.h
index d877a217d803..a6e36d82972a 100644
--- a/src/ray/gcs/gcs_server/gcs_health_check_manager.h
+++ b/src/ray/gcs/gcs_server/gcs_health_check_manager.h
@@ -16,16 +16,19 @@
 #include 

+#include 
+#include 
+#include 
+#include 
+
 #include "absl/container/flat_hash_map.h"
 #include "ray/common/asio/instrumented_io_context.h"
 #include "ray/common/id.h"
 #include "ray/common/ray_config.h"
+#include "ray/util/thread_checker.h"
 #include "src/proto/grpc/health/v1/health.grpc.pb.h"

-class GcsHealthCheckManagerTest;
-
-namespace ray {
-namespace gcs {
+namespace ray::gcs {

 /// GcsHealthCheckManager is used to track the healthiness of the nodes in the ray
 /// cluster. The health check is done in pull based way, which means this module will send
@@ -35,6 +38,9 @@ namespace gcs {
 /// node will be removed from GcsHealthCheckManager. The node can be added into this class
 /// later. Although the same node id is not supposed to be reused in ray cluster, this is
 /// not enforced in this class.
+///
+/// All IO operations happen on the same thread, which is managed by the passed-in
+/// [io_service].
 /// TODO (iycheng): Move the GcsHealthCheckManager to ray/common.
 class GcsHealthCheckManager {
  public:
@@ -58,24 +64,27 @@ class GcsHealthCheckManager {
   ~GcsHealthCheckManager();

   /// Start to track the healthiness of a node.
+  /// Safe to call from non-io-context threads.
   ///
   /// \param node_id The id of the node.
   /// \param channel The gRPC channel to the node.
   void AddNode(const NodeID &node_id, std::shared_ptr<grpc::Channel> channel);

   /// Stop tracking the healthiness of a node.
+  /// Safe to call from non-io-context threads.
   ///
   /// \param node_id The id of the node to stop tracking.
   void RemoveNode(const NodeID &node_id);

-  /// Return all the nodes monitored.
+  /// Return all the nodes monitored and alive.
+  /// Notice: have to invoke from io-context thread.
   ///
   /// \return A list of node id which are being monitored by this class.
   std::vector<NodeID> GetAllNodes() const;

  private:
   /// Fail a node when health check failed. It'll stop the health checking and
-  /// call on_node_death_callback.
+  /// call `on_node_death_callback_`.
   ///
   /// \param node_id The id of the node.
   void FailNode(const NodeID &node_id);
@@ -133,8 +142,12 @@ class GcsHealthCheckManager {
   std::function<void(const NodeID &)> on_node_death_callback_;

   /// The context of the health check for each nodes.
+  /// Only living nodes are bookkept, while failed ones will be removed.
   absl::flat_hash_map<NodeID, HealthCheckContext *> health_check_contexts_;

+  /// Checker to make sure there's no concurrent access for node addition and removal.
+  const ThreadChecker thread_checker_;
+
   /// The delay for the first health check request.
   const int64_t initial_delay_ms_;
   /// Timeout for each health check request.
@@ -145,5 +158,4 @@ class GcsHealthCheckManager {
   const int64_t failure_threshold_;
 };

-}  // namespace gcs
-}  // namespace ray
+}  // namespace ray::gcs

diff --git a/src/ray/util/thread_checker.cc b/src/ray/util/thread_checker.cc
index 73a0072c7575..0f33dfd4c712 100644
--- a/src/ray/util/thread_checker.cc
+++ b/src/ray/util/thread_checker.cc
@@ -16,7 +16,7 @@
 namespace ray {

-bool ThreadChecker::IsOnSameThread() {
+bool ThreadChecker::IsOnSameThread() const {
   const auto cur_id = std::this_thread::get_id();
   std::thread::id uninitialized_id;
   return thread_id_.compare_exchange_strong(uninitialized_id, cur_id) ||

diff --git a/src/ray/util/thread_checker.h b/src/ray/util/thread_checker.h
index 622624859b75..2e3dcf1ed3df 100644
--- a/src/ray/util/thread_checker.h
+++ b/src/ray/util/thread_checker.h
@@ -34,10 +34,10 @@ class ThreadChecker {
  public:
   // Return true at initialization, or current invocation happens on the same thread as
   // initialization.
-  bool IsOnSameThread();
+  bool IsOnSameThread() const;

  private:
-  std::atomic<std::thread::id> thread_id_{};
+  mutable std::atomic<std::thread::id> thread_id_{};
 };

 }  // namespace ray

From be0bbd0f37d9d7666b9bed504971f6a349097ab9 Mon Sep 17 00:00:00 2001
From: Ruiyang Wang <56065503+rynewang@users.noreply.github.com>
Date: Thu, 21 Nov 2024 23:13:23 -0800
Subject: [PATCH 056/107] [core] Erases PopWorkerRequest on registration timeout. (#48858)

This fixes a bug from #47694. When a PopWorkerRequest times out, we should
erase it from bookkeeping and invoke the callback with a
`WorkerPendingRegistration` status. However, only the latter is done and we
forgot to erase, causing segfaults on a second call.

Signed-off-by: Ruiyang Wang
---
 src/ray/raylet/worker_pool.cc      |  3 ++-
 src/ray/raylet/worker_pool_test.cc | 13 +++++++++++++
 2 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/src/ray/raylet/worker_pool.cc b/src/ray/raylet/worker_pool.cc
index 943ea89b24b5..eda893e8efbe 100644
--- a/src/ray/raylet/worker_pool.cc
+++ b/src/ray/raylet/worker_pool.cc
@@ -593,7 +593,8 @@ void WorkerPool::MonitorPopWorkerRequestForRegistration(
   auto &requests = state.pending_registration_requests;
   auto it = std::find(requests.begin(), requests.end(), pop_worker_request);
   if (it != requests.end()) {
-    // Fail the task...
+    // Pop and fail the task...
+    requests.erase(it);
     PopWorkerStatus status = PopWorkerStatus::WorkerPendingRegistration;
     PopWorkerCallbackAsync(pop_worker_request->callback, nullptr, status);
   }

diff --git a/src/ray/raylet/worker_pool_test.cc b/src/ray/raylet/worker_pool_test.cc
index d945384b7277..0da18903fdfe 100644
--- a/src/ray/raylet/worker_pool_test.cc
+++ b/src/ray/raylet/worker_pool_test.cc
@@ -1928,6 +1928,19 @@ TEST_F(WorkerPoolDriverRegisteredTest, PopWorkerStatus) {
   worker_pool_->ClearProcesses();
 }

+TEST_F(WorkerPoolDriverRegisteredTest, WorkerPendingRegistrationErasesRequest) {
+  std::shared_ptr<WorkerInterface> popped_worker;
+  PopWorkerStatus status;
+  auto task_spec = ExampleTaskSpec();
+  // Create a task without push worker. It should time out (WorkerPendingRegistration).
+  popped_worker = worker_pool_->PopWorkerSync(task_spec, false, &status);
+  ASSERT_EQ(popped_worker, nullptr);
+  ASSERT_EQ(status, PopWorkerStatus::WorkerPendingRegistration);
+  // The request should be erased.
+  ASSERT_EQ(worker_pool_->NumPendingRegistrationRequests(), 0);
+  worker_pool_->ClearProcesses();
+}
+
 TEST_F(WorkerPoolDriverRegisteredTest, TestIOWorkerFailureAndSpawn) {
   std::unordered_set<std::shared_ptr<WorkerInterface>> spill_worker_set;
   auto spill_worker_callback =

From f1316e8af8c8930cd062f78b1d46f7933f744bf8 Mon Sep 17 00:00:00 2001
From: Sven Mika
Date: Fri, 22 Nov 2024 08:14:04 +0100
Subject: [PATCH 057/107] [RLlib] APPO (new API stack) enhancements vol 04: Smaller cleanups and preparations. (#48844)

---
 rllib/algorithms/algorithm.py             | 22 +++++++++++---
 rllib/algorithms/algorithm_config.py      | 21 ++++++++++---
 rllib/algorithms/appo/appo.py             |  3 +-
 rllib/algorithms/impala/impala.py         | 34 +++++++++++----------
 rllib/core/learner/learner.py             |  6 ++--
 rllib/core/learner/learner_group.py       |  8 ++---
 rllib/core/learner/torch/torch_learner.py | 37 ++++++++++++-----------
 rllib/utils/metrics/__init__.py           |  3 ++
 rllib/utils/metrics/metrics_logger.py     |  4 +--
 rllib/utils/metrics/stats.py              | 24 ++++++---------
 10 files changed, 96 insertions(+), 66 deletions(-)

diff --git a/rllib/algorithms/algorithm.py b/rllib/algorithms/algorithm.py
index bf01aff4be89..7ed33f749713 100644
--- a/rllib/algorithms/algorithm.py
+++ b/rllib/algorithms/algorithm.py
@@ -129,6 +129,7 @@
     NUM_ENV_STEPS_TRAINED_LIFETIME,
     NUM_EPISODES,
     NUM_EPISODES_LIFETIME,
+    NUM_TRAINING_STEP_CALLS_PER_ITERATION,
     RESTORE_WORKERS_TIMER,
     RESTORE_EVAL_WORKERS_TIMER,
     SYNCH_ENV_CONNECTOR_STATES_TIMER,
@@ -3215,7 +3216,17 @@ def _run_one_training_iteration(self) -> Tuple[ResultDict, "TrainIterCtx"]:
             "one single result dict per training iteration."
         )

-        # Only here, reduce the results into a single result dict.
+        # TODO (sven): Resolve this metric through log_time's future
+        # ability to compute throughput.
+        self.metrics.log_value(
+            NUM_TRAINING_STEP_CALLS_PER_ITERATION,
+            1,
+            reduce="sum",
+            clear_on_reduce=True,
+        )
+
+        # Only here (at the end of the iteration), reduce the results into a single
+        # result dict.
         return self.metrics.reduce(), train_iter_ctx

     def _run_one_evaluation(
@@ -3527,13 +3538,16 @@ def _compile_iteration_results_new_api_stack(self, *, train_results, eval_result
         # Fault tolerance stats.
results[FAULT_TOLERANCE_STATS] = { "num_healthy_workers": self.env_runner_group.num_healthy_remote_workers(), - "num_in_flight_async_reqs": ( - self.env_runner_group.num_in_flight_async_reqs() - ), "num_remote_worker_restarts": ( self.env_runner_group.num_remote_worker_restarts() ), } + results["env_runner_group"] = { + "actor_manager_num_outstanding_async_reqs": ( + self.env_runner_group.num_in_flight_async_reqs() + ), + } + # Resolve all `Stats` leafs by peeking (get their reduced values). return tree.map_structure( lambda s: s.peek() if isinstance(s, Stats) else s, diff --git a/rllib/algorithms/algorithm_config.py b/rllib/algorithms/algorithm_config.py index 542240a00dac..051a0307367a 100644 --- a/rllib/algorithms/algorithm_config.py +++ b/rllib/algorithms/algorithm_config.py @@ -357,6 +357,11 @@ def __init__(self, algo_class: Optional[type] = None): self.num_gpus_per_learner = 0 self.num_cpus_per_learner = 1 self.local_gpu_idx = 0 + # TODO (sven): This probably works even without any restriction + # (allowing for any arbitrary number of requests in-flight). Test with + # 3 first, then with unlimited, and if both show the same behavior on + # an async algo, remove this restriction entirely. + self.max_requests_in_flight_per_learner = 3 # `self.training()` self.gamma = 0.99 @@ -1798,15 +1803,16 @@ def env_runners( synchronously in turn with their update step (e.g., PPO or DQN). Not relevant for any algos that sample asynchronously, such as APPO or IMPALA. - max_requests_in_flight_per_env_runner: Max number of inflight requests - to each EnvRunner worker. See the FaultTolerantActorManager class for - more details. + max_requests_in_flight_per_env_runner: Max number of in-flight requests + to each EnvRunner (actor)). See the + `ray.rllib.utils.actor_manager.FaultTolerantActorManager` class for more + details. Tuning these values is important when running experiments with large sample batches, where there is the risk that the object store may fill up, causing spilling of objects to disk. This can cause any asynchronous requests to become very slow, making your experiment run slowly as well. You can inspect the object store during your experiment - via a call to Ray memory on your head node, and by using the Ray + via a call to `ray memory` on your head node, and by using the Ray dashboard. If you're seeing that the object store is filling up, turn down the number of remote requests in flight or enable compression. sample_collector: For the old API stack only. The SampleCollector class to @@ -2110,6 +2116,7 @@ def learners( num_cpus_per_learner: Optional[Union[float, int]] = NotProvided, num_gpus_per_learner: Optional[Union[float, int]] = NotProvided, local_gpu_idx: Optional[int] = NotProvided, + max_requests_in_flight_per_learner: Optional[int] = NotProvided, ): """Sets LearnerGroup and Learner worker related configurations. @@ -2135,6 +2142,10 @@ def learners( an index into the available CUDA devices. For example if `os.environ["CUDA_VISIBLE_DEVICES"] = "1"` and `local_gpu_idx=0`, RLlib uses the GPU with ID=1 on the node. + max_requests_in_flight_per_learner: Max number of in-flight requests + to each Learner (actor)). See the + `ray.rllib.utils.actor_manager.FaultTolerantActorManager` class for more + details. Returns: This updated AlgorithmConfig object. 
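For illustration, a short usage sketch of the new `learners()` option added in this hunk; the values are placeholders, with 3 matching the default this patch sets in `AlgorithmConfig.__init__`:

```python
from ray.rllib.algorithms.impala import IMPALAConfig

config = (
    IMPALAConfig()
    .environment("CartPole-v1")
    # Cap the number of in-flight update requests to each Learner actor.
    .learners(num_learners=1, max_requests_in_flight_per_learner=3)
)
algo = config.build()
```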
@@ -2147,6 +2158,8 @@ def learners( self.num_gpus_per_learner = num_gpus_per_learner if local_gpu_idx is not NotProvided: self.local_gpu_idx = local_gpu_idx + if max_requests_in_flight_per_learner is not NotProvided: + self.max_requests_in_flight_per_learner = max_requests_in_flight_per_learner return self diff --git a/rllib/algorithms/appo/appo.py b/rllib/algorithms/appo/appo.py index b27e96b02d16..5302f7540248 100644 --- a/rllib/algorithms/appo/appo.py +++ b/rllib/algorithms/appo/appo.py @@ -118,7 +118,6 @@ def __init__(self, algo_class=None): # Override some of IMPALAConfig's default values with APPO-specific values. self.num_env_runners = 2 - self.min_time_s_per_iteration = 10 self.target_network_update_freq = 2 self.broadcast_interval = 1 self.grad_clip = 40.0 @@ -233,6 +232,8 @@ def training( if use_critic != DEPRECATED_VALUE: deprecation_warning( old="use_critic", + help="`use_critic` no longer supported! APPO always uses a value " + "function (critic).", error=True, ) diff --git a/rllib/algorithms/impala/impala.py b/rllib/algorithms/impala/impala.py index 69f140fda775..862c2cf84388 100644 --- a/rllib/algorithms/impala/impala.py +++ b/rllib/algorithms/impala/impala.py @@ -80,12 +80,16 @@ class IMPALAConfig(AlgorithmConfig): .. testcode:: from ray.rllib.algorithms.impala import IMPALAConfig - config = IMPALAConfig() - config = config.training(lr=0.0003, train_batch_size_per_learner=512) - config = config.learners(num_learners=1) - config = config.env_runners(num_env_runners=1) + + config = ( + IMPALAConfig() + .environment("CartPole-v1") + .env_runners(num_env_runners=1) + .training(lr=0.0003, train_batch_size_per_learner=512) + .learners(num_learners=1) + ) # Build a Algorithm object from the config and run 1 training iteration. - algo = config.build(env="CartPole-v1") + algo = config.build() algo.train() del algo @@ -94,16 +98,14 @@ class IMPALAConfig(AlgorithmConfig): from ray.rllib.algorithms.impala import IMPALAConfig from ray import air from ray import tune - config = IMPALAConfig() - # Update the config object. - config = config.training( - lr=tune.grid_search([0.0001, 0.0002]), grad_clip=20.0 + config = ( + IMPALAConfig() + .environment("CartPole-v1") + .env_runners(num_env_runners=1) + .training(lr=tune.grid_search([0.0001, 0.0002]), grad_clip=20.0) + .learners(num_learners=1) ) - config = config.learners(num_learners=1) - config = config.env_runners(num_env_runners=1) - # Set the config object's env. - config = config.environment(env="CartPole-v1") # Run with tune. tune.Tuner( "IMPALA", @@ -146,8 +148,6 @@ def __init__(self, algo_class=None): self.broadcast_interval = 1 self.num_aggregation_workers = 0 self.num_gpu_loader_threads = 8 - # IMPALA takes care of its own EnvRunner (weights, connector, metrics) synching. - self._dont_auto_sync_env_runner_states = True self.grad_clip = 40.0 # Note: Only when using enable_rl_module_and_learner=True can the clipping mode @@ -168,6 +168,9 @@ def __init__(self, algo_class=None): # __sphinx_doc_end__ # fmt: on + # IMPALA takes care of its own EnvRunner (weights, connector, metrics) synching. 
+ self._dont_auto_sync_env_runner_states = True + self.lr_schedule = None # @OldAPIStack self.entropy_coeff_schedule = None # @OldAPIStack self.num_multi_gpu_tower_stacks = 1 # @OldAPIstack @@ -181,7 +184,6 @@ def __init__(self, algo_class=None): self.epsilon = 0.1 # @OldAPIstack self._separate_vf_optimizer = False # @OldAPIstack self._lr_vf = 0.0005 # @OldAPIstack - self.train_batch_size = 500 # @OldAPIstack self.num_gpus = 1 # @OldAPIstack self._tf_policy_handles_more_than_one_loss = True # @OldAPIstack diff --git a/rllib/core/learner/learner.py b/rllib/core/learner/learner.py index 2b58743a52df..5c3aa575ea9e 100644 --- a/rllib/core/learner/learner.py +++ b/rllib/core/learner/learner.py @@ -1134,6 +1134,9 @@ def _finalize_fn(batch: Dict[str, numpy.ndarray]) -> Dict[str, Any]: fwd_out, loss_per_module, tensor_metrics = self._update( batch.policy_batches ) + # Convert logged tensor metrics (logged during tensor-mode of MetricsLogger) + # to actual (numpy) values. + self.metrics.tensors_to_numpy(tensor_metrics) self._set_slicing_by_batch_id(batch, value=False) # If `num_iters` is reached break and return. @@ -1143,9 +1146,6 @@ def _finalize_fn(batch: Dict[str, numpy.ndarray]) -> Dict[str, Any]: logger.info( f"===> [Learner {id(self)}] number of iterations run in this epoch: {i}" ) - # Convert logged tensor metrics (logged during tensor-mode of MetricsLogger) - # to actual (numpy) values. - self.metrics.tensors_to_numpy(tensor_metrics) # Log all individual RLModules' loss terms and its registered optimizers' # current learning rates. diff --git a/rllib/core/learner/learner_group.py b/rllib/core/learner/learner_group.py index 31994fa5dcce..a80de4cd2e76 100644 --- a/rllib/core/learner/learner_group.py +++ b/rllib/core/learner/learner_group.py @@ -171,11 +171,9 @@ def __init__( self._worker_manager = FaultTolerantActorManager( self._workers, - # TODO (sven): This probably works even without any restriction - # (allowing for any arbitrary number of requests in-flight). Test with - # 3 first, then with unlimited, and if both show the same behavior on - # an async algo, remove this restriction entirely. - max_remote_requests_in_flight_per_actor=3, + max_remote_requests_in_flight_per_actor=( + self.config.max_requests_in_flight_per_learner + ), ) # Counters for the tags for asynchronous update requests that are # in-flight. Used for keeping trakc of and grouping together the results of diff --git a/rllib/core/learner/torch/torch_learner.py b/rllib/core/learner/torch/torch_learner.py index c8cdff3b8ca3..e04a8b491c9a 100644 --- a/rllib/core/learner/torch/torch_learner.py +++ b/rllib/core/learner/torch/torch_learner.py @@ -147,23 +147,7 @@ def _uncompiled_update( # Activate tensor-mode on our MetricsLogger. self.metrics.activate_tensor_mode() - # Log off-policy'ness of this update. 
- off_policyness = { - (mid, DIFF_NUM_GRAD_UPDATES_VS_SAMPLER_POLICY): ( - (self._weights_seq_no - module_batch[WEIGHTS_SEQ_NO]).float() - ) - for mid, module_batch in batch.items() - if WEIGHTS_SEQ_NO in module_batch - } - for key in off_policyness.keys(): - mid = key[0] - if Columns.LOSS_MASK not in batch[mid]: - off_policyness[key] = torch.mean(off_policyness[key]) - else: - mask = batch[mid][Columns.LOSS_MASK] - num_valid = torch.sum(mask) - off_policyness[key] = torch.sum(off_policyness[key][mask]) / num_valid - self.metrics.log_dict(off_policyness, window=1) + self._compute_off_policyness(batch) fwd_out = self.module.forward_train(batch) loss_per_module = self.compute_losses(fwd_out=fwd_out, batch=batch) @@ -630,6 +614,25 @@ def _map_module_to_device(self, module: MultiRLModule) -> None: if isinstance(module[key], torch.nn.Module): module[key].to(self._device) + def _compute_off_policyness(self, batch): + # Log off-policy'ness of this batch wrt the current weights. + off_policyness = { + (mid, DIFF_NUM_GRAD_UPDATES_VS_SAMPLER_POLICY): ( + (self._weights_seq_no - module_batch[WEIGHTS_SEQ_NO]).float() + ) + for mid, module_batch in batch.items() + if WEIGHTS_SEQ_NO in module_batch + } + for key in off_policyness.keys(): + mid = key[0] + if Columns.LOSS_MASK not in batch[mid]: + off_policyness[key] = torch.mean(off_policyness[key]) + else: + mask = batch[mid][Columns.LOSS_MASK] + num_valid = torch.sum(mask) + off_policyness[key] = torch.sum(off_policyness[key][mask]) / num_valid + self.metrics.log_dict(off_policyness, window=1) + @override(Learner) def _get_tensor_variable( self, value, dtype=None, trainable=False diff --git a/rllib/utils/metrics/__init__.py b/rllib/utils/metrics/__init__.py index d9a67933998c..5e7e3ad071bd 100644 --- a/rllib/utils/metrics/__init__.py +++ b/rllib/utils/metrics/__init__.py @@ -11,6 +11,9 @@ NUM_TRAINABLE_PARAMETERS = "num_trainable_parameters" NUM_NON_TRAINABLE_PARAMETERS = "num_non_trainable_parameters" +# Number of times `training_step()` was called in one iteration. +NUM_TRAINING_STEP_CALLS_PER_ITERATION = "num_training_step_calls_per_iteration" + # Counters for sampling, sampling (on eval workers) and # training steps (env- and agent steps). MEAN_NUM_EPISODE_LISTS_RECEIVED = "mean_num_episode_lists_received" diff --git a/rllib/utils/metrics/metrics_logger.py b/rllib/utils/metrics/metrics_logger.py index f1f6f4cc12e8..276d6891b14a 100644 --- a/rllib/utils/metrics/metrics_logger.py +++ b/rllib/utils/metrics/metrics_logger.py @@ -937,10 +937,10 @@ def deactivate_tensor_mode(self): assert self.tensor_mode self._tensor_mode = False # Return all logged tensors (logged during the tensor-mode phase). - ret = {key: self._get_key(key).peek() for key in self._tensor_keys} + logged_tensors = {key: self._get_key(key).peek() for key in self._tensor_keys} # Clear out logged tensor keys. self._tensor_keys.clear() - return ret + return logged_tensors def tensors_to_numpy(self, tensor_metrics): """Converts all previously logged and returned tensors back to numpy values.""" diff --git a/rllib/utils/metrics/stats.py b/rllib/utils/metrics/stats.py index 1929cec2b063..b13b7edb3b48 100644 --- a/rllib/utils/metrics/stats.py +++ b/rllib/utils/metrics/stats.py @@ -216,7 +216,8 @@ def __init__( # Code to execute when exiting a with-context. self._on_exit = on_exit - # On each `.reduce()` call, we store the result of this call in + # On each `.reduce()` call, we store the result of this call in hist[0] and the + # previous `reduce()` result in hist[1]. 
self._hist = (0, 0) def push(self, value) -> None: @@ -274,6 +275,7 @@ def peek(self, *, previous: bool = False) -> Any: The result of reducing the internal values list (or the previously computed reduced result, if `previous` is True). """ + # Return previously reduced value. if previous: return self._hist[1] return self._reduced_values()[0] @@ -355,10 +357,8 @@ def merge_in_parallel(self, *others: "Stats") -> None: # - Thereby always reducing across the different Stats objects' at the # current index. # - The resulting reduced value (across Stats at current index) is then - # repeated AND - # added to the new merged-values list n times (where n is the number of - # Stats, across - # which we merge). + # repeated AND added to the new merged-values list n times (where n is + # the number of Stats, across which we merge). # - The merged-values list is reversed. # Here: # index -1: [3, 6] -> [4.5, 4.5] @@ -381,13 +381,11 @@ def merge_in_parallel(self, *others: "Stats") -> None: stats.merge_in_parallel(stats1, stats2) # Same here: Fill new merged-values list: # - Start with index -1, moving to the start. - # - Thereby always reducing across the different Stats objects' at the + # - Thereby always reduce across the different Stats objects' at the # current index. # - The resulting reduced value (across Stats at current index) is then - # repeated AND - # added to the new merged-values list n times (where n is the number of - # Stats, across - # which we merge). + # repeated AND added to the new merged-values list n times (where n is the + # number of Stats, across which we merge). # - The merged-values list is reversed. # Here: # index -1: [3, 6] -> [6, 6] @@ -420,7 +418,7 @@ def merge_in_parallel(self, *others: "Stats") -> None: # Parallel-merge two (reduce=sum) stats with no window. # Note that when reduce="sum", we do NOT reduce across the indices of the - # parallel + # parallel values. stats = Stats(reduce="sum") stats1 = Stats(reduce="sum") stats1.push(1) @@ -435,7 +433,6 @@ def merge_in_parallel(self, *others: "Stats") -> None: # index -2: [0, 5] -> [3, 6, 0, 5] # index -3: [2, 4] -> [3, 6, 0, 5, 2, 4] # index -4: [1] -> [3, 6, 0, 5, 2, 4, 1] - # STOP after merged list contains >= 4 items (window size) # reverse: [1, 4, 2, 5, 0, 6, 3] stats.merge_in_parallel(stats1, stats2) check(stats.values, [1, 4, 2, 5, 0, 6, 3]) @@ -443,7 +440,7 @@ def merge_in_parallel(self, *others: "Stats") -> None: # Parallel-merge two "concat" (reduce=None) stats with no window. # Note that when reduce=None, we do NOT reduce across the indices of the - # parallel + # parallel values. stats = Stats(reduce=None, window=float("inf"), clear_on_reduce=True) stats1 = Stats(reduce=None, window=float("inf"), clear_on_reduce=True) stats1.push(1) @@ -586,7 +583,6 @@ def from_state(state: Dict[str, Any]) -> "Stats": def similar_to( other: "Stats", init_value: Optional[Any] = None, - prev_values: Optional[Tuple[Any, Any]] = None, ) -> "Stats": """Returns a new Stats object that's similar to `other`. From 64454cc04672b11a692d7b71c93f1c38e70e9391 Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Fri, 22 Nov 2024 02:35:40 -0800 Subject: [PATCH 058/107] Revert "[Data] Fix pandas memory calculation." 
(#48866) Reverts ray-project/ray#46939 for https://github.com/ray-project/ray/issues/48865 https://github.com/ray-project/ray/issues/48864 https://github.com/ray-project/ray/issues/48863 https://github.com/ray-project/ray/issues/48862 --- python/ray/data/_internal/pandas_block.py | 70 +------ python/ray/data/tests/test_pandas_block.py | 229 +-------------------- 2 files changed, 3 insertions(+), 296 deletions(-) diff --git a/python/ray/data/_internal/pandas_block.py b/python/ray/data/_internal/pandas_block.py index ff1686c1b355..119469b46c1b 100644 --- a/python/ray/data/_internal/pandas_block.py +++ b/python/ray/data/_internal/pandas_block.py @@ -1,6 +1,5 @@ import collections import heapq -import sys from typing import ( TYPE_CHECKING, Any, @@ -295,74 +294,7 @@ def num_rows(self) -> int: return self._table.shape[0] def size_bytes(self) -> int: - from pandas.api.types import is_object_dtype - - from ray.data.extensions import TensorArrayElement, TensorDtype - - pd = lazy_import_pandas() - - def get_deep_size(obj): - """Calculates the memory size of objects, - including nested objects using an iterative approach.""" - seen = set() - total_size = 0 - objects = collections.deque([obj]) - while objects: - current = objects.pop() - - # Skip interning-eligible immutable objects - if isinstance(current, (str, bytes, int, float)): - size = sys.getsizeof(current) - total_size += size - continue - - # Check if the object has been seen before - if id(current) in seen: - continue - seen.add(id(current)) - - try: - size = sys.getsizeof(current) - except TypeError: - size = 0 - total_size += size - - # Handle specific cases - if isinstance(current, np.ndarray): - total_size += current.nbytes - size # Avoid double counting - elif isinstance(current, pd.DataFrame): - total_size += ( - current.memory_usage(index=True, deep=True).sum() - size - ) - elif isinstance(current, (list, tuple, set)): - objects.extend(current) - elif isinstance(current, dict): - objects.extend(current.keys()) - objects.extend(current.values()) - elif isinstance(current, TensorArrayElement): - objects.extend(current.to_numpy()) - return total_size - - # Get initial memory usage including deep introspection - memory_usage = self._table.memory_usage(index=True, deep=True) - - # TensorDtype for ray.air.util.tensor_extensions.pandas.TensorDtype - object_need_check = (TensorDtype,) - # Handle object columns separately - for column in self._table.columns: - # Check pandas object dtype and the extenstion dtype - if is_object_dtype(self._table[column].dtype) or isinstance( - self._table[column].dtype, object_need_check - ): - column_memory = 0 - for element in self._table[column]: - column_memory += get_deep_size(element) - memory_usage[column] = column_memory - - # Sum up total memory usage - total_memory_usage = memory_usage.sum() - - return int(total_memory_usage) + return int(self._table.memory_usage(index=True, deep=True).sum()) def _zip(self, acc: BlockAccessor) -> "pandas.DataFrame": r = self.to_pandas().copy(deep=False) diff --git a/python/ray/data/tests/test_pandas_block.py b/python/ray/data/tests/test_pandas_block.py index 5e83abbf04d7..4585d0e2a133 100644 --- a/python/ray/data/tests/test_pandas_block.py +++ b/python/ray/data/tests/test_pandas_block.py @@ -1,10 +1,4 @@ -import pickle -import random -import sys - -import numpy as np import pandas as pd -import pyarrow as pa import pytest import ray @@ -54,226 +48,7 @@ def fn2(batch): assert isinstance(block, pd.DataFrame) -class TestSizeBytes: - def 
test_small(ray_start_regular_shared): - animals = ["Flamingo", "Centipede"] - block = pd.DataFrame({"animals": animals}) - - block_accessor = PandasBlockAccessor.for_block(block) - bytes_size = block_accessor.size_bytes() - - # check that memory usage is within 10% of the size_bytes - # For strings, Pandas seems to be fairly accurate, so let's use that. - memory_usage = block.memory_usage(index=True, deep=True).sum() - assert bytes_size == pytest.approx(memory_usage, rel=0.1), ( - bytes_size, - memory_usage, - ) - - def test_large_str(ray_start_regular_shared): - animals = [ - random.choice(["alligator", "crocodile", "centipede", "flamingo"]) - for i in range(100_000) - ] - block = pd.DataFrame({"animals": animals}) - block["animals"] = block["animals"].astype("string") - - block_accessor = PandasBlockAccessor.for_block(block) - bytes_size = block_accessor.size_bytes() - - memory_usage = block.memory_usage(index=True, deep=True).sum() - assert bytes_size == pytest.approx(memory_usage, rel=0.1), ( - bytes_size, - memory_usage, - ) - - def test_large_str_object(ray_start_regular_shared): - """Note - this test breaks if you refactor/move the list of animals.""" - num = 100_000 - animals = [ - random.choice(["alligator", "crocodile", "centipede", "flamingo"]) - for i in range(num) - ] - block = pd.DataFrame({"animals": animals}) - - block_accessor = PandasBlockAccessor.for_block(block) - bytes_size = block_accessor.size_bytes() - - memory_usage = sum([sys.getsizeof(animal) for animal in animals]) - - assert bytes_size == pytest.approx(memory_usage, rel=0.1), ( - bytes_size, - memory_usage, - ) - - def test_large_floats(ray_start_regular_shared): - animals = [random.random() for i in range(100_000)] - block = pd.DataFrame({"animals": animals}) - - block_accessor = PandasBlockAccessor.for_block(block) - bytes_size = block_accessor.size_bytes() - - memory_usage = pickle.dumps(block).__sizeof__() - # check that memory usage is within 10% of the size_bytes - assert bytes_size == pytest.approx(memory_usage, rel=0.1), ( - bytes_size, - memory_usage, - ) - - def test_bytes_object(ray_start_regular_shared): - def generate_data(batch): - for _ in range(8): - yield {"data": [[b"\x00" * 128 * 1024 * 128]]} - - ds = ( - ray.data.range(1, override_num_blocks=1) - .map_batches(generate_data, batch_size=1) - .map_batches(lambda batch: batch, batch_format="pandas") - ) - - true_value = 128 * 1024 * 128 * 8 - for bundle in ds.iter_internal_ref_bundles(): - size = bundle.size_bytes() - # assert that true_value is within 10% of bundle.size_bytes() - assert size == pytest.approx(true_value, rel=0.1), ( - size, - true_value, - ) - - def test_nested_numpy(ray_start_regular_shared): - size = 1024 - rows = 1_000 - data = [ - np.random.randint(size=size, low=0, high=100, dtype=np.int8) - for _ in range(rows) - ] - df = pd.DataFrame({"data": data}) - - block_accessor = PandasBlockAccessor.for_block(df) - block_size = block_accessor.size_bytes() - true_value = rows * size - assert block_size == pytest.approx(true_value, rel=0.1), ( - block_size, - true_value, - ) - - def test_nested_objects(ray_start_regular_shared): - size = 10 - rows = 10_000 - lists = [[random.randint(0, 100) for _ in range(size)] for _ in range(rows)] - data = {"lists": lists} - block = pd.DataFrame(data) - - block_accessor = PandasBlockAccessor.for_block(block) - bytes_size = block_accessor.size_bytes() - - # List overhead + 10 integers per list - true_size = rows * ( - sys.getsizeof([random.randint(0, 100) for _ in range(size)]) + size * 28 - ) - - 
assert bytes_size == pytest.approx(true_size, rel=0.1), ( - bytes_size, - true_size, - ) - - def test_mixed_types(ray_start_regular_shared): - rows = 10_000 - - data = { - "integers": [random.randint(0, 100) for _ in range(rows)], - "floats": [random.random() for _ in range(rows)], - "strings": [ - random.choice(["apple", "banana", "cherry"]) for _ in range(rows) - ], - "object": [b"\x00" * 128 for _ in range(rows)], - } - block = pd.DataFrame(data) - block_accessor = PandasBlockAccessor.for_block(block) - bytes_size = block_accessor.size_bytes() - - # Manually calculate the size - int_size = rows * 8 - float_size = rows * 8 - str_size = sum(sys.getsizeof(string) for string in data["strings"]) - object_size = rows * sys.getsizeof(b"\x00" * 128) - - true_size = int_size + float_size + str_size + object_size - assert bytes_size == pytest.approx(true_size, rel=0.1), (bytes_size, true_size) - - def test_nested_lists_strings(ray_start_regular_shared): - rows = 5_000 - nested_lists = ["a"] * 3 + ["bb"] * 4 + ["ccc"] * 3 - data = { - "nested_lists": [nested_lists for _ in range(rows)], - } - block = pd.DataFrame(data) - block_accessor = PandasBlockAccessor.for_block(block) - bytes_size = block_accessor.size_bytes() - - # Manually calculate the size - list_overhead = sys.getsizeof(block["nested_lists"].iloc[0]) + sum( - [sys.getsizeof(x) for x in nested_lists] - ) - true_size = rows * list_overhead - assert bytes_size == pytest.approx(true_size, rel=0.1), (bytes_size, true_size) - - @pytest.mark.parametrize("size", [10, 1024]) - def test_multi_level_nesting(ray_start_regular_shared, size): - rows = 1_000 - data = { - "complex": [ - {"list": [np.random.rand(size)], "value": {"key": "val"}} - for _ in range(rows) - ], - } - block = pd.DataFrame(data) - block_accessor = PandasBlockAccessor.for_block(block) - bytes_size = block_accessor.size_bytes() - - numpy_size = np.random.rand(size).nbytes - - values = ["list", "value", "key", "val"] - str_size = sum([sys.getsizeof(v) for v in values]) - - list_ref_overhead = sys.getsizeof([np.random.rand(size)]) - - dict_overhead1 = sys.getsizeof({"key": "val"}) - - dict_overhead3 = sys.getsizeof( - {"list": [np.random.rand(size)], "value": {"key": "val"}} - ) - - true_size = ( - numpy_size + str_size + list_ref_overhead + dict_overhead1 + dict_overhead3 - ) * rows - assert bytes_size == pytest.approx(true_size, rel=0.15), ( - bytes_size, - true_size, - ) - - def test_boolean(ray_start_regular_shared): - data = [random.choice([True, False, None]) for _ in range(100_000)] - block = pd.DataFrame({"flags": pd.Series(data, dtype="boolean")}) - block_accessor = PandasBlockAccessor.for_block(block) - bytes_size = block_accessor.size_bytes() - - # No object case - true_size = block.memory_usage(index=True, deep=True).sum() - assert bytes_size == pytest.approx(true_size, rel=0.1), (bytes_size, true_size) - - def test_arrow(ray_start_regular_shared): - data = [ - random.choice(["alligator", "crocodile", "flamingo"]) for _ in range(50_000) - ] - arrow_dtype = pd.ArrowDtype(pa.string()) - block = pd.DataFrame({"animals": pd.Series(data, dtype=arrow_dtype)}) - block_accessor = PandasBlockAccessor.for_block(block) - bytes_size = block_accessor.size_bytes() - - true_size = block.memory_usage(index=True, deep=True).sum() - assert bytes_size == pytest.approx(true_size, rel=0.1), (bytes_size, true_size) - - if __name__ == "__main__": + import sys + sys.exit(pytest.main(["-v", __file__])) From b9cd63a5947accfc86e9cf5711ca204120fd9228 Mon Sep 17 00:00:00 2001 From: Dongjun Na Date: 
Sat, 23 Nov 2024 02:41:50 +0900
Subject: [PATCH 059/107] [Feat][Dashboard] Add global UTC timezone button in
 navbar with local storage (#48510)

Signed-off-by: Dongjun Na
---
 python/ray/dashboard/client/src/App.tsx       | 50 +-
 .../client/src/common/formatUtils.ts          | 14 +-
 .../dashboard/client/src/common/timezone.ts   | 794 ++++++++++++++++++
 .../client/src/components/SearchComponent.tsx | 174 +++-
 .../client/src/pages/layout/MainNavLayout.tsx | 15 +-
 .../pages/metrics/Metrics.component.test.tsx  |  4 +
 .../client/src/pages/metrics/Metrics.tsx      |  6 +-
 .../client/src/pages/metrics/utils.ts         | 24 +
 .../overview/OverviewPage.component.test.tsx  |  2 +
 .../overview/cards/ClusterUtilizationCard.tsx |  5 +-
 .../pages/overview/cards/NodeCountCard.tsx    |  5 +-
 ...eploymentMetricsSection.component.test.tsx |  4 +
 .../serve/ServeDeploymentMetricsSection.tsx   | 11 +-
 .../ServeMetricsSection.component.test.tsx    |  4 +
 .../src/pages/serve/ServeMetricsSection.tsx   | 11 +-
 .../dashboard/client/src/util/test-utils.tsx  |  2 +
 python/ray/dashboard/http_server_head.py      | 13 +
 python/ray/dashboard/timezone_utils.py        | 56 ++
 18 files changed, 1173 insertions(+), 21 deletions(-)
 create mode 100644 python/ray/dashboard/client/src/common/timezone.ts
 create mode 100644 python/ray/dashboard/timezone_utils.py

diff --git a/python/ray/dashboard/client/src/App.tsx b/python/ray/dashboard/client/src/App.tsx
index bc30e06ccc0c..84b4a2c88e7b 100644
--- a/python/ray/dashboard/client/src/App.tsx
+++ b/python/ray/dashboard/client/src/App.tsx
@@ -23,7 +23,12 @@ import {
   StateApiLogViewerPage,
 } from "./pages/log/Logs";
 import { Metrics } from "./pages/metrics";
-import { DashboardUids, getMetricsInfo } from "./pages/metrics/utils";
+import {
+  DashboardUids,
+  getMetricsInfo,
+  getTimeZoneInfo,
+  TimezoneInfo,
+} from "./pages/metrics/utils";
 import Nodes, { ClusterMainPageLayout } from "./pages/node";
 import { ClusterDetailInfoPage } from "./pages/node/ClusterDetailInfoPage";
 import { ClusterLayout } from "./pages/node/ClusterLayout";
@@ -91,6 +96,14 @@ export type GlobalContextType = {
   * The name of the current selected datasource.
   */
  dashboardDatasource: string | undefined;
+  /**
+   * The timezone set on the ray cluster.
+   */
+  serverTimeZone: TimezoneInfo | null | undefined;
+  /**
+   * The globally selected current time zone.
+   */
+  currentTimeZone: string | undefined;
 };
 export const GlobalContext = React.createContext({
   nodeMap: {},
@@ -102,10 +115,15 @@
   prometheusHealth: undefined,
   sessionName: undefined,
   dashboardDatasource: undefined,
+  serverTimeZone: undefined,
+  currentTimeZone: undefined,
 });

 const App = () => {
-  const [context, setContext] = useState({
+  const [currentTimeZone, setCurrentTimeZone] = useState();
+  const [context, setContext] = useState<
+    Omit
+  >({
     nodeMap: {},
     nodeMapByIp: {},
     namespaceMap: {},
@@ -115,6 +133,7 @@
     prometheusHealth: undefined,
     sessionName: undefined,
     dashboardDatasource: undefined,
+    serverTimeZone: undefined,
   });
   useEffect(() => {
     getNodeList().then((res) => {
@@ -158,11 +177,36 @@
     doEffect();
   }, []);

+  useEffect(() => {
+    const updateTimezone = async () => {
+      // Sets the initial timezone to localStorage value if it exists
+      const storedTimeZone = localStorage.getItem("timezone");
+      if (storedTimeZone) {
+        setCurrentTimeZone(storedTimeZone);
+      }
+
+      // Fetch the server time zone.
+ const tzInfo = await getTimeZoneInfo(); + + const timeZone = + storedTimeZone || + tzInfo?.value || + Intl.DateTimeFormat().resolvedOptions().timeZone; + + setCurrentTimeZone(timeZone); + setContext((existingContext) => ({ + ...existingContext, + serverTimeZone: tzInfo, + })); + }; + updateTimezone(); + }, []); + return ( - + diff --git a/python/ray/dashboard/client/src/common/formatUtils.ts b/python/ray/dashboard/client/src/common/formatUtils.ts index 03610fd82553..4b0c9a24a7d1 100644 --- a/python/ray/dashboard/client/src/common/formatUtils.ts +++ b/python/ray/dashboard/client/src/common/formatUtils.ts @@ -1,4 +1,9 @@ import dayjs from "dayjs"; +import timezone from "dayjs/plugin/timezone"; +import utc from "dayjs/plugin/utc"; + +dayjs.extend(utc); +dayjs.extend(timezone); export const formatByteAmount = ( amount: number, @@ -60,5 +65,10 @@ export const formatValue = (rawFloat: number) => { } }; -export const formatDateFromTimeMs = (time: number) => - dayjs(time).format("YYYY/MM/DD HH:mm:ss"); +export const formatTimeZone = (UTC: string) => { + dayjs.tz.setDefault(UTC); +}; + +export const formatDateFromTimeMs = (time: number) => { + return dayjs.utc(time).tz().format("YYYY/MM/DD HH:mm:ss"); +}; diff --git a/python/ray/dashboard/client/src/common/timezone.ts b/python/ray/dashboard/client/src/common/timezone.ts new file mode 100644 index 000000000000..d02b785550fe --- /dev/null +++ b/python/ray/dashboard/client/src/common/timezone.ts @@ -0,0 +1,794 @@ +export const timezones = [ + { + utc: "GMT-12:00", + value: "Etc/GMT+12", + group: "Pacific", + country: "International Date Line West", + }, + { + utc: "GMT-11:00", + value: "Pacific/Pago_Pago", + group: "Pacific", + country: "American Samoa", + }, + { + utc: "GMT-11:00", + value: "Pacific/Midway", + group: "Pacific", + country: "Midway Island", + }, + { + utc: "GMT-10:00", + value: "Pacific/Honolulu", + group: "Pacific", + country: "Hawaii", + }, + { + utc: "GMT-09:00", + value: "America/Anchorage", + group: "America", + country: "Alaska", + }, + { + utc: "GMT-08:00", + value: "America/Los_Angeles", + group: "America", + country: "Pacific Time (US & Canada)", + }, + { + utc: "GMT-08:00", + value: "America/Tijuana", + group: "America", + country: "Tijuana", + }, + { + utc: "GMT-07:00", + value: "America/Phoenix", + group: "America", + country: "Arizona", + }, + { + utc: "GMT-07:00", + value: "America/Mazatlan", + group: "America", + country: "Mazatlan", + }, + { + utc: "GMT-07:00", + value: "America/Denver", + group: "America", + country: "Mountain Time (US & Canada)", + }, + { + utc: "GMT-06:00", + value: "America/Guatemala", + group: "America", + country: "Central America", + }, + { + utc: "GMT-06:00", + value: "America/Chicago", + group: "America", + country: "Central Time (US & Canada)", + }, + { + utc: "GMT-06:00", + value: "America/Chihuahua", + group: "America", + country: "Chihuahua", + }, + { + utc: "GMT-06:00", + value: "America/Guadalajara", + group: "America", + country: "Guadalajara", + }, + { + utc: "GMT-06:00", + value: "America/Mexico_City", + group: "America", + country: "Mexico City", + }, + { + utc: "GMT-06:00", + value: "America/Monterrey", + group: "America", + country: "Monterrey", + }, + { + utc: "GMT-06:00", + value: "America/Regina", + group: "America", + country: "Saskatchewan", + }, + { + utc: "GMT-05:00", + value: "America/Bogota", + group: "America", + country: "Bogota", + }, + { + utc: "GMT-05:00", + value: "America/New_York", + group: "America", + country: "Eastern Time (US & Canada)", + }, + { + utc: 
"GMT-05:00", + value: "America/Indiana/Indianapolis", + group: "America", + country: "Indiana (East)", + }, + { + utc: "GMT-05:00", + value: "America/Lima", + group: "America", + country: "Lima", + }, + { + utc: "GMT-05:00", + value: "America/Guayaquil", + group: "America", + country: "Quito", + }, + { + utc: "GMT-04:00", + value: "America/Halifax", + group: "America", + country: "Atlantic Time (Canada)", + }, + { + utc: "GMT-04:00", + value: "America/Caracas", + group: "America", + country: "Caracas", + }, + { + utc: "GMT-04:00", + value: "America/Guyana", + group: "America", + country: "Georgetown", + }, + { + utc: "GMT-04:00", + value: "America/La_Paz", + group: "America", + country: "La Paz", + }, + { + utc: "GMT-04:00", + value: "America/Puerto_Rico", + group: "America", + country: "Puerto Rico", + }, + { + utc: "GMT-04:00", + value: "America/Santiago", + group: "America", + country: "Santiago", + }, + { + utc: "GMT-03:30", + value: "America/St_Johns", + group: "America", + country: "Newfoundland", + }, + { + utc: "GMT-03:00", + value: "America/Sao_Paulo", + group: "America", + country: "Brasilia", + }, + { + utc: "GMT-03:00", + value: "America/Argentina/Buenos_Aires", + group: "America", + country: "Buenos Aires", + }, + { + utc: "GMT-03:00", + value: "America/Montevideo", + group: "America", + country: "Montevideo", + }, + { + utc: "GMT-02:00", + value: "America/Godthab", + group: "America", + country: "Greenland", + }, + { + utc: "GMT-02:00", + value: "Etc/GMT+2", + group: "Atlantic", + country: "Mid-Atlantic", + }, + { + utc: "GMT-01:00", + value: "Atlantic/Azores", + group: "Atlantic", + country: "Azores", + }, + { + utc: "GMT-01:00", + value: "Atlantic/Cape_Verde", + group: "Atlantic", + country: "Cape Verde Is.", + }, + { + utc: "GMT+00:00", + value: "Europe/London", + group: "Europe", + country: "Edinburgh", + }, + { + utc: "GMT+00:00", + value: "Europe/Lisbon", + group: "Europe", + country: "Lisbon", + }, + { + utc: "GMT+00:00", + value: "Europe/London", + group: "Europe", + country: "London", + }, + { + utc: "GMT+00:00", + value: "Africa/Monrovia", + group: "Africa", + country: "Monrovia", + }, + { + utc: "GMT+01:00", + value: "Europe/Amsterdam", + group: "Europe", + country: "Amsterdam", + }, + { + utc: "GMT+01:00", + value: "Europe/Belgrade", + group: "Europe", + country: "Belgrade", + }, + { + utc: "GMT+01:00", + value: "Europe/Berlin", + group: "Europe", + country: "Berlin", + }, + { + utc: "GMT+01:00", + value: "Europe/Brussels", + group: "Europe", + country: "Brussels", + }, + { + utc: "GMT+01:00", + value: "Europe/Budapest", + group: "Europe", + country: "Budapest", + }, + { + utc: "GMT+01:00", + value: "Europe/Copenhagen", + group: "Europe", + country: "Copenhagen", + }, + { + utc: "GMT+01:00", + value: "Europe/Madrid", + group: "Europe", + country: "Madrid", + }, + { + utc: "GMT+01:00", + value: "Europe/Paris", + group: "Europe", + country: "Paris", + }, + { + utc: "GMT+01:00", + value: "Europe/Prague", + group: "Europe", + country: "Prague", + }, + { + utc: "GMT+01:00", + value: "Europe/Rome", + group: "Europe", + country: "Rome", + }, + { + utc: "GMT+01:00", + value: "Europe/Sarajevo", + group: "Europe", + country: "Sarajevo", + }, + { + utc: "GMT+01:00", + value: "Europe/Stockholm", + group: "Europe", + country: "Stockholm", + }, + { + utc: "GMT+01:00", + value: "Europe/Vienna", + group: "Europe", + country: "Vienna", + }, + { + utc: "GMT+01:00", + value: "Europe/Warsaw", + group: "Europe", + country: "Warsaw", + }, + { + utc: "GMT+01:00", + value: 
"Africa/Lagos", + group: "Africa", + country: "West Central Africa", + }, + { + utc: "GMT+02:00", + value: "Asia/Amman", + group: "Asia", + country: "Amman", + }, + { + utc: "GMT+02:00", + value: "Europe/Athens", + group: "Europe", + country: "Athens", + }, + { + utc: "GMT+02:00", + value: "Asia/Beirut", + group: "Asia", + country: "Beirut", + }, + { + utc: "GMT+02:00", + value: "Europe/Bucharest", + group: "Europe", + country: "Bucharest", + }, + { + utc: "GMT+02:00", + value: "Africa/Cairo", + group: "Africa", + country: "Cairo", + }, + { + utc: "GMT+02:00", + value: "Africa/Harare", + group: "Africa", + country: "Harare", + }, + { + utc: "GMT+02:00", + value: "Europe/Helsinki", + group: "Europe", + country: "Helsinki", + }, + { + utc: "GMT+02:00", + value: "Europe/Istanbul", + group: "Europe", + country: "Istanbul", + }, + { + utc: "GMT+02:00", + value: "Asia/Jerusalem", + group: "Asia", + country: "Jerusalem", + }, + { + utc: "GMT+02:00", + value: "Europe/Kiev", + group: "Europe", + country: "Kyiv", + }, + { + utc: "GMT+02:00", + value: "Europe/Minsk", + group: "Europe", + country: "Minsk", + }, + { + utc: "GMT+02:00", + value: "Europe/Riga", + group: "Europe", + country: "Riga", + }, + { + utc: "GMT+02:00", + value: "Europe/Sofia", + group: "Europe", + country: "Sofia", + }, + { + utc: "GMT+02:00", + value: "Europe/Tallinn", + group: "Europe", + country: "Tallinn", + }, + { + utc: "GMT+02:00", + value: "Europe/Vilnius", + group: "Europe", + country: "Vilnius", + }, + { + utc: "GMT+03:00", + value: "Asia/Baghdad", + group: "Asia", + country: "Baghdad", + }, + { + utc: "GMT+03:00", + value: "Asia/Kuwait", + group: "Asia", + country: "Kuwait", + }, + { + utc: "GMT+03:00", + value: "Europe/Moscow", + group: "Europe", + country: "Moscow", + }, + { + utc: "GMT+03:00", + value: "Africa/Nairobi", + group: "Africa", + country: "Nairobi", + }, + { + utc: "GMT+03:00", + value: "Asia/Riyadh", + group: "Asia", + country: "Riyadh", + }, + { + utc: "GMT+03:30", + value: "Asia/Tehran", + group: "Asia", + country: "Tehran", + }, + { + utc: "GMT+04:00", + value: "Asia/Dubai", + group: "Asia", + country: "Abu Dhabi", + }, + { + utc: "GMT+04:00", + value: "Asia/Baku", + group: "Asia", + country: "Baku", + }, + { + utc: "GMT+04:00", + value: "Asia/Muscat", + group: "Asia", + country: "Muscat", + }, + { + utc: "GMT+04:00", + value: "Asia/Tbilisi", + group: "Asia", + country: "Tbilisi", + }, + { + utc: "GMT+04:00", + value: "Asia/Yerevan", + group: "Asia", + country: "Yerevan", + }, + { + utc: "GMT+04:30", + value: "Asia/Kabul", + group: "Asia", + country: "Kabul", + }, + { + utc: "GMT+05:00", + value: "Asia/Karachi", + group: "Asia", + country: "Islamabad", + }, + { + utc: "GMT+05:00", + value: "Asia/Tashkent", + group: "Asia", + country: "Tashkent", + }, + { + utc: "GMT+05:30", + value: "Asia/Kolkata", + group: "Asia", + country: "Chennai", + }, + { + utc: "GMT+05:30", + value: "Asia/Kolkata", + group: "Asia", + country: "Kolkata", + }, + { + utc: "GMT+05:30", + value: "Asia/Kolkata", + group: "Asia", + country: "Mumbai", + }, + { + utc: "GMT+05:30", + value: "Asia/Kolkata", + group: "Asia", + country: "New Delhi", + }, + { + utc: "GMT+05:45", + value: "Asia/Kathmandu", + group: "Asia", + country: "Kathmandu", + }, + { + utc: "GMT+06:00", + value: "Asia/Almaty", + group: "Asia", + country: "Almaty", + }, + { + utc: "GMT+06:00", + value: "Asia/Dhaka", + group: "Asia", + country: "Dhaka", + }, + { + utc: "GMT+06:00", + value: "Asia/Yekaterinburg", + group: "Asia", + country: "Yekaterinburg", + }, + { + utc: 
"GMT+06:30", + value: "Asia/Yangon", + group: "Asia", + country: "Yangon (Rangoon)", + }, + { + utc: "GMT+07:00", + value: "Asia/Bangkok", + group: "Asia", + country: "Bangkok", + }, + { + utc: "GMT+07:00", + value: "Asia/Hanoi", + group: "Asia", + country: "Hanoi", + }, + { + utc: "GMT+07:00", + value: "Asia/Jakarta", + group: "Asia", + country: "Jakarta", + }, + { + utc: "GMT+07:00", + value: "Asia/Novosibirsk", + group: "Asia", + country: "Novosibirsk", + }, + { + utc: "GMT+08:00", + value: "Asia/Shanghai", + group: "Asia", + country: "Beijing", + }, + { + utc: "GMT+08:00", + value: "Asia/Chongqing", + group: "Asia", + country: "Chongqing", + }, + { + utc: "GMT+08:00", + value: "Asia/Hong_Kong", + group: "Asia", + country: "Hong Kong", + }, + { + utc: "GMT+08:00", + value: "Asia/Krasnoyarsk", + group: "Asia", + country: "Krasnoyarsk", + }, + { + utc: "GMT+08:00", + value: "Asia/Kuala_Lumpur", + group: "Asia", + country: "Kuala Lumpur", + }, + { + utc: "GMT+08:00", + value: "Australia/Perth", + group: "Australia", + country: "Perth", + }, + { + utc: "GMT+08:00", + value: "Asia/Singapore", + group: "Asia", + country: "Singapore", + }, + { + utc: "GMT+08:00", + value: "Asia/Taipei", + group: "Asia", + country: "Taipei", + }, + { + utc: "GMT+08:00", + value: "Asia/Ulaanbaatar", + group: "Asia", + country: "Ulaan Bataar", + }, + { + utc: "GMT+08:00", + value: "Asia/Urumqi", + group: "Asia", + country: "Urumqi", + }, + { + utc: "GMT+09:00", + value: "Asia/Irkutsk", + group: "Asia", + country: "Irkutsk", + }, + { + utc: "GMT+09:00", + value: "Asia/Tokyo", + group: "Asia", + country: "Osaka", + }, + { + utc: "GMT+09:00", + value: "Asia/Tokyo", + group: "Asia", + country: "Sapporo", + }, + { + utc: "GMT+09:00", + value: "Asia/Seoul", + group: "Asia", + country: "Seoul", + }, + { + utc: "GMT+09:00", + value: "Asia/Tokyo", + group: "Asia", + country: "Tokyo", + }, + { + utc: "GMT+09:30", + value: "Australia/Adelaide", + group: "Australia", + country: "Adelaide", + }, + { + utc: "GMT+09:30", + value: "Australia/Darwin", + group: "Australia", + country: "Darwin", + }, + { + utc: "GMT+10:00", + value: "Australia/Brisbane", + group: "Australia", + country: "Brisbane", + }, + { + utc: "GMT+10:00", + value: "Australia/Sydney", + group: "Australia", + country: "Canberra", + }, + { + utc: "GMT+10:00", + value: "Pacific/Guam", + group: "Pacific", + country: "Guam", + }, + { + utc: "GMT+10:00", + value: "Australia/Hobart", + group: "Australia", + country: "Hobart", + }, + { + utc: "GMT+10:00", + value: "Australia/Melbourne", + group: "Australia", + country: "Melbourne", + }, + { + utc: "GMT+10:00", + value: "Pacific/Port_Moresby", + group: "Pacific", + country: "Port Moresby", + }, + { + utc: "GMT+10:00", + value: "Australia/Sydney", + group: "Australia", + country: "Sydney", + }, + { + utc: "GMT+11:00", + value: "Asia/Magadan", + group: "Asia", + country: "Magadan", + }, + { + utc: "GMT+11:00", + value: "Pacific/Noumea", + group: "Pacific", + country: "New Caledonia", + }, + { + utc: "GMT+11:00", + value: "Pacific/Guadalcanal", + group: "Pacific", + country: "Solomon Is.", + }, + { + utc: "GMT+12:00", + value: "Pacific/Auckland", + group: "Pacific", + country: "Auckland", + }, + { + utc: "GMT+12:00", + value: "Pacific/Fiji", + group: "Pacific", + country: "Fiji", + }, + { + utc: "GMT+12:00", + value: "Asia/Kamchatka", + group: "Asia", + country: "Kamchatka", + }, + { + utc: "GMT+12:00", + value: "Pacific/Majuro", + group: "Pacific", + country: "Marshall Is.", + }, + { + utc: "GMT+12:00", + value: 
"Pacific/Auckland", + group: "Pacific", + country: "Wellington", + }, + { + utc: "GMT+13:00", + value: "Pacific/Tongatapu", + group: "Pacific", + country: "Nuku'alofa", + }, + { + utc: "GMT+13:00", + value: "Pacific/Apia", + group: "Pacific", + country: "Samoa", + }, + { + utc: "GMT+13:00", + value: "Pacific/Fakaofo", + group: "Pacific", + country: "Tokelau Is.", + }, +]; diff --git a/python/ray/dashboard/client/src/components/SearchComponent.tsx b/python/ray/dashboard/client/src/components/SearchComponent.tsx index 8cfc426c6818..495c3d9fc0a2 100644 --- a/python/ray/dashboard/client/src/components/SearchComponent.tsx +++ b/python/ray/dashboard/client/src/components/SearchComponent.tsx @@ -1,6 +1,18 @@ import { SearchOutlined } from "@mui/icons-material"; -import { InputAdornment, MenuItem, TextField } from "@mui/material"; -import React from "react"; +import { + Autocomplete, + Box, + Divider, + InputAdornment, + MenuItem, + TextField, + Typography, +} from "@mui/material"; + +import React, { useEffect, useState } from "react"; +import { formatTimeZone } from "../common/formatUtils"; +import { timezones } from "../common/timezone"; +import { TimezoneInfo } from "../pages/metrics/utils"; export const SearchInput = ({ label, @@ -77,3 +89,161 @@ export const SearchSelect = ({ ); }; + +export const SearchTimezone = ({ + serverTimeZone, + currentTimeZone, +}: { + serverTimeZone?: TimezoneInfo | null; + currentTimeZone?: string; +}) => { + const [timezone, setTimezone] = useState(""); + + useEffect(() => { + if (currentTimeZone !== undefined) { + formatTimeZone(currentTimeZone); + setTimezone(currentTimeZone); + } + }, [currentTimeZone]); + + const handleTimezoneChange = (value: string) => { + localStorage.setItem("timezone", value); + window.location.reload(); + }; + + const options = timezones + .map((x) => x) // Create a copy + .sort((a, b) => a.group.localeCompare(b.group)); + options.unshift({ + value: "Etc/UTC", + utc: "GMT+00:00", + group: "System", + country: "Coordinated Universal Time", + }); + + const browserTimezone = Intl.DateTimeFormat().resolvedOptions().timeZone; + + const browserOffset = (() => { + const offset = new Date().getTimezoneOffset(); + const sign = offset < 0 ? 
"+" : "-"; + const hours = Math.abs(Math.floor(offset / 60)) + .toString() + .padStart(2, "0"); + const minutes = Math.abs(offset % 60) + .toString() + .padStart(2, "0"); + return `GMT${sign}${hours}:${minutes}`; + })(); + + if (browserOffset) { + options.unshift({ + value: browserTimezone, + utc: browserOffset, + group: "System", + country: "Browser Time", + }); + } + + const serverUtc = + serverTimeZone?.value && + timezones.find((t) => t.value === serverTimeZone.value)?.utc; + if (serverUtc) { + options.unshift({ + value: serverTimeZone.value, + utc: serverUtc, + group: "System", + country: "Dashboard Server Timezone", + }); + } + + const curUtc = timezones.find((t) => t.value === timezone)?.utc; + return ( + { + if (newValue) { + handleTimezoneChange(newValue.value); + } + }} + options={options} + getOptionLabel={(option) => option.value} + groupBy={(option) => option.group} + filterOptions={(options, { inputValue }) => + options.filter( + (item) => + item.value.includes(inputValue) || + item.utc.includes(inputValue) || + item.country.toLowerCase().includes(inputValue.toLowerCase()) || + item.group.toLowerCase().includes(inputValue.toLowerCase()), + ) + } + renderOption={(props, option) => ( + + + {option.country} + + + {option.value} + + + + {option.utc} + + + )} + renderInput={(params) => ( + + )} + renderGroup={(params) => ( +
+          {params.group}
+          {params.children}
+      )}
+      slotProps={{
+        paper: {
+          style: {
+            width: "400px",
+          },
+        },
+        popper: {
+          placement: "bottom-end",
+          style: {
+            width: "fit-content",
+          },
+        },
+      }}
+    />
+  );
+};
diff --git a/python/ray/dashboard/client/src/pages/layout/MainNavLayout.tsx b/python/ray/dashboard/client/src/pages/layout/MainNavLayout.tsx
index 256c830cd29f..ea261a454b75 100644
--- a/python/ray/dashboard/client/src/pages/layout/MainNavLayout.tsx
+++ b/python/ray/dashboard/client/src/pages/layout/MainNavLayout.tsx
@@ -3,6 +3,7 @@ import React, { useContext } from "react";
 import { RiBookMarkLine, RiFeedbackLine } from "react-icons/ri/";
 import { Outlet, Link as RouterLink } from "react-router-dom";
 import { GlobalContext } from "../../App";
+import { SearchTimezone } from "../../components/SearchComponent";
 import Logo from "../../logo.svg";
 import { MainNavContext, useMainNavState } from "./mainNavContext";

@@ -106,7 +107,8 @@ const NAV_ITEMS = [
 const MainNavBar = () => {
   const { mainNavPageHierarchy } = useContext(MainNavContext);
   const rootRouteId = mainNavPageHierarchy[0]?.id;
-  const { metricsContextLoaded, grafanaHost } = useContext(GlobalContext);
+  const { metricsContextLoaded, grafanaHost, serverTimeZone, currentTimeZone } =
+    useContext(GlobalContext);

   let navItems = NAV_ITEMS;
   if (!metricsContextLoaded || grafanaHost === "DISABLED") {
@@ -179,6 +181,17 @@
    + + + + + ); }; diff --git a/python/ray/dashboard/client/src/pages/metrics/Metrics.component.test.tsx b/python/ray/dashboard/client/src/pages/metrics/Metrics.component.test.tsx index c2cf646b743d..a7e3e06f0cea 100644 --- a/python/ray/dashboard/client/src/pages/metrics/Metrics.component.test.tsx +++ b/python/ray/dashboard/client/src/pages/metrics/Metrics.component.test.tsx @@ -22,6 +22,8 @@ const Wrapper = ({ children }: PropsWithChildren<{}>) => { nodeMapByIp: {}, namespaceMap: {}, dashboardDatasource: "Prometheus", + serverTimeZone: undefined, + currentTimeZone: undefined, }} > {children} @@ -47,6 +49,8 @@ const MetricsDisabledWrapper = ({ children }: PropsWithChildren<{}>) => { nodeMapByIp: {}, namespaceMap: {}, dashboardDatasource: "Prometheus", + serverTimeZone: undefined, + currentTimeZone: undefined, }} > {children} diff --git a/python/ray/dashboard/client/src/pages/metrics/Metrics.tsx b/python/ray/dashboard/client/src/pages/metrics/Metrics.tsx index fd542e534cd1..99d459a4fe54 100644 --- a/python/ray/dashboard/client/src/pages/metrics/Metrics.tsx +++ b/python/ray/dashboard/client/src/pages/metrics/Metrics.tsx @@ -571,8 +571,8 @@ const MetricsSection = ({ dashboardUid, dashboardDatasource, }: MetricsSectionProps) => { - const { grafanaHost, sessionName } = useContext(GlobalContext); - + const { grafanaHost, sessionName, currentTimeZone } = + useContext(GlobalContext); return ( { const path = `/d-solo/${dashboardUid}?${pathParams}` + - `&${refreshParams}${timeRangeParams}&var-SessionName=${sessionName}&var-datasource=${dashboardDatasource}`; + `&${refreshParams}&timezone=${currentTimeZone}${timeRangeParams}&var-SessionName=${sessionName}&var-datasource=${dashboardDatasource}`; return ( { return await get(GRAFANA_HEALTHCHECK_URL); }; @@ -68,3 +74,21 @@ export const getMetricsInfo = async () => { return info; }; + +export type TimezoneInfo = { + offset: string; + value: string; +}; + +export const getTimeZoneInfo = async () => { + try { + const resp = await get(TIMEZONE_URL); + if (resp.data) { + return { + offset: resp.data.offset, + value: resp.data.value, + }; + } + } catch (e) {} + return null; +}; diff --git a/python/ray/dashboard/client/src/pages/overview/OverviewPage.component.test.tsx b/python/ray/dashboard/client/src/pages/overview/OverviewPage.component.test.tsx index a92ebb6f4701..9b0d432b9f92 100644 --- a/python/ray/dashboard/client/src/pages/overview/OverviewPage.component.test.tsx +++ b/python/ray/dashboard/client/src/pages/overview/OverviewPage.component.test.tsx @@ -84,6 +84,8 @@ const Wrapper = nodeMapByIp: {}, namespaceMap: {}, dashboardDatasource: "Prometheus", + serverTimeZone: undefined, + currentTimeZone: undefined, }} > {children} diff --git a/python/ray/dashboard/client/src/pages/overview/cards/ClusterUtilizationCard.tsx b/python/ray/dashboard/client/src/pages/overview/cards/ClusterUtilizationCard.tsx index ab76f014f22f..a1c04c8047ab 100644 --- a/python/ray/dashboard/client/src/pages/overview/cards/ClusterUtilizationCard.tsx +++ b/python/ray/dashboard/client/src/pages/overview/cards/ClusterUtilizationCard.tsx @@ -20,11 +20,12 @@ export const ClusterUtilizationCard = ({ sessionName, dashboardUids, dashboardDatasource, + currentTimeZone, } = useContext(GlobalContext); const grafanaDefaultDashboardUid = dashboardUids?.default ?? 
"rayDefaultDashboard"; const path = `/d-solo/${grafanaDefaultDashboardUid}/default-dashboard?orgId=1&theme=light&panelId=41&var-datasource=${dashboardDatasource}`; - const timeRangeParams = "&from=now-30m&to=now"; + const timeRangeParams = "&from=now-1h&to=now"; if (!metricsContextLoaded || grafanaHost === "DISABLED") { return null; @@ -51,7 +52,7 @@ export const ClusterUtilizationCard = ({ component="iframe" title="Cluster Utilization" sx={{ flex: 1 }} - src={`${grafanaHost}${path}&refresh${timeRangeParams}&var-SessionName=${sessionName}`} + src={`${grafanaHost}${path}&refresh&timezone=${currentTimeZone}${timeRangeParams}&var-SessionName=${sessionName}`} frameBorder="0" /> { sessionName, dashboardUids, dashboardDatasource, + currentTimeZone, } = useContext(GlobalContext); const grafanaDefaultDashboardUid = dashboardUids?.default ?? "rayDefaultDashboard"; const path = `/d-solo/${grafanaDefaultDashboardUid}/default-dashboard?orgId=1&theme=light&panelId=24&var-datasource=${dashboardDatasource}`; - const timeRangeParams = "&from=now-30m&to=now"; + const timeRangeParams = "&from=now-1h&to=now"; if (!metricsContextLoaded || grafanaHost === "DISABLED") { return null; @@ -45,7 +46,7 @@ export const NodeCountCard = ({ className, sx }: NodeCountCardProps) => { component="iframe" title="Node Count" sx={{ flex: 1 }} - src={`${grafanaHost}${path}&refresh${timeRangeParams}&var-SessionName=${sessionName}`} + src={`${grafanaHost}${path}&refresh&timezone=${currentTimeZone}${timeRangeParams}&var-SessionName=${sessionName}`} frameBorder="0" /> )} diff --git a/python/ray/dashboard/client/src/pages/serve/ServeDeploymentMetricsSection.component.test.tsx b/python/ray/dashboard/client/src/pages/serve/ServeDeploymentMetricsSection.component.test.tsx index d0629ed7832c..a1f552514043 100644 --- a/python/ray/dashboard/client/src/pages/serve/ServeDeploymentMetricsSection.component.test.tsx +++ b/python/ray/dashboard/client/src/pages/serve/ServeDeploymentMetricsSection.component.test.tsx @@ -22,6 +22,8 @@ const Wrapper = ({ children }: PropsWithChildren<{}>) => { nodeMapByIp: {}, namespaceMap: {}, dashboardDatasource: "Prometheus", + serverTimeZone: undefined, + currentTimeZone: undefined, }} > {children} @@ -47,6 +49,8 @@ const MetricsDisabledWrapper = ({ children }: PropsWithChildren<{}>) => { nodeMapByIp: {}, namespaceMap: {}, dashboardDatasource: "Prometheus", + serverTimeZone: undefined, + currentTimeZone: undefined, }} > {children} diff --git a/python/ray/dashboard/client/src/pages/serve/ServeDeploymentMetricsSection.tsx b/python/ray/dashboard/client/src/pages/serve/ServeDeploymentMetricsSection.tsx index 33f62879a1d4..921f804e7fb6 100644 --- a/python/ray/dashboard/client/src/pages/serve/ServeDeploymentMetricsSection.tsx +++ b/python/ray/dashboard/client/src/pages/serve/ServeDeploymentMetricsSection.tsx @@ -51,8 +51,13 @@ export const ServeReplicaMetricsSection = ({ className, sx, }: ServeDeploymentMetricsSectionProps) => { - const { grafanaHost, prometheusHealth, dashboardUids, dashboardDatasource } = - useContext(GlobalContext); + const { + grafanaHost, + prometheusHealth, + dashboardUids, + dashboardDatasource, + currentTimeZone, + } = useContext(GlobalContext); const grafanaServeDashboardUid = dashboardUids?.serveDeployment ?? 
"rayServeDashboard"; @@ -179,7 +184,7 @@ export const ServeReplicaMetricsSection = ({ {METRICS_CONFIG.map(({ title, pathParams }) => { const path = `/d-solo/${grafanaServeDashboardUid}?${pathParams}` + - `${refreshParams}${timeRangeParams}&var-Deployment=${encodeURIComponent( + `${refreshParams}&timezone=${currentTimeZone}${timeRangeParams}&var-Deployment=${encodeURIComponent( deploymentName, )}&var-Replica=${encodeURIComponent( replicaId, diff --git a/python/ray/dashboard/client/src/pages/serve/ServeMetricsSection.component.test.tsx b/python/ray/dashboard/client/src/pages/serve/ServeMetricsSection.component.test.tsx index 6f5826087a63..c276d0fde417 100644 --- a/python/ray/dashboard/client/src/pages/serve/ServeMetricsSection.component.test.tsx +++ b/python/ray/dashboard/client/src/pages/serve/ServeMetricsSection.component.test.tsx @@ -26,6 +26,8 @@ const Wrapper = ({ children }: PropsWithChildren<{}>) => { nodeMapByIp: {}, namespaceMap: {}, dashboardDatasource: "Prometheus", + serverTimeZone: undefined, + currentTimeZone: undefined, }} > {children} @@ -51,6 +53,8 @@ const MetricsDisabledWrapper = ({ children }: PropsWithChildren<{}>) => { nodeMapByIp: {}, namespaceMap: {}, dashboardDatasource: "Prometheus", + serverTimeZone: undefined, + currentTimeZone: undefined, }} > {children} diff --git a/python/ray/dashboard/client/src/pages/serve/ServeMetricsSection.tsx b/python/ray/dashboard/client/src/pages/serve/ServeMetricsSection.tsx index efb3be529314..b03cfbcc56f8 100644 --- a/python/ray/dashboard/client/src/pages/serve/ServeMetricsSection.tsx +++ b/python/ray/dashboard/client/src/pages/serve/ServeMetricsSection.tsx @@ -77,8 +77,13 @@ export const ServeMetricsSection = ({ metricsConfig, sx, }: ServeMetricsSectionProps) => { - const { grafanaHost, prometheusHealth, dashboardUids, dashboardDatasource } = - useContext(GlobalContext); + const { + grafanaHost, + prometheusHealth, + dashboardUids, + dashboardDatasource, + currentTimeZone, + } = useContext(GlobalContext); const grafanaServeDashboardUid = dashboardUids?.serve ?? 
"rayServeDashboard"; const [refreshOption, setRefreshOption] = useState( RefreshOptions.FIVE_SECONDS, @@ -196,7 +201,7 @@ export const ServeMetricsSection = ({ {metricsConfig.map(({ title, pathParams }) => { const path = `/d-solo/${grafanaServeDashboardUid}?${pathParams}` + - `${refreshParams}${timeRangeParams}&var-datasource=${dashboardDatasource}`; + `${refreshParams}&timezone=${currentTimeZone}${timeRangeParams}&var-datasource=${dashboardDatasource}`; return ( ) => { prometheusHealth: true, sessionName: "session-name", dashboardDatasource: "Prometheus", + serverTimeZone: undefined, + currentTimeZone: undefined, }; return ( diff --git a/python/ray/dashboard/http_server_head.py b/python/ray/dashboard/http_server_head.py index e1c427b1b288..078d4a97dd38 100644 --- a/python/ray/dashboard/http_server_head.py +++ b/python/ray/dashboard/http_server_head.py @@ -12,6 +12,7 @@ import ray import ray.dashboard.optional_utils as dashboard_optional_utils +import ray.dashboard.timezone_utils as timezone_utils import ray.dashboard.utils as dashboard_utils from ray._private.usage.usage_lib import TagKey, record_extra_usage_tag from ray._private.utils import get_or_create_event_loop @@ -139,6 +140,18 @@ async def get_favicon(self, req) -> aiohttp.web.FileResponse: ) ) + @routes.get("/timezone") + async def get_timezone(self, req) -> aiohttp.web.Response: + try: + current_timezone = timezone_utils.get_current_timezone_info() + return aiohttp.web.json_response(current_timezone) + + except Exception as e: + logger.error(f"Error getting timezone: {e}") + return aiohttp.web.Response( + status=500, text="Internal Server Error:" + str(e) + ) + def get_address(self): assert self.http_host and self.http_port return self.http_host, self.http_port diff --git a/python/ray/dashboard/timezone_utils.py b/python/ray/dashboard/timezone_utils.py new file mode 100644 index 000000000000..6a0d68b9c1a9 --- /dev/null +++ b/python/ray/dashboard/timezone_utils.py @@ -0,0 +1,56 @@ +import logging +from datetime import datetime + +logger = logging.getLogger(__name__) + +timezones = [ + {"offset": "-12:00", "value": "Etc/+12"}, + {"offset": "-11:00", "value": "Pacific/Pago_Pago"}, + {"offset": "-10:00", "value": "Pacific/Honolulu"}, + {"offset": "-09:00", "value": "America/Anchorage"}, + {"offset": "-08:00", "value": "America/Los_Angeles"}, + {"offset": "-07:00", "value": "America/Phoenix"}, + {"offset": "-06:00", "value": "America/Guatemala"}, + {"offset": "-05:00", "value": "America/Bogota"}, + {"offset": "-04:00", "value": "America/Halifax"}, + {"offset": "-03:30", "value": "America/St_Johns"}, + {"offset": "-03:00", "value": "America/Sao_Paulo"}, + {"offset": "-02:00", "value": "America/Godthab"}, + {"offset": "-01:00", "value": "Atlantic/Azores"}, + {"offset": "+00:00", "value": "Europe/London"}, + {"offset": "+01:00", "value": "Europe/Amsterdam"}, + {"offset": "+02:00", "value": "Asia/Amman"}, + {"offset": "+03:00", "value": "Asia/Baghdad"}, + {"offset": "+03:30", "value": "Asia/Tehran"}, + {"offset": "+04:00", "value": "Asia/Dubai"}, + {"offset": "+04:30", "value": "Asia/Kabul"}, + {"offset": "+05:00", "value": "Asia/Karachi"}, + {"offset": "+05:30", "value": "Asia/Kolkata"}, + {"offset": "+05:45", "value": "Asia/Kathmandu"}, + {"offset": "+06:00", "value": "Asia/Almaty"}, + {"offset": "+06:30", "value": "Asia/Yangon"}, + {"offset": "+07:00", "value": "Asia/Bangkok"}, + {"offset": "+08:00", "value": "Asia/Shanghai"}, + {"offset": "+09:00", "value": "Asia/Irkutsk"}, + {"offset": "+09:30", "value": "Australia/Adelaide"}, + 
{"offset": "+10:00", "value": "Australia/Brisbane"}, + {"offset": "+11:00", "value": "Asia/Magadan"}, + {"offset": "+12:00", "value": "Pacific/Auckland"}, + {"offset": "+13:00", "value": "Pacific/Tongatapu"}, +] + + +def get_current_timezone_info(): + current_tz = datetime.now().astimezone().tzinfo + offset = current_tz.utcoffset(None) + hours, remainder = divmod(offset.total_seconds(), 3600) + minutes = remainder // 60 + sign = "+" if hours >= 0 else "-" + current_offset = f"{sign}{abs(int(hours)):02d}:{abs(int(minutes)):02d}" + + current_timezone = next( + (tz for tz in timezones if tz["offset"] == current_offset), + {"offset": None, "value": None}, + ) + + return current_timezone From 61dc075770b4a183e0cd9436f24ac1a1ce3fab9c Mon Sep 17 00:00:00 2001 From: Balaji Veeramani Date: Fri, 22 Nov 2024 11:51:17 -0600 Subject: [PATCH 060/107] [Data] Add new Parquet counting release test (#48853) Issues with this current counting Parquet release test: * It doesn't test autoscaling * It doesn't test multi-node * It uses a list of paths as input, which is extremely less common than a directory as input --------- Signed-off-by: Balaji Veeramani --- .../multi_node_autoscaling_compute.yaml | 18 +++++ .../dataset/parquet_metadata_resolution.py | 36 ---------- .../dataset/read_and_consume_benchmark.py | 69 +++++++++++++++++++ release/release_data_tests.yaml | 19 ++--- 4 files changed, 91 insertions(+), 51 deletions(-) create mode 100644 release/nightly_tests/dataset/multi_node_autoscaling_compute.yaml delete mode 100644 release/nightly_tests/dataset/parquet_metadata_resolution.py create mode 100644 release/nightly_tests/dataset/read_and_consume_benchmark.py diff --git a/release/nightly_tests/dataset/multi_node_autoscaling_compute.yaml b/release/nightly_tests/dataset/multi_node_autoscaling_compute.yaml new file mode 100644 index 000000000000..7b3612d3b4b8 --- /dev/null +++ b/release/nightly_tests/dataset/multi_node_autoscaling_compute.yaml @@ -0,0 +1,18 @@ +# This config matches the default config for Anyscale workspaces with autoscaling. +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} +region: us-west-2 + +max_workers: 0 + +head_node_type: + name: head_node + instance_type: m5.2xlarge + resources: + cpu: 0 + +worker_node_types: + - name: worker_node + instance_type: m5.2xlarge + min_workers: 0 + max_workers: 10 + use_spot: false diff --git a/release/nightly_tests/dataset/parquet_metadata_resolution.py b/release/nightly_tests/dataset/parquet_metadata_resolution.py deleted file mode 100644 index b9f634f3f17f..000000000000 --- a/release/nightly_tests/dataset/parquet_metadata_resolution.py +++ /dev/null @@ -1,36 +0,0 @@ -import argparse -import os - -from benchmark import Benchmark - -parser = argparse.ArgumentParser(description="Parquet Metadata Read") -parser.add_argument("--num-files", type=int, default=30) -parser.add_argument("--cloud", type=str, choices=["aws", "gcp"]) - - -if __name__ == "__main__": - args = parser.parse_args() - import ray - - print("Connecting to Ray cluster...") - ray.init(address="auto") - - num = args.num_files - - assert args.cloud in {"aws", "gcp"}, args.cloud - if args.cloud == "aws": - prefix = "s3://shuffling-data-loader-benchmarks/data/r10_000_000_000-f1000" - if args.cloud == "gcp": - # NOTE(@bveeramani): I made a mistake while transferring the files from S3 to - # GCS, so there's an extra "r10_000_000_000-f1000" in the URI. Don't worry about - # it. The files are the same. 
- prefix = "gs://shuffling-data-loader-benchmarks/data/r10_000_000_000-f1000/r10_000_000_000-f1000" # noqa: E501 - files = [f"{prefix}/input_data_{i}.parquet.snappy" for i in range(args.num_files)] - - def _trigger_parquet_metadata_load(): - # This should only read Parquet metadata. - ray.data.read_parquet(files).count() - - benchmark = Benchmark("parquet_metadata_resolution") - benchmark.run_fn("read_metadata", _trigger_parquet_metadata_load) - benchmark.write_result(os.environ["TEST_OUTPUT_JSON"]) diff --git a/release/nightly_tests/dataset/read_and_consume_benchmark.py b/release/nightly_tests/dataset/read_and_consume_benchmark.py new file mode 100644 index 000000000000..f833d27035ce --- /dev/null +++ b/release/nightly_tests/dataset/read_and_consume_benchmark.py @@ -0,0 +1,69 @@ +import ray + +from benchmark import Benchmark + +import argparse +from typing import Callable + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser() + parser.add_argument("path", type=str) + parser.add_argument( + "--format", + choices=["image", "parquet"], + required=True, + ) + + consume_group = parser.add_mutually_exclusive_group() + consume_group.add_argument("--count", action="store_true") + consume_group.add_argument("--iterate", action="store_true") + + return parser.parse_args() + + +def main(args): + benchmark = Benchmark("read-and-consume") + read_fn = get_read_fn(args) + consume_fn = get_consume_fn(args) + + def benchmark_fn(): + ds = read_fn(args.path) + consume_fn(ds) + + benchmark.run_fn(str(vars(args)), benchmark_fn) + benchmark.write_result() + + +def get_read_fn(args: argparse.Namespace) -> Callable[[str], ray.data.Dataset]: + if args.format == "image": + read_fn = ray.data.read_images + elif args.format == "parquet": + read_fn = ray.data.read_parquet + else: + assert False, f"Invalid data format argument: {args}" + + return read_fn + + +def get_consume_fn(args: argparse.Namespace) -> Callable[[ray.data.Dataset], None]: + if args.count: + + def consume_fn(ds): + ds.count() + + elif args.iterate: + + def consume_fn(ds): + for _ in ds.iter_internal_ref_bundles(): + pass + + else: + assert False, f"Invalid consume arguments: {args}" + + return consume_fn + + +if __name__ == "__main__": + args = parse_args() + main(args) diff --git a/release/release_data_tests.yaml b/release/release_data_tests.yaml index 9b3c9d3bd7ac..49fa85ccc38d 100644 --- a/release/release_data_tests.yaml +++ b/release/release_data_tests.yaml @@ -2,7 +2,7 @@ # Dataset tests ############### -- name: parquet_metadata_resolution +- name: count_parquet group: data-tests working_dir: nightly_tests/dataset @@ -12,22 +12,11 @@ cluster: byod: type: gpu - cluster_compute: single_node_benchmark_compute.yaml + cluster_compute: multi_node_autoscaling_compute.yaml run: - # Expect the test to finish around 40 seconds. 
-    timeout: 100
-    script: python parquet_metadata_resolution.py --num-files 915 --cloud aws
-
-  variations:
-    - __suffix__: aws
-    - __suffix__: gce
-      env: gce
-      frequency: manual
-      cluster:
-        cluster_compute: single_node_benchmark_compute_gce.yaml
-      run:
-        script: python parquet_metadata_resolution.py --num-files 915 --cloud gcp
+    timeout: 600
+    script: python read_and_consume_benchmark.py s3://ray-benchmark-data/parquet/10TiB --format parquet --count

 - name: stable_diffusion_benchmark
   group: data-tests

From 8508cda556b5fc02ca73af83181a26c82c7b830a Mon Sep 17 00:00:00 2001
From: Kai-Hsun Chen
Date: Fri, 22 Nov 2024 10:25:51 -0800
Subject: [PATCH 061/107] [metrics][prometheus] Support `ray metrics shutdown-prometheus` (#48799)

Signed-off-by: kaihsun
---
 doc/source/cluster/metrics.md                        | 12 +++++++++++-
 .../modules/metrics/install_and_start_prometheus.py  |  2 ++
 .../modules/tests/test_metrics_integration.py        | 13 +++++++++++++
 python/ray/scripts/scripts.py                        | 10 ++++++++++
 4 files changed, 36 insertions(+), 1 deletion(-)

diff --git a/doc/source/cluster/metrics.md b/doc/source/cluster/metrics.md
index ad9e42cd77dd..361a5d1e3c74 100644
--- a/doc/source/cluster/metrics.md
+++ b/doc/source/cluster/metrics.md
@@ -67,7 +67,17 @@ ray_dashboard_api_requests_count_requests_total

 You can then see the number of requests to the Ray Dashboard API over time.

-To stop Prometheus, run `kill <PID>` where `<PID>` is the PID of the Prometheus process that was printed out when you ran the command. To find the PID, you can also run `ps aux | grep prometheus`.
+To stop Prometheus, run the following commands:
+
+```sh
+# case 1: Ray > 2.40
+ray metrics shutdown-prometheus
+
+# case 2: Otherwise
+# Run `ps aux | grep prometheus` to find the PID of the Prometheus process. Then, kill the process.
+kill <PID>
+```
+

 ### [Optional] Manual: Running Prometheus locally

diff --git a/python/ray/dashboard/modules/metrics/install_and_start_prometheus.py b/python/ray/dashboard/modules/metrics/install_and_start_prometheus.py
index cf7cb31c3607..ea0ff2459f65 100644
--- a/python/ray/dashboard/modules/metrics/install_and_start_prometheus.py
+++ b/python/ray/dashboard/modules/metrics/install_and_start_prometheus.py
@@ -93,6 +93,7 @@ def start_prometheus(prometheus_dir):
         f"{prometheus_dir}/prometheus",
         "--config.file",
         str(config_file),
+        "--web.enable-lifecycle",
     ]
     try:
         process = subprocess.Popen(prometheus_cmd)
@@ -107,6 +108,7 @@ def print_shutdown_message(process_id):
     message = (
         f"Prometheus is running with PID {process_id}.\n"
         "To stop Prometheus, use the command: "
+        "`ray metrics shutdown-prometheus`, "
         f"'kill {process_id}', or if you need to force stop, "
         f"use 'kill -9 {process_id}'."
) diff --git a/python/ray/dashboard/modules/tests/test_metrics_integration.py b/python/ray/dashboard/modules/tests/test_metrics_integration.py index 7974b4a43cf9..0b125ca2dd5b 100644 --- a/python/ray/dashboard/modules/tests/test_metrics_integration.py +++ b/python/ray/dashboard/modules/tests/test_metrics_integration.py @@ -1,11 +1,14 @@ import subprocess import sys +import time import pytest +from click.testing import CliRunner from ray.dashboard.consts import PROMETHEUS_CONFIG_INPUT_PATH from ray.dashboard.modules.metrics import install_and_start_prometheus from ray.dashboard.modules.metrics.templates import PROMETHEUS_YML_TEMPLATE +from ray.scripts.scripts import metrics_group @pytest.mark.parametrize( @@ -40,6 +43,16 @@ def test_e2e(capsys): subprocess.run(["kill", str(pid)]) +def test_shutdown_prometheus(): + install_and_start_prometheus.main() + runner = CliRunner() + # Sleep for a few seconds to make sure Prometheus is running + # before we try to shut it down. + time.sleep(5) + result = runner.invoke(metrics_group, ["shutdown-prometheus"]) + assert result.exit_code == 0 + + def test_prometheus_config_content(): # Test to make sure the content in the hardcoded file # (python/ray/dashboard/modules/metrics/export/prometheus/prometheus.yml) will diff --git a/python/ray/scripts/scripts.py b/python/ray/scripts/scripts.py index a69c1369db72..1f26a483a7aa 100644 --- a/python/ray/scripts/scripts.py +++ b/python/ray/scripts/scripts.py @@ -15,6 +15,7 @@ from typing import Optional, Set, List, Tuple from ray.dashboard.modules.metrics import install_and_start_prometheus from ray.util.check_open_ports import check_open_ports +import requests import click import psutil @@ -2583,6 +2584,15 @@ def launch_prometheus(): install_and_start_prometheus.main() +@metrics_group.command(name="shutdown-prometheus") +def shutdown_prometheus(): + try: + requests.post("http://localhost:9090/-/quit") + except requests.exceptions.RequestException as e: + print(f"An error occurred: {e}") + sys.exit(1) + + def add_command_alias(command, name, hidden): new_command = copy.deepcopy(command) new_command.hidden = hidden From 365266d760ed3e292062aaf1d57e05dab4b64180 Mon Sep 17 00:00:00 2001 From: Sven Mika Date: Fri, 22 Nov 2024 19:35:16 +0100 Subject: [PATCH 062/107] [RLlib] Fix broken APPO release test (config error). (#48869) Fix broken APPO release test (config error). Signed-off-by: sven1977 --- rllib/tuned_examples/appo/pong_appo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rllib/tuned_examples/appo/pong_appo.py b/rllib/tuned_examples/appo/pong_appo.py index d79dbaa13fc7..ca36ca60fb7c 100644 --- a/rllib/tuned_examples/appo/pong_appo.py +++ b/rllib/tuned_examples/appo/pong_appo.py @@ -65,7 +65,7 @@ def _env_creator(cfg): entropy_coeff=[[0, 0.05], [3000000, 0.0]], # <- crucial parameter to finetune # Only update connector states and model weights every n training_step calls. 
            broadcast_interval=5,
-            learner_queue_size=1,
+            circular_buffer_num_batches=1,
         )
         .rl_module(
             model_config=DefaultModelConfig(

From d18c9541746889b4f2854cc6c4e06c33b846abdd Mon Sep 17 00:00:00 2001
From: Rui Qiao <161574667+ruisearch42@users.noreply.github.com>
Date: Fri, 22 Nov 2024 12:06:28 -0800
Subject: [PATCH 063/107] =?UTF-8?q?[core]=20Use=20m6i.large=20for=20distri?=
 =?UTF-8?q?buted/many=5Fnodes=5Ftests/compute=5Fconfig.=E2=80=A6=20(#48855?=
 =?UTF-8?q?)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Rui Qiao
---
 .../benchmarks/distributed/many_nodes_tests/compute_config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/release/benchmarks/distributed/many_nodes_tests/compute_config.yaml b/release/benchmarks/distributed/many_nodes_tests/compute_config.yaml
index 7f1e59497146..2a8de6119ea3 100644
--- a/release/benchmarks/distributed/many_nodes_tests/compute_config.yaml
+++ b/release/benchmarks/distributed/many_nodes_tests/compute_config.yaml
@@ -23,7 +23,7 @@ head_node_type:

 worker_node_types:
   - name: worker_node
-    instance_type: m5.large
+    instance_type: m6i.large
     min_workers: 500
     max_workers: 2000
     use_spot: false

From 56b6e1d46b55e687e78e9b526397e3d278bccee7 Mon Sep 17 00:00:00 2001
From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com>
Date: Fri, 22 Nov 2024 12:19:57 -0800
Subject: [PATCH 064/107] [data][dashboard] fix confusing dataset operator name (#48805)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In https://github.com/ray-project/ray/issues/48482, a user reported that the
name used in the Ray Data dashboard is confusing when the name of the operator
ends with a number. The name used in the Ray Data dashboard is actually just a
key identifier of the dataset, not its name. I added another field to encode
its name and use it for the data dashboard.
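A minimal sketch of the ambiguity being fixed (the key format and the `name`
field come from the `streaming_executor.py` and test diffs below; the dict
shape is abbreviated):

```python
# Before: the dashboard rendered the operator *key*, which appends a
# topology index to the operator name -- confusing when the name itself
# already ends in a digit.
op_name = "ReadRange->MapBatches(<lambda>)"
operator_key = f"{op_name}1"  # shows up as "ReadRange->MapBatches(<lambda>)1"

# After: the key stays a unique identifier, while a separate "name" field
# carries the human-readable operator name for display.
state = {operator_key: {"name": op_name, "progress": 20, "total": 20}}
```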
Closes https://github.com/ray-project/ray/issues/48482

Test:
- CI
- More beautiful than ever

Screenshot 2024-11-21 at 12 17 21 PM

Signed-off-by: can
---
 .../src/components/DataOverviewTable.tsx      |  2 +-
 .../data/DataOverview.component.test.tsx      |  7 +++---
 python/ray/dashboard/client/src/type/data.ts  |  1 +
 .../modules/data/tests/test_data_head.py      | 24 ++++++++++++++-----
 .../_internal/execution/streaming_executor.py |  1 +
 python/ray/data/tests/test_stats.py           |  1 +
 6 files changed, 26 insertions(+), 10 deletions(-)

diff --git a/python/ray/dashboard/client/src/components/DataOverviewTable.tsx b/python/ray/dashboard/client/src/components/DataOverviewTable.tsx
index c52bdee0c28b..c3538ba9ccd4 100644
--- a/python/ray/dashboard/client/src/components/DataOverviewTable.tsx
+++ b/python/ray/dashboard/client/src/components/DataOverviewTable.tsx
@@ -193,7 +193,7 @@ const DataRow = ({
           {isDatasetRow && datasetMetrics.dataset}
-          {isOperatorRow && operatorMetrics.operator}
+          {isOperatorRow && operatorMetrics.name}
 {
       operators: [
         {
           operator: "test_ds1_op1",
+          name: "test_ds1_op",
           state: "RUNNING",
           progress: 99,
           total: 101,
@@ -104,11 +105,11 @@ describe("DataOverview", () => {
     expect(screen.getByText("70/80")).toBeVisible();

     // Operator dropdown
-    expect(screen.queryByText("test_ds1_op1")).toBeNull();
+    expect(screen.queryByText("test_ds1_op")).toBeNull();
     await user.click(screen.getByTitle("Expand Dataset test_ds1"));
-    expect(screen.getByText("test_ds1_op1")).toBeVisible();
+    expect(screen.getByText("test_ds1_op")).toBeVisible();
     await user.click(screen.getByTitle("Collapse Dataset test_ds1"));
-    expect(screen.queryByText("test_ds1_op1")).toBeNull();
+    expect(screen.queryByText("test_ds1_op")).toBeNull();

     // Second Dataset
     expect(screen.getByText("test_ds2")).toBeVisible();
diff --git a/python/ray/dashboard/client/src/type/data.ts b/python/ray/dashboard/client/src/type/data.ts
index dbbfc5889238..64d91a006568 100644
--- a/python/ray/dashboard/client/src/type/data.ts
+++ b/python/ray/dashboard/client/src/type/data.ts
@@ -12,6 +12,7 @@ export type DatasetMetrics = DataMetrics & {

 export type OperatorMetrics = DataMetrics & {
   operator: string;
+  name: string;
 };

 export type DataMetrics = {
diff --git a/python/ray/dashboard/modules/data/tests/test_data_head.py b/python/ray/dashboard/modules/data/tests/test_data_head.py
index 650079360a8b..c94a50878c95 100644
--- a/python/ray/dashboard/modules/data/tests/test_data_head.py
+++ b/python/ray/dashboard/modules/data/tests/test_data_head.py
@@ -32,6 +32,7 @@
 ] + DATA_SCHEMA

 OPERATOR_SCHEMA = [
+    "name",
     "operator",
 ] + DATA_SCHEMA

@@ -64,12 +65,23 @@ def test_get_datasets():
     operators = dataset["operators"]
     assert len(operators) == 2
     op0 = operators[0]
+    op1 = operators[1]
     assert sorted(op0.keys()) == sorted(OPERATOR_SCHEMA)
-    assert op0["operator"] == "Input0"
-    assert op0["progress"] == 20
-    assert op0["total"] == 20
-    assert op0["state"] == "FINISHED"
-    assert operators[1]["operator"] == "ReadRange->MapBatches(<lambda>)1"
+    assert sorted(op1.keys()) == sorted(OPERATOR_SCHEMA)
+    assert {
+        "operator": "Input0",
+        "name": "Input",
+        "state": "FINISHED",
+        "progress": 20,
+        "total": 20,
+    }.items() <= op0.items()
+    assert {
+        "operator": "ReadRange->MapBatches(<lambda>)1",
+        "name": "ReadRange->MapBatches(<lambda>)",
+        "state": "FINISHED",
+        "progress": 20,
+        "total": 20,
+    }.items() <= op1.items()

     ds.map_batches(lambda x: x).materialize()
     data = requests.get(DATA_HEAD_URLS["GET"].format(job_id=job_id)).json()
@@ -83,4 +95,4 @@


 if __name__ == "__main__":
-    sys.exit(pytest.main(["-v", __file__]))
+    sys.exit(pytest.main(["-vv", __file__]))
diff --git a/python/ray/data/_internal/execution/streaming_executor.py b/python/ray/data/_internal/execution/streaming_executor.py
index ca48d7766c35..a4276e2bafe6 100644
--- a/python/ray/data/_internal/execution/streaming_executor.py
+++ b/python/ray/data/_internal/execution/streaming_executor.py
@@ -399,6 +399,7 @@ def _get_state_dict(self, state):
             "end_time": time.time() if state != "RUNNING" else None,
             "operators": {
                 f"{op.name}{i}": {
+                    "name": op.name,
                     "progress": op_state.num_completed_tasks,
                     "total": op.num_outputs_total(),
                     "state": state,
diff --git a/python/ray/data/tests/test_stats.py b/python/ray/data/tests/test_stats.py
index cb0d31f22774..a41e060fb3f5 100644
--- a/python/ray/data/tests/test_stats.py
+++ b/python/ray/data/tests/test_stats.py
@@ -1648,6 +1648,7 @@ def test_stats_actor_datasets(ray_start_cluster):
     assert "Input0" in operators
     assert "ReadRange->MapBatches(<lambda>)1" in operators
     for value in operators.values():
+        assert value["name"] in ["Input", "ReadRange->MapBatches(<lambda>)"]
         assert value["progress"] == 20
         assert value["total"] == 20
         assert value["state"] == "FINISHED"

From 62533084e74684649abd43dcfd1ed7e26b985cf2 Mon Sep 17 00:00:00 2001
From: Balaji Veeramani
Date: Fri, 22 Nov 2024 16:18:12 -0600
Subject: [PATCH 065/107] [Data] Remove unhelpful all-to-all release tests (#48872)

Push-based shuffle hasn't been maintained for a very long time. The release
tests often fail, and we don't do anything about it.

The aggregate benchmark release test operates on <1 GiB of data, so it's not
very useful.

---------

Signed-off-by: Balaji Veeramani
---
 .../dataset/aggregate_benchmark.py | 141 ------------------
 release/release_data_tests.yaml    | 117 ---------------
 2 files changed, 258 deletions(-)
 delete mode 100644 release/nightly_tests/dataset/aggregate_benchmark.py

diff --git a/release/nightly_tests/dataset/aggregate_benchmark.py b/release/nightly_tests/dataset/aggregate_benchmark.py
deleted file mode 100644
index 8085ed0ca6d9..000000000000
--- a/release/nightly_tests/dataset/aggregate_benchmark.py
+++ /dev/null
@@ -1,141 +0,0 @@
-from typing import Tuple
-
-import ray
-from ray.data._internal.aggregate import (
-    _AggregateOnKeyBase,
-    Max,
-    Mean,
-    Min,
-    Sum,
-)
-from ray.data.block import Block
-from ray.data.dataset import Dataset
-import pyarrow.compute as pac
-
-from benchmark import Benchmark
-
-
-def run_h2oai(benchmark: Benchmark):
-    """This benchmark is originally from https://github.com/h2oai/db-benchmark
-
-    Here we run all group-by queries from the benchmark on Ray Datasets.
-    The input files are pre-generated and stored in AWS S3 beforehand.
-    """
-
-    # Test input file schema={
-    # id1: string, id2: string, id3: string, id4: int64, id5: int64, id6: int64,
-    # v1: int64, v2: int64, v3: double
-    # })
-    test_input = [
-        ("s3://air-example-data/h2oai_benchmark/G1_1e7_1e2_0_0.csv", "h2oai-500M")
-    ]
-    for path, test_name in test_input:
-        input_ds = ray.data.read_csv(path)
-        # Number of blocks (parallelism) should be set as number of available CPUs
-        # to get best performance.
- num_blocks = int(ray.cluster_resources().get("CPU", 1)) - input_ds = input_ds.repartition(num_blocks).materialize() - - q_list = [ - (h2oai_q1, "q1"), - (h2oai_q3, "q3"), - (h2oai_q4, "q4"), - (h2oai_q5, "q5"), - (h2oai_q7, "q7"), - (h2oai_q8, "q8"), - ] - - for q, name in q_list: - benchmark.run_materialize_ds(f"{test_name}-{name}", q, ds=input_ds) - - -def h2oai_q1(ds: Dataset) -> Dataset: - return ds.groupby("id1").sum("v1") - - -def h2oai_q2(ds: Dataset) -> Dataset: - # TODO(chengsu): Run this after dataset supports multiple group-by keys. - # return ds.groupby(["id1", "id2"]).sum("v1") - raise NotImplementedError - - -def h2oai_q3(ds: Dataset) -> Dataset: - return ds.groupby("id3").aggregate(Sum("v1"), Mean("v3")) - - -def h2oai_q4(ds: Dataset) -> Dataset: - return ds.groupby("id4").aggregate(Mean("v1"), Mean("v2"), Mean("v3")) - - -def h2oai_q5(ds: Dataset) -> Dataset: - return ds.groupby("id6").aggregate(Sum("v1"), Sum("v2"), Sum("v3")) - - -def h2oai_q6(ds: Dataset) -> Dataset: - # TODO(chengsu): Run this after dataset supports multiple group-by keys. - # return ds.groupby(["id4", "id5"]).aggregate(Median("v3"), Std("v3")) - raise NotImplementedError - - -def h2oai_q7(ds: Dataset) -> Dataset: - ds = ds.groupby("id3").aggregate(Max("v1"), Min("v2")) - ds = ds.map_batches( - lambda df: df.assign(result=df["max(v1)"] - df["min(v2)"]), - batch_format="pandas", - ) - return ds - - -def h2oai_q8(ds: Dataset) -> Dataset: - def accumulate_block(agg: Tuple[float, float], block: Block) -> Tuple[float, float]: - column = block["v3"] - top_k_indices = pac.top_k_unstable(column, k=2) - top_k_result = pac.take(column, top_k_indices).to_pylist() - top_k_result.extend([float("-inf")] * (2 - len(top_k_result))) - top_k_result = (top_k_result[0], top_k_result[1]) - return merge(agg, top_k_result) - - def merge( - agg1: Tuple[float, float], - agg2: Tuple[float, float], - ) -> Tuple[float, float]: - if agg1[0] >= agg2[0]: - value1 = agg1[0] - value2 = max(agg1[1], agg2[0]) - else: - value1 = agg2[0] - value2 = max(agg1[0], agg2[1]) - return (value1, value2) - - class Top2(_AggregateOnKeyBase): - def __init__(self, on): - self._set_key_fn(on) - super().__init__( - init=lambda _: (float("-inf"), float("-inf")), - merge=merge, - accumulate_block=accumulate_block, - name=(f"top2({str(on)})"), - ) - - return ds.groupby("id6").aggregate(Top2("v3")) - - -def h2oai_q9(ds: Dataset) -> Dataset: - # TODO(chengsu): Run this after dataset supports multiple group-by keys. - # return ds.groupby(["id2", "id4"]).aggregate(pow(corr("v1", "v2"), 2)) - raise NotImplementedError - - -def h2oai_q10(ds: Dataset) -> Dataset: - # TODO(chengsu): Run this after dataset supports multiple group-by keys. 
- # return ds.groupby(["id1", "id2", "id3", "id4", "id5", "id6"]) - # .aggregate(Count(), Sum("v3")) - raise NotImplementedError - - -if __name__ == "__main__": - benchmark = Benchmark("aggregate") - - run_h2oai(benchmark) - - benchmark.write_result() diff --git a/release/release_data_tests.yaml b/release/release_data_tests.yaml index 49fa85ccc38d..e8e71b7dadbf 100644 --- a/release/release_data_tests.yaml +++ b/release/release_data_tests.yaml @@ -126,30 +126,6 @@ cluster: cluster_compute: data_ingest_benchmark_compute_gpu_gce.yaml -- name: aggregate_benchmark - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - - cluster: - byod: - type: gpu - cluster_compute: single_node_benchmark_compute.yaml - - run: - timeout: 1800 - script: python aggregate_benchmark.py - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: single_node_benchmark_compute_gce.yaml - - name: read_parquet_benchmark_single_node group: data-tests working_dir: nightly_tests/dataset @@ -629,99 +605,6 @@ cluster: cluster_compute: shuffle/datasets_large_scale_compute_small_instances_gce.yaml -- name: dataset_shuffle_push_based_random_shuffle_1tb - group: data-tests - working_dir: nightly_tests - - stable: false - - frequency: nightly - team: data - - cluster: - byod: - runtime_env: - - RAY_worker_killing_policy=retriable_lifo - pip: - - ray[default] - cluster_compute: shuffle/datasets_large_scale_compute_small_instances.yaml - - run: - timeout: 7200 - script: RAY_DATA_PUSH_BASED_SHUFFLE=1 python dataset/sort.py --num-partitions=1000 --partition-size=1e9 --shuffle - wait_for_nodes: - num_nodes: 20 - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: shuffle/datasets_large_scale_compute_small_instances_gce.yaml - -- name: dataset_shuffle_push_based_sort_1tb - group: data-tests - working_dir: nightly_tests - - frequency: nightly - team: data - stable: False - - cluster: - byod: - runtime_env: - - RAY_worker_killing_policy=retriable_lifo - pip: - - ray[default] - cluster_compute: shuffle/datasets_large_scale_compute_small_instances.yaml - - run: - timeout: 7200 - script: RAY_DATA_PUSH_BASED_SHUFFLE=1 python dataset/sort.py --num-partitions=1000 --partition-size=1e9 - wait_for_nodes: - num_nodes: 20 - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: shuffle/datasets_large_scale_compute_small_instances_gce.yaml - -- name: dataset_shuffle_push_based_random_shuffle_100tb - group: data-tests - working_dir: nightly_tests - stable: false - - frequency: weekly - team: data - cluster: - byod: - runtime_env: - - RAY_object_spilling_config={"type":"filesystem","params":{"directory_path":["/tmp/data0","/tmp/data1"]}} - post_build_script: byod_dataset_shuffle.sh - cluster_compute: shuffle/100tb_shuffle_compute.yaml - - run: - timeout: 28800 - script: RAY_DATA_PUSH_BASED_SHUFFLE=1 python dataset/sort.py --num-partitions=100000 --partition-size=1e9 --shuffle - wait_for_nodes: - num_nodes: 100 - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: shuffle/100tb_shuffle_compute_gce.yaml - run: - timeout: 28800 - script: RAY_DATA_PUSH_BASED_SHUFFLE=1 python dataset/sort.py --num-partitions=40000 --partition-size=1e9 --shuffle - wait_for_nodes: - num_nodes: 100 ############################ # Batch Inference Benchmarks From 
27e349ee9f5a2bedb39670373fdc62dd39a215ea Mon Sep 17 00:00:00 2001
From: Cindy Zhang
Date: Fri, 22 Nov 2024 14:42:50 -0800
Subject: [PATCH 066/107] [serve] debug gcs failure (#48873)

## Why are these changes needed?

- add debug statements
- shutdown serve before shutting down ray (deflake some test failures I've seen)
- remove `--forked` so test output is captured when a test fails

---------

Signed-off-by: Cindy Zhang
---
 python/ray/serve/tests/test_gcs_failure.py | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/python/ray/serve/tests/test_gcs_failure.py b/python/ray/serve/tests/test_gcs_failure.py
index cb582a3df51c..3dabb58a99fc 100644
--- a/python/ray/serve/tests/test_gcs_failure.py
+++ b/python/ray/serve/tests/test_gcs_failure.py
@@ -30,9 +30,14 @@ def serve_ha(external_redis, monkeypatch):  # noqa: F811
     )
     serve.start()
     yield (address_info, _get_global_client())
-    ray.shutdown()
+
+    # When GCS is down, right now some core worker members are not cleared
+    # properly in ray.shutdown.
+    ray.worker._global_node.start_gcs_server()
+
     # Clear cache and global serve client
     serve.shutdown()
+    ray.shutdown()


 @pytest.mark.skipif(
@@ -127,6 +132,7 @@ def router_populated_with_replicas(
     else:
         replicas = get_replicas_func()

+    print(f"Replica set in router: {replicas}")
     assert len(replicas) >= threshold

     # Return early if we don't need to check cache
@@ -299,7 +305,4 @@ def test_proxy_router_updated_replicas_then_gcs_failure(serve_ha):


 if __name__ == "__main__":
-    # When GCS is down, right now some core worker members are not cleared
-    # properly in ray.shutdown. Given that this is not hi-pri issue,
-    # using --forked for isolation.
-    sys.exit(pytest.main(["-v", "-s", "--forked", __file__]))
+    sys.exit(pytest.main(["-v", "-s", __file__]))

From 5fdfddc1b1756c061bc0219041e79d740dd73189 Mon Sep 17 00:00:00 2001
From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com>
Date: Fri, 22 Nov 2024 15:41:21 -0800
Subject: [PATCH 067/107] [test] use async with for async timeout (#48881)

Otherwise, on the newest version of async-timeout, it will panic and fail.

Signed-off-by: Lonnie Liu
---
 python/ray/dashboard/modules/tests/test_utils.py | 2 +-
 python/ray/tests/test_gcs_utils.py               | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/ray/dashboard/modules/tests/test_utils.py b/python/ray/dashboard/modules/tests/test_utils.py
index fc4fa85dfaed..38b24da02504 100644
--- a/python/ray/dashboard/modules/tests/test_utils.py
+++ b/python/ray/dashboard/modules/tests/test_utils.py
@@ -6,7 +6,7 @@


 async def http_get(http_session, url, timeout_seconds=60):
-    with async_timeout.timeout(timeout_seconds):
+    async with async_timeout.timeout(timeout_seconds):
         async with http_session.get(url) as response:
             return await response.json()

diff --git a/python/ray/tests/test_gcs_utils.py b/python/ray/tests/test_gcs_utils.py
index c25beac6e598..50862f386346 100644
--- a/python/ray/tests/test_gcs_utils.py
+++ b/python/ray/tests/test_gcs_utils.py
@@ -267,7 +267,7 @@ async def test_gcs_aio_client_is_async(ray_start_regular):
     gcs_client = gcs_utils.GcsAioClient(address=gcs_address, nums_reconnect_retry=0)

     await gcs_client.internal_kv_put(b"A", b"B", False, b"NS", timeout=2)
-    with async_timeout.timeout(3):
+    async with async_timeout.timeout(3):
         none, result = await asyncio.gather(
             asyncio.sleep(2), gcs_client.internal_kv_get(b"A", b"NS", timeout=2)
         )

From a8e1709f19d0026c757daec6decb05432a80aa46 Mon Sep 17 00:00:00 2001
From: Balaji Veeramani
Date: Fri, 22 Nov 2024 18:10:46 -0600
Subject: [PATCH 068/107]
[Data] Add new Parquet and image reading release tests (#48870) The existing reading release tests for Parquet and images aren't representative of real-world workloads: the datasets are too small and they're run on single-node clusters. --------- Signed-off-by: Balaji Veeramani --- ...ti_node_read_images_benchmark_compute.yaml | 15 -- ...ode_read_images_benchmark_compute_gce.yaml | 17 -- .../dataset/read_images_benchmark.py | 148 ------------------ .../dataset/read_parquet_benchmark.py | 120 -------------- release/release_data_tests.yaml | 112 +++++-------- 5 files changed, 36 insertions(+), 376 deletions(-) delete mode 100644 release/nightly_tests/dataset/multi_node_read_images_benchmark_compute.yaml delete mode 100644 release/nightly_tests/dataset/multi_node_read_images_benchmark_compute_gce.yaml delete mode 100644 release/nightly_tests/dataset/read_images_benchmark.py delete mode 100644 release/nightly_tests/dataset/read_parquet_benchmark.py diff --git a/release/nightly_tests/dataset/multi_node_read_images_benchmark_compute.yaml b/release/nightly_tests/dataset/multi_node_read_images_benchmark_compute.yaml deleted file mode 100644 index 9655daad50cd..000000000000 --- a/release/nightly_tests/dataset/multi_node_read_images_benchmark_compute.yaml +++ /dev/null @@ -1,15 +0,0 @@ -cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} -region: us-west-2 - -max_workers: 19 - -head_node_type: - name: head_node - instance_type: m5.16xlarge - -worker_node_types: - - name: worker_node - instance_type: m5.4xlarge - max_workers: 19 - min_workers: 19 - use_spot: false diff --git a/release/nightly_tests/dataset/multi_node_read_images_benchmark_compute_gce.yaml b/release/nightly_tests/dataset/multi_node_read_images_benchmark_compute_gce.yaml deleted file mode 100644 index bca10d5c5447..000000000000 --- a/release/nightly_tests/dataset/multi_node_read_images_benchmark_compute_gce.yaml +++ /dev/null @@ -1,17 +0,0 @@ -cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} -region: us-west1 -allowed_azs: - - us-west1-c - -max_workers: 19 - -head_node_type: - name: head_node - instance_type: n2-standard-64 # aws m5.16xlarge - -worker_node_types: - - name: worker_node - instance_type: n2-standard-16 # aws m5.4xlarge - max_workers: 19 - min_workers: 19 - use_spot: false diff --git a/release/nightly_tests/dataset/read_images_benchmark.py b/release/nightly_tests/dataset/read_images_benchmark.py deleted file mode 100644 index 2a5f68db6e2f..000000000000 --- a/release/nightly_tests/dataset/read_images_benchmark.py +++ /dev/null @@ -1,148 +0,0 @@ -import argparse -import os -import random -import shutil -import tempfile -from typing import List, Tuple - -from PIL import Image - -import ray - -from benchmark import Benchmark - - -def parse_args(): - parser = argparse.ArgumentParser() - group = parser.add_mutually_exclusive_group() - group.add_argument( - "--single-node", - action="store_true", - help="Run single-node read_images benchmark.", - ) - group.add_argument( - "--multi-node", - action="store_true", - help="Run multi-node read_images benchmark.", - ) - return parser.parse_args() - - -def main(args): - ray.init() - - benchmark = Benchmark("read-images") - if args.single_node: - run_images_benchmark_single_node(benchmark) - elif args.multi_node: - run_images_benchmark_multi_node(benchmark) - - benchmark.write_result() - - -def generate_images( - num_images: int, sizes: List[Tuple[int, int]], modes: List[str], formats: List[str] -) -> str: - - dimensions = [] - for mode in modes: - if mode in ["1", "L", "P"]: - dimension = 1 - elif mode in 
["RGB", "YCbCr", "LAB", "HSV"]: - dimension = 3 - elif mode in ["RGBA", "CMYK", "I", "F"]: - dimension = 4 - else: - raise ValueError(f"Found unknown image mode: {mode}.") - dimensions.append(dimension) - - images_dir = tempfile.mkdtemp() - - for image_idx in range(num_images): - size = random.choice(sizes) - file_format = random.choice(formats) - mode_idx = random.randrange(len(modes)) - mode = modes[mode_idx] - dimension = dimensions[mode_idx] - - width, height = size - file_name = f"{images_dir}/{image_idx}.{file_format}" - pixels_per_dimension = [] - for _ in range(dimension): - pixels = os.urandom(width * height) - pixels_per_dimension.append(pixels) - - image = Image.new(mode, size) - if len(pixels_per_dimension) == 1: - image.putdata(pixels_per_dimension[0]) - else: - image.putdata(list(zip(*pixels_per_dimension))) - image.save(file_name) - - return images_dir - - -def run_images_benchmark_single_node(benchmark: Benchmark): - # Set global random seed. - random.seed(42) - - test_input = [ - generate_images(100, [(256, 256)], ["RGB"], ["jpg"]), - generate_images(100, [(2048, 2048)], ["RGB"], ["jpg"]), - generate_images( - 1000, [(64, 64), (256, 256)], ["RGB", "L"], ["jpg", "jpeg", "png"] - ), - ] - - benchmark.run_materialize_ds( - "images-100-256-rbg-jpg", ray.data.read_images, test_input[0] - ) - benchmark.run_materialize_ds( - "images-100-2048-rbg-jpg", ray.data.read_images, test_input[1] - ) - benchmark.run_materialize_ds( - "images-100-2048-to-256-rbg-jpg", - ray.data.read_images, - test_input[1], - size=(256, 256), - ) - benchmark.run_materialize_ds( - "images-1000-mix", - ray.data.read_images, - test_input[2], - size=(256, 256), - mode="RGB", - ) - - for root in test_input: - shutil.rmtree(root) - - # TODO(chengsu): run benchmark on 20G and 100G imagenet data in multi-nodes - # cluster. - benchmark.run_materialize_ds( - "images-imagenet-1g", - ray.data.read_images, - "s3://air-example-data-2/1G-image-data-synthetic-raw", - ) - - -def run_images_benchmark_multi_node(benchmark: Benchmark): - hundred_thousand_image_paths = [ - f"s3://air-example-data-2/100k-images-data-synthetic-raw/dog_{i}/dog_0.jpg" - for i in range(100_000) - ] - hundred_million_image_paths = [] - for _ in range(100_000_000 // 100_000): - hundred_million_image_paths.extend(hundred_thousand_image_paths) - - def fn(): - ds = ray.data.read_images(hundred_million_image_paths) - for _ in ds.iter_batches(batch_size=None, batch_format="pyarrow"): - pass - - benchmark.run_fn("images-100M", fn) - - -if __name__ == "__main__": - args = parse_args() - main(args) diff --git a/release/nightly_tests/dataset/read_parquet_benchmark.py b/release/nightly_tests/dataset/read_parquet_benchmark.py deleted file mode 100644 index 96ceff9ff55c..000000000000 --- a/release/nightly_tests/dataset/read_parquet_benchmark.py +++ /dev/null @@ -1,120 +0,0 @@ -import ray -from ray.data.dataset import Dataset - -from benchmark import Benchmark -from parquet_data_generator import generate_data - -import shutil -import tempfile -from typing import Optional - - -def read_parquet( - root: str, - override_num_blocks: Optional[int] = None, - use_threads: bool = False, - filter=None, - columns=None, -) -> Dataset: - return ray.data.read_parquet( - paths=root, - override_num_blocks=override_num_blocks, - use_threads=use_threads, - filter=filter, - columns=columns, - ) - - -def run_read_parquet_benchmark(benchmark: Benchmark): - # Test with different override_num_blocks (multi-processing for single node) - # and threading. 
- for override_num_blocks in [1, 2, 4]: - for use_threads in [True, False]: - test_name = f"read-parquet-downsampled-nyc-taxi-2009-{override_num_blocks}-{use_threads}" # noqa: E501 - benchmark.run_materialize_ds( - test_name, - read_parquet, - root="s3://anonymous@air-example-data/ursa-labs-taxi-data/downsampled_2009_full_year_data.parquet", # noqa: E501 - override_num_blocks=override_num_blocks, - use_threads=use_threads, - ) - - # TODO: Test below is currently excluded, due to failure around - # pickling the Dataset involving the filter expression. - # The error is present on Python < 3.8, and involves the pickle/pickle5 - # libraries. `pickle` is included as a default library from Python 3.8+, - # whereas Python versions before this must import the backported `pickle5` library - # to maintain the same functionality. - - # Test with projection and filter pushdowns. - # Since we have projection and filter pushdown, we can run the read on the full - # size of one year data fast enough on a single node. - # test_name = "read-parquet-nyc-taxi-2018-pushdown" - # filter_expr = (pa.dataset.field("passenger_count") <= 10) & ( - # pa.dataset.field("passenger_count") > 0 - # ) - # benchmark.run( - # test_name, - # read_parquet, - # root="s3://anonymous@air-example-data/ursa-labs-taxi-data/by_year/2018", - # columns=["passenger_count", "trip_distance"], - # filter=filter_expr, - # ) - - # Test with different number files to handle: from a few to many. - data_dirs = [] - # Each test set has same total number of rows, which are distributed - # to different number of files. - total_rows = 1024 * 1024 * 8 - for num_files in [8, 128, 1024]: - for compression in ["snappy", "gzip"]: - data_dirs.append(tempfile.mkdtemp()) - generate_data( - num_rows=total_rows, - num_files=num_files, - num_row_groups_per_file=16, - compression=compression, - data_dir=data_dirs[-1], - ) - test_name = f"read-parquet-random-data-{num_files}-{compression}" - benchmark.run_materialize_ds( - test_name, - read_parquet, - root=data_dirs[-1], - override_num_blocks=1, # We are testing one task to handle N files - ) - for dir in data_dirs: - shutil.rmtree(dir) - - # Test reading many small files. - num_files = 1000 - num_row_groups_per_file = 2 - total_rows = num_files * num_row_groups_per_file - compression = "gzip" - - many_files_dir = "s3://air-example-data-2/read-many-parquet-files/" - # If needed, use the following utility to generate files on S3. - # Otherwise, the benchmark will read pre-generated files in the above bucket. 
- # generate_data( - # num_rows=total_rows, - # num_files=num_files, - # num_row_groups_per_file=num_row_groups_per_file, - # compression=compression, - # data_dir=many_files_dir, - # ) - test_name = f"read-many-parquet-files-s3-{num_files}-{compression}" - benchmark.run_materialize_ds( - test_name, - read_parquet, - root=many_files_dir, - ) - - -if __name__ == "__main__": - ray.init() - - benchmark = Benchmark("read-parquet") - - run_read_parquet_benchmark(benchmark) - - benchmark.write_result() diff --git a/release/release_data_tests.yaml b/release/release_data_tests.yaml index e8e71b7dadbf..752aaacb4fb0 100644 --- a/release/release_data_tests.yaml +++ b/release/release_data_tests.yaml @@ -1,3 +1,39 @@ +############### +# Reading tests +############### + +- name: read_parquet + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + + cluster: + byod: + type: gpu + cluster_compute: multi_node_autoscaling_compute.yaml + + run: + timeout: 3600 + script: python read_and_consume_benchmark.py s3://ray-benchmark-data/parquet/10TiB --format parquet --iterate + +- name: read_images + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + + cluster: + byod: + type: gpu + cluster_compute: multi_node_autoscaling_compute.yaml + + run: + timeout: 3600 + script: python read_and_consume_benchmark.py s3://air-example-data-2/300G-image-data-synthetic-raw --format image --iterate + ############### # Dataset tests ############### @@ -126,82 +162,6 @@ cluster: cluster_compute: data_ingest_benchmark_compute_gpu_gce.yaml -- name: read_parquet_benchmark_single_node - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - - cluster: - byod: - type: gpu - post_build_script: byod_install_mosaicml.sh - cluster_compute: single_node_benchmark_compute.yaml - - run: - # Expect the benchmark to finish in 400 seconds. - timeout: 400 - script: python read_parquet_benchmark.py - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: single_node_benchmark_compute_gce.yaml - -- name: read_images_benchmark_single_node - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - - cluster: - byod: - type: gpu - post_build_script: byod_install_mosaicml.sh - cluster_compute: single_node_benchmark_compute.yaml - - run: - timeout: 1800 - script: python read_images_benchmark.py --single-node - - variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_compute: single_node_benchmark_compute_gce.yaml - -# TODO: Re-enable this test once we fix https://github.com/ray-project/ray/issues/40686. 
-# - name: read_images_benchmark_multi_node -# group: data-tests -# working_dir: nightly_tests/dataset - -# frequency: nightly-3x -# team: data - -# cluster: -# byod: -# type: gpu -# cluster_compute: multi_node_read_images_benchmark_compute.yaml - -# run: -# timeout: 28800 -# script: python read_images_benchmark.py --multi-node - -# variations: -# - __suffix__: aws -# - __suffix__: gce -# env: gce -# frequency: manual -# cluster: -# cluster_compute: multi_node_read_images_benchmark_compute_gce.yaml - - name: read_images_comparison_microbenchmark_single_node group: data-tests working_dir: nightly_tests/dataset From 4b4f3c669bc71027cbae99d5b12ec750b70d96d4 Mon Sep 17 00:00:00 2001 From: Hao Chen Date: Sat, 23 Nov 2024 09:24:55 +0800 Subject: [PATCH 069/107] [data] fix a bug that may cause async map tasks to hang (#48861) ## Why are these changes needed? Fix a bug that may cause async map tasks to hang. See code comments for details. This issue can be reproduced with an existing test `test_map_batches_async_generator` on slow machines. --------- Signed-off-by: Hao Chen --- .../data/_internal/planner/plan_udf_map_op.py | 32 +++++++++++-------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/python/ray/data/_internal/planner/plan_udf_map_op.py b/python/ray/data/_internal/planner/plan_udf_map_op.py index 9cd81c0d5f3b..605efe7c95cc 100644 --- a/python/ray/data/_internal/planner/plan_udf_map_op.py +++ b/python/ray/data/_internal/planner/plan_udf_map_op.py @@ -352,6 +352,8 @@ def transform_fn( # generators, and in the main event loop, yield them from # the queue as they become available. output_batch_queue = queue.Queue() + # Sentinel object to signal the end of the async generator. + sentinel = object() async def process_batch(batch: DataBatch): try: @@ -366,29 +368,33 @@ async def process_batch(batch: DataBatch): ) # Put the exception into the queue to signal an error async def process_all_batches(): - loop = ray.data._map_actor_context.udf_map_asyncio_loop - tasks = [loop.create_task(process_batch(x)) for x in input_iterable] + try: + loop = ray.data._map_actor_context.udf_map_asyncio_loop + tasks = [loop.create_task(process_batch(x)) for x in input_iterable] - ctx = ray.data.DataContext.get_current() - if ctx.execution_options.preserve_order: - for task in tasks: - await task() - else: - for task in asyncio.as_completed(tasks): - await task + ctx = ray.data.DataContext.get_current() + if ctx.execution_options.preserve_order: + for task in tasks: + await task() + else: + for task in asyncio.as_completed(tasks): + await task + finally: + output_batch_queue.put(sentinel) # Use the existing event loop to create and run Tasks to process each batch loop = ray.data._map_actor_context.udf_map_asyncio_loop - future = asyncio.run_coroutine_threadsafe(process_all_batches(), loop) + asyncio.run_coroutine_threadsafe(process_all_batches(), loop) # Yield results as they become available. - # After all futures are completed, drain the queue to - # yield any remaining results. - while not future.done() or not output_batch_queue.empty(): + while True: # Here, `out_batch` is a one-row output batch # from the async generator, corresponding to a # single row from the input batch. out_batch = output_batch_queue.get() + if out_batch is sentinel: + # Break out of the loop when the sentinel is received. 
+                    break
                 if isinstance(out_batch, Exception):
                     raise out_batch

                 _validate_batch_output(out_batch)

From 354dea6e25929c611b95f569162a036117c60eab Mon Sep 17 00:00:00 2001
From: Mengjin Yan
Date: Fri, 22 Nov 2024 20:46:04 -0800
Subject: [PATCH 070/107] [Core] Fix the Flaky Test GcsClientReconnectionTest (#48879)

Signed-off-by: Mengjin Yan
---
 .../test/gcs_client_reconnection_test.cc | 55 +++++++++++++------
 1 file changed, 38 insertions(+), 17 deletions(-)

diff --git a/src/ray/gcs/gcs_client/test/gcs_client_reconnection_test.cc b/src/ray/gcs/gcs_client/test/gcs_client_reconnection_test.cc
index 99a92034aed2..2e003ec39977 100644
--- a/src/ray/gcs/gcs_client/test/gcs_client_reconnection_test.cc
+++ b/src/ray/gcs/gcs_client/test/gcs_client_reconnection_test.cc
@@ -14,6 +14,7 @@

 #include
 #include
+#include

 #include "absl/strings/substitute.h"
 #include "gtest/gtest.h"
@@ -198,7 +199,7 @@ TEST_F(GcsClientReconnectionTest, ReconnectionBasic) {
         ASSERT_TRUE(status.ok()) << status.ToString();
         p1.set_value(*p);
       }));
-  ASSERT_EQ(f1.wait_for(1s), std::future_status::timeout);
+  ASSERT_EQ(std::future_status::timeout, f1.wait_for(1s));

   // Make sure io context is not blocked
   std::promise<void> p2;
@@ -210,7 +211,7 @@ TEST_F(GcsClientReconnectionTest, ReconnectionBasic) {
   StartGCS();

   // Make sure the request is executed
-  ASSERT_EQ(f1.get(), "B");
+  ASSERT_EQ("B", f1.get());
 }

 TEST_F(GcsClientReconnectionTest, ReconnectionBackoff) {
@@ -241,8 +242,14 @@ TEST_F(GcsClientReconnectionTest, ReconnectionBackoff) {

   ShutdownGCS();

+  std::promise<void> p2;
+  auto f2 = p2.get_future();
   RAY_UNUSED(client->InternalKV().AsyncInternalKVPut(
-      "", "A", "B", false, gcs::GetGcsTimeoutMs(), [](auto, auto) {}));
+      "", "A", "B", false, gcs::GetGcsTimeoutMs(), [&p2](auto status, auto) {
+        ASSERT_TRUE(status.ok()) << status.ToString();
+        p2.set_value();
+      }));
+  ASSERT_EQ(std::future_status::timeout, f2.wait_for(1s));

   ASSERT_TRUE(WaitUntil(
       [channel]() {
@@ -305,27 +312,41 @@ TEST_F(GcsClientReconnectionTest, QueueingAndBlocking) {
   ShutdownGCS();

   // Send one request which should fail
-  RAY_UNUSED(client->InternalKV().AsyncInternalKVPut(
-      "", "A", "B", false, gcs::GetGcsTimeoutMs(), [](auto status, auto) {}));
-
-  // Make sure it's not blocking
   std::promise<void> p2;
-  client_io_service_->post([&p2]() { p2.set_value(); }, "");
   auto f2 = p2.get_future();
-  ASSERT_EQ(std::future_status::ready, f2.wait_for(1s));
-
-  // Send the second one and it should block the thread
   RAY_UNUSED(client->InternalKV().AsyncInternalKVPut(
-      "", "A", "B", false, gcs::GetGcsTimeoutMs(), [](auto status, auto) {}));
-  std::this_thread::sleep_for(1s);
+      "", "A", "B", false, gcs::GetGcsTimeoutMs(), [&p2](auto status, auto) {
+        ASSERT_TRUE(status.ok()) << status.ToString();
+        p2.set_value();
+      }));
+  ASSERT_EQ(std::future_status::timeout, f2.wait_for(1s));
+
+  // Make sure it's not blocking
   std::promise<void> p3;
   client_io_service_->post([&p3]() { p3.set_value(); }, "");
   auto f3 = p3.get_future();
-  ASSERT_EQ(std::future_status::timeout, f3.wait_for(1s));
+  ASSERT_EQ(std::future_status::ready, f3.wait_for(1s));
+
+  // Send the second one and it should block the thread
+  std::promise<void> p4;
+  auto f4 = p4.get_future();
+  RAY_UNUSED(client->InternalKV().AsyncInternalKVPut(
+      "", "A", "B", false, gcs::GetGcsTimeoutMs(), [&p4](auto status, auto) {
+        ASSERT_TRUE(status.ok()) << status.ToString();
+        p4.set_value();
+      }));
+  ASSERT_EQ(std::future_status::timeout, f4.wait_for(1s));
+
+  std::promise<void> p5;
+  client_io_service_->post([&p5]() { p5.set_value(); }, "");
+  auto f5 = p5.get_future();
ASSERT_EQ(std::future_status::timeout, f5.wait_for(1s));

   // Resume GCS server and it should unblock
   StartGCS();
-  ASSERT_EQ(std::future_status::ready, f3.wait_for(5s));
+  ASSERT_EQ(std::future_status::ready, f5.wait_for(5s));
+  ASSERT_EQ(std::future_status::ready, f2.wait_for(1s));
+  ASSERT_EQ(std::future_status::ready, f4.wait_for(1s));
 }

 TEST_F(GcsClientReconnectionTest, Timeout) {
@@ -335,7 +356,7 @@ TEST_F(GcsClientReconnectionTest, Timeout) {
     "gcs_rpc_server_reconnect_timeout_s": 60,
     "gcs_storage": "redis",
     "gcs_grpc_max_request_queued_max_bytes": 10,
-    "gcs_server_request_timeout_seconds": 3
+    "gcs_server_request_timeout_seconds": 10
   }
 )");
   StartGCS();
@@ -346,11 +367,11 @@ TEST_F(GcsClientReconnectionTest, Timeout) {
   ASSERT_TRUE(added);

   ShutdownGCS();
-
   std::vector<std::string> values;
   ASSERT_TRUE(
       client->InternalKV().Keys("", "A", gcs::GetGcsTimeoutMs(), values).IsTimedOut());
   ASSERT_TRUE(values.empty());
+
   StartGCS();
   ASSERT_TRUE(client->InternalKV().Keys("", "A", gcs::GetGcsTimeoutMs(), values).ok());
   ASSERT_EQ(std::vector<std::string>{"A"}, values);

From 56413d29f7c1ff58e221a29429932326e965298e Mon Sep 17 00:00:00 2001
From: Alexey Kudinkin
Date: Fri, 22 Nov 2024 21:15:40 -0800
Subject: [PATCH 071/107] Fixing pyarrow dep constraints in `setup.py` (#48886)

## Why are these changes needed?

This is a follow-up to a recent change upgrading the minimum supported PyArrow
version from 6.0.1 to 9.0.0.

## Related issue number

## Checks

- [ ] I've signed off every commit(by using the -s flag, i.e., `git commit -s`) in this PR.
- [ ] I've run `scripts/format.sh` to lint the changes in this PR.
- [ ] I've included any doc changes needed for https://docs.ray.io/en/master/.
- [ ] I've added any new APIs to the API Reference. For example, if I added a
method in Tune, I've added it in `doc/source/tune/api/` under the
           corresponding `.rst` file.
- [ ] I've made sure the tests are passing. Note that there might be a few flaky tests, see the recent failures at https://flakey-tests.ray.io/
- Testing Strategy
   - [ ] Unit tests
   - [ ] Release tests
   - [ ] This PR is not tested :(

Signed-off-by: Alexey Kudinkin
---
 python/setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/setup.py b/python/setup.py
index 2e6958d021da..16017fa5447a 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -228,7 +228,7 @@ def get_packages(self):
     pandas_dep = "pandas >= 1.3"
     numpy_dep = "numpy >= 1.20"
     pyarrow_deps = [
-        "pyarrow >= 6.0.1",
+        "pyarrow >= 9.0.0",
         "pyarrow <18; sys_platform == 'darwin' and platform_machine == 'x86_64'",
     ]
     setup_spec.extras = {

From 37aa0c66110fc235762c29612b90f1c73869e6cf Mon Sep 17 00:00:00 2001
From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com>
Date: Fri, 22 Nov 2024 21:47:03 -0800
Subject: [PATCH 072/107] [vm launcher] pin gcp image to a working version in
 release tests (#48890)

the latest version of the image changes the default user from ubuntu to
jupyter.
Signed-off-by: Lonnie Liu --- python/ray/autoscaler/gcp/example-full.yaml | 4 +-- .../gcp/example-minimal-pinned.yaml | 36 +++++++++++++++++++ release/release_tests.yaml | 2 +- 3 files changed, 39 insertions(+), 3 deletions(-) create mode 100644 python/ray/autoscaler/gcp/example-minimal-pinned.yaml diff --git a/python/ray/autoscaler/gcp/example-full.yaml b/python/ray/autoscaler/gcp/example-full.yaml index f5b30613aed9..2fa4c1211752 100644 --- a/python/ray/autoscaler/gcp/example-full.yaml +++ b/python/ray/autoscaler/gcp/example-full.yaml @@ -70,7 +70,7 @@ available_node_types: initializeParams: diskSizeGb: 50 # See https://cloud.google.com/compute/docs/images for more images - sourceImage: projects/deeplearning-platform-release/global/images/family/common-cpu + sourceImage: projects/deeplearning-platform-release/global/images/common-cpu-v20240922 # Additional options can be found in in the compute docs at # https://cloud.google.com/compute/docs/reference/rest/v1/instances/insert @@ -105,7 +105,7 @@ available_node_types: initializeParams: diskSizeGb: 50 # See https://cloud.google.com/compute/docs/images for more images - sourceImage: projects/deeplearning-platform-release/global/images/family/common-cpu + sourceImage: projects/deeplearning-platform-release/global/images/common-cpu-v20240922 # Run workers on preemtible instance by default. # Comment this out to use on-demand. scheduling: diff --git a/python/ray/autoscaler/gcp/example-minimal-pinned.yaml b/python/ray/autoscaler/gcp/example-minimal-pinned.yaml new file mode 100644 index 000000000000..ce42e6705854 --- /dev/null +++ b/python/ray/autoscaler/gcp/example-minimal-pinned.yaml @@ -0,0 +1,36 @@ +auth: + ssh_user: ubuntu +cluster_name: minimal +provider: + availability_zone: us-west1-a + project_id: null # TODO: set your GCP project ID here + region: us-west1 + type: gcp + +# Needs to pin the VM images for stability.. +available_node_types: + ray_head_default: + resources: {"CPU": 2} + node_config: + machineType: n1-standard-2 + disks: + - boot: true + autoDelete: true + type: PERSISTENT + initializeParams: + diskSizeGb: 50 + sourceImage: projects/deeplearning-platform-release/global/images/common-cpu-v20240922 + ray_worker_small: + min_workers: 0 + resources: {"CPU": 2} + node_config: + machineType: n1-standard-2 + disks: + - boot: true + autoDelete: true + type: PERSISTENT + initializeParams: + diskSizeGb: 50 + sourceImage: projects/deeplearning-platform-release/global/images/common-cpu-v20240922 + scheduling: + - preemptible: true diff --git a/release/release_tests.yaml b/release/release_tests.yaml index 4ffa492e6cde..1c150aedf601 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -4131,7 +4131,7 @@ run: timeout: 1200 - script: python launch_and_verify_cluster.py gcp/example-minimal.yaml + script: python launch_and_verify_cluster.py gcp/example-minimal-pinned.yaml - name: gcp_cluster_launcher_full group: cluster-launcher-test From 5cd8967f1c0c16d3ae5fedb8449d0d25dd4f9f3e Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Sat, 23 Nov 2024 06:33:16 -0800 Subject: [PATCH 073/107] [data] add back gen image function (#48895) which was imported from another file that has been deleted. 
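For reference, the restored helper is invoked the same way the deleted
`read_images_benchmark.py` used it; a minimal usage sketch (the argument
values mirror that old call site):

```python
# Generate 100 random 256x256 RGB JPEGs in a temporary directory; the
# TFRecords benchmark reads these back before timing ray.data.read_tfrecords.
images_dir = generate_images(
    num_images=100,
    sizes=[(256, 256)],
    modes=["RGB"],
    formats=["jpg"],
)
```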
Signed-off-by: Lonnie Liu --- .../dataset/read_tfrecords_benchmark.py | 39 ++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/release/nightly_tests/dataset/read_tfrecords_benchmark.py b/release/nightly_tests/dataset/read_tfrecords_benchmark.py index 48d1bb229195..34fecb02fd41 100644 --- a/release/nightly_tests/dataset/read_tfrecords_benchmark.py +++ b/release/nightly_tests/dataset/read_tfrecords_benchmark.py @@ -1,3 +1,4 @@ +import os import random import shutil import tempfile @@ -7,11 +8,47 @@ from ray.data.dataset import Dataset from benchmark import Benchmark -from read_images_benchmark import generate_images +from PIL import Image import pyarrow as pa import numpy as np +def generate_images( + num_images: int, sizes: List[Tuple[int, int]], modes: List[str], formats: List[str] +) -> str: + dimensions = [] + for mode in modes: + if mode in ["1", "L", "P"]: + dimension = 1 + elif mode in ["RGB", "YCbCr", "LAB", "HSV"]: + dimension = 3 + elif mode in ["RGBA", "CMYK", "I", "F"]: + dimension = 4 + else: + raise ValueError(f"Found unknown image mode: {mode}.") + dimensions.append(dimension) + images_dir = tempfile.mkdtemp() + for image_idx in range(num_images): + size = random.choice(sizes) + file_format = random.choice(formats) + mode_idx = random.randrange(len(modes)) + mode = modes[mode_idx] + dimension = dimensions[mode_idx] + width, height = size + file_name = f"{images_dir}/{image_idx}.{file_format}" + pixels_per_dimension = [] + for _ in range(dimension): + pixels = os.urandom(width * height) + pixels_per_dimension.append(pixels) + image = Image.new(mode, size) + if len(pixels_per_dimension) == 1: + image.putdata(pixels_per_dimension[0]) + else: + image.putdata(list(zip(*pixels_per_dimension))) + image.save(file_name) + return images_dir + + def read_tfrecords(path: str) -> Dataset: return ray.data.read_tfrecords(paths=path).materialize() From 4345c6c1fdcd2b79edf9563ce5e590cda89dcaa6 Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Sat, 23 Nov 2024 14:28:19 -0800 Subject: [PATCH 074/107] [doc] document the right dask version (#48899) 2024.6.0 is used since ray 2.34 Signed-off-by: Lonnie Liu --- doc/source/ray-more-libs/dask-on-ray.rst | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/doc/source/ray-more-libs/dask-on-ray.rst b/doc/source/ray-more-libs/dask-on-ray.rst index 3e130bfcaa35..1e6ae2f39129 100644 --- a/doc/source/ray-more-libs/dask-on-ray.rst +++ b/doc/source/ray-more-libs/dask-on-ray.rst @@ -31,7 +31,10 @@ workload. Using the Dask-on-Ray scheduler, the entire Dask ecosystem can be exec * - Ray Version - Dask Version - * - ``2.8.0`` or above + * - ``2.34.0`` or above + - | ``2022.10.1 (Python version < 3.12)`` + | ``2024.6.0 (Python version >= 3.12)`` + * - ``2.8.0`` to ``2.33.x`` - ``2022.10.1`` * - ``2.5.0`` to ``2.7.x`` - | ``2022.2.0 (Python version < 3.8)`` From 4a295714b670c1c8d8eccb03722d129c6fcd58a8 Mon Sep 17 00:00:00 2001 From: ryanaoleary <113500783+ryanaoleary@users.noreply.github.com> Date: Sun, 24 Nov 2024 00:52:50 +0000 Subject: [PATCH 075/107] [Core][Autoscaler] Configure idleTimeoutSeconds per node type (#48813) ## Why are these changes needed? Adds `idle_timeout_s` as a field to `node_type_configs`, enabling the v2 autoscaler to configure idle termination per worker type. 
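A sketch of the per-type override this enables (field names follow the
`NodeTypeConfig` diff below; the node type and values are illustrative):

```python
# A worker type with its own idle timeout. Node types that leave
# idle_timeout_s unset keep falling back to the scheduler-level value.
node_type_configs = {
    "gpu_worker": NodeTypeConfig(
        name="gpu_worker",
        min_worker_nodes=0,
        max_worker_nodes=5,
        resources={"GPU": 1},
        idle_timeout_s=300.0,  # terminate idle gpu_worker nodes after 5 min
    ),
}
```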
This PR depends on a change in KubeRay to the RayCluster CRD, since we want to support passing `idleTimeoutSeconds` to individual worker groups such that they can specify a custom idle duration: https://github.com/ray-project/kuberay/pull/2558 ## Related issue number Closes #36888 ## Checks - [x] I've signed off every commit(by using the -s flag, i.e., `git commit -s`) in this PR. - [ ] I've run `scripts/format.sh` to lint the changes in this PR. - [ ] I've included any doc changes needed for https://docs.ray.io/en/master/. - [ ] I've added any new APIs to the API Reference. For example, if I added a method in Tune, I've added it in `doc/source/tune/api/` under the corresponding `.rst` file. - [x] I've made sure the tests are passing. Note that there might be a few flaky tests, see the recent failures at https://flakey-tests.ray.io/ - Testing Strategy - [x] Unit tests - [ ] Release tests - [ ] This PR is not tested :( --------- Signed-off-by: ryanaoleary Signed-off-by: ryanaoleary <113500783+ryanaoleary@users.noreply.github.com> Co-authored-by: Kai-Hsun Chen Co-authored-by: Ricky Xu --- .../_private/kuberay/autoscaling_config.py | 8 +- python/ray/autoscaler/ray-schema.json | 1 + .../autoscaler/v2/instance_manager/config.py | 3 + python/ray/autoscaler/v2/scheduler.py | 6 +- .../ray/autoscaler/v2/tests/test_scheduler.py | 76 +++++++++++++++++++ 5 files changed, 92 insertions(+), 2 deletions(-) diff --git a/python/ray/autoscaler/_private/kuberay/autoscaling_config.py b/python/ray/autoscaler/_private/kuberay/autoscaling_config.py index 96aec03744b2..0bf61b311128 100644 --- a/python/ray/autoscaler/_private/kuberay/autoscaling_config.py +++ b/python/ray/autoscaler/_private/kuberay/autoscaling_config.py @@ -219,7 +219,7 @@ def _node_type_from_group_spec( resources = _get_ray_resources_from_group_spec(group_spec, is_head) - return { + node_type = { "min_workers": min_workers, "max_workers": max_workers, # `node_config` is a legacy field required for compatibility. @@ -228,6 +228,12 @@ def _node_type_from_group_spec( "resources": resources, } + idle_timeout_s = group_spec.get(IDLE_SECONDS_KEY) + if idle_timeout_s is not None: + node_type["idle_timeout_s"] = float(idle_timeout_s) + + return node_type + def _get_ray_resources_from_group_spec( group_spec: Dict[str, Any], is_head: bool diff --git a/python/ray/autoscaler/ray-schema.json b/python/ray/autoscaler/ray-schema.json index ad5da68ea2a0..2e07dadac912 100644 --- a/python/ray/autoscaler/ray-schema.json +++ b/python/ray/autoscaler/ray-schema.json @@ -345,6 +345,7 @@ }, "min_workers": {"type": "integer"}, "max_workers": {"type": "integer"}, + "idle_timeout_s": {"type": "number", "nullable": true}, "resources": { "type": "object", "patternProperties": { diff --git a/python/ray/autoscaler/v2/instance_manager/config.py b/python/ray/autoscaler/v2/instance_manager/config.py index c9597eef6c73..a7e582eacdbc 100644 --- a/python/ray/autoscaler/v2/instance_manager/config.py +++ b/python/ray/autoscaler/v2/instance_manager/config.py @@ -128,6 +128,8 @@ class NodeTypeConfig: min_worker_nodes: int # The maximal number of worker nodes can be launched for this node type. max_worker_nodes: int + # Idle timeout seconds for worker nodes of this node type. + idle_timeout_s: Optional[float] = None # The total resources on the node. resources: Dict[str, float] = field(default_factory=dict) # The labels on the node. 
@@ -346,6 +348,7 @@ def get_node_type_configs(self) -> Dict[NodeType, NodeTypeConfig]: name=node_type, min_worker_nodes=node_config.get("min_workers", 0), max_worker_nodes=max_workers_nodes, + idle_timeout_s=node_config.get("idle_timeout_s", None), resources=node_config.get("resources", {}), labels=node_config.get("labels", {}), launch_config_hash=launch_config_hash, diff --git a/python/ray/autoscaler/v2/scheduler.py b/python/ray/autoscaler/v2/scheduler.py index 3732a6282632..2d5a70065066 100644 --- a/python/ray/autoscaler/v2/scheduler.py +++ b/python/ray/autoscaler/v2/scheduler.py @@ -1584,6 +1584,11 @@ def _enforce_idle_termination( continue idle_timeout_s = ctx.get_idle_timeout_s() + # Override the scheduler idle_timeout_s if set for this node_type. + node_type = node.node_type + if node_type in node_type_configs: + if node_type_configs[node_type].idle_timeout_s is not None: + idle_timeout_s = node_type_configs[node_type].idle_timeout_s if idle_timeout_s is None: # No idle timeout is set, skip the idle termination. continue @@ -1606,7 +1611,6 @@ def _enforce_idle_termination( # Honor the min_worker_nodes setting for the node type. min_count = 0 - node_type = node.node_type if node_type in node_type_configs: min_count = node_type_configs[node_type].min_worker_nodes if ( diff --git a/python/ray/autoscaler/v2/tests/test_scheduler.py b/python/ray/autoscaler/v2/tests/test_scheduler.py index e6d6cb71978d..3a188bdaf2ce 100644 --- a/python/ray/autoscaler/v2/tests/test_scheduler.py +++ b/python/ray/autoscaler/v2/tests/test_scheduler.py @@ -1434,6 +1434,82 @@ def test_idle_termination_with_min_worker(min_workers): assert len(to_terminate) == 0 +@pytest.mark.parametrize("node_type_idle_timeout_s", [1, 2, 10]) +def test_idle_termination_with_node_type_idle_timeout(node_type_idle_timeout_s): + """ + Test that idle nodes are terminated when idle_timeout_s is set for node type. + """ + scheduler = ResourceDemandScheduler(event_logger) + + node_type_configs = { + "type_cpu_with_idle_timeout": NodeTypeConfig( + name="type_cpu", + resources={"CPU": 1}, + min_worker_nodes=0, + max_worker_nodes=5, + idle_timeout_s=node_type_idle_timeout_s, + launch_config_hash="hash1", + ), + } + + idle_time_s = 5 + constraints = [] + + request = sched_request( + node_type_configs=node_type_configs, + instances=[ + make_autoscaler_instance( + im_instance=Instance( + instance_type="type_cpu_with_idle_timeout", + status=Instance.RAY_RUNNING, + launch_config_hash="hash1", + instance_id="i-1", + node_id="r-1", + ), + ray_node=NodeState( + node_id=b"r-1", + ray_node_type_name="type_cpu_with_idle_timeout", + available_resources={"CPU": 0}, + total_resources={"CPU": 1}, + idle_duration_ms=0, # Non idle + status=NodeStatus.RUNNING, + ), + cloud_instance_id="c-1", + ), + make_autoscaler_instance( + im_instance=Instance( + instance_id="i-2", + instance_type="type_cpu_with_idle_timeout", + status=Instance.RAY_RUNNING, + launch_config_hash="hash1", + node_id="r-2", + ), + ray_node=NodeState( + ray_node_type_name="type_cpu_with_idle_timeout", + node_id=b"r-2", + available_resources={"CPU": 1}, + total_resources={"CPU": 1}, + idle_duration_ms=idle_time_s * 1000, + status=NodeStatus.IDLE, + ), + cloud_instance_id="c-2", + ), + ], + # Set autoscaler idle_timeout_s to a value greater than + # node_type_idle_timeout_s and idle_time_s. 
+        idle_timeout_s=idle_time_s * 1000,
+        cluster_resource_constraints=constraints,
+    )
+
+    reply = scheduler.schedule(request)
+    _, to_terminate = _launch_and_terminate(reply)
+    if node_type_idle_timeout_s <= idle_time_s:
+        assert len(to_terminate) == 1
+        assert to_terminate == [("i-2", "r-2", TerminationRequest.Cause.IDLE)]
+    else:
+        assert len(to_terminate) == 0
+
+
 def test_gang_scheduling():
     """
     Test that gang scheduling works.

From c2f93341a6c310892c624e815aa000f305b37186 Mon Sep 17 00:00:00 2001
From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com>
Date: Sat, 23 Nov 2024 21:34:55 -0800
Subject: [PATCH 076/107] [air] dreambooth pandas type fix (#48904)

The lambda passed to `add_column` must return a `pandas.Series` now.

Signed-off-by: Lonnie Liu
---
 .../05_dreambooth_finetuning/dreambooth/dataset.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/doc/source/templates/05_dreambooth_finetuning/dreambooth/dataset.py b/doc/source/templates/05_dreambooth_finetuning/dreambooth/dataset.py
index 6c7e1e52aa48..bba46f34208b 100644
--- a/doc/source/templates/05_dreambooth_finetuning/dreambooth/dataset.py
+++ b/doc/source/templates/05_dreambooth_finetuning/dreambooth/dataset.py
@@ -88,7 +88,9 @@ def transform_image(
             transform_image, fn_kwargs={"output_column_name": "instance_image"}
         )
         .drop_columns(["image"])
-        .add_column("instance_prompt_ids", lambda df: [instance_prompt_ids] * len(df))
+        .add_column(
+            "instance_prompt_ids", lambda df: pd.Series([instance_prompt_ids] * len(df))
+        )
     )
     # END: Apply preprocessing steps as Ray Dataset operations

@@ -97,7 +99,9 @@ def transform_image(
             transform_image, fn_kwargs={"output_column_name": "class_image"}
         )
         .drop_columns(["image"])
-        .add_column("class_prompt_ids", lambda df: [class_prompt_ids] * len(df))
+        .add_column(
+            "class_prompt_ids", lambda df: pd.Series([class_prompt_ids] * len(df))
+        )
     )

 # --- Ray Data

From 02ac0cdc7adf5e611134840c73fa47dd7866140d Mon Sep 17 00:00:00 2001
From: dentiny
Date: Sat, 23 Nov 2024 21:35:15 -0800
Subject: [PATCH 077/107] [core] Fix gcs unit tests dependency (#48892)

Our usage of the gtest dependency is incorrect: in short, we shouldn't declare `@com_google_googletest//:gtest_main` as a dependency and **meanwhile** define a `main` function in the test. One weird behavior I noticed is that the overridden `SetUp` function somehow gets invoked twice.
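As a minimal sketch of the convention this change moves toward (hypothetical test and target names, not actual Ray code): either depend on `@com_google_googletest//:gtest` and define your own `main`, or depend on `@com_google_googletest//:gtest_main` and define no `main` at all, but never both.

```cpp
#include "gtest/gtest.h"

class ExampleTest : public ::testing::Test {
 protected:
  // With exactly one main in the binary, SetUp runs once per test,
  // rather than the double invocation described above.
  void SetUp() override { value_ = 42; }
  int value_ = 0;
};

TEST_F(ExampleTest, ValueIsInitialized) { EXPECT_EQ(value_, 42); }

// Keep this main only if the Bazel target depends on
// "@com_google_googletest//:gtest". If the target depends on
// ":gtest_main" instead, delete it and let GoogleTest supply main.
int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
```

Having both a hand-written `main` and the one supplied by `gtest_main` linked into the same test binary is the mixed setup this PR removes.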
Signed-off-by: hjiang Co-authored-by: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> --- BUILD.bazel | 6 +++--- .../gcs/gcs_server/test/gcs_actor_manager_test.cc | 5 ----- .../gcs_server/test/gcs_actor_scheduler_test.cc | 5 ----- .../test/gcs_autoscaler_state_manager_test.cc | 5 ----- .../test/gcs_health_check_manager_test.cc | 14 -------------- .../gcs/gcs_server/test/gcs_job_manager_test.cc | 4 ---- .../gcs/gcs_server/test/gcs_node_manager_test.cc | 5 ----- .../test/gcs_placement_group_manager_test.cc | 5 ----- .../test/gcs_placement_group_scheduler_test.cc | 5 ----- .../gcs_server/test/gcs_resource_manager_test.cc | 5 ----- .../gcs/gcs_server/test/gcs_worker_manager_test.cc | 5 ----- .../test/in_memory_gcs_table_storage_test.cc | 5 ----- .../gcs/gcs_server/test/usage_stats_client_test.cc | 5 ----- 13 files changed, 3 insertions(+), 71 deletions(-) diff --git a/BUILD.bazel b/BUILD.bazel index 03c002fc1256..dd31e976d6c2 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -1627,7 +1627,7 @@ ray_cc_test( deps = [ ":gcs_server_lib", ":gcs_test_util_lib", - "@com_google_googletest//:gtest_main", + "@com_google_googletest//:gtest", ], ) @@ -1649,7 +1649,7 @@ ray_cc_test( deps = [ ":gcs_server_lib", ":gcs_test_util_lib", - "@com_google_googletest//:gtest_main", + "@com_google_googletest//:gtest", ], ) @@ -1883,7 +1883,7 @@ ray_cc_test( ":gcs_table_storage_test_lib", ":gcs_test_util_lib", ":store_client_test_lib", - "@com_google_googletest//:gtest_main", + "@com_google_googletest//:gtest", ], ) diff --git a/src/ray/gcs/gcs_server/test/gcs_actor_manager_test.cc b/src/ray/gcs/gcs_server/test/gcs_actor_manager_test.cc index c3b8c9f2a421..9bb274af97bd 100644 --- a/src/ray/gcs/gcs_server/test/gcs_actor_manager_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_actor_manager_test.cc @@ -1424,8 +1424,3 @@ TEST_F(GcsActorManagerTest, TestDestroyActorWhenActorIsCreating) { } // namespace gcs } // namespace ray - -int main(int argc, char **argv) { - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/src/ray/gcs/gcs_server/test/gcs_actor_scheduler_test.cc b/src/ray/gcs/gcs_server/test/gcs_actor_scheduler_test.cc index c14497db7eaa..48a0c0becbe9 100644 --- a/src/ray/gcs/gcs_server/test/gcs_actor_scheduler_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_actor_scheduler_test.cc @@ -1191,8 +1191,3 @@ TEST_F(GcsActorSchedulerTest, TestReleaseUnusedActorWorkersByGcs) { } // namespace gcs } // namespace ray - -int main(int argc, char **argv) { - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/src/ray/gcs/gcs_server/test/gcs_autoscaler_state_manager_test.cc b/src/ray/gcs/gcs_server/test/gcs_autoscaler_state_manager_test.cc index b0b31b182d77..856261b032e4 100644 --- a/src/ray/gcs/gcs_server/test/gcs_autoscaler_state_manager_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_autoscaler_state_manager_test.cc @@ -847,8 +847,3 @@ TEST_F(GcsAutoscalerStateManagerTest, TestGcsKvManagerInternalConfig) { } // namespace gcs } // namespace ray - -int main(int argc, char **argv) { - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/src/ray/gcs/gcs_server/test/gcs_health_check_manager_test.cc b/src/ray/gcs/gcs_server/test/gcs_health_check_manager_test.cc index 1f0722a6d6d7..35fc308f28a9 100644 --- a/src/ray/gcs/gcs_server/test/gcs_health_check_manager_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_health_check_manager_test.cc @@ -271,17 +271,3 @@ TEST_F(GcsHealthCheckManagerTest, StressTest) { io_service.stop(); t->join(); } - -int main(int 
argc, char **argv) { - InitShutdownRAII ray_log_shutdown_raii(ray::RayLog::StartRayLog, - ray::RayLog::ShutDownRayLog, - argv[0], - ray::RayLogLevel::INFO, - /*log_dir=*/""); - - ray::RayLog::InstallFailureSignalHandler(argv[0]); - ray::RayLog::InstallTerminateHandler(); - - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/src/ray/gcs/gcs_server/test/gcs_job_manager_test.cc b/src/ray/gcs/gcs_server/test/gcs_job_manager_test.cc index 107af6752a5d..b18658dffc95 100644 --- a/src/ray/gcs/gcs_server/test/gcs_job_manager_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_job_manager_test.cc @@ -720,8 +720,4 @@ TEST_F(GcsJobManagerTest, TestNodeFailure) { EXPECT_TRUE(WaitForCondition(condition, 2000)); } -int main(int argc, char **argv) { - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} } // namespace ray diff --git a/src/ray/gcs/gcs_server/test/gcs_node_manager_test.cc b/src/ray/gcs/gcs_server/test/gcs_node_manager_test.cc index a8a0157e0d54..fcc273aa4f9f 100644 --- a/src/ray/gcs/gcs_server/test/gcs_node_manager_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_node_manager_test.cc @@ -97,8 +97,3 @@ TEST_F(GcsNodeManagerTest, TestListener) { } } // namespace ray - -int main(int argc, char **argv) { - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_test.cc b/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_test.cc index 268096815cbe..d407332d8021 100644 --- a/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_test.cc @@ -1011,8 +1011,3 @@ TEST_F(GcsPlacementGroupManagerTest, TestCheckCreatorJobIsDeadWhenGcsRestart) { } // namespace gcs } // namespace ray - -int main(int argc, char **argv) { - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/src/ray/gcs/gcs_server/test/gcs_placement_group_scheduler_test.cc b/src/ray/gcs/gcs_server/test/gcs_placement_group_scheduler_test.cc index 5d3f11ed39b0..8bbcaedefc63 100644 --- a/src/ray/gcs/gcs_server/test/gcs_placement_group_scheduler_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_placement_group_scheduler_test.cc @@ -1476,8 +1476,3 @@ TEST_F(GcsPlacementGroupSchedulerTest, TestBundlesRemovedWhenNodeDead) { } } // namespace ray - -int main(int argc, char **argv) { - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/src/ray/gcs/gcs_server/test/gcs_resource_manager_test.cc b/src/ray/gcs/gcs_server/test/gcs_resource_manager_test.cc index 31fdc58530d8..cc2d3dec33a8 100644 --- a/src/ray/gcs/gcs_server/test/gcs_resource_manager_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_resource_manager_test.cc @@ -255,8 +255,3 @@ TEST_F(GcsResourceManagerTest, TestGetDrainingNodes) { } } // namespace ray - -int main(int argc, char **argv) { - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/src/ray/gcs/gcs_server/test/gcs_worker_manager_test.cc b/src/ray/gcs/gcs_server/test/gcs_worker_manager_test.cc index 3dd5c1e720e2..37d6a67b7b0d 100644 --- a/src/ray/gcs/gcs_server/test/gcs_worker_manager_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_worker_manager_test.cc @@ -297,8 +297,3 @@ TEST_F(GcsWorkerManagerTest, TestUpdateWorkerNumPausedThreads) { ASSERT_EQ(reply.worker_table_data(0).num_paused_threads(), num_paused_threads_delta); } } - -int main(int argc, char **argv) { - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git 
a/src/ray/gcs/gcs_server/test/in_memory_gcs_table_storage_test.cc b/src/ray/gcs/gcs_server/test/in_memory_gcs_table_storage_test.cc
index 0a63277536c8..dba6ddce5922 100644
--- a/src/ray/gcs/gcs_server/test/in_memory_gcs_table_storage_test.cc
+++ b/src/ray/gcs/gcs_server/test/in_memory_gcs_table_storage_test.cc
@@ -35,8 +35,3 @@ TEST_F(InMemoryGcsTableStorageTest, TestGcsTableWithJobIdApi) {
 }

 }  // namespace ray
-
-int main(int argc, char **argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/src/ray/gcs/gcs_server/test/usage_stats_client_test.cc b/src/ray/gcs/gcs_server/test/usage_stats_client_test.cc
index 21e660b1f1cd..9448f0000b9f 100644
--- a/src/ray/gcs/gcs_server/test/usage_stats_client_test.cc
+++ b/src/ray/gcs/gcs_server/test/usage_stats_client_test.cc
@@ -45,8 +45,3 @@ TEST_F(UsageStatsClientTest, TestRecordExtraUsageTag) {
     ASSERT_EQ(value.value(), "value2");
   });
 }
-
-int main(int argc, char **argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}

From 559dd6735c602cb9977b09a2b04fd5a9852597ec Mon Sep 17 00:00:00 2001
From: Kai-Hsun Chen
Date: Sun, 24 Nov 2024 14:25:39 -0800
Subject: [PATCH 078/107] [core][autoscaler] Health check logs are not visible in the autoscaler container's stdout (#48905)

## Why are these changes needed?

* The Autoscaler container doesn't display information like `print("The Ray head is ready. Starting the autoscaler.")` in STDOUT/STDERR for some reason. To display logs to STDOUT/STDERR, we need to explicitly specify `flush` in `print()` or use the logging module. I don't know why the flush isn't triggered; the default end of `print` is `\n`, which should trigger a line-buffered flush.
* Change `logging.warn` to `logging.warning` because `logging.warn` is deprecated. See [this doc](https://docs.python.org/3/library/logging.html#logging.Logger.warning) for more details.

## Related issue number

## Checks

- [ ] I've signed off every commit(by using the -s flag, i.e., `git commit -s`) in this PR.
- [ ] I've run `scripts/format.sh` to lint the changes in this PR.
- [ ] I've included any doc changes needed for https://docs.ray.io/en/master/.
- [ ] I've added any new APIs to the API Reference. For example, if I added a method in Tune, I've added it in `doc/source/tune/api/` under the corresponding `.rst` file.
- [ ] I've made sure the tests are passing. Note that there might be a few flaky tests, see the recent failures at https://flakey-tests.ray.io/
- Testing Strategy
   - [ ] Unit tests
   - [ ] Release tests
   - [ ] This PR is not tested :(

---------

Signed-off-by: kaihsun
---
 python/ray/autoscaler/_private/kuberay/run_autoscaler.py | 8 ++++----
 .../autoscaler/v2/instance_manager/instance_manager.py   | 6 +++---
 python/ray/autoscaler/v2/monitor.py                      | 3 ++-
 3 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/python/ray/autoscaler/_private/kuberay/run_autoscaler.py b/python/ray/autoscaler/_private/kuberay/run_autoscaler.py
index 452bfef66c3e..efafac6d8f37 100644
--- a/python/ray/autoscaler/_private/kuberay/run_autoscaler.py
+++ b/python/ray/autoscaler/_private/kuberay/run_autoscaler.py
@@ -44,12 +44,12 @@ def run_kuberay_autoscaler(cluster_name: str, cluster_namespace: str):
                     "--skip-version-check",
                 ]
             )
-            # Logging is not ready yet. Print to stdout for now.
-            print("The Ray head is ready. Starting the autoscaler.")
+            logger.info("The Ray head is ready. Starting the autoscaler.")
             break
         except subprocess.CalledProcessError:
-            print("The Ray head is not yet ready.")
-            print(f"Will check again in {BACKOFF_S} seconds.")
+            logger.warning(
+                f"The Ray head is not ready. Will check again in {BACKOFF_S} seconds."
+            )
             time.sleep(BACKOFF_S)

     # The Ray head container sets up the log directory. Thus, we set up logging
diff --git a/python/ray/autoscaler/v2/instance_manager/instance_manager.py b/python/ray/autoscaler/v2/instance_manager/instance_manager.py
index aa926ba92747..6a1f6e207408 100644
--- a/python/ray/autoscaler/v2/instance_manager/instance_manager.py
+++ b/python/ray/autoscaler/v2/instance_manager/instance_manager.py
@@ -58,7 +58,7 @@ def update_instance_manager_state(
         """
         Updates the instance manager state.

-        If there's a any failure, no updates would be made and the reply
+        If there's any failure, no updates would be made and the reply
         would contain the latest version of the instance manager state,
         and the error info.

@@ -80,7 +80,7 @@ def update_instance_manager_state(
                 f"Version mismatch: expected: {request.expected_version}, "
                 f"actual: {version}"
             )
-            logger.warn(err_str)
+            logger.warning(err_str)
             return self._get_update_im_state_reply(
                 StatusCode.VERSION_MISMATCH,
                 version,
@@ -110,7 +110,7 @@ def update_instance_manager_state(
             err_str = (
                 f"Version mismatch: expected: {version}, actual: {result.version}"
             )
-            logger.warn(err_str)
+            logger.warning(err_str)
             return self._get_update_im_state_reply(
                 StatusCode.VERSION_MISMATCH, result.version, err_str
             )
diff --git a/python/ray/autoscaler/v2/monitor.py b/python/ray/autoscaler/v2/monitor.py
index 8277addc0017..c9334244aa8c 100644
--- a/python/ray/autoscaler/v2/monitor.py
+++ b/python/ray/autoscaler/v2/monitor.py
@@ -17,6 +17,7 @@
 from ray._private.event.event_logger import get_event_logger
 from ray._private.ray_logging import setup_component_logger
 from ray._private.usage.usage_lib import record_extra_usage_tag
+from ray._private.worker import SCRIPT_MODE
 from ray._raylet import GcsClient
 from ray.autoscaler._private.constants import (
     AUTOSCALER_METRIC_PORT,
@@ -77,7 +78,7 @@ def __init__(
         )
         self._session_name = self._get_session_name(self.gcs_client)
         logger.info(f"session_name: {self._session_name}")
-        worker.mode = 0
+        worker.set_mode(SCRIPT_MODE)
         head_node_ip = self.gcs_address.split(":")[0]

         self.autoscaler = None

From fe52a258417a36d0b4117df43ba6cce16bfce148 Mon Sep 17 00:00:00 2001
From: dentiny
Date: Sun, 24 Nov 2024 17:29:23 -0800
Subject: [PATCH 079/107] [core] Use unified timestamp (#48894)

@rynewang raised a good point at https://github.com/ray-project/ray/pull/48843#discussion_r1853173542, which points out that it's better to use a single unified timestamp instead of several independently sampled ones. The risks of mixing timestamps are (1) system clock vs. steady clock, and (2) different algorithms/implementations used to obtain each timestamp.
Signed-off-by: hjiang --- src/ray/gcs/gcs_server/gcs_health_check_manager.cc | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/src/ray/gcs/gcs_server/gcs_health_check_manager.cc b/src/ray/gcs/gcs_server/gcs_health_check_manager.cc index efa279daf56a..d6e858482185 100644 --- a/src/ray/gcs/gcs_server/gcs_health_check_manager.cc +++ b/src/ray/gcs/gcs_server/gcs_health_check_manager.cc @@ -90,14 +90,11 @@ void GcsHealthCheckManager::HealthCheckContext::StartHealthCheck() { new (&context_) grpc::ClientContext(); response_.Clear(); - const auto now = std::chrono::system_clock::now(); - auto deadline = now + std::chrono::milliseconds(manager_->timeout_ms_); - context_.set_deadline(deadline); + const auto now = absl::Now(); + const auto deadline = now + absl::Milliseconds(manager_->timeout_ms_); + context_.set_deadline(absl::ToChronoTime(deadline)); stub_->async()->Check( - &context_, - &request_, - &response_, - [this, start = absl::FromChrono(now)](::grpc::Status status) { + &context_, &request_, &response_, [this, start = now](::grpc::Status status) { // This callback is done in gRPC's thread pool. STATS_health_check_rpc_latency_ms.Record( absl::ToInt64Milliseconds(absl::Now() - start)); From ba8674ac946c39487714e49a2a26ed155143b7dd Mon Sep 17 00:00:00 2001 From: Blocka Date: Mon, 25 Nov 2024 13:27:11 +0800 Subject: [PATCH 080/107] [Doc] Logging: Add Fluent Bit DaemonSet and Grafana Loki to Persist KubeRay operator logs (#48725) Signed-off-by: win5923 --- .../cluster/configure-manage-dashboard.md | 2 +- .../cluster/kubernetes/configs/loki.log.yaml | 46 +++++++ .../getting-started/raycluster-quick-start.md | 5 +- doc/source/cluster/kubernetes/user-guides.md | 6 +- .../cluster/kubernetes/user-guides/config.md | 2 +- .../user-guides/images/loki-logs.png | Bin 0 -> 343788 bytes ...> persist-kuberay-custom-resource-logs.md} | 4 +- .../persist-kuberay-operator-logs.md | 116 ++++++++++++++++++ .../user-guides/configure-logging.md | 12 +- .../serve/production-guide/kubernetes.md | 2 +- 10 files changed, 180 insertions(+), 15 deletions(-) create mode 100644 doc/source/cluster/kubernetes/configs/loki.log.yaml create mode 100644 doc/source/cluster/kubernetes/user-guides/images/loki-logs.png rename doc/source/cluster/kubernetes/user-guides/{logging.md => persist-kuberay-custom-resource-logs.md} (99%) create mode 100644 doc/source/cluster/kubernetes/user-guides/persist-kuberay-operator-logs.md diff --git a/doc/source/cluster/configure-manage-dashboard.md b/doc/source/cluster/configure-manage-dashboard.md index ce8eb9c9e941..885357ce38b2 100644 --- a/doc/source/cluster/configure-manage-dashboard.md +++ b/doc/source/cluster/configure-manage-dashboard.md @@ -5,7 +5,7 @@ Dashboard configurations may differ depending on how you launch Ray Clusters (e.g., local Ray Cluster v.s. KubeRay). Integrations with Prometheus and Grafana are optional for enhanced Dashboard experience. :::{note} -Ray Dashboard is only intended for interactive development and debugging because the Dashboard UI and the underlying data are not accessible after Clusters are terminated. For production monitoring and debugging, users should rely on [persisted logs](../cluster/kubernetes/user-guides/logging.md), [persisted metrics](./metrics.md), [persisted Ray states](../ray-observability/user-guides/cli-sdk.rst), and other observability tools. +Ray Dashboard is useful for interactive development and debugging because when clusters terminate, the dashboard UI and the underlying data are no longer accessible. 
For production monitoring and debugging, you should rely on [persisted logs](../cluster/kubernetes/user-guides/persist-kuberay-custom-resource-logs.md), [persisted metrics](./metrics.md), [persisted Ray states](../ray-observability/user-guides/cli-sdk.rst), and other observability tools. ::: ## Changing the Ray Dashboard port diff --git a/doc/source/cluster/kubernetes/configs/loki.log.yaml b/doc/source/cluster/kubernetes/configs/loki.log.yaml new file mode 100644 index 000000000000..07ab28d13344 --- /dev/null +++ b/doc/source/cluster/kubernetes/configs/loki.log.yaml @@ -0,0 +1,46 @@ +# Fluent Bit Config +config: + inputs: | + [INPUT] + Name tail + Path /var/log/containers/*.log + multiline.parser docker, cri + Tag kube.* + Mem_Buf_Limit 5MB + Skip_Long_Lines On + + filters: | + [FILTER] + Name kubernetes + Match kube.* + Merge_Log On + Keep_Log Off + K8S-Logging.Parser On + K8S-Logging.Exclude On + + outputs: | + [OUTPUT] + Name loki + Match * + Host loki-gateway + Port 80 + Labels job=fluent-bit,namespace=$kubernetes['namespace_name'],pod=$kubernetes['pod_name'],container=$kubernetes['container_name'] + Auto_Kubernetes_Labels Off + tenant_id test +--- +# Grafana Datasource Config +datasources: + datasources.yaml: + apiVersion: 1 + datasources: + - name: Loki + type: loki + access: proxy + editable: true + url: http://loki-gateway.default + jsonData: + timeout: 60 + maxLines: 1000 + httpHeaderName1: "X-Scope-OrgID" + secureJsonData: + httpHeaderValue1: "test" diff --git a/doc/source/cluster/kubernetes/getting-started/raycluster-quick-start.md b/doc/source/cluster/kubernetes/getting-started/raycluster-quick-start.md index 1915081b0717..6275564a9ea7 100644 --- a/doc/source/cluster/kubernetes/getting-started/raycluster-quick-start.md +++ b/doc/source/cluster/kubernetes/getting-started/raycluster-quick-start.md @@ -35,11 +35,12 @@ kubectl get pods # kuberay-operator-7fbdbf8c89-pt8bk 1/1 Running 0 27s ``` -KubeRay offers multiple options for operator installations, such as Helm, Kustomize, and a single-namespaced operator. For further information, please refer to [the installation instructions in the KubeRay documentation](https://ray-project.github.io/kuberay/deploy/installation/). +KubeRay offers multiple options for operator installations, such as Helm, Kustomize, and a single-namespaced operator. For further information, see [the installation instructions in the KubeRay documentation](https://ray-project.github.io/kuberay/deploy/installation/). +(raycluster-deploy)= ## Step 3: Deploy a RayCluster custom resource -Once the KubeRay operator is running, we are ready to deploy a RayCluster. To do so, we create a RayCluster Custom Resource (CR) in the `default` namespace. +Once the KubeRay operator is running, you're ready to deploy a RayCluster. Create a RayCluster Custom Resource (CR) in the `default` namespace. ::::{tab-set} diff --git a/doc/source/cluster/kubernetes/user-guides.md b/doc/source/cluster/kubernetes/user-guides.md index bb8713a51822..0b2b49639949 100644 --- a/doc/source/cluster/kubernetes/user-guides.md +++ b/doc/source/cluster/kubernetes/user-guides.md @@ -15,7 +15,8 @@ user-guides/config user-guides/configuring-autoscaling user-guides/kuberay-gcs-ft user-guides/gke-gcs-bucket -user-guides/logging +user-guides/persist-kuberay-custom-resource-logs +user-guides/persist-kuberay-operator-logs user-guides/gpu user-guides/tpu user-guides/rayserve-dev-doc @@ -45,7 +46,8 @@ at the {ref}`introductory guide ` first. 
 * {ref}`kuberay-gpu`
 * {ref}`kuberay-tpu`
 * {ref}`kuberay-gcs-ft`
-* {ref}`kuberay-logging`
+* {ref}`persist-kuberay-custom-resource-logs`
+* {ref}`persist-kuberay-operator-logs`
 * {ref}`kuberay-dev-serve`
 * {ref}`kuberay-pod-command`
 * {ref}`kuberay-pod-security`
diff --git a/doc/source/cluster/kubernetes/user-guides/config.md b/doc/source/cluster/kubernetes/user-guides/config.md
index 5ca8df8f1c73..e4b18aaa4dea 100644
--- a/doc/source/cluster/kubernetes/user-guides/config.md
+++ b/doc/source/cluster/kubernetes/user-guides/config.md
@@ -126,7 +126,7 @@ Here are some of the subfields of the pod `template` to pay attention to:
 #### containers

 A Ray pod template specifies at minimum one container, namely the container that runs the Ray processes. A Ray pod template may also specify additional sidecar
-containers, for purposes such as {ref}`log processing <kuberay-logging>`. However, the KubeRay operator assumes that
+containers, for purposes such as {ref}`log processing <persist-kuberay-custom-resource-logs>`. However, the KubeRay operator assumes that
 the first container in the containers list is the main Ray container. Therefore, make sure to specify any sidecar
 containers **after** the main Ray container. In other words, the Ray container should be the **first**
diff --git a/doc/source/cluster/kubernetes/user-guides/images/loki-logs.png b/doc/source/cluster/kubernetes/user-guides/images/loki-logs.png
new file mode 100644
index 0000000000000000000000000000000000000000..2419cf7ca8f0aab384296e24659d02fe16b8ce25
GIT binary patch
literal 343788
[343788 bytes of base85-encoded binary data omitted: loki-logs.png, a screenshot of KubeRay logs queried from Loki in Grafana]
z5G^<~fs$=#5uF_LC;eUfYz+}+W>)1~-Q9u3g4nhCCp6~1@XBYpQH(B7P3T1k~zt_Ih z8zCYd&%!6#6|*!`uECyZzm9>sp|Z=^gh*%uTRI=J@8!zdpAe8uiweF>u83BJ@a zpk?Os>av3r2f&K(W(5;jpgS$G^8n$p4xFVDgw%7Mp1$}<{|xLRWb&T+J;{I{{7OoF zX~mYVxXlLQX0d@yT$vGZJqJSI$@XmwyUu3 z5)rg)ooJf%=BeLj=LiFH-I|0{9IykALI~;Q`WUfo;P+if;Ou$K0gx~XD%aZ`-nIC< z6}dY+y;SXWv-sg7<+V|k^x;q{(H#}h`&e_%j z0C-IR4-mDj#goJJ*d%iidG@NxkD)vrF0%_WT+}70-i?(k1^4G(8|=6l7hnZ+IWc*a z$kMHVWs+$4F#WF8=UpZ@|D;uEW1ts$aP#_)Jcack&GW8gmro~3q|7?b7TZdGny8XM z`U*O`0r&ev0yNNi8zwcrb!615w9QK4%1rxnT{-yt`*G*>PjswfsI=0u_lme-2?Mn5 ztBHrN3TIar17e1z%VP;cd=4mW$47w|TGC2Mz6Y0`^5bo976^^Q-7`8nJBO<6EtH0C zaRDKSo;G#vEL#Id*CAc)ZCqumHTQ@rrnjix|I4Yvhb3X$Bif+~Hv%YkHGQ`(PF?r{KnP-rQNSTDoyb9~3+po>O$aQb@%j8As74&`TA8bt(Fz7q) z#R#?SjrVexOx#5Zhiq8y%zex#$<2K)ND65TOsfD&I$h8c0D3Ok&;9B?K6tkV;$E(# zTj#w3En#MTvf@&Z>!;URTp{9JzI+I$VNZzLY!WoTd*bMkBV*;qjmj)1r_02)80)UN zMnW7QUB)U)85IU{=(Fvo^Mo)3g&zrZriO{1!&XO~W;@+$Y&IAT-l*{Ecg+Txd#442 zxglF%@q%KwQp|-Y{D`?pq#<9s#;7mCLo3_Xj)cpx&3DwX9Wv_b7D_`neOiu;1BG3h zh|cUCV71)zH!ua13dIVx&{uBIAVhF5(zH-{%Z4o^z|>AvRTVpT1CDE>gBQAIP{~IH z?9e>((v~IlmI{H+xHtrmjIKDLYSUZ|e9EJk{u#JLQu4XW^f>u87A@c zyL5PUEF@x*pl4p^2ci-L&$&V!cNY2NI500vV(3iFU{1Du7JXG&JuRtAEjixE#~;bW z;>=6hauCAnKAA}w8**2*#T+sBI>oYnHbJh*M~jtrT&mT3*x8g$J6U?stU`o3PKTQ; zE3gR1ZX$qwgdDi?EZy3VEO=2~!zj9LWGviy5ff4H#LdS63JO8shZR8OyBk8buQe`D zln{on@kpEP4R_r(F+plTO#KwMG&cGx$yPhY%5mUn?)IL^5#lIpwtMYtw3Z0dl zaO1%ZTbNJkF9h6Ud@U@3t+=My()27SJ?(7wJ=5F!kJErI^f5QCa z+`49w`COX^4mF;}F#Q}i*_fDoL<08tZ9Y&bu#Rh3)O$lPhP4x^A-k>h_ui|;b6(K- z@a_@7QIsrra`JSDTWBW>aYRNO8y0_4RQ_9V{DA|k2=yQofEjqVFHRYln>R3I$<{38 z#-0;4($$qyxu#!xI3`A0AA~0y5acC_-MAi2l^3!+9peK33gm7bk285b)q@rj%0J8` zoQ^}|m#Su)=7^;lm)5>&jz1Z&8JqYm%#Ig{>trNs{DNP%i9)tM3nM z`N*N?4J3mGes75IQr32Y`ZEA0fPrPF%=Vm(Cl^1mC?DnBn{|@HE{gFl_!?*3n?;OY zh+Fh(nn~44C*EGHmmoLsBk7S709S!|aVHuYoN)xGRxs4%z)}C8`ra(-KKs5p`OQIr zp-ScJnCi@q-V8sa_<7kad-f%E(e|$DrejAKd@LcEt7&~gL=!LKdo!P5gPBF5cC9;4 zoVdlkl!Qv^{*dvML}vt4a_W&LeiWBFFQX8kA7Zai7cNA)PEWre~=w#(-U>CplltEe(>L2v6aKLvGSnn4<8S;EBWepjD zzrDh7YOz_*Y|b4iW#)D2x0LL2i65W)F|dV}9f33Ge9pJM+4pihihWMLJ#Fz9$Fmjl zk{Im6@+;#BO&ZWy4_qi#Fj>5+u@hc8v>Nq<{)QIPHqo!%-Zks#{BJ66MvWBuxFjxG zsz9A#{DAA!Sl++RonC%7#!~V1uAhZ#OE20P7NnU^Z?@Jyk8dkZqmM^8r>tsPNAJjh z!`-ml@#2|(E*BMEyMhauEn`JXSyHD=WAu|!%1XCAvA3UjR@FKhxn=1EtodFL)nw+4 zF`%4z(rI3;3st#uvvK)|rZ+i9cXsvTqqf4pX)RatS6MHYPcg8GX{Djh@6+!ye-Y+V;YBmekszU}F_5gDPKX4zOK6U1kp?XOg<@H%T0swdcLRRlK=gK|_56#^pqgE||6^;61{@B=uw1b2%I*)rVhxj1zw5mEzBdfGiC%aW;*PECN#7sFZqB zZJ9q%`q^SqMx9e^t`;hoR^EjQmPFG%4_VJylEF+;Nbj1be8_o~cnnQn1(cG|Pc)tW&M8+Sp5 z;lffK<{0eQ)+^59tO0d=hNTngIh>D4-_2oAQs^rSy&|h5Vmcn1wUMVw80_hEWiR-8 zCBBTVG+lYP(cp<>ufL&~ecwpxSOsqOu*{}mPR=6~Y(CFOVhcGIK;i0&V;j7B+S#%+ zsJE0wJk%N5V(N2`rCh5vp<(TzolIZG&RpcC9mE=hhpq?wuOUWaSNqILwC!Z@)U|^> zL8v+_=_wQC=!CwsvW1gHK~tvollG==Tj6DKjq7I{m&r%9HmZ>)qj0{a;ONi5)5#^sUWh(Rulm)4bVL5DG1VL6m zK9N-*-X%E?GbvuN6>YiU{phChmw59r@l%%t7kbC?4Bh;6hM$?%a>QVdj2zTia`^~6El+_>k4!SqK>ZU4>-`re$qJ!YxNy=PKZgl7t4pIQX=S*yOUDq*l#NJU|!HQ#I$=~cdEBj9I5kV6-HoYs(?y@YJtJ`g-gpmpRNC%5MbQyPB$SsA#Er^<31%FgvwX`BB0J#9M!Yhg0B8G`qoL%FvsS%;EuRO6R%E3(j{{`+PaE z1?tjp++p;|SvTXt(};7!kPfw|2G5r~ABz+(VmcE>ntDv3wIr}e%O_06=2b~ zJryaLbew^%a(`4fok&TvSr`r8@?24(28JPDDvu-zSzHX=Rh0yjCl-%LI%ZciTTb^X1Z$|d z=#Qccux{i;Ep)Qk@Q9Dy#j82>34X&O@4Rk&+%MsNrrftzM6~P|Ii7+~DgNvy20BL) z9S<8ZQ=?Lu+?7HCmHg>jMe#ZjfBy4KA^#C`umMbsD$|d8S91 zu-YvI=z`mQW5yuCK%AL}3hex@UFW>E0L07nSZ=vR z;3tAnkFigfBi$?zMuiR~L1hpS)O3NXZXo;`d1a*~lS2-UdfV|_VzhiQcEp{IZi=8a zF}3kK-SJZM7^`zipg%X(9_FZGdv&^Glyb5)4_8mTK5au#h{a)tP zg7u7xQurU|&bd-r!en@8RpsVpwYs`GUH}$pj2BeXJmwR_$RX>vkmSr5gYqxMrCXaG 
zyne05yXo9gNiE*$`yvusjBD+-+FtmG3G_DU4ZMf(tFbFi{;HwBEH$i?h1W)$LkFt0mCEgv zrY?RU-^wucL67Twblrx{5_w4r>Hs|=H8qf020L5rR#ruc=X-5}Z!I$}V~nLY$XazC zxqR^L*?tKa!c?_pD8J`yz)GGH%41*8VqWu(3JUJ7=dJ&Ib4_t7L@~wCBzSABT1gr^ zGhr1n7EgZI++I8^ps!nH)f#S*%l#-Eh3d~@so&oQhn~^KkCf>c11+BdL6$V{kzS4< z^PnCU6)&RT{PGtkgpd_em(llju3?s1ViUI169sFnPerrJn1aHt4z2FB)w{J8UO-)N z=^B{n;OKh%28%5LMPOTmqxs^tps2+{x*Ly100u2JB$MY1D-(;cz3PAzEc4fL-Nwws z#HAqGi&Bn}c8+omzXNjqzT<%C+qLT5wlzUU`k{WlMcpM* zjLB;vGBRx@q|$5RsJ;xL*(-W$nVE|c#U|x|A`A9TZ5?8JnsU3isU8sS`n5T?V4SJc z*Y!Sl$96?fXdH}&n(7;$6^Fk4xZqA9V8(v!OjrK!gjI3vE`x6`DrD!#{Fj=6;=;oI z>{m5Iq(ec#ZOh-6B{9cdY$9!(A&+?>BOI*|4l2WEY<=~ z@;pGGRHS8gUAJ;HOJS9(hJ4z3dL|ZBx`JVSBG&vz4@(S#c)6VNduk+ZGoN zmWpP2%nxwSz!!{ERIHzee(4_Ifa5``;BnS%Mc`N)_bEAj>XZW_3!M%#SZbnxspjQuN;-gAm$6Ej zbzYh@r<{$qcA#Qrm`<~^XY;`)vQl_8k@-<)C8y=nDRw(u)WUo(OgX3=a5?m6a`&#C zgt+C6RojnUMtb`PZq)<=?TnQi#IDXOPr}&{8zr@y)fUA@Yhj>72rpmh0XNm|*hoHa zH9*kv^9dRN*9M31nuds=de5gPfNEyoYLvxkH~FgBU)6b}yxa%g!eg@mNIrMkRTqbf zB6-Y1s~Ma$)U4b76vvBqzj?&fw*g@Qzh6(om_ibc;J2* z-yX6m`y+>h29iwW@Mvi#D>t8Bw48gh_UF%cHYwptjzA4bNRH|Ac6X}3Es5zb9wv>- zzOL<-vBCAHDjOWsWEL`HFz#sxZ`w6}l&;)%J)Kfcl<@+F*Ks$aYKR*IlirAV(GEr; zlG|CBG$i$fxsz;vNA?jvli?twKl_Xb7=0 z)xBqzOw?{@Fl9nME;+(Cf|R9c8xun^p&8{c^!p>e90hzV1sC2|Tw}9WDuOdck18a- zcisx`o#D3*ZDUBuUTL%j?6%Ih4A}K6m7lLC99zf|d8H~)2i33lPe>0b85Y?(o%{6! zvv{%n^8@HGxxBiPwK1hh4#6GS}zE|#}gSP5x1xmYqlp68T zTdrNJ1sNMr<0*abTUus(i2+r?kx@}bY7X-rn3Ox*{Otk?qLX$0(Hm|qiee=RqbmQ< ztyC&$)o5jQNCu>DHWRPiKCq^Qj%upE5Cm=0Pf)wbaIPMwOd&h`9{|cG;?&??U8OA; zQz-S|#7cx4kUc`jWC!<~3eso{Jo>YB1yxqxB$8=^veN>;h?N*|-N_dg6I8;p+Ks3> zI200Pb6hBMH^4@yfLo0=E0y}=^TU40eqNkq>!Wem05#%0`FGapuAFJKoFh%PPJzx0FGRHwz%|v8@^2*|%u4 z9!ll};VLRv|D(5z^PJQ*XN`RfF`y6l7|JUNIw7n;-Mt_3($zya zuWv958cX*@pEgArLRFOy>u~bC7SYw!9q#luhsDp0bw6kE>v=;5^v_7%jNTE%QsZo| z%+@C_hfWLM(vBxq!CB4Co4QeEr&&{#eZ2DfxW%Y>MHB!5Ct;f97ZcJOe`pFWD=!ya zs^(`l&-cZ;ak-;wKSY||Jy3Y8r5ks`ZZj;5F?)JTI((Yp?|*Kt?fzA3>p1cN@;TJe z?a*HmqTS3oq<#|+p@Z(W|*2cfq=2Y{4jF}%taGk&!9b_(G9er68E zTWOeo;iib}^Go~INALCqfDz5R*}jXNzTd=d+M2ae0DB^6TJs|GTqsRJ;I$IEG}#E$ zaNg?0*Z;R)m_S2&@a(JjTSfV{t{tcmfT-YmwG{mM-_}X|d@;Up%o{X7sYwEWz|;0* z1we%gaIb-76U#K-jt$rT`z-%P-un1)*oBMoSEM8Hd;H*D zB7Z~G;~mJ~AK|pG@mAn}O#iTVRsKT;&HtnC_LYS>(XZnh8i$|X)J#kapm7{uB^83< z6X+tb5}qyQldRdVmw(yVA_nfU>fhECWPizAC3+g@8w1eJTSQvq9XppGwnbE8MOD?o zzu5QtFwt#MDTTmLW9y2#DOJ61+EpV`weE}N=H_~DY(CIt z)SYkQ&eD7ddLc#mnkn_?f85La)pTGBT=uARU26ncKZH;}CAh)>M)C}tW7^f{J)@r5aHHQPYkvm6``Tk ziM30#?(+P0(~iNd*V-U&@*2JOei2C+FaSd-z-Woik7KS~8d7hNOMz7z*4EZ;L5 z$q95v1Fn07!-*`m@{C+?WE!DzeNC%<^LVtA^J$E;997HwtC+eJDfgO+J2lQIOG*gk z3R-kSk^%5ccpf}3g}X(`vHZB$x6FVftOo2;t{1+a)o49UNsYBjF&Tc_5Z}on(X7wP zQ=uBn0AQqX z7(O%eZKsHMPa~8Z2a9mY18FvPb-<_yj9TL77pl2}sYqXcw3(*V-=vyb{hUrl0VRL$ z?QI2W^%Vd4^9FT+c_Yu%6cM%Vc!dQof;`^~I%_-Be)_it3czKC8crHwbiQ885F1ArL;tVi}f)ln}V++j{mrMnQq`Y`}Vp9RPx&ZB1-3`FN8*xa%IrXJ? 
zy1DlFLWB9`Zsh~!f|;R?W!64m1`d-na&c(@(^M7|eV0a*;o4i#v|2!XTwJju^0rEi zgX53De&4=AHgG&GbEhv49)dC{cs2){bFoUSj%I-bUJX-euN@62zQfX)E1%uW1QPiw zW1?0gL^(XEL_h2-W^Gj<6jmmn+0}WpnNmny9z&v+o7m)7YeVc$gAWIB4F@F^cDvv!4)3m9HB z@Fi=>Rq&v^FEZa#t$Et7DWx?6rXP6Y<50sb&~gD}vv<=ziVZp5QXLWt?f|)}*NvvgYA4 z|DrSo6I2MNlC-xbzyJf>Zu&-*@^-gWYw6d3joo@D;mo_@5tok1u%-bYD znhG$~?T90c?6kslA(K)-pO9Xfzg0;2&Q_#>j>j$a;^*xquUutEP)kikAdMNSzV05p zM7{1}1Vh!jdtgR-N-7#lwUu^a>hsC{!Xeb${3*&oiB-y|l4YQZiCis0eJ>=#wFvVF zt#sVB+c#~Sot+K)%R4p>6F{ZOR(}`a4&QKimAkxr573mgwg8GwT>yCH$G5VxuLioC zsw-aFIU#6KYYbp0*0tTz1G2d6rcomn8e6%YF)c0;vMuDc_kI#HfE`J*j&uy$?pqe+ zVMirqECJ1{vELgGWdFD6JdS}S)@?c6lQ2DC!FRiHp4ptb%A!5KA`AqUaY-+7!O>B; z{QAu1`V13HC^!o7b!Ak?h}76rf2qQ?_MOX~p=Wx??{5NPOlhery~D(NU%0Gy$1e=v zxv7UMfJ_DIxi@cxF>+*|`>}cUUG4#|LiSt)VJrwde*B~c0zo5N9dhr#f%@LyM-cz* zfBnxzwR`XI|LjnopGOEuhoArRPbXZQ9PQBx&z&o8cFPv}O?)U#Ham3pNFox{go(<4f+(I1TgP zv=-tjNa4{K>4+3EFg6BL2_3*Vk9xZa9~^|bPlAEo5EvevCQCPMog;pVt1iPdE`y;x z!%m!M@@xBq1Y6i;;IW+9WKv}e#>s$Gp_Wqc@E=~=EuDONb~XTS zwL6rmP0a}nU29^Ma?EzK2&`8J-$5LF7-2nXQ&6K4o`c4uz@*CloTm)a)6or2HZ<}J z3DqLU#^gXvC=2BV zbnB+gUJ>_OzK24)1n;H9w6$`p3@mJ?fj_PSZOuZBrNe5IPKuGf!_hx9VH!Uxp!KDrw z0EcMRo!mOolGLrEjO_hi+|TgI@~a-)WE$ECKFZH0a-+-WB5YPQh7-f5q-0fMC}?!$ z$^#B-v5ObGeKk?wxVN<_HL?szx2Tl_J80*djm>u7w9mhP((G-3-vHAc`0|Su78U}y zSgK_~r1uYAy~h!ekuX=(2=b(j;WwY-j~D)j>DkM_QeKX}ea!{gKWDo!Qm(?iigv%c zAt4GuM}2UCg?dxUX`7t5A^5F~rf^x3!cfstmR2wm?XQqwUaft|Q{$qa(u$!i3LiGx znJ_@sJrn7JqglW}rV^xfPi?X3!n-YoJv#vmG1FOM;9GB5Z~ zlS%)lvcmg0=5dlY#VkYgq%@Nj64aM3p#qizWBBFeC(`zwsFz!5kp=r1 z4vrjwcJq>cyPo0P8<}SOQ3#x@h4kIrNl<#R+(uc&4Hi`s>Sq)oLVI_Ufi}?U-Y3|o zMn5;lP^~M}?S^eh_mzW3oHrtD0|4A>Z6!5k?Opg1U!8RXDBJvfKHb`-?T(SR`;~`E zuDdr<+myUte*y0aya4*Qkol&GP2+;GqN0Q6m6Xn(=c(Jv!}bj+ft`3C#INPFNMhL8 z2p5_Wic2tbIx%pS`yKZnH~eiZKws~CP@!LTUr5;dptp8uMcNYc^dtM0PJXjdUayxg ziv(Wkzt2PayMeruev#lwr?0g8AqHlPw^P^|vZ`^}IN7*7nBfYkVp3dIZrr7!0w_E1 zh1LAPv7?00Prh<^z7l@mRDH+&j&Mo~%)?IT>_N|Hy<_>d4))g9zg!|lJKmXFg}zi$ zjxhSV5Av3eskwgXmoIlAxHaHT;O#y%J^YIpnL&%?m7r77zZEb5WUb>_PrW__W z-%GdL*gDp`ZRI#HdW^Ik8CpdYzR8xy=KjV6z)VI>0iD_G$kMN4{JJs-uL-{O1Ar+0 z*JpCiQvAN=_qGH0D+c@VjPiS2!@XXR28?1cn^mb^AIZBOdicJVxi6VfVPne*TM)B* zt9hHGy&x#yT%wq(zA}(kY-xx2}PXQy9qch zaA2g)fI3r60Eu4Eq-uS+tfAB#JUVN;OlF2hsrd^QV*y`Q3f2aR6?_0fYMCO_s<@XG z-VeJ90<-mmKmr2r>anqji3Q=I(iX4W8@2TECku<<=v<&^@x|~c3 zVR2yEC2hhP$YF4a&D3X&&HjV>K1(CWUM|S64^65{1AsTK!<%XJeY8(|urKG{#bux& ztuujE6D-i?wZvw{ob&2u@I7$5;u@RgaUK}FO!O$jZ>1nqrCDP7+r1PAk8(8?e2wO# z9p>eGaoqiHCIryVc3#^|sVK_4oxva+&cFuRN+}M=x#K@=PYct|4zl71cLX;ozYP54O=7JjJD$7>S^`iKeC^I!M?f5gsagL6XuJ;Lb~@ut>Jq~!~IPR zC_Nd^u8QyTOZ?Y`%RcC&B~lc0pK^R!eO|2%ESQ+SURGCFf6GLoZn@*pl&Pw&B!Vs1 zo83F8#nOV(Vt{Nu9DWF)C!O}kAEx|LkQlkcR+qm$`+w{M!J&e`p8Y@eLCt4#67m5d zXMAHcrpX(iyOoC>J#;F;v4k!{{%yjNSD;Y~FI~EYmn!vA&*kOZO(R|}Rtw$0bCIgs zL-JItgolX2)Q`OgFw25Qm{A~wTRCH^++EH#DBF~%%`WtM6igrc>oB5ypeLJaKOaW4 z4Z5*SKuDT3cIvu;&v6;P_l)P6+%#w08uh5V^WwNcJth4psZGfqo2b2g z1<0JRU>n+DQnvp0ou~}D-M)%{EjL=>IuqpLJ7B5~7Ppd!eXul@<1uQUO;cqEQo2Le z!;umjBX~f1jYbPiQR)u`N%nHttFS7uqHgkbc25+d@{OW(KFd6fdM`TP5mE63(C6vzUCn*mwrp)1<83)wX~^($d1q#>>lS2OpPz-zS; z`n?2p2z}H2pJPR{LhT=6A(PRkwc&Wb|5(>1_){B}ZK1||=3%V#92~qFCB4GHiMkN9x13oIpu53%0BG=`J8Qrk+VZJw-0o1hYoG+9L+M+d2em=K z41{etagH>C5+i+?qGrcwrTL6d+8_%9<0#NVdwcD&^0$bYypqa90x#8`J#FjuY=}ou=Za>E zlf|YJ{Mx@g8+nKJ&Ox^a=cE7$F*5Sh^wcN8y>FFC{f0%VlV{jBdH!FwU}hLls1hT0 z&1MRBfsI$j&22F`#v{&PVwlIr~g1jXry83nMF*YBnUe6QS0Ki2qH zVP;h|#OXt)EkSqh)_zM7-ri{39G{A76fo14r^Z*_*R^qrYF69rExh%1hY96ouV8Bc@X`X(e|}19 z!JY!OlR99k{ZC{)O$AxFnm2{UA3oUgxRCxzyZYp-dz1nfj7--^R~2@@WVno}IOcfI zFi7Exl(tQ2owlxNjz7#)`0XLl|B1MAi1L4WC4;(V11q{afCb549h=eYNg;TsPrG*e 
z<=GcA1&Sa4`N;fN$zM}D3qJ1Kqdy@r@%jtw?E1-{iY5OhKHcQBD%bnDHuhhbMgISV zNZ@iGw@eKI6Nv}jr|<|!)<)4EqhAA}m-!hPu5o{mQPul&a%&2EOU84-pSZY$j51EZTfUu7Qx7G>M#E=z4rYg>mQBns{+va{+HLvKqTogbAn* zbs#RcPly8K1vhur`1kUE67hOw$Hr^&5s9&Hx6_iR!SD8F>o|V<(GJk_8sz2WvC7${ z`Yug^{D zTO#(3f_kL`^`Wr$^yQ-Ry3HdNUxGaR@r%&j#B^baActfyQhWq}XT3)6Z+cQJC?x<1 zKx9I&qzP|F`gn}zMLctJZT@p4U#o{@JUnDM3C!<+W3E@*^>_ns-*~NS3BygGqI6N% zoyic!w~j3VqQoxp;S(!bE{QlYRKPr3Qq(H9^v1|}q_l>3&h(n}61)rMN%CAor{K%8 z20>x$9yQCeptFoo zO6d#4x?&to91JcOYWzySI96>I8Vu+R)a}8I>)1|%F2J4tqR31tYElf;CWp)|VX-<^ z8Fjp7wG{v0xVTS-ea-8cEv*AZzS4XOy+tv5dzW?P)VE!CXXM*__q@K19#HvATcG)aon8_$O4S6=bA0W)@}^8egDS zTat)-arxxS7cbI-h}})c)IXsY@+%);FpIIl@J3s^NvxP(|D~PD&#k>L56T?<+Or#| z*}MeSRV;ZC)%AA&BOl>v+UhZ4s^H9zKK!tw+fF*vz9{Vmn^<&}@gGkrmw+7)T&c`j^Iv}*>$#CPI`8Man0)mGzXoBl5pTS0-Z%## z+qhlni6@Uo_BCYGrqA8=Q(Aibzh1ao#~$!l;!ZPhzki>0#|+ht1oBEJ<^6cn}XZr9*U%c zU#$_?Q);1id#r{eZj>%1{-?$S8m)Dq;`Um)|6|@6N4#Tg{Smp*Q{!KD;-a9}u3Zy4 z0F7XCZ@Ov!d#1Wmefl51Q`E*ynfw61?T_JWQ~h;%#ivf6{zzSDe3|s(qP1#_!iX`L z#E{E>(Ip`{nRRC}4dJV1Yn%7J?fngNmxt?xOXw<}oDc@zg-`rROd=xO1D~$;9M;2b zQ+h_%rsmHkCMD&TV-6aI5 z6<{$Fal2omQA^0fr)YOeAfKUeF&ZCp`p~To;_|)3QU$~sG4`BlS7EI6pYrloEkkDg zHv*_E0SFTfjncTp#E%JrDbEG}K=`^Q_P@So;g^+gM_5rZPLYX2R97{IRX~Tsy3|rn z|2NuDilS^Qf2^iIflC;VeNZ?nZuGyXd(Wt*+OA#nQABKrqJVViQbX@W1?doauhP3T zLyw|VfzW#q>0NpU73ocSCjy~_76>Jj5ccBvzSsA8&-unUf6g9b@AXG?#IV-7?|aT` zUh|rB&N_f34+3U=QrQQcHd=vfn2%yhL6+YOlT(bL+fjnhh98MQ#aiFN4{OhgpZwcB zWJ?B-R3eHkEwjDl3t~^?4E5bRHGQWZKLH!cX)d$M-j5^~Fu8OP^!oJ%IgtbN+grNK ztpjw#Y`~fpij0|j^;CIpH8weB1JXU zRfYX7DH0U0yq-_jIe)UdOG8XS%7zQT(UX=~lRbVcKQp4Se<&=eD=E-^7t|mF5QM8h z*PbVaQPmKYYZ7*O<-2-#0%m1*i9A)VwN4)#pep_8Ax&v4rpZn=n1KP-|!Lr<%);1?z2SZ0NKJT^7<*r1p; z%Ueo909cvY<3&Js)w|RYVuc(vwe0LOwHnxPbvt*$Y)lne@iPrOAp80&;u^F37&MtP z7~sXI7kLsZ8%1MO`{T0$_+4X@>yF}`^4SO0;6xx_t<)aXcjBI(RnW(}b#&{gQba+4 ztmc>ru!?$u!0(G)FDogF!xK5zK1&6mpeIpYzV&fpfH(}IXeKVBocWLK}Ml4pQ2NpOfy9=vIpedS)J0MoM*`j8y(YVEU_GT|EqjYp%)F z>SI^+li+?P@8{7~cAJYT@g17U5w!}zeN?#1 z_U2GnU7$Tdu5Ud}mslYaun5nL<{`tTjO-5eEyrh{x+a4Y;vXw%dnW7;Fv@qTgF&Q^ z4L`J=Oskza_Aj>jB8G_;8KAx=;Huwgpw$YZ1(5Htr9zhEMI=N`$yA!0w3 zn^8%;N@dS9XwLuUHQSWRkeQjY_^5Pwk$@IEJncpW7$pa!!yS&ay0-i^?~x~OrzGlo zG>65D``KX<`_b_88tD~BkDz{G^TA;F4v%toQ@zymi&`#bg~&%`6B zBn?0w3tgn@Gan#=OogzuNA++!& zZ8rg1L6#|6#}&kI=RR2|tRA-e(TtRQM!nnVz=L<3ce}GYc+|$1J4n~Z2XX=WJbEi~ zayw(ODwSae-*7(#Gd4M4WLMsbdAmEl`mu%#vnc8bS}i+6MHpQ-bnaFj z4sB+lqYHCAd~6%#Wsgd|er3p`yuuatA9lzgy-pC1hI`LZ0bKi=2CC`YuYi7k0bOG{ zlN49G*2#DI&p|`QX462#)qpk6lZDU;{F-;enn0CoSIqhf9#-VBYTzxKs zRi;Vu`!TOmxC6qSu`i!&%;dmI4QhXQ5!BxJPiO4lLO7Y*xIQG+%S2{cl(kZ?<~7jw zdI@MS`TsERx+bm};akv2REGs3R9!<$>zSTmcq&-hu9(l&L?JEzi#V62MPXF$BA>di!EFLC~g4^Az4wstc-1*^hEPZT`N1$Iy3QxcYM2PZ-Vm zD4yG-)zpt3Hj*Abm`O{p5_d>%_;|yA#S=e@3dX(zsx4P|RIoXao1`md@u|0>oiio` zhkJ)zL6X6aGz2>yKYS)`#)b7o1w0QhY*ViAeiw0=x|irz%S%_zb8$qi8D6 zk;~v*&*W2@|D`?gqae$_us~Vb==f+qLF)2o(E=$k8AQSB)qoS1H9g)EQ^VB#^x=1Q z-mS9UL7}(Z!RcCh*=rV{$jGSh$}~wh6!jrCb5aF|+c1q%xu$iQI+W>Ishs6g?&WLI z=OT151I2AAG&A%Rx~B5&v|>j~cDig9!G|3IqU>6>EQk2>mwhv#yV}igo57%JP|T(8fi$_4b5-T5pHox1 zyA2nT-k2Tm(aNqEt=R{zhyS`_-e1A=aryu(QtTA&>(YxV3E-=n7WDe9w*Ll_BsK3H zCw>8wi!6&gn1$jv0gtH*v>&s{YuPU3&_~Aw;9yWmE72N@#LwX=)61=A{C=v*+^_sv{5zX7>MOMoXAUkg5r@0ztjo4iT3fZ&%|6Nq-G8tD zkR#2t7j#4vP{1NlK`;MeXv?M9xfM?m1oQOQ#|+U8H74}@Z25!;0rRNxF*|L+$axrGgCPzW$O z@!p1z9$*}`q_PA(gAaSQIn(MLYnotBj=Wcn>J_~8Q^qGJ`v8rYRsW>QS?yhGWBT)I zOfL{Pv57&PlX{}4^V}85PJO_|Wee2#Y!jKa(Sj3?-ul0b}OuY;2MqwJDmc_joZL;oZyYOB8{oSajxHxu4tX@G? 
zHA)#k_vNi;vaGBkUre)Hha4sBtBSZYb}ld%+Vo;FSFsKgC#N(HT_ERSfy``e&ihhj zB*d{S#51v#wF{MRf}mJSbSj7;5J=-`u-GAFT*&RGEjh3lA4BAx7^?Dn_B8^U9EPl+ zi;dhaRepPBW=7=06kG8@`7JKrV=M9s;y=%pF*6MyT#sPNH_pK)H){AY1@56bZPK;% z_3425WKk`9qt)n5s7Y+-fZ!2@{f@Lr!kR+ggOJjQ5O;3}=54Kl)2K(qf1ovX#p!m^PV*Tc~ zwTlJ7jJDJM512j)%mSOs1>6DM4uOxtM>sDK{tXvVl!zuu&{SmAQD-+(-$&QD)q(ec3PTuBtWI=fJ6e$GWnuzdrQYA)j{!@1!@UyzocE1^t}An9Pa+^ttD)f|bLpSk zpx4*me+WM^XEOkS!85JC1(mNX*UO#got=rPNt_(*(!YLvBNAP0Dx`ItLLElMI>|s^ zh8!zs$L@e&{pXiy>@aa`%?*5f@(STy>;WjA-m~RNf9v|1Adm|cth)_p(+n|^33NUZ^ zg%I<78h#@wS_%8o8#nPci=4g>4ys&a;WWWghK9NfKo#`pSoiVfgj}HXOBK*0pVQ_& zk;@hlA5+dHUs~_&7v3i$QvDWSthR|4lGbfct=w;a)u!tATm|F#w*Xc#AFYc1u;JyA zGQ)h}F~wzFU9Zmb1qbMw0_hO>e`CM}Tf`kI5gOfDxF)A0 zmwHQUs}<0Ypeu3h{lhFCp8YEKb-{(-wqT_lY5!Z4-yFbyzSE;cGJ^r|={;8FeKU!+ zG8`Nn@xTCY!RhGRoV7$pDRTDzKIn-Na-k!sgGr1ol(n+nrft zliQoECvj>8msivbwWv(rVkyUp{>o1##-8sV8oR1B`(sx2iT09yM|1D*nL!`@`wm49 zKGRhfetoW}q$t80_wPdw{ABbJ{J{`YFI?YzO_f z!&emo^xMQVizedY;t`zVsrL5vj8d1;kIBd=4NLi^3aWYHLX9Z?2@wlLn3(e9RaNWa z6B5V`3kAG=d=!n00)d_1G5tCNK$c?z|NPY>cW6l*6t)3irdw?6xHfF(0?(O49+`Pz~0A8T{rb>l{m zxGh#i2H}Riz{_nY?D3iu{!T}yVPIe{E>;ViKe=NeF2NVO?U)zHiE5Z%t665{zgcJAN?g6Cub*H z(7E8%syDj?=!Z1zyGiyH^fP7wxC^%xiAirS@34@N{ai_~3*h%i4MFvcS6`CU6_YZF zPY}}6^Drl`s%mttM_Kum?-khX+H#J&Yi-IQ;d2-(;rq6K2_T`wWat^h`XZ;`%u^=r zarXdA4unUmeeC5)A5U&vp{xt)v4X*P%JNoiu2lD+LqOjiMa7zUagHH}KmW4?<8{J( zynIZJaUMIdbcS_~@fpo^l!mYl4xg)-GJ;m<74K-X(%WN65e+w)_O+E227qobl^D{5 z$CPhbuta2VT!F6~D>TC}}#`tMRl;jH%ie{2#EAt<0ebyHw}P#6zcWpSmw*e8_mU z&Tu#O;|b%%nYQlyQyUn00VS7MjSC1s(Xl5dH};Q@6M=wbEb?=0ZS9q}jCdkxux8{{ zgfr^jj|t$bi0e(crcvWx?I=O~H02Q}mgiwtzoJaNYphlR!_3U9Ww=GKkqBppS7#uE zD22x+HrEHyn<;TFvnrZu;&zIIHb;p`zQvS#Qb?WK{xB zPEUJJT;mRXyP{H_^6*rm7ch4b!D;2=Fw-Ou2^b7KU~ZmgTuvSn8qIzgTaf9kMdQC* z!HYQ{vKls-J1Bni$53V54ki>)0(A4M?JjN1msgZO-1m}1s{J{+@SD)wx2kbN85*k? 
zZFU6hrJpj5?=szt4bO`4dFZX8qM{gl_&}VlDfC6A-x-P*`aP+ugfq`55xSq>f?M0- zb(wjsFX-~#ku^Me!x?x3M~gw|#M!pJ^I=3;UUMhVc{Mq>n~F@(@iU`tX$(yAOoQ~I z=J_IYOQ<(eKH>ZAF3ME$D`TQ6-JnqCFr_+KVVdI-`W2Y$CK3;khc#{gqSGLL6nDg5 zTcTSrUeUqpiO0^)&X*bHI9p?a#o{3{n`xuPF5hH*3<>IOsaR7qH_zU`|Hl}8;nTG2 zwiWSfcn*}3=4ROd5UZ++IBfd?Yh`8S^3qaXL3~i^ECtY*CJ9G5>7vI%l0f3NH3;dg z2DdZDv8afde^yZ1kJ_g1U^ZnJns=+mE)gXuT7TUS%--9(xdO7XDo1#Gg~Rs8Ie59B zGuDI{1*5*L%;Jc5me?0@|9&_iec6DL&-)hfp z%2(VRhg!R|oQAiYPL}utct#wzoDUHovM#;!H8eyiQBVIolqeqw8JS}9+eKmu`&6@u z!9l`H765fONs_+o_;J+!F0Xw+(9wt;vgltejze^y&x==MCu^IH-XgWH77wPJB`QOI zH>r2NJ}b^9)7wC2!mw_%f8h&~_s4l|uyS>(zf>RJu!QUOK4J{q-%S}wmiFIkWw)0hO!v1 z0vwNxU+S{=X`Na={PDyahihv(EH7Tr#l~b7(ISnTi`KtaS#6QtzJEWaS~ioDW|w9Y z&a;eK2W?GNB<_gSDZKbjhXj=7_kd(nMbZo_)5M8vOi4w{3mWee)}qqV=wZoAz5A=H zp#ZOUA;H-#CobW@2t`rMn5uCR3%n$L8JTp?yl9MZj1qb(zZP0fZ1fd8xv3ik%sP@Q z&S~!Ld1fI!F5HON)aU)W2oWHIz55(Z5ZN9UY82HDefOA*gnXESMVL_rD4vb9DM)1K zUg^Ot-xL92!iBvj6FdeB4+yAy3k^#ExULFx>UowS1{oP6yK(bI@AkpEyrpIQm7{Z< zZcr^QDakX1zrUj4G-Abi-{C%3o&2iyz_U4ie^F(Br0BgJV`H_6aL)w>wD$G)S?>K_ zA||5*l}usF3%)Y_*$DOyKYskk`}~cc@u6qk@#ozdUrgp`tx_uBUbLjnFtoWg)#tm- zr#_p%+&vy&A=hOc*_~Ca?`_^rBO+@*SU2my5W^a(&EK%?2C)w zp3g(h8d{``yCKj-flWfdGkmI;tw4;#S?XP8kBU44Cqi-Aj%++_>`8A^Hafdq>NBO7OVg6zZ6cQnxM`VW{p(X+Z(N>DgY&;} zRjopXXkOp(j^Pn}a#JrONIjLBA;)TX(U^r}i-eklfrcit3X)Z%deY?JJQ|;+}%A=BeVQNagjQ7FA(a{fP>ij?b4HZ=eT7hEjky=@gl~rR1 zR$Zq;pA{zl*6(*)>px6}y;5p3&lm+QEp6jU-8myOv(99hX^G8GrLENR%K%)bO$0<_ zSbMW6tuLiZS(Q&ohAVPxK#+Q}XFwu3ry!yBV{m^rjkiGsR9{`4>-K(_+Y)|~y!@L& zzw@0b;K(wsWYo3)`(PKAxmu^rbiFj*1S<1?E_6sQFE9Tb9F%UmiU;>{E)|T8SGviY z6pbO8uBrO;qE2rtt@~3YwqB=*yEfe*AlO=ewb`{dG3CB`K?D~OB;owy%0`es-L%YP zWC&a#kTGa;)9kEhZ@-wWxWkptqZ8EjN4v3B1~aolFMzu z9_rnU0FbT6tl2;Dy#R|G#b}|G$XGJcS)Aq15d(e@-7ya<`}TZn)NuUF&)+}8U@hW* zg4>@zKLs`ph=zuyi_~7lwr5~iim|h^vx{_V;0SpY4<`Lhko@bLzwp8SU#R|JjMD3W z-V^ZH|3l32f6_KPK`fKE8sNm+$`G9(Ub? 
zKTG+ru?^5e^!`t8X)88XgOP>B5NQ1!@edM0rhFJ6ensC#BmxxalfMHgcVPQ`5xjL3 z3;ko^od`GUGEzLF|1*A(LB#&Y_5k(?SJ?kAAL%W#P=1-QF9Z-INC2-KTU%W@3NE@A z_h|+MGBXT-q!YvWT#;rmK<7+&$zE;Hf|qJ)(H}$V$%u%EEbcyhIH9Yl$#8la0$BX% z>6bDHsJ38Oxd=LA&V)6OXuZbP%g+aubTQa>M5Z$A_otWyC!YVt-=s z!j!KsyKb!iwH|oo?RzNJ(&ldJ_D@~Ky?%j>x%#i{WB@3r{H zi{=6C%{_L#ecNr%tbBia+zsJHHDk)<7m)7am3kvjne?(pa(U^|mt9Ad?E+0*c7fF( zjnrn1aL7Ph6;`F|u7~Kp^ExLmLw0#(2Qb<+%WJ@dZobXU&D{Ka**^BZyxbZYHa7MM z-5wI|jHfm{kx%7-HxfOZvtq<_sUlPU-b#<&{!Tt!0U7XUr0#8POv~8}aJzr5lybU) z(?5j}0DjF@_s$Q=*STSSCYk&nGk zPc?MjYDuL6+^MFx#9UZKMV8FRkgcw8Y{zL=i3c0cf8CCa-de97t2_Nw!iy=4CZM2n|zdgAf zO3u+K)-EV1?WAw%Yni-Y3CBr{wi@PNWhx>lh17u-26ROk&Ec^6dLO3>lx)@#&sJeB6lR;%^H_i)51vxa9!^6{Vh zcv;2Ww4yR@qRM%#fBbb@vP#8GF~Z5?`rq$1Rfv!W{p3ZahI2}y16vtbZMAv!C2JYK zV66x)R&dQPMYy5;P<}6N!4RV;)2_fv-zi;njF=Ex)XXV5T{{ER|R(9_lc#vNtN`*ecJ%;33CM zn?;G1Rzp{WMAfGkTpo5c;*+u8L7@fEiHp{H7-?t@`UT*_Z_XZP8F#=NPGEvUg1Gq z9Y^-K9D!@2T->UP7!Z%-9F_)(pmjUm*st1a#_uAH*>@#%lzRBmh6!)4b$U#?;A99Osrc*TL7$Y zmm+8tTxwKnR6uTP>vOndV>y~@SLHLvMt%P}-iL7!KUvwRC!BEq%gFuSH7oB7oidY= zP4kh&G0qI`0)lF51MT*Ne_*;$54 znt(J{_OxRM)4NeUl-Ln+*K&Pmr%Om=eOV-_f2SU(g|!o7#F#}fmIy@nn$TPr7f9D}aLHym_Ak<15J2H(kvVnw8b=xY)9!NB0=H4&*fxw(-uH#C)?B z6K7fr#PjPQyYrn(hbtaR4ByZuk}K&4Rz1I$&al9cyiR+%Ho99OkcCq@AYiR;3y2It zov!#pCxQ-~7kE7>M#r%-7u@~}0z&<*kV}17-~s>=_siF8Qdk`9^OTduJsOvq=BxnG zrG~zK_VN$G6_Oa%xZxOJiqXrEEJC)$dVG~(C?qB<(76y#o`hebAlkPe< zP?I4d7vxqKhwLOxmP}TGJh2mQyU}g2BiVX7E`1W&FWq_~D6Je>g!HSd3o7$cJX7eC zz?2+Ir%G0u?CAlz%L`6TMk2n&_ckb4s2P^?++jH0&yXswZ9@j%UtTkiSY=cO+xu0i z{?vRG4umk-y{zLJS@Og}f$`xa7`cEBzydHSt-=(w+j1KF))P;e5U{!MvvVi?A=~gZ z73m0Z*!7BZJWCKp+RjR$;KT*}W&rZcc@?a@ab}6i zq6aeSTTu<#+A1yyLhDYzWuJ5q!TUg;pz*V*6e7jWMqW|f z`Gb_&`BwR--hCL z5(A3}_P>40*4Iz*J33lN_ZHAnmGrpSWzu9WuWy0cB=aA-)Cr$SI;5g! z&IY#NtB7sgpuPi7hGNJsWC1GgON$Yg8h%?zdCTI7tmau* z7j~waYcdPZPb#2$1UP=m41aaQe1&&cZO2~?!}~$4214j1tmxhIO_WVjfXaK+cc9F5 zFd?guHXo|fjjg=Xp_GWr&8lb0%*<>eW|r-gATF9dO%XpmgoC}rG02-M6Qq;bC5M|6 zD$9`tTsirQ;*Yv}5Y8%%!!avoJ%=&m%W>86i_oRp-fN94h+f^b{-IPoa^ZmVk4v2G zZLFYnbZN^(szOvrF!;+D6&jRyiVp0+=iMx=Ud?WoqyuE6X=TcmDW>*Ho2Ys#k&cP+ zaW8pvs9o$#u&S${tSNG02KTru=6x|XwccbAcgUyz)hlGS%5bgqsI=vzt9$gsdS4!y zKBGHI98tja7hy2aCMBbsO#gU`I6=-&FVh#t+S@nF1d!T^N?c8&UN&2wwtuMy6Hzh% zjpumk^vH;b*;ZFc&~PEsI2&!;^6%^qXqZh_v>ff_r>lPdUP8adIq|_7gB0*tWybpc7Df@3y4_74}0)@C}rC{`Zwr45D>)yW%ose97%I7 zjdD#M3S*k+46zIGqVjTeA(VV0Zq{&ho99t(aq&xx)(uO3wG^N4+nyjR%z4tj5I<-sPy%<7cy+Idp(FIvf9>_^`3<^1G zH$QFr4y5+8Tcq7KqAj*ugKh+E*vXELH3Oa$WKrVu91MklT%V@-~r6JWbZ0q#sqH)AKc(fswPd+3#RSH#Reo4)9sECKWD2 zK}jVq-!>vvX={8I{#tsy_j!qJz*D=zG`m7&GSj$Qb|7V^(t279#uKVChakkj1-}L*#Rx;2%V~qGv>qy(Y z5sbnSKYm;qu*C+@0b+tLAHlsF&oZ!B>DV~m=PuPZr^uHcM>=GTw_ltS#$u$re^kcYp$pwbX<1ne z&^Gi+I|R&eLEh}nmX_VDm3Y8K=_Z>0ko)z?Gf!i@)m7chBp0#(PSb#+#*e|ly+C`I z!^cX(A-3Z+q^jT00zaj5SD);JJ0zr!>%-Zj6HyCEd1J?b5E@JkLsj}lu1fUt?#Ouo z1rP0H;MB2-pLig=RH=2)N6R-1Ti_kn2wTHz@2xOf-up_^AHb1$EX(l(FLYs+>!OY0v` zQh4&RWJsN&7uF2x7ZX_}9fa<7>0nadIj{9*Bkx*i%qWY5{!8|^ zeM90Fn}!DgjUnKHuczc`)UN`l>PGbK5r8=pHc2qUVYMp)h(!T@Ab`|=*$|RiU0c_;}#|a#~}(sD@raR#o&AOM9R8+1U8G z?4d$Z6itYwQl0nDP}oll1}eKlY4X(b%+&a64S zM=s)5bB{t;29?!{hF-w=QHB?yH{$02gj2rTO{u*|kDCryX^1whM-Cvakeme_ke@QlqyhamI$~ zyr>k0xY3KK^;aIEC4FnVC6*03cF;RbMbtol$dE%`E;6ah z4kc$F3q1G3(&xHQsS?|J!=}!95TBS84U*gYgb3tc7xzg8Cy!p5Sb$<;6uK_HBYk<= zAfJEUI14j1EA~(TKY*;5OzNo&3gPvIr!4pu31dEC*V`|3taLT?JqBmto{jf&_|!w9 zGg?ij^q*#Zk_E@x4U9FX9`xKwdRCyLtxZjtViz#n>CQE#*)IF4yRfQz$RbB#|4d|T zXv!6m8h&7Bx8QNgZ4@+-!e_1ECF4sX=bvjcY~prkJ99f@V9?tW(WyS98mjcm+kbq; zO!muwvxKKUCEWWoPgmh9TZL){uja*~@|E_DitAals||R3e zb7On^VkV#`U3uds^OdZb^4XO-&hp#4?`O|J)3+a?mb68hmjWgk7W*|NzGvgM) 
zXZpi(DXhA#*_Ug#@L3NPJ3=K6Fh7g02dAvsxKiWL>v9t=xy6B9H2xWHTHh=<^1w1* ze!rJfp!NdV;aU8R&nkHTe#|}P+f$7tef)_VCMB2>%!+$dFhNA!4_nR_m-QiPQ3eQx z8nZQ!&j1KDJ=XXo%9<kKM|0^<+{3)xSN3F=P&eBUJmTB(BP z$o8$frd@Cdz{}_tP@AO?y?giVuZwxK$p_!W+8VgV1?oHQHwKoNLgBS9blef+8_Bdy z&_;G1Jy z5m2l2dAM_coUAYXE-=fcxs6`Gty<=KQ$%zM-b0vQF1#i?mQ;;iOy`UBupIAjqarir z6A4T@&1xr~gZ1_fU7QU#$)d>c>|UeWn!LhZPeR$2kW#r^>Am!~Wdq+ryT9PY8o!Iu zozRGH1y>OXcBI=6+IAB;%Na+Lo&+w`;M5RWl}aT zZ&P+9Xy>XM}ocaCEWFo{^3{+O9=YS}AsScyAJgkqet-bog(kTPb)aix%jb zu;C5n7|?h1FzKo$4g4nV1+vstiXWKVM-%So32xYkc%$=p z8zBu^+xj4f`n7PmAMzwLA)!k55@*_V!`b{8&A&1B-V@8T)^#-HxC_;px(43NLKX7x zZq3KUE*JdBxnp?F_^K-_gYGs<#^pnwk50Q$M^)7vG8!Sn(RRrEDGOOZPn*Nb*ubwO zxVe||tYav{N_0x_M*(i=IQz_n&+!uvDmn5XEC0tuDP358!|U`*{Up7gB=l8x`CdGA zNlTwrNV4KM7zJ|SQy52t@vmea^ZCaT)NOpHdFIk`eXWoSeTc_wt8YCQi+O%z@h01a ziN^!}q=a7z_q;#jiLku=CUWd+!7NY4Kxh>NPvPFo`JcHT_1`Hp z{RUYPq6kc}>Vl=^4hOMHLAENZo|lv?Fn^j{3j?zxkj^z^y!M%hAzOGBH})YKP+fLPH(&!`bCq^){985w+J z?msZ+(rPG_ud5TU?OIWYskCs6Xqsz;g@6n=bLd zl%T{i!y688Q|Q8=sOjS4Lg`2t_!q;sj+Y%;MoKYZkMfZ>`>=t2wNewj?rZpNyCT&` zZqFzMOUJAN`?(rtmqg?0{-)5!9l43HS+p&3i)Zaqohl1zWS~=ZpbhsqA_oTNJG!Mo z?qQkU@#DwgvzRN@2=26^;LwK0=jTHjNje`!t(=Jz3Rclhd|`?TKlI!aaRmWrjMTF$%e7^ zH`nb%*wMzDC>nok1$v+w*1uJ8X$ZSdZnu8+*>&lvZrOOtceRoQ$6V2N{Dm!)dL*IK zdV?C%Ed|f(An++?F1*L&<0eW$^HTc9ID-}}-g_T@8Bajl1O}>CK8U*S-g>+jMI%w{ zn5ZcS(?3&NL zQ|J6DEc4^g@0%^3L-;3uzrgFnUHBfJ#h7{f17nvxQR_gBqYe#F?jySEv{tGen2UPg2#iH_}Snj%*=4>pi|$i+=-v=hWUbkV7!~)bY7mxV(crNe) zxloeDaqd}!Yi-_b)ox416g?)PuJf&U!O&opTrO41vFlV=!wn~k*}ZY&X0JX&*Avv6 zZQQKSCZeRG{a55r;Zq*nxK!dds#jU}Z4Llj5Rhg{au$_q)U01#l;d@q$M8cLu)#?oIy! zDU8z6t$2UA3uc8&Bo5mZlRooIWB#yQY_$cZ?Pp$D_1{YD9lpC5U%$o~9R!E6qCkVO z;h$|4rZjU;dwSZ8J6WJXe%NE1%_DqX)D9V9eTPY72d-`j4AOfem#w7~hA5sQEq644 z9a}@p-Veav=mj=M#JMK#v{XhpCc+N)fv?0?85e)>W5RS#S!If@;nVs*rM%P1C_Y;i z7n-Dnx>Pnf-B3D88AfCXLE|MRDr3Ft>}Jnv%q1An8`h`WM-a>RxTwpXVP6Nvrt_0W z`O<^Nr-3av*0vShnVih-kRU~sV?}T1wekrY^kmY(>N?!Oxz%vJG=6xkB3isXlXn*_ zh9DAJe43QbxArl3@XUu$eLdU0+0V(9CXv*uPk|LTLd^^J8g8jy?;k?{6{dd5_-wxB zWg^GfwD8pC5z&;=d%4`)&s{BHOF+nhl8`;~L?>>WnsP6Pi(b-JB!2mT-Z4PM|07ycQ;A1-=!E1~cQcFz`FJMkfnKDIxO{dGZ@9@R?WylL0|z#sZAkiJ z9}M}sR6)~wN@&sVDA(=g5JBRA*FJoB(~TMZ3@E$cmmE1eE;)w>|4>t)6j68`;lv_O z%b5~GQgzf%rh+NNNA;vhp%OPYXx!@hz*?p7g0Z!al3vv?OW7dBk!-4%mdM1*I7%$UF(jdjnzs5Xrd(|HN91S~cVR{6D1J^@*h=YJk2WC; zQ0x~)u1n*+UEyO#sPts%bes>Gw4g6`c&_GxnT4A0nn=tHWf*GfuQD6opUuYYrjNer z>xcD;q1sq0T5y5k;Nf1OyX~LTHK4jok5_a5CP2|pmfVPj8 zmP#!TNSzvIL(v*Y6QBp*PE6a4$G|U)U=g?O+?z}&F1rTw6Fwf^-dgQMZ}^HdW&be1 z3nG`akPOzmo#UmJ{fXcX_2kc=KauJb$xS!w9A_9`-B+E82*jsQ`t|wUmir1}{ni%A z12cn=p~U2kCyhYm)ZfKoP#hlq(}ZjF&k%sp$GtSh0^Zho2H*R7E2IODVnxu|fCca7 zy#&}|77Nkkj)H3eCLexk9Y{6{`Q z2_^ZDOiWAyk&LuHd$%R7-*tG8-RiVaM!tXTU7nKzOuF4mjA7{7I5=;wmrAH9N9wTa zQCJ|Wvb2RASp0o4W0A%0JV6;p9ba5lN7=3W6Q%}YD%PQ{j?^mHPb)7w$d6}Y3qq1` zWSad3KgJEDaM;#e^Qa=V%E{^+)hYfT6zSk}-FfhK&?9BMh(n-WVX*thbYdChk*$H; zjN-YIg0lQ$FbI4hW?gvWd~XGF2q(ZGa~J^~BnsI1{>stjAt90dz(^jM@a7pv7o$or7VnyG(a|#z6%(UjVv=7W zBD=1d;TP4{uMN~}*DHQ9w(-wM#Zp)47&ouLBCF(xg7{`c03B$te0+?Bn8ML<5HCh= zk2NW9r@gO9`r)X{i7+{2D9^3;o!&mF>Hg%0aUc0y(Sa|mQhoah?R{1=^HWy46@*~O z`kiBFNmQ$_7B9YuFYGOUKEtlq4azz^wUa0ZI$MmWfY)Qrf8SI%}zbfXYtoguz z4OCV5VDagx+_?YHLVS6`T6v6iGaIA(XHi((dJj`|bFn*;iKcE&jamzZg?!TTHKLnX zWQcIcr?J}<)$vw%V>y<;!y+2@`Cf2L>?#RMqJDsBs}bDv@=G5uGw`%>Kos>dfy|+y z)bTb$&985#!!{;~FsTy_fLdX8-re0?$}gxL9PW<;*w77Cd!RzEX@5+IMcO@+U_0S* zPZdHRZO;am)7Cnrch zUK%}BydN*6SAS<4&D29-fM#YRWrV94L9q}+nIXeN;_lv|HM!;= zcNzDFVCzKS(5AmdQnhS{IzQ#ZbxnLt09A7ZdgBq%rn;_6+fmd0{d4=iyJ>kvW?Rl< ze%p`E@D+w1OD(9Gm%P7@>U5cmuQ$s7D}@*g1{vS>j-X^~Dq#e{XGPWP%oWuhzg6qK 
z+cR#K!6O$RpSYn;K8#)A@%N|k#)5ogy2(rf>|0hWlSd!or*9T3UNsXz5J46Sr`W!J z49p;B8{#3kNzs~do9Ia^WQJtDzugp`#9xq1JhgzmcBl7L+ z&4)7qD_xALNN^@!(AFy&Z{4eNnkyC~HFLt(GO+L`aiD5+PBiEJCZ%Rte?}-DIq|F3 zW%Ni=Wh_k5vy7X<30*VdHZ*YCZTO;o3;DT}m4jnk;#*}G;}D1jdVZdTD1wR?PyKdc zJ3N$(Phv?)Y4x=5sXJ43XlF(>0TC4vRoq((u38M*Qp8xzV@f)cg(?@0MIX2 z4n;rUiOMYUjXXa+L)L!aG`D0k`)THng6`GPeE-DSG%%Jey6mCCQ2PD!r}sjwZ_>h> zDu1BD>iyH6<+hJ@Q7tW0v?+G%JM}=}<_A$fJzTyBp&R31RUXd&p?SyPVA;fQYxP>d zepnkz+GMfBVnZQ2NL#(_3;Jg<(|iXH^;ehH`+rS(6*q*s|K3_Dpp+UU(h8@Ke13*v zQ_dmczVi)RC`Fw8Qt{XFnhw*e#>AX4dduIZkRC#^!RP?iN@C=Dc`Wq|>(rxpN{Y zDKrj4$p~jKYPy%sk!0jvZ^YLa;-Jju8&UM$Z>gZ7pM6-0^X}UzdCTDuH}vmUE8^pm zTVFNQBdv`?@>CjLIJaOm$Y&(fzu=WHL3E?i8^Znh_Aw8M_T$a^9Z0F^wedw_4J*E+ zg_cT70=dl>u;(&ulcPXCrd42&e19tx`(gLNbdDTt<7$^!gf4YI$k=tJ)~9#|AMVFm z4|mNOxWGPIAsLBGOs2zchanBrC5%4rnHfT1-=ap9mUgUAZ4y3jsl9fj zK*hqRwP$)|0M?QLCB3)e*&Hq&C6+wfFg`_$VLY&?h31OPrZa%t@+_LsHn@IOj8LfU z1aDK({rEbyx7I;-D@Je1*uUD+TC~pE+HO}-V&b*MQnfRU2`^M3vGNrH?YiVPGySH7 z@bNoLlW$F2PUB>Oo!xz(sPjVG#^X)OBM+`*d-t>d#oT)bHTm`JzG6dBL_~TM5D}2x zOF#hu0YRGdCQS&v_b646j`ZGp?UoIwABHI(zYZ-se5fyZ7vw^VgXZW+0OZ znLFz)>t5gQb$zbU*t+rMGi+#(Bj5c;uKHVSfy-}cULpRn4qS#{Nf|h>wvmam1o1-) zlK4l>P*qI)BYy8Y<=3TTH{OhE+ef^lozi?OqViP#-5!t%+{n14iYeUXNQS;6Ev;m} zRuJ)8<&Z(t^IV(xNGJxK_NEvuQ9iwdZs(B{?#hZ9j2?|qkKRrCV$G+dyqzpDf!~xu zi!~xv8Sve&dV+9!m@u#-i6H(h99VrC)#jha^|F`OC>7Qk%AeyCPMh`GE!Wv^PRO?> zLOxJpid*O{&sS|_$-n&w7;HR8F3CkmZ;rC#7?Ql3CZxl$$7PKB+_WQ3uG*VCIJ!| zGF0MSDa;H%VYnoJ&;k6(?T)sSmT zlF`9iZ46KI-2BOSCB!Lu5Qo1;d2uEWMn?7*NsD_lDBWK@LaTG%Garu(rAnnMDkqH? zRi>7EB3=Yt>ez2R#(LO8#>5t7GTi)3i{3?~P%botm*-(Mm(b#TGkX!8ltD@K=r4V& z-980xMa_oyb1;oW+oioLznvVIFb9K;`CjP7mQMG%W#j7s$azIWQ=^z?{bA(0_WW$6 z0i;dW`;pA~hn2M}We%{UQ75N74uyH6E~cVqJ0*Yc7?L9*`1$3j5~z?}sS0IRfD|?s zy;A5nAAPThAW5*SJZad5bJJDzLN@I#=V3aZj{?J3c>UoCmn&+^an#;bOXw=sJW%14 zsrhS`m}poGr*+S06CJM^`50hyU=q<F(No&i6%U(FzFjs)EmLb#EuZJiu>+0@yw)A)u(2<_;M zi?p)CK|+(hG`+9gg--<7lOj;LdYDg$@&Hcl>mX|0GUbVe26wXNF|XSMC*6BHyM!yk z!55~rMn=s1v^l&-J&TkfD-p>atMZq>f0cZ5x)>P{?u5e{w2GScu+-`{3E_y9Zyrpr z_D|dIv=)-DT|zV+msZ$!dIs!ffdqj+vGhi~t3HMnc1!0t0bsiuL|8ieF&cTVpwuIo z=0;juw~^jnE^cKzg8gL2ErJPEMV74eI0z3x@_Lt|VZ5+NJ;2e!#LV2|#^zKML&@&s z-LSt?z1%N6Q(WD2vGJ%Q>(p*^?LcSKRj+dtKbLAFQiq6WG{deRH9lsI*5J9g!*?dFbgKe2ho*gb2fo8(M{wCeF^! zd-ms}kmA35|3ajrFTMyu$v8*#0>m#Iq~Ccx%YwasMUpSFZX;jkRoNw=FNDCKvR&cP2aNEu`< zr1PM8M^n>a9eUJSkvXvHk<-fCf-_87AnIS4_4K~htX$K@3sUZ)=-{xSIyS<=&<68! zZiw<`!eq3RxJ#BM>-iVR7iuW_NqnN}8$IzK075BgWfh~zilS~-D*sw=wR8!KY!W`K z(8laEbn=#T6Ipy1LUf?+Bfn(eHef}sexCT|%;S3jgp3(Z7tBTy+O5QU9R8Oz9ivM? 
zzWq?PVzQ@#Tw!HzdBdu@Jhc_3_EEN#5MTDnka2<&7PeUs5c&17 z2y0JISQ-}PhjoZ-_?E)dCSbBk)TN}yZF`c*F4R@u+}^n)l_^47 zE!51`&bJq>9Z}!iHESpPI9UKIK#aPsds!QI10VK*!V9I}@U;h_V<@5bE#qS<0h-wS)tQBY{_j{<~RlaA9;2J!Py34W*b(3A61fyzTK_`WdWg|PZ(2K4NK zaU!h6>`kYD-k;Trdpa9}i-kP%nFs!Sn8D-pqsD6ezg^A&BO?9~IV%TSgF6bpz_fYN z>1nBz`ndAls{Yn`j_Skmgw?h2BNy}a?G1{j#g=A3P_ltlyN5I0&3h2U1dU&g#*~&) z_+9GT0}(jLtP}P8Q?u3`Z0HhEruKQ0_stc(zCl75dN-JodZf5c)FO6L0`e$|Eqab> zJV~JCgo*MY&XcEdxxhy#{EK!#LjbHdBI7vxccBd~yGWoI}-Ow%i*d7t$%Kv;TR}5F{kZ9)= z2d=oj?hC|f=N@la(?2dXBNB4^JY55^XlA5K5S?+c+>IaMg+ok@D|ZoETgyC1DUv+u zDf)7Mx$sV=>-ZA9^iAP(gUa^V8asVXU}%8nk&V^qNIN4_HvL+r(4pJmfg-UEj-2!E zK*+iXHo;5ByCf~2*x;~h3ay2$2i}%}zAb52h`$0uOST>T=5$;lye>0AlPu+|k);KS zD{aH+wCwurZ)O=sNjMeUF80n{66GBoJ4i)0M2!d(%T_75xn9;YXpCd0r~W+_upCSm zL~r*DE6Q^7xQ;i{kDuGg(k1xNYY*`nFj{aEh>u-3h`1$*8a~=O}B*MdE5eTBUi}wOHgmc96ufH0Az|as?6|_Hd&@mEDWPfE@i)1zd z1<54sCvdbpl_&igu-&6h0@7quEA6wX|{A(q(Q@%HRJu6?Dy=e!;fec5uN9j%&dj3#9SYW zYFS0QRj%ab)GOq^h-=fApV@Je?B->>7SM4^6E(OV2OeFQtvpqvW4{t{6q!pwSTUz- zX|KCuSZT0PL14qW2$B#<8(uAvq}XJeHy$J{jGd;3w+=){G2Ti;%aDdmWYc4Im%jqw=NW>`pM7?k@^^!nKDT5$ zsu1%)!wsr<-X{di=otPyzlBuTAwdXVKjV69Btu5kMb*Ll?YX(y3(J0u5i`9jyVtGH zxZYnjM3tY?&g=-}J9+naBK8e#D$I$5&VN}PQAmqKz2X_4oxmUKFEpxj{qZ^uY>p=w z?@SFhnEap*!HqD_2jd!T)fjG@H1pS z{qT~n_EVH_@vu{()ss-Q>cFs=hPM5dYXY^5Ou99QDHcy>OSN!BrIX^QWUTG>Q~#=y z!kMuRp>sG=$n|3G6SDwePQ^_3A9fd=+v5p#Fg}L?lV$jcQzL`iXUKKg8?zSdwA@o8 zj%9zA+I6W}-LL%vX)~U<5tOj~=2H7txZ{Hp*8mQ<(cTCIjorOqZ8Shb3AY}(4b@9lu-KzJ{MiUCj-pw*z~J(S{2Tv{;AtTmjOG?{?KD!M>AUduAye zQ^!|nTi0;7FMMP-q+1IRdb6^fP4P#ao0$iDz*%k2#m`U_&NJ_!HL&eD8Q34|kwkHH z0lK4|@z>})4;}s3C6j*{Tf7NJ|Ak12<101A;x=5tq#zhWSM&vHK92{TnABB*vTsL+RB)_OE%o01kw#xP>WMU@_PcZ2f)UtnI zSHeZpbKfEAn6xV3?c;Q};;@dqsCJT&lyns+D_~zm}k;R+@Iwm#=#eQ86g)IoJRav)@!Au4Xs~j*-NDM&9%qd^@*d_#rh(3 zT5N7{821A9g%}_|3p#@m32||Pwk8#}3BpzB&S^xHObF>P33_q0pC>(nf@FCp6#3Ny zF*NWZIZ!@${i%rsS*5{_{c?F}#*_UGCX7T1FHcV;{xZdo?h5ZEQ3>xEuPO;yfq$e3 zPpmTbimA+c1>NGWxHE=${JiD%9Fp{a9v>r8|h)disHc zda2`a*zyL~{u$|`KNafZu|9^XX?R>J9=Vpu80;WW9+|JGnw`;xhVy6z4!-MiMZ4v+YAI`=9*g6SNV*khY`G2<+##$i3oBrcb==*;+ zZH|gnR%$l41^&weYNn@Mx@DCKZTmO$GVK)uJzq-z@xA}UC0cJE_(3;^iaOd*`)@V< z;@f|AQz=S%K3z*(7LFKRwq|g21s7>AH z_&2iY$B%^8oBtbAK-(CL8v8704;8|;Qu@3=`vtTr1qc0|L9z?TYpOLT3!}t9E2Ag`T`SY^}nf{ z(f_?mb81h{F(pBr(y7LkNMl__Nl}9a00X0jho9;HwEqwG?0;L_hmcf2N(*Su8CiAh z6C%dcJdTbx=)`}RiZ>jv8~gfyJWQ>L!n%BA^70GqP3@R(D$)P(=%oFp0P`QOXg<3^ z%0{Lj3_oVG_>{g^?`%;2=F~WC@cE_I2fsh3=u4c>5J1VmG)g{)7=I7p>NG^_Kg>rw)!(md0xN4;z&r8(_jiU^`}|lR-GA@^%X#ph zvA!8aYx%If0&74j%mCm&CkIn-au-!J75xD)t}7mp63Z{{@auQV3wlPz=&30ZIJ|V% zJ#6DVFE1|xc!)t@W`2krGW-=Ao3(_q^^J#leO z637btAS)FHVlo1mju$5C9Qr~%Jv|9x^K$a?hEICD_xuXL9*G1#pB$@CjI^}0?VnL@ z?w1CKKx6s!?i|_niO&D4Taa&$3LdzR`A6prpnDV36F+J`bn$Q!?>SYBnC-W_1Bl9 z$J>(}Z4zh}n)nvtj2w2TG9{$r6ENSM5#y0B4Lc9E>t2`$s0&=hEX)BSTI%Ab*imci zGQ81{n|%i|gBj+>D^8x{4rwng;nA|}9M;Jtn@m+6M5R%m8)|@43xM!&<>QCTB`jlk z^#UG~3u-dguG_>~_dC~x!`Z|i3rqJ4@7yM4cZ%t){rl`_*8T7pnDCGRE=s!Icl|cZ zgU0A-m0txW7ce3_`Q9_6mqF3YdxwYN%NhOch;~xfV<%+T|rCRYgJ7Q&{f}47R46PD(_i3?PgoxggscpPLh9hC%gC0msKB=J#dV>jIx)iKVO6G!F z3ahI_0G(?NU~hrMRU$bBQJ;KPTk z3Nc?zoMW655E12sa6llU45OBlDC7+AH&sBh8}nUpOp1?LgoUqF2edX}pt$Db;=IxQ z@%K3%9^M#@n&)jVT6#V;GvuvEf6@Uzdkw(0qvd)x6B`)#%soOC{b6e_4* zss~rNnAMeO;V0*ko?5Q;JRVvWKyomY!{8 zqA^Zwn$CD)!+PMvHVyGZ7}1i!=jM}l*a)ylbl$Gey^daWK@5#)^LMIkM+XP!@r9@3 z!V*4~&^7#XJfIETQdG&6x26*omP4lin`2I&d!>Eho;zZ(0uIGK7H^!ydW2}Py>uc^ z+3vdMcX0SiZ&m+LW$o_XSnJ4vYE3l(!R{qsqAPp~EnXZx*59do>;(u=d{h?GQz~ZF zQBhMjfCT=Kwld=Z3-_WS)z5Ajs@D!b!u(h%8H<$7%o5JFpW4e%e1=WO));KXEon|j z&zrM#xMujR@}`GS@rv_)QACf=fDMqN@A`enoTockDw_jFlV`mlVa%4&@tuLUDB7q`~ibg2yfBH%{P 
zF}B!fe8yzIA>1I$&MpNImq(=wns;*!?jQHp7aoqO$Gk1JFUvu@L=?9smR*F-tciSC>^4vs233wBfZuWIA?wP!{W0StZBo z+n$jh{N+OLqBLIkbFxHSz3Tw8`wlxLY0I6W*rwzryNzEA6b!}2{cB4N>h-WMqe{g| z?wN@JcP1B5{B{{v=;*eehfP5~ z7wp?~H8|ARf9@pS4l5QK3%$zB$k?#MaYjZxBPY-A397+a5E_e1Adc+<6Qzv_zyNXB zQjz+w+NR@X(TMsi1I%gNEdl#|yWHHuh~KxACy*m@$GI((3~XnINXF@E%MqWb+=u~O z92`0O^21?#(;Lk1y(IYgapxH}Dj5H({KUKWKMeJIfgklpGDAb^?OKiM?Yg&>z_8`s znhXr7O1Y#SVou&gGHvKotLJ#+*~dJp8%pi19pftd`}>EbGV$zM#&d%YMzbA-yZEf$ zsvP5hF!X*BKJ}^Uos>tXF{`}{SE2t*oe2EDodpXq4Bb6Pd(KWylCH|@GAj2r66+oE(6n9a z^2Vs!99Vx%v^ea1@y?%<;^l`3&}K%Q;Ywi#3Pl)@#x0#qxk(AxYcy#>k z`}Z@Fw6a@zzY#)wt)FS}mj?XR`LDp@O&vR;CJW3+5$icb?%J+K$2Vb6e0HzLO)t9o z`i89ks0+Tkhb!K($e%*}I8snet^O|LdVqvY-H%q#s_@{^zWvf6*vA`E44h>E8w5Jw z=#4Niu@|v4J-*f_{r<- z4(#(ONnb{Q_bcloL?V+X=~+rJuHMNv{wN2AUK(vW+E>zs%8_cz{lk?<(rSUx=_WE4>k2m|LavKL)jcO{Rq6i2mqDuAK zQHMir*j~yL)Ac+kRed8&{C-Rvk}O94J&y|q;6b&!5GQdsj=s${uMKbn&VAuoQ_+>MYf)l;*C`nBJ}t~J7UStoWLD-F5@@(6xpp`dBfV9 z`CCpqz3>Of#zJUNMYXXJi`MsGSBid~#b<%UT(b9{kWiMZars=0N5l3#!8}Jx zafPg&C%Rh~U*e|NHXvUgw>U19`;EFkK}Hpx=P^B~<~`mIU?-2ZDsDcl#E&M4XMQ1= z%4XD57osbMw?7uoU8u76LdIw0!-kMbdhwn9_jzX@XDka69cLWPGmCYZWd2U^;$4d> zxrBqXFH$BeoZW+?@gT+@JXQO*y~Zq$k+V9%WPEAI^jfo`OJjVZtr7M$9kPlh>_Q>u zc)={t0cL;2H($rxc+q* zdLuQMR)yS;K8aU*`_E;cdP56e2?4V;WoZ;lz;0c){7?Sbp5G4=Ok`a?r%_sZdmqmn zJ=1sl{Uu{-#rhM&PNIs;PD!ww-fh~Ga&}eJUU2`ecvEbgPki#9U8ThuhI=tV#XkW@_H>t`+9a|B|aUEYrcw_rjA`)^t36nQsWDFWkou%W)d`oZMPxp2I8q zLr%e`^RjN3*k{^)gJh8?>Wx~#qrIG4Rs`aY(rz2fH$wKu#*=#pb=Ddwu|M8bLJvf6&)wvwu@c^Qb0Lt-*W z_CD=rb$?D`<)LQAPk0QJkezAC9)o)K@w-m8e2;VaSg**&hxa-@$J|x={X&T8WEDDD z3H9S|qIapUJn`xlfbP9i#lOV{pDvo6ws&fl#Nxc4V$9Fr)4NA-gSZ`;_p|l!O=D?F zqHD^T7va6Dv4+1!wnhP0*n4}?T!{fapCYO%vKM6ydLfRn1ky?QRCqS11XESaoz-G{ z&rORw{#u&d$>585MYx8m$0t|-pVhAb7c!IMk%6CfaSD|DuiQ?@*7!a2E8a?4ef3(X z@=y;jaf9hpE_y?2K4d;JB4P^emT4fg&Zf$#_#@XfYpWHDoafEa8j|IAabUQGm@sCa`9f4%+cP_5RG*L1g2}QW3C0Yll}c58J{t_ z$5GfV|GLfAjAy!3->>1aiDv~A!R>0kh3tsOR2Afu5(?!jlaxEmlDKPEyLBN;Cln4t#haE`O&9@$?Ketv1^D7^-+yjRYiO$L$}az^Dv!M($uielL68} z9|?cY_G$4LkueG6wG2n|!z((boF(d@jP6|J1g{eOuX&B*xy?sS*st37+G;DK3E$q~ zC8@EN6}nh=Ki#S}bg5?oaPP55QAwI6wVJm)57OCYw8`v_QP&$EVh87P{ES=$3q+J8^U55cQ%M8X^gw}*+$k2yEZW!=z9<4 zl(%>O{x14bh+8{^!pqCghSpUW>s?}I1cSlq${Nyq9Xr4N3F+WZQf%ed6TFWi1Df($ zSj9!OKoE+{AwE`RmF{S0F~c%?N8r@w^CJ1lxfty=Oq;Tm`kkh+dy1NV3#|jgkYw|B zJKCJ1sn3Wzd@1_c7|dc*qgh&FXb<=e1RS>qaSCuud`%>VoHFZi1Te-)Mr8CzBK~zc zs+vxo=(ETmJ`@&(-sUuNC7mv8qtw{D3z-`FO-2}>0wcvvT3^9g9||#F#S~}yvV6Cn zk_2;u2aD5AyRa00T;tI(@pzxxn@3F=DXn-UF&`ECa?1-SPDrPJ?>7iMlTE;W^vT$b zg+zVNb#U;sFgfb%9CM>$c%S5pw#D;)~K=42!lT zdFa_~bMV|TN9@mQOk28#2dAP=66{_>d;LkmR2e{17s~)#x z(`F3Sv}61Dtm^Juq?P+S>+Cs-nrYcepIG!deLKvjeF1r*gf`%z+v(61+L64OW{nUn zV)~0P_v8KB!f(#BRT}lPyn(Q_Kq70>>l}~d_!jobqsB|Akc=`?%*2M({`p5%7J&yD z*Ibw8C~ec#-b?beIdyeQsLocR3UqvQ4nnYFQ?Wpv@KHsBWNE#M-wc@zTn2=nQ-m%+Z5;;O;Ga%@rBzW5K?U3tWBIf=u2TT$C2qE{4Ea{q22OkKUw+WG_ql_nqi7 zCh6K?Fx|v_Z2t#^74+jwJw(MpO3$P zL319o*Z$_TS%0DDd*HqGqdbPXC};3FK^!VhLwX)VZ#+SJ@q5Wb)pJ&m{JMjOP!etX zV&ab^(VK#rN%(xP$HM7@7k|1|r}0Q#c&=D=`wGlamKMh?be>JM7#M4q=`JK#kNQjS z#SQ+UiANjc?n0)q9mD$ZA0w+g5PK*lNUn>RAP*br&U*2+Kb{cav? 
zeW|gP2f^;;k&XO4bEj<(%8>$%ekWZPmT$PW6HOq`&7Pc@KQkk*=1U!qs#k9HklOt< z3KQBa)jy+=$BQYDqhe=K>ptSD?aaQmxki34X0!U%uwqz9WhAAUOdVFu{mw^=^>6fo zSe9}2NcMlS4YAR|v#A1wJJ_yx+dpeRJYPmZ`C(Dxos%}%eNKjSnp0DBu6ucWi#WKI zk2yaHT`oVe0gSK>On$6!9`~tU{o*C~N0U*`=?W7sspKk@Ngb#lqI^LkxK2?HcY6W( z%5UuQN#f(3x}}g|(M-meJ`9e#DPG(!=^VokD`Y+Z{s!eBTtn`|2PAdm#+T)~)4hDz1@uz6 zBYeHzJl)B-?J_i7`}$n2F|c!^bzCZ?RJ`of@?lNQE=Knx7S5Ag_MJ^Ahngp-&ny#M z8xY|&MmKfKgM^8X&0HLEIIHbUoHi*hypj}%^nAIv2Y18au%`ZkL*$4K+U)3fkD%a= zPdtyyZIIWzK4jUHD}t-*LQ>G|sh!;UOLwajm^&kHAH~LQw9hrozqNE^?*cnN4s&_g2{ci5eyRni?_8QC?UJz!s!Te2`5tg72!HP7yB$WsePtv4 zj$re=(C6u}J0qLAu%Xw!N0J-GmSdqH*UKe{f{YGJdqHo(G#P=yf)+@7R%)?+q`(TB z7(e73IWHA@h~;p?^gMwrQ(H=*tlJJ-DK<&_8983I1bNyp#v0DKFXmOdHAYs=ALq6@ z_w~a2IW=!tSQD}-KzNhx+iyu8rahmRoHjNNjR7b-lTi=@;xoip?TV%d&tTWiNs%Zv zNf<$n^ee|!eAO!1oXv0AUJ2Tfl8W1wQEN1firRgSrBGM_dE4>OVbl->?`UpTC`5eRRiNW0tfkwUkbH#)v)!r0~QADr; zDDuSXXsN*GLxSydqmtEUb0JVqlkz7N-KE5^f;POs0w#I%Pwl)E&ysKAhB8BeTv>xX zvt*VZt*cc_jPQ;huH_cjAs)cAleFu;7;jA}a}O~7o%(h(Yt#hpq1Cd9A;X9l_a-jv(rE^CwTz>>=sbD}_(cX#*@ z8|=jxzMhPW6yhDa0{joea(8E$()}3BvXQx~0N|%A=e#MSX)1GO@o1kKbZTO5bIwzQ3mRy2|+%Z&}S?4rtID#lg*5*mc8f+#ICsuT-_r+Nqg)Om;h zW&66TH|&qS42^bj;jW3V)ilKO~#iubi@Vn@15pRmT-?B}z96B8yG)pk;eC1H}Xh-Zq24tCqkY`~Lh z$iecIh$y8xJ0JSJlFZR~;cU{Pq;p_k9JTy~iEpMgR7VwVlzBGpLMArX5({m#9&w4c z#!gRb52PqQ=2Mkp3|wkG`(A^NZ5r91`+4-Ua8{a)q(?o@K>AWJ;j|p!ovLb!tNKAn zC?HpaX({8#)y#PI;lpuk=(Tzej*;P3S^VcoDHVO?DPhBXPJBPe*i{UPzEw!e*&5qM zgC4Q>Yy2$Nqs{D{pw)`rz&*dc4hpEV^YQ>&IBruAtLMIi(l3)s2c>qf=*Q1LB!T)+ z3+Wz)WlK%7Z87%OR=QUzZ&%>D30X0fXeO68mIXm&H zdwkeet%KT%0qd>QkiQbP0fUERDrUIacV!ME;1cG2a~?(?ZCxZ`R7yK+*%t*NJkm> z&VR3r4tte;aI}vvedLaym}%+=PR!}QexO!q>cj53%+GMC@0I>_8-!cUsaL@%MzELzM;_o;4i@oGp^T1JKV}IjSV#oz$I`Tiie)N3!J;@7+zeRTY7R0<~Xxz@#2zDQn!T2QTpN< zQo3Ix?QI@<2S73ewzsq7gDWe?>h!MdiyAQL!K|S*#-g!)coxNLHy+2*l!ykepx; z*={4i9}Q5s^1{W}SH3ADWa0u}l&*n9*}{w~)^@=VurEvZuly`gbwKN$^u|^grqg)R zqNM#!`x<0|4#I@@Mn&0=H~raNMpd2B@ahc3Z07KU(GRRYRV1xy8ct!w-AChddD|Sd z`@8ekW=D=glDw#fpN9oHUjr-L8XaaCZgQs#t_*T8r8 z+cPY2y&+KcFh=M7v3#jZ_d>haa7v|%7|8=LK$ zcr>uSA}83EcB&?j-6UE0g+KSQ4Ht2Y*kf~A_A&`|=xHQ^5j0`(!=aN(fhuCzI5IRp z{fJ3eiW~REm(&hK^k0Z8uP!hDh>!QH`H<-1;U-3(W!?7Xv$l4U%Q0b~O+isn*on8n z<)o*lAe@PpS2E-!`ru$JC_3o7-D4ED#+6WtJQ2rTJz~<{x4X+r*WmzEqt2@M%t(ZF z;E7T0mE$N^L+`chY76x({R^lkWr`^iyqS?P?e=hI5+Ge z=mEf)LJpfzn}Rm{cFJ*2`ogD#ybR@feZ)z*hMdM{R|hOVr$%#RTo>?YlFowTFh}m0KC&vlZmORDI=WsO)yui-#ItD^c_>M;+3-u@edxk78z$I zb{Tb&w=Oj>-iZhz9nY=Eds~;4g(06IAF+xAN7gbuvc7AbDb!AB`ZG=doYs7JBaRX+bR?Wi`8tZxp`)W1i6n}DY;sKsBBQ)D=uIybg&*thX zv;V^`m#!4GwSgh*G9y(?Qj|1+R_;wW{+)jk76k1zGuPy}NzMDu7-fD4r(vHAM?u8H zBT{zKdZVo5k6U@){8X(R%pZafZ_7ihyvA>;AhKen8owCz{0hrzuG&>l*f zn-^|Q)GA*cv^UWT2xvTe7QP&pZ}BT8h8GU|IwOX8)Y+M%kJ1yDkN}3G**E?ha1izF zf@Rf!8I;yv5Id=vpvDmabWqE~AdUiPTay-7RthcK#S9W_JFM*OD@)seDkVn`oZ~=B zEIR7+-%qL*<)BtoY4adDx1 zKSSXr2uS#{LGs$YZtv*G_?>qr#_LdB@_W)?LxS}#s7wXhDbw2Z|NW$#0H*H65flp! z=3fAR-t4p*vl{0TrA`6#OY0TQ?K0DV@yQ5rJ&bC&5QG!if0fJu(QQ5bS^@^!Ym}(m z`?7Q%9$7xQ{yBOQA>bxAmi6i>UoaSfe{@D6|9(>Jabbep&uMg9T+?AIE>+?y_wt$! 
z0l+y^EfLr98Bc$Kx!X&1A ztzAJ;2vxAV`@o&4ATvIA8*|ZIse@?m=s;k>1CM~92+NOLiJyY{-G;ofa;8Uqx6>H6 zTe6Aotr1t;xsoRmAfo4r*^)B({tonYf_Hw!-5s3ASm9-5HtvB{$6W}vw?JPjChlP8 zbdA!#u$Bp-fZw}AAtj|=z26Fgb6k!$(*?}c&bQjZ_iIE#u!4+lB!#m^Q0tI>Rn^m60bv zb>9=irf<0aCzsuMm~-~zj_7^t2;f=l0(N4aKNl|&LIM> zbL?P&Dr#x@Bv6=cW0+5wQs36fNXJ^pTHE~6L4QZO5I=+|2;$lRvb;?Mp$XK=Ir9M6 z;N;>0&|${<(521kCcsZLKm|zKiGP&^AHp9RK?6$8pHA0Q{T9Q@*Ko*1%8oaOBfWVE zR964BcTLvaIDiTK(h>^xEbc%17+e3}&&14e{z+Xbq8XW9*6d!Z18Q6i*pm9r!A z*B}N0kkjUSZj6HG=$%OB+^j$n-yQv5h8}ZNr+?EO!HHSw!t{~beC~yg2bT&D=Re`w zNhcl>^FNa=urFU_wYl$jRU3la(cxNKmCLEP%KSihRo_hcGe!B7+wPZVmHb|mKYly_;sK?QKkI`+3Z`R}6A1al{QUr%4^lGnYgJ-5LDr7%Y-|EDvhAZc@)+3U ztiSJ={+CWF$r0$JTr%!7mTT2J)Y&w^yWg&^t^IVid(JTKir#QV-0dZZbrAs$UfvWZ=tLT1V0*-93;>A?bdUImhz8t ztMBHp>VW$b7>?@n_*pVCr5_HbNSH`28D#Yb^J)gv7^tV1_!-pe@;v?it%=#D05Y_I zpNN)kOjRwE9yEV`)UPhR!zco3O77O=LRMYI3!rNlFlbMX(7VgWuj%Ak9r5kkEq9CG z>KC9TXNqb_^D8a{x~U|y=F{J<=esK~Pr@b*$!>k1-_5uN7@Mv9wAOM5Xj~6g+S+mo_rS1Qi2XL*Ep>g$MWr}DH1(Wwu9acZhg$_c z(qOay{gd!xZ*BBXGUD%MrwqS49|!xt?`2HL*=ssq8yaqn26-Vs&Vh=G>E+q9LgYK) zJPwJR-J|1UL@d`uknabX=5I1Q?2(eeWCFA#c9Rw*P>p@rxZ%XWn18tUG1eyd%I*F8 zOuLQI57g6+qly>Q91vx=?_<8z9XYw4Pv;eihC)j>2^3qFB}KdFy(8uG#Ox5msb5I` z1K(UJ5Rpj7hxugvc1!Ju@y58k|77vbt;C1SZz>YIC^@ZucmN(pmIQQxWOJfai&8_* z-oqOElM~jHC&~+k(T{=3N*su21R)SwB_A#sHZK$r9=X1~&Yrs>om_O>6u|*1`1|4$5l`vMN=JE-9a$lYm)D3ZR{&GrJ`nfmVlod#(=SK24VIV~Q-{_^f_h z2E$Uh)qxEAzNf-4m_7N7K?g{!!A*KR?$R4$UH8|s#HT%2bOT+wgE+{oV)s1GSCH_dxlWG zTMNUVPi)0SeVp4VBNkf4*DIdwVMgBn`sp`U1wL%>E62wz#$f#O0MMv5CRf%QpK^YW z(NtSxYXf)??%w4<1C@B+t5E;<*Wz;VKp_HOTpfA6@4>88RH-1&XU9ei->yDy>x3{J zImGKDh56_G^Vc>uADh@LHt)J?b@wo4SW?btxMfykh`C!j(kuE{1eQrGJvgSse`uZe zH>d)`=jhKLG52JVg&9Ppcs9YNj)9+~R%ZuWir%v(ee>k-2mv+7{RF&=q9!Q`*1E^Z zZE)!LcH$W?S=-5y>P7RFqmpx2Ufu@YahLW*bE$GJiWHY3mGS&|dpdoegS>DX`bDHH zkix#|>d6H`%Pj`Mr>l1QM%(Z3wTBO#o858N#=r%{EKyivuY*7nmO!T|m(YZ9d4I#? 
z7@T&7Q=_(jMp_k1R;4NN<(}jmOv%os7tG>W!z8y7Uajw@r3IcbQ|9Isuo_jCc*t%3 z8dhr$Nz57G+3q7gQ{NfH*Zj7Nl+=IKf|^Y4KthAv-tyRMTaiC?z(5js z?Yhg@iIL{xqnv!|pY!@ZW+ffHgvvz;!TcGnLk@hM#^1MLBnaDWmpMFMwfbJ7(8TUM zGZ~R#yQYLMA+3BOk?H#F3R?rTF-LRPfW(u~T^qORj*?TG!?M5cjSx$TZJ(}EF%PTt zK|bPt@%7eGakSg=Fa!-j0|d7a+}+)RyTbs%-F1*4f#48=1_=&>yL)g8ZowhI;O_7> z?>YCJ`@4U9i&-pI7HN8Vx}Rt7s@heBCq2SB?A{$HVJUtv=GlOmLW!HO!i%HtnR@}4 zw4Db~ypVvVG#`-B&@@+dbB}u)ugOv8hito3fT{hppA>H)lOKLb{Gq{&Dcz1-UKT0^ z?PIzhgon#Eeq;#bCf648V~8`r6^`%TmV+(1VMKk4}XL+5?w=1p*S# zb0W@WiQBJz?s7Tg;@LxCz32nfj%oztWRGbrEiGfyyMMIaSeLCzT98C5rm#FG_lgkc z+cVHH#Y>4il4p%Qe$=MUi3>#WGaKS@TpWuXpQc0lzmjp0gZr zGK4~p72#?*KRjXee0HZB`+yFZjMU{q_CY`+-{gDRYx(WRkKBMBLNf4%;l5csR{H|SS?#!7uSw!rN7`k|fLaQwkxMEv zWfeL3Q_9!dmnIS7Fzgb8Ke!b*#-_UAuWW!zSQ8qDt(dH4myZu{iwF@ES7zk_d z9=&$){@>*>8D)yd%=!9quG#uuUx(3%<{-973=&8fb7)5YDj&FUuT{m|CcY%gRW@4) z9J)nWq4s5M#$kx{WNiyCM2hU1scCC7w0Is*@0bEwF+g4|V|TOiHLau}$IR71F&nJzJ?jcP9=b#8XGklns#E5oPv7v}XnjvgA)MNSf0p*9 z{Z`Lkr&h(+*AG5d{=iv#8$qLw6c83AkBa)wX4)ji>bE#V$6$QC7AW5Tyjq_;7eXT7 zF|hOuR*yaC-*u==VySm8!&2t4Uo6^0dy>MJ@O7Xy;701p3S|RIi}~)GDb^QtVNN|T zDk&*xkIAkGzBb=maRidtE7)0NoAF3VW#r`%D8I12M{8^Lx!f%w!eu*T^zl99_6e&C znY|nR*=d~N+^<1CCfbLi-e_V-r?3ztjz&)eC{G~_9g&f-*YM)8;y53bLS$tWQuU`g z3OXJi9xiJsZ!(43?xCd4xVZI&99_uhMv3c(6=N8}a49Q)MV0D(uOAz}VP<{MHuP>o zCG$@j!v8@STGnMnjiKo!CIMr&TBV7{%4!4GhRlZY?t~??rP%hByEGyRZne9M@2IBj zE4WwY)4HZ+Yn>Et$%ue}*vvkL`gH+SCpcFVn0x5wEm3`dkk?N?I`P9NT- z=2`qNx0eszcdjp~SL0axwDXHJ`u(3HLj0k;Y|*#!EYI?|P#)PVVYmz7mt{bn<~X74 zCU|cz;L$#nlq+ggG!p^Wss86%E8}M|ACVVv6e0-fia)r2^mo^t*W2i@m+YMXJF>0U zPiIo6rxWiGMNtX*m^Z7YY0zOeqi*2WXOF_ssyvh>&4~&o5i~&ANNp`)Vg3$iSFpEY z=WNDXvgY}82C2u{yy(OjF(q8APn@WU*?>-KGB<{ey`Ru+q}$o2*meDY4QA4kv(d<_ ztC#)!kM14uqA55lCQExuDxOBF^_43KdugH&D>v$}iwz!fvi{d>9_ANr6siqv#cN!x zqb8taMqBRKxBdUUnG0@2E7%k=0L!qkAKuwf_-uNy3ioIGEN;}U!?TLj7Kg_{m$1i; zkHQ4pZXucPOnZlnq8+XM%PxQ1B80i+lK7;X@MDNdaZ7KQ@N+A~hoY7d)UOB1Ihvuy ze#hBgN!mSaG#{p32A`GO8tq=r6Xq7(R1#|vEHoMULON`LEmZWawDXNxljC)W(ANWQoewQMJJL*qX^t_AJSU`V@)sX03SLv`kC_$2v_l&|(O7P@=J{Fm0H}ju;A1s&Nqy zqDN}qJpp;DZYJ=#*HGZl}KN{15)_q9_-xEN@<$RO`eRd8`B`x@CoJh z^#VlRb8pF)je<2JuOT+ppJl8c5dAYrV`ln-h+H!=O!o0RX9pDK8=c#HZ%)C@afZ2L z$C+(lU=~WuBEvJA8Dq3q6Y{L-DPt{cs0ASUjEa$7Cw88it?R4(_yV)=NPmh@a+B=*{-gftbtwrPJ(l2~$7DTV!cYUP8nw3oS3N z5Py**LeYxH$+t@#fqhDD>>OkgKtg=8+B;0?sw`ggQi^CN6O7GRS^0jlPB%|{^B}Ay ztJ?aI$}rcMG1A=+YI&0;;C^GYf^||stHuM>+w-~S^J<@CkGA70;CUTs50cXm1>xk% zU{=7`dYc%rQQ`BG7#kgSeP27*T%ZOtJc(oq*BE(3jh;m+KeJ4=U^ookoqPOxGcq}m z?yaalM~L|php6j8U5Up`5C9CC;s<;_?FZ0npx)I6SU-e}>&kcZ84Ik2+}%OB{}QCg z%%<24>;XTMyViLROX~va&`h-rq{KlnX$)K^3t;d;L)xP0V6jc&g-xB}#D%(gjSnSu z6p!0aANMq;bUf8d#+Je=cn0p=%%xrj<#xQ*VVH`)WQQeSW3BK$1=PLM96Wn7B@JUA zK0zA3Zz+9CT~MEFDcDCuIu|y1ELEZ__6hVdQvg1pQjuUr10bE`qV|gr2Y6p@6qu?2 z0Q9fF`e~47*Q$vLOq$B!*@ORbk)xCHf1r~c^!Y$9eIiw|+Cz&Tbv?zumh1{!l|%mBdn1098I zz5ObLfk{a4&m}(&f%WoBCc9ynX0_2F_86Iu?I)m4TV$N;t*)<-`XUv?bpXr*Ra{Rk z_1DhgjdhI>)LgOwbF`3g_oQ;e$qVP7&ixM=e^-~P-D`%dl3$!5FN%mczZ$p6hfs1F z^VSF_Z#N;GyAJ~59ccTjmDuZ_qThiGqnw-^vz}0Qwes%#I@Zs;U;WrbMx23;uE(<= zKfh#uRVM4r#qVEw?{zq)^lpa7R&2RY*Bb1;bO2toW*56vlkI*;%j($C@Vv=4hj)<= zJhc|6n;1Tw8_zR*DkfN~!QEEoQu#^=cD-b7_+9*LDdlX2ZK3DwKBIEsu{HjncxeG} zhP@cvddkG6rU|ExVS>)3pU`xZK+&74%UF}1KKEw4CUgA0dz)zSSF;NlKsvk}oUi%m zM%J|Z;1t5n{5<^LQbWgu${b-v23s9&!OZ2w$z*@jdBmj{8~^!Q7xD5g?@y7Kvan1S zo562J7q{i*#S_j#95rF2Lg5#P{nq<>rfG@_$qUMl9>8RzuxCfvjUqbUU;Sm8bQatG z&!&CCd&jEx5#9Vpktq%Ud3t*+GJEyfq(|$a>r$eJs{pBQG07b2%Lr(k_^;?o?jx)R ze(U%NqT=mcZ>)TYXa?lQ7dxkh&Q+(?e*N!KosR~uss89Y`9Y6MtZix%reJ=N>7SjS zU!+yS-J#8{BT*7-7YUA2Z1P`dUX%Ia5nef$O>{bV2h_A8!4=p@&v)|6ro1P$HJ$C? 
z`LQlMO~s`MVDx3j?kV5Adxy;V#}u+W%2AFdn!iCjE1vE=C*rKpVnoF9b&CNhM;T2> zfBtaNF00jHd-=GL98F)AMU2Soq+99pa=D?vUiKBj9O4v5Za0?6+v`-?y5nVbpO1QT zoR2?t8B@R~KW9vV+4JAIAXa|B=u+oPzVrOaV8?JwXXRvYn#L9J1NGA^wiGa2gY8&nZQp`=j5Vx z6UGqjU+u>r+6eHUP>gJ*1P6;xH{B>2nd*Bh{yC4=GM>gMNad`FN=S0Xs9BqD4#>?F zSNjyP60;v#S#7DDH0m^XkFohbU&{KCB4x1Z`#CW<8C?_BiLo5#fo^Xy%85{dfm!_* z2Q7L%EUYH#QtJku;j5sv7(iw5nT7~)S?oeccjqY=9%~t; z4`$}rZ=DGV^S)75p%tDtJ4>ZM^EWG{i>Ugt>ahaGrW=oJ@B?B@Qv@G>vT*{9vG2(Q zxG(8%FQ9wh^MB?G_-nV&>{pVB@@EoAT%`KIE7^W?#$g)ynxEsvpZ3qPVlJzVEnBxw zRq1M_mIw_{H${m=iHv=E;YEN|RTmgTj4xrE`moMR9a*~lmt9+*o%>4sg&HrX3|Dh%u#qD4JnjMO>gk4 zFQ>p#wKW0kFR7}ujvA;U8lMUlAva(9?m6fbvB6EK#$zB*$AY(f}XJeX4CH7>xMcYVLPEPA| zK^dKd->h{*4+5c&?f^FBff-)x!p+UMW&rj#2~2qcQQP0zz(2SjeI@V%^fEc-R_ErF zfhOf8Z+qV5{U0QO+GArLp(B9VyScgfXkwCG)eAqz&Cjm~#CuWk@=CVOfT25GC-+5- zC1qt*V8!L-Km>8s4EKSu2F{qwS79>J%uIB@vA3Jb!>);f@l36= z(UuHPYs7V59tumU-Jg`Uy?fof*G;grjs>CvkKdr9!~;fiYL<-DpOIPqg8H^Zl<`c*Nm+#0!Gz<4{j;|Mi97BWMKwULY`Gs$(RPOj z7*jN!Mz}h?teXhz{(mG1^4o2JOxnHsS1)9w=QlB=;DQ3Yk%R_xnW^i)Lu5-O@}enr zc8wq2+tB@!FC0zDQ5G2!5r5MtxqN^0nf@m*pgcUVo@)*y`F=}V)CA^_61#K6PtVVj z-4Y3;EAATSy)N%qGh~J063S$dM1h3gt%Hfpe88XL?F^e9aI-=|pDzO_*(hZDq6jL$ zPrCj{(u7fDJhH7z-ttNg!_kWz+MlR@(LhNf_vb2Rz7+2k(b8$(N6t~ce}7Sx%R`Ui zTnELl+b_DRYRxgg-BO!wVv0XI+ssA%mZ0vIPa*v?>3a%2)~Vs)QbY3EPvgCXnpk+m zoBhn@n=y~W#l47uE0XtAA|ghy(=a$6{vj8yDl44;>-HCRu>xr`dn-|4h@p(}qr=`C z)eF!&$T#xGu0y@JA4I^Gu=u?by{9tb@ZaJ>M9E`9;6MIeJ33NASX9fG%T|G2%`CU& zOTWQ2(Eqb61dmKAUJ0M$dmhk)mb~fVXo|V^c(*L$aP8sg@pqKS`+VhKzOMrTUs6hX zI9Jk8yB0le6pADDI@B)GIfYbD zO%meZq$gDH7HBJf79wC3uTWYV79o}Zg=xRmJ{)!iX*eG}G^M*O6ySPmXN$v_TUeZW z4m1`P((Dhml3(`wKGMl$#JF{ruu5Hqf4V+4WkEpu?q0Ub{}4m_Ms1ucLR6KZx&B)) z&gDp_fpuBwWcabjvw;9OFEH1eXoyhIoL&Q#4K4x=G*_BL9ceX}UCSQ0~Q<>%H zS|WKZz3xe}={vn9Jq_7GK3RuM7D7jfdP_Tf!x?&;LmlVd49}8a9ZiXsXDzJG?^k?* zbIeaOt#g-?W))An+gYmZ&>453g(+y_f-}})J}~0^dqj82>%cJA2aW4UIB#q3+cMji zvJr9RY=6x+6$O3P+j|uqG*bnm`*do9`CvNFnImG2v+!n*)N}Z2X+LTH=sUR^CO31s z>q$PJ!H+c1M6RR#d^h3?vC%Zurq-v6tv~e!W$w929kV`f=EOXwRqQL1^No)7_VOp4 zBaRCT8)_fEIIycX91fO+f->d=%KI;&WShY|I^$Gp*Lq}tF3-soGZibAz1{hCv?GIy;Fo9(Q;3hzro*xZG;jQ&q&> zf5EQ2!S*ws{)`griEo*pS=j1c;vp#72lLj%OYXaq8p%yAPAUKMa?4c?M6+i`bj%P9 zbCQn?Xl91z3JVHUy}WoeX^rA(?pO(?skrFPHP00K04oPK7A?jMN!?mEEuNG zII=PJfKJo-C$9+IpX5=CcXt8dzw>OFR=abSFj@WNf+zg0eO|1nl|fUBDGN>N z+7MFJU^QmylGfr;L`PJc_yA<{FyEd$X2jM35ry~jU?ls>P_e`^wi?&|Bb!RRt}{L5 zZ8B=mDXNgqB~%8eiPHn>Y-Y6JG%%iohO)~Etv>z!+EvrgP(k>ozsk-8!O6mcI1QdR0d{Q_Vx&%h7x-Zx96<`5cM!-dJ&?n0n*Iqhn_sPGpHI=D#>&J zZ8F4P#YqKOvrJ7fzv69%oM3N4a>{2(60sH}zZehnJ;}Bj?E;;7gr+~W-f$TmPN2P5 z=mX0Z=5@cBaeX#h8VzwQL{MhMWb)kv2kTzUpoGF#(=*J37}66p^-m!_HfFq{Ac=RI z=m&-GM)jTbR0@-s4S)5E;$Hd-x3|~2FTL{zRxYG!{W2S_83)auP)JM<5pF`>Y(l+7 z+?~20G&ZxmKd-sXqrl+ythu>e`qHZ9FecK^14J_M0mhm+b@FmEUxpLkX)>v$&U}HxaSfR<*A0LW?n(-v+klnKVJINld{Q?#Z@W$6vQwckbk9Eli8Y5YaN<)0;z`5 zi>=<2%4)_l@3nqsDDVRQK5h;)ikTmOG}vtoe)%0Y^!>YA8q()^Z0n&~5$mY}G&iwF z^!~{ybxlith8==@4J7<~+EV?Fv?L~@?u^|>$YwNd5{)ux;f$>oPB8_xj&i7pN+2u} zVt{-y)89>e7%deN2h>+=jlcP~V1koxpU1XmAY3+*u9n20d^8QDfI`9>3Jf-6_ZKPa@cI%!4BB;U)VVWijW!iDs0O7BjR{2^#H;diBs zCyZgPs7f!Iwr~LjhrGi48objqt`e)ID<;9OzkZdsRrh@+Ot7e8T`B20pDok(I^*^T zD3#OSE}3iwys(%>j0N@r#TX~kz3y-f6=c_EN|6$Mb9@i;Wt9Ue7?7J{X}xkX!9Hx8L0k2t$^BjZ?+A^t@VG@n84WUc1f0cmqE88NbJnk8B%;GfFyfPGW9J6=L&+5w4>N60XIhYYP)h@DiM z)Q>P9wPp=Eck|bp@0Mo#f;1Pj)GwU`W{Ndq_ivlg_IU$L4EUU!M3M{J*n5Lm71BR- zV?BpLJcGliw=h9$DYd1AizDA4M~*7y2)T4)x%Q%lKrpI@68KFI6?LI}Ni&k8(rDak zzNssZVbF(!wpcD|2gBv+HgwhG^+1Rr5OaI<_#keT*U473r^}mGXOp z7gQ$mArmYtA0O#RFJ+YHOr4{Eg3`rGd5(z-fVKOD|n_#N!UQycwTChaLC%KGm7poyb-FN_V9-!*Y)r&s=6 
z#Ma{!;=-5dFgLf=%I72e{<%+w!1-y%jZJjs8FwDMH}K+4{1k*z+Nom)u?)`$mDgZ zCXYj#s}Jx#ekA&))b0D1iN+VfGFLA7$w0cMLZLt2=Y0dlfcNF3WMX57hdD+i}C=U~4eJPO_n} zA#Ml2oop296O$@*x-UuALrpjvv>PbGt`-&W-S(%8lc(?kRVSuuP>0JwhV)~Pru$9s z-^)N`Qa$yk2x6EKg!aOQS0g~N+W5Mt2vJ|C`Ldv&%rD~Zl3*Y~PY1Rr5S_of1eWOl zMLd#R5S@A$5u-Jj6l{j-4*eZ{J@4i^+-8ZU#Yy=~_ueC8=2%+&zMf>}b6>E);T&9u z=Y>iz$U}DkPNypzo$pGjOU@iM9a9>y4@TiCLmkc=O+l36kER1_URNdKJxeB*W+%s+ z+wgnxbZo@b;^I4Oog;_*dC{o0A}6%Zrv!by&+kTdx@OxbNprNy7SBW}MT}3T^3^>d zpf<#coH`_Gij&Ngj|2MxFsOFS#bS5%632OflFOBp(3R?$RSdz=L=dk;5^Ll2nvpNd z`qr*g=od6@{ zavBye4h@Z<_kNk#Imv-(BxUGf+l!DTfmsMf6@S;&J}<=iyvk?C8)(A$fj25EI`Kfy zko^@lHLbGm3D3#dMUIEPfJy@OC>H;q>+8hMYR-y%&ioJ+;-wXY*+zH`&|ftQ^>Nh zMKgI05G}4ac)b%&fuH&8IS7pZc!^_s=fW;P|1=RPuQ~jwjfo|g$>OrruIa+Nt#Y07 z@K5fRgvN4783i2d`50GZOu%7Z`MeZKNOL8nmy5^qj|-quENBLUCS*t&(aU6ihQZ6n z-0@9ZM!*eG|L3HNMpAR+Ca3SSIpQoCAF(GbNM7NQXPiV$M}Y}rCNcPGxkkG5l^MXk zgbao|`}@PyD7h$|H$YRaYj?ttPLD6`$(<9)b0gLOj`#`W(6p1CC1GHq~h;b?Dl*z|A$aaakP?@7LOwnn{OSo9|bcy zv2S%wqcJ045<-!|!Ja)ZND1PEyrCG>+pMfY0qexz?2Sjy;}zKsS3B%RGk#LV|bJgtS}*x@U)N~L7P%&atz@OO+4$pIb46CJg{c+HAF97W@kK5uOM zcZb4CEm*9Trk{~89dO|cLjH99V$c$Sdx$({t$nX(?&rr_rZhI@yMjfl9)bqw zN&SZwL3-oqe50#}SeTfSvQR;-PBLE1u!+j;s7KDd!OvLu_@)R#SA2n9Na8W_8w6HW=XJquiA1Vn_V!x0+qZg+H>>I4><2Aj{II(4Ddb{B z^j^z_kkw9XwDI0^6BV{aBjs4YsyI_6%<@-E#uD9dm)UzQ=>8x~>zlRH9KREi-I={s zC|O(W`&cJpQeK$#CeFMkGxe0q9lc#QF!}SEAv^}w&o9LaO2QR)Ec-5lq|{7Ywbjx( z++HN2g+03y&6@hcF_>W~l3!3W)E-G0-+yL%3$i&W6-|W&o1=If%u6Rc-(}oup5XK0 z1bn4Y&&Wu3q5i?a!7+W^I+%_~-C}~tYj)e&^NPGI9D;#7t`Tx3kxD~NUCmf;y_^)y z{S}g(tsM|2;s?eUh_^L}y@pW>sBR*F#)1TLkp#0Y772t5G0L@d_v=vquAt3o&V8CqAu+sf|t_MO%GnR)DWJp!rkd<#xyTBnsl75La8A8DvM5H<4)jJx8nwqT**Qp|y7QJAXH>vh$2 zI^Oji0GI2XW%uGSR;Kw|_nc&O25I<>z7KQou>DLHa4;cE_CGSts?n3Xyc5s7Op z*9UpP#0Tb8lPE3m!Dt99U-w9k1Ud#{2wa!wn-#&`-=+YRxXP*;(^iDpTp+voR}i^3 zB;^Vf)vPiWPF%e!CH#@2a&8c?lVA*gQOGS3h4P7HfM21Va z={Y6NDfBpO)~NAOB$cErMd|d{^a@+7gSuGy_YQ6QqBZ|T`$VX&p~19LgP!mMIX>h> z47nuoako_aK`+dx>$Aq279VzsK!{ekI2sY>I&M-R)LUmES~FvhU=IiItVXP@yG`(- z5A0diD8-kICgffZWY9{53AKLfYZ1)1|I5f%mP#j zYs@=D3J*+=*TIzE*@iep^M&{!=rcv2;H>bIE*hoG+I#fvp~TIZUY!Z(-k##+KG#;8 zC#zVLN&qq-cR0btgO#*#CK6-X2tDt_p-PUjJ z9To%fgT&xCRbS5o@VJVBH(MgqE{BQk(iVux6hk-(M##g#Dp|Ci z1K;fR;7%8IOH4KADMQW<|7|+(W|voztW%P! 
[GIT binary patch: base85-encoded binary blob omitted]
z+4l6}(VIipqZN1{0_{6`qs!-NyLhz4ADu0l2h_eJP3{-9qdcD1VQja>KXz~3MPg~? zw~86NY!Md#YFr0PP{Fy68u-(`T7lRR!;=v`CP);7gsoPI~uDp@uoEC{l?ijJAoC1E5lYis1fdNKvRrBH*yvq}g z1dIHX?3Ap=dXao7M2!91YA~3_pD5r--6ekm$2q%?4rIe0DF(maN#Z~XM3WA zLpDff`|LivMc`GoxeR3LtF~?(r#w&$KdyfVdGoP6P8!zj5UG95E7Q{;8?)dg&T9X3~O#DcfAwm+w>|MK#VdRRpzs z&7LxyPnL~uWyKj+_m@|0>5$fT4A7nu5cUB`N@UO>rf* zRBO=-Uo9c!8pLk`I!|pBZ0fN33!^TA#V6vR1?g5*^x%;2O{GGV0CKj?t_((okJo$3 zi39SB`nodcdXXDl9?I-vJ@#P5)8ZIEMUym16I31|sd2eokj-Ud(?rq}9ewdO@fnCl zDY8qYQ=Y+Gz-dFxc^zp#!c2JvVDJdm4f0aFR-Bs3My+nrauu6_T{T-2Ip&a7 zifmm{1%woWyQ(x8G;x^=E|A_jCQjf2VIhEXPZ~($m0I-TOadFA_aNIl`*Co0t1^G{ z+-lLs1egj7U4JMzULA{JXgq}r1?9*;zPvdkBo5=n6KeznoelWMVrd|RKD@l^?#OcO zQ+*Wyt?-wiSOzouI4Ho_uCz4Eo#Htqy+J?74UXfKEA?xkofS%M3vdSJ1EsI6!0tlj zj-ModOQ;xs=Or2NzSi+SEl*Uxash3}ewRXOcYC-FX*T#L+Cnl67Wviy=Q_;5_p zW*Pf>^Kx^{zrKN8m0kvy3$A;0&65m1x8dxQLX8{`lys(|a} z7;jAbjc%DoTe4M)RbF7E2Q1N0b_h<<2!P(yp>{gmFWdqSkS1O*s%U)s_92!+1UH|Q zGTnUq^vz<=*Srkix>c+x;T|Iq8LHlCZWEle&~ie?A9Tx0lUGR%Y_8L`pca9hYXA`f zj}s(3&bI}to9?}Z72$~@I}e7RjJkifYAxU&M03eMufd}2a@8Rg z%ywLSXF`u!>>}uJ&ISf>q^fmDhX5x@V&Wlm|I=zdc=ioYC_50p3Mt+-2UoS(gKtww zHh!Jg$;6G%+g+)uf?-;#B(#*gmvjf=$2(@+T{dX57@qbf2Lj^UlRc`c15T|fzw#cF z>eiJTj6Fj@lDgK%$(6Jr7ZF)WgJuu-EB9+hf;!nc`zsclHwvGDyKyx()i>W6nUw}i z`HH^^i9c7w(-Z%6WskE}KY}S@k~#W~UiJSl_LgB$w(YwmO4N43x0t!e< zDKUU_!_W-^N(`Vh4B^l*ba&U#-6%D5cdeUez3*E8_51Yy=BDB%Zq9vQ*Lm*8e&hh- z!U9Lnp)$oyo-x$|N8>pCCd5}^qNt62b-&-Q#5y_XE6BMF4q0+KHKtk}= z7^nWx5+JK$2EaQXYd@VuG~HwX^#K&w1ipfh(F>yaSf^EXM9_egj@0$@Fw5qet*qF_ zu&10f@f+SicyDQh2t-ZeVL7QkIz!ytU7fT;y=hGMFcp?4e23f`;g_? zL3&R$z|Z_L{K~0Pq=md0SAe9h)N}y9>Vx_9?8}Y!3gy{F+SS>L$sKi{bHTh^fi)o` zO7DxT@8?|YVs}3*SBRP0JwY0L>u2msu0MmZinAZlf&^R_hX6UbK%M=clu;Pj>hV>q zWjw>5CDcwxBJ{5i2*}?EO9F+daOwPcQ%!MpYPOTMCW||X4DF$_HLp{HhHJU(idJ- z(4R~-V*H9$C1&!zEVxUVs(onwwwtSo^(b3CWDl~|XR@_W%s#XZ8K4Afm?dObF@Kpc zD|f-zdRxI+LPokZR@L_UHF<{E)j1jwL7?j_ zk=~R=;b7Da0ggg*t`abR)l_iDSQc=Z`38=F4N9;-+-vSUD{RJoY)#sB5NZj9+nW>9 zi}x&%O17gu3E|tj4THPie-wcwnxnby-Utg$k}h`~$tKj|ZagB~=f;UtgG%ZWr5Xw6 z0Kk_JJ*SW|;Hurg_6CxsJ1U@iryhL^i>n>KE4_0W|2lYmzWk59drWefYOHq9`PD=WU@uW zOUjRxgqP#F^uRg4Jt}9%YmJE+qLTn0m_FI#!D>>}8&9pNK?2trY!o^jAJ%}N&0Wyn zZUsr{knrEY1hGV271-kwy<AfB- zd!A=FH?^Q-6!M+qqd|#9&Xmz@zN|j>oTH~AH`+ucs_ohGXOSuBQQceo0hCdy0T(~a zr}mrrV&dH?(1(;az(pnvrnp zO;)oq-P%4?9--V*Omq_K0wA^wH2WsW;k5sdvqx{0L~+Mu1(g4g5X!&3B3N(v10dYF z_J52%Nj8dr?4t$Ul*(Te<-R*Cdv#C7zVN-Pm4Mma9C&eH-P7dXgcY1Tf*sZ5u;Z3J zqXLe0pSwv7kb71!IVr-&_L5xjSCr=$R{M-}+PwYAfN@tXY-m>P1jSo3hIe&)w)>@R z)U`Th&p@pXQ67utf$hydy3QD`(zRrD3Vk2xpOK8XuN%#<(IP5rZOSm7>fWdvEtiNP z`sX}1Mf&`eMxne|{W-i^zFQ?bJp|VpjP|`%LT|8WwSavCMDXQ@pV87{pioqe=`DU7 z_1=#o@VLbVXWTy>t|(JLM=MNnEFb#=I6SD^W+}_&_yKPXsnk}3@J#C&$Ki!~*GuXW z#$h~;yr~LR1;C9;cST3db}}q8Q(v&h62dSW`1nrxP|(HE=LsGvQ2d2MH%=3Mv9g%Zl5c6N%e zRF{l5>?cti-z4+FoI{_X-o@&90{hVAx{OGn-A!`K_mW=ErCe+{%m5sMhu;3NjnaB? 
z2AdsCx4wt+#4fzQo()&VL(VYixiRkrW42jW-3PXu$4^TwmYO7B0PA@Q)OI-QQN#)F z`~$(`dw16XAdhVYe7aeV&J%Hj#e3e8o(2T@f%-4hT!{V805!VE+IDFD)x5l%|Gf+D zL0bHK2?&n={)#W7`jv+qyMdohOYRu3m68sL8F-h-%hT_~{>1{#+h*oBeLVI2i#9)*cS;eL(q$qhEo|hJc+~w!92*XwKou-wS9) zpL01HckoFIr{+^V{GEmRe#96(+0aKBve3NGhdGiz$kSeFv(p?=SUyze@)oFA%`+b% z1C|U}LH8*f?)dWY0?avMb1ngc&@Jl{1oU{k`klxe^wwUBa&xw)MSW?MXf$zCOjcAc zE|KD4hW6(d2KrO1WMBv*3-9)lF(72@Arg!`N1xun=4YrswN^GD*n`t7;#}0w1Dg0D zn7e_`8$WEq&3+guz+nGzshM4;BZpz>%A9sH~I(lnei(#^$k=17{;!^3(xa=JPo0XsHSo()<$?Oo}PQ5SBv z-vf+I3UW}o?0fG+;T+d{|J$7e9duR#J=Mlh_{%f_2bL zSc)KgBJW)*F;i|`Y(VPwO@XmaY5`}v_rJzSURYBx=!Lt|N&K<;T;=slMBIMRb#FF1 zC4QsN@KxWr4gIC>rCGlt9!qu}CJIh^4iyxo-9nWia&lAkV zE9=r1!PT{%sfu2UganLcd6^xB(@KP%cP3uk3O-Q~&bd6k+$+Z5IdYvyG8wOGH`Be_ zf$&f-H;Oph$TVKN{Hdr3a7@*W-8b&`3KVqlCrr-oW9k#`69Gw+(qWpqV-EM0nD^?= zHhQuyG&sOsC|{<=cZ5>#S7jAdV6M(*>ycmij8;RL&uSr?qTZ9g#kbEp7`@`XPqI_s zH#>99Q##q{^|%BcVL5B_2(N`$r+EqEE)KdF{yQG{5XB8PeIigulW|Dieb7QEA_@#m z#)Pdqn6lI*R4}sSa8ulk<#32@WQ@CrR=koyw{oE*ywA5qwxWT58`f|iIre5^-zW9=R)cXLA|?G%*v5gwj&V*oBdGqXpW}4Bf#6jG zx2eZXtCUfXOwM&(9mBMZFRG<5Ca#Ef1W2#b$(=l5R|_>0f@3y!Ue%7ez%vmzZ z!f{*WxTfZDwTd@qfr6LPrD!a=g^nPkgk>%}_)97029mF&G!bA4p6O2zTtic3aKK4T zRea$N&RyqGmTYRVZe@%a^E2iAPx(^3zq^g-J1VEco|q7|e#Jz#1Tj3fQxPB8!>YSB zDy~7t^cH&1#a&N_0($dp~L)fMvh{x4Mu z*7Z|POGsaywSejKjFr5P1!tn=1k)D2U}|8)ujR0$rP5@xm`dYIW!;wwD;-9p!%R;v}4H0w~@M?*JXzuRp#5pDpWNjV_Z8JbE zRuUI`_$di!@w_)1L}s1158YY!^Ds?FB#d0_Y^v`yxUyt=utH?T+%Ejg(2btv`paWsI`5KErqjVe&LAnzV{V&*}j;-N;oQ}x|3OVm0n|kQ*6Hg;v z^$^bGeRy7CNP3!Fh4;R)D|QH3P4nH$M)m2V+1ZrjxQwHD>`S~@&a<;Hkr3^qT~fO+ ziyl<`cPkT_Ilz@AKWSp_CHO1Rc?3Up@bBi-m*HW;a)T5MDVW!MzS{ z2TXD1eHHtcbf)%MO0h}n13H64KsFN;q^d-AcFpT{bMXuYOy3W8#S)4Fz%K$YsSaJS)Du%g(Gk&<32X%G@;~XxyZws30mPlN$Fa}1(rTi^~2j+{iQwe`u8{z>I0lbEk;*G*&l;DUek$SvEr|CBmJwE zT(6;-gs+%Zma6Qses=Z`SM)mNcADA5s5gHHs$fwULBa_#A<>n$*F=UhD+Um?ci{)( zJotJd-s|61zO+a+Jwxr{WANT@OT?Vd>j0_Wm0Z2W=pouAXaKAH#$tnevoC_n4x&9H zU+*@n>iwrJ#ASNSk!TK+7RY#BukDhoQoYVhTS?w<`#iYYH>D@sy;DlGTW4>UbvTtT zBhsy70A?;#oS7_2#-!5rC|02Q9zeu`#nV`-5OwYBOeZC&J5@h*@AfoGHCy1WF0L64 z?f=y+=m*v5mx{c0G>+A$2VIk@)PJL9%6J>JnzaY1D=ygfhn8PJ&zr>5ZZz@JeS_J(TsZo2|g#H=uMZYo&lX8iU#-i7fe)RMbW`n7hE!cK5RX^gx zWTNhlV%c>K&_#A5#c%7!(z&lkijt@DKCHy;FjA$j;KD*{&iwCP)CPb(7LXNwkoJVAMUy*0~NCn^Xby@q2zY}4VGK2KYY z^~M5Ep24DoHBmLr+fR!x{-mX!gsI@vXX81)-m>%)&>b^(k z;0{;%)f4S~th-VExssg6E$3qaQG*&5_A~G!0!%&OYB=0P;|>fS2L95H`ZXA7Xqc&h z_pTkYkLP%RqGG88x!Hv2dst5ce)HROVB0a#A8NN$8Uc*cR>pnHuiNZe32Kk!1s5{< zX*uZU8ZyCcO+X$Gs<1gEC{xk=KSL8oQ>B?fW_AUcy0`uJ%Tu&-yD1v?w#K6`BIWeV%qj>m{5m` z8LSY;*00crOnDQ#^(I#y0Nv~dG0-o>QusI;gIJg%Oe{)<*y6^1P~P9Z{~WH*@Y;SF zhdBWg{|MLOwn15oh$o}mvWJpcXxd0dWi_H2yv}Wk#*FeeiIHbB8%#I8-lOC28za6n zaGYs=owRKB?O^#P>2QF5o2l_}+4G4KOm+IJHz>h<5U{s0&E zL4KAf&2ir@BrbT|fh8>d1;>W-)_5y_++~JJNa;5X4Estf3 z>vr|U{HjmW_J$wl)TOc%7|D?&c?Q+IMS9gg%_gzmXxixT*90=X!<7)g_dI>ZV;!{D zv`D563f(0nfpiHF-aomsVPVrNqw9oHB$w=DpwG6N6E`~)W(FvcLuhK(gA#8g{T9vR zf2&(oofRZ z$wfPnA~*f5`L~8~8zqbh7S`CQ`$-I$#G4&VsmMe=^{W%=+G9+GtKab5V+PC`TO#>p zT;~$2Bo#S00-MTwqEJSRMhNA!HIc(Frh4@wD;g7j#{xCHzgBRpzGC_d z)>2F6FNez2Hw9COqcSON8bXUuAm+!d zx<)vSgX5`dn1(Neu;==tk}BrewL<-QujNQp`wpiS&#zJBPs4njQ`j-*ho6~!yuOSb zQsA4G4DV0B-7M;y&a0d*pJAOsU&8!|(v6=?J>}H>8UL8?Y={Tb6D8((7yi5ZfbasG zSoI)OHS_b9_Pz1J7W26Aqwg2<-38*5#s^LO*!JIUBd>_^8+h#r+1xO`2H(o?ne?^M z?=by-#X2obQcg(vq${n-#+2172 zo^fXclP|M>HHJm@qjDNwWIRDTYB#YwoAhiIqQ`}Yj>|zAc5ALDrPJw6_Tqg{nH&T7 zqf*zilW3ubrpITJ~sSd*@ZYh&jtJ7h^&tcxFgr;n2MIy=unMGA9nh*%#u zEd9LZ={3HgFB5w}ehN;om$R#PzG!M5HHiu$g5?$iQ)Kgv zw3)=tGz*q|2K=JkCk2;A`V6!%HF4T9cZTDqC#%!twCi`f$OFgp{1W&+n)!G%kcrJ1 
z?|Mx(V(Uv+#>%uhJ5#!nl*$qU26+T~k*f}+6Im5HN^O}`Wxw{L6`+FbjsJQP zp4PS>l!cvPQ1XjO3nBXC1;o5<(N6O}Z&|eR(voqZvv`8{$>a)}MoTSztC5LCuK~3X z74c|`xDEDu^4WG-JuKnBjl~AdT^r74aO*_;@!}z-oSo7fj#cN|;QdNq@9MBlxEEnI zuX)zi!qJH8*_zJ489Kr$VVveByJnr0V#U$BmX6=h<|2dr9{%X({oG8+pryP70tPGo zn;g0A8yrjldSXYWmIzh`2P6MbQnyrzdY{FJTg?Wa>vR!ti?j?f7o(r?%PcY#wWz02 z@SB(FTBQgzDWn+hw*dXFj;Ux5GYhs8T9{%&qTRR&O1tZg_DGiJ)Z%za)Bb38jcexn z=$axUzv~UvyOmbp?X_?2*ZA!E(`nZw3Kit>h5 zmlxii#9zhGKm1~GC5n-hn|(YX@EU?w7wR#1>Ro70pH4!hT^a3+>VxZvmcl`zr<`Jy zjFMBbXI<4#?sWdc;e1oZ$EpVir`~djl1yYFbr_x}Tgwmh>3xFo!LINtsNAU$xobU# zUR4ym(7fwmz?&Vk;%k#PY3<%4mwZOkf)(a>mRqBFI5^1g;A#pg;Vaaku?vk}yjJ17ll89+4$`|!lEOR%^Cc7Rf?jblRE z*Qe#kDAr}~?E4#Xkz{RK*S<(6w~qUpAb$F0T)L=8s>{b4Pl#}2u_W9sqJr~WzulC7 z?L{oE6{V4uX(v&_$fntcH1>LuL$*HmpiddO|EA_=EmtQJew!+xn9?;UAF<`Ey%;3| zdLI=Ga32n-lL(!qA!IN`LEJE4FVn#miZrnhu$w`#wgf9lVJR`)>qu}rx)5Q@EK)I* z3>TkMHsmDcfEj5y)U~P zc*|BH1T+~b!*J0fiU+1>jj&ewt_MfxCoUS(LFig(NeJr^Za)DvUz>wamBxhkv+H6F z_pYNNTD`NXj&wAvCQrzA`;crp16W|L?vy=Gvk2Sk=6XL{Z8WpjZBiPHW=3Vfsg{{J zg$@?cUW=gCtQsYv8jmt)EEv(*_B*JrXw10#qa(D=RBz*b;s8eavx`E=4Olevq@R#mIO(0)m~)DFWDSiE(@h-BIQb ze)HMY+J;ZevlNm!*P70a)w{$hnfAPW@zv?C#`e!;#?XbWmuiEO1Z-P}IDn4a+CiGf zuk_yPD3C~?_nV*e>}yXE0=HGPDY!bBiE)!9CSdBGaOe`j_##)dgR`YV@b$uD)h$jP zLFaBhxnp5XE?_ewIag(qT~#FFv^ZWc_FceQ*Q{WEbI*#!K+hX7rn=H(zVsUwcD8rK z(tP0Kg$-=URJ$kD;n-;-02ow5Xhxa&$oZeUpRCmqQSZ&zNkx?tpU2L%h5+oB{iIA3 z^UmT=ljF;jL7!#8i=t3o4)JR@W7u-*_uIWkkNmyFJ99 zZKNAeElm8$kFKQf^;zdTRw6$Fu|pk02>=k!-Eou&+3_3&T!AqDImnuB4jgUw6^9Le zGL~2e+2pkGV)Kpz@K^D$@@vIK6*_C#+7lyR$b5M$!2RLx*9!Aqum0=5dLv5hmV81> zzh&l#x5UdEW*XKg8KV4J?P{L(Fa6e~f>m8ui&EC%FUWb0a1A6?t)e^l2f+l&s28iq zMJxY(tQ75pAh<>=qZC!asbmmiU|q85#XrxP9IIGqk4a_M$GIH=;&=Q?wSknGpt{{+xu;u ze*2W0gwy?G3i(${ImAqjK=1E*x;OZ51c8{$xgwUv$mNg5;PB;0_S8Of_fW9`k{gGR z3HNYt=c2$~D(w^ZKs0dC=TlVj!=T3^UHY5>WzTTZ{HJ20c**@NNrb2De!Xbh*7;Ki zq5&PYhY1^WOM1Z`Wg7m%)zyF_19#&#?3wq%Rvs(rsEY{n#p2Pdfd9haQB}(JNoROE z$HrpMlzL&GjLJAQXvX-d>eOJ`*l(6&^Y$*k0pvR{zNee>{!FXGKlTTov*+YxBNmW%=_m_5=<@Ss5&}Aw)k`ZjwpqQ`BxSKLI-v^r{d>g< z7#u;F{R0KBmLmwyx}ZrJ{emJ*Svl^;S#Zq_vEIP&O*LV0_rb$od*{Ko#s7B=3NfAi z_vUuNjcJx6TUpm2N*DdyofgxEIUpAx7H=9T4QC8vz?cm`k;FWv1#Wtz<__x5MU%5`Y$NUc=Ylr z?qR+^n)nj8tr=qzSrPJo2-p7tu~>|m{9~TsZdcbZjejq^|C5XjAbt4q|Ni#>`-8V7 z@BVW|{vTg~8~5Ln{{Q%*|6io;O#wjJzKrG8N_-oHzb5n?w=D;&&!sC|hfNNu`$W)I zv|H5^I0E|vxiO`g^0iFV2wVg@%WUy;^#%#74TPb%0I18>%%;vZq-OTGl@tc`P^ZMZ z7i+Na1%4$}B^oZvxO2gQ8$h!2{mDx{FKXEgc9{CkD;OS{8 z7Ze$`AR0-@Eq;ocj0cKEWN?UgH;i%bCX2?tKM-{QbgHsD&-##97ROPc1jPC>n>vK@ znJ2rec`K{HD^zHEEQp!VvEFL(zCHPs!x zWPza<(X*|v8KcSNOzCIyNPw01a0-sx5VcEe4wR1tM-gve8S}=1fVzJE4c>q3scFC zdln+-&2~$O$8-EQ06+AT%b@A~`QF0Xajsw}CHqHU6rTAY*UB4f;w35o)CGWZLDN|Y zvoGZ(E$~?l-lhxu$iELOniHW+_I8&Q|U|^!VN~ zeFR`;l~V()e6?B-i*o8uD1a{K+>IHsNQ-)o?&Xu?g%%1MViC5*PH7Nd%Lq6CWi z>^WyZC>v^rj)PvaF_bnI`HPipaRwTMpzA)VmqzRx#iE04gxZ>AmU-VeBkHt33kvtyoG6!wpb;ND z9N=CYcUbHZ%mofZK^{3%!1uY5X3P3@;AfywXXwhsIwhngoZ*rlh_?XhTw-NG+j;wx z5pov!Pg@ICOj(MFp;fjE$osz{&_G9%S4U_Rkcu6Nn#iqjOGzM3b-7Rws=zFt_ZmbpV_FnB> zr!g{>{-7$~rFDPtx7@z>#;o*WKHriAETjOrxNFKAoU8FV6VD`JV^W}{IROCWcZF8| zzr1dWV5w2JW6I&K?*;AH@j%8GC06q%TMcqZ1R)q7oAW4xUlPYL|LZ z$p3ml4-_T;pl!f5k&n;$^irLDgM;h-g>mJ!4;!8-&tU^txEn5F#zj}?Fjn2kQLeHx;(dnEgEol>4d9?c@R&suV%hqk z-T^P7*>iX)~8Sg4U`Z`#Z-@B?RAvV#gPzH+!r zA6$ts{B^U(X%fIP0c^Pkp*y_2j|=@KyYMFHOeGj}vB^^b&~2&k^{*F_M{~J4lYYg${44BsM?Uc)#LKwj`=*>Pl4op0 z&~EAAumd3HQVzI=9d7Qz^R0@u2*x{ApUY(d93J(&r>NaI zSL^EZNL>B8K|e<=2ExmP-d0I1TcmZChWl$PX{v-xvXXpCWJw=wxg&I&&kUKrs^YhO zP+>7v;HVx0XbN@};^Ms;JO6`X}oJ7_GV zDXsMa*H4|M5-UNwOP88IiM+-g>oeLtwD-;_+w~7obdgB{Z=P}m&C@3*`L?^*F=Ai1 
z5S~LYk?lfazX|W6S-hIXLi%hgR@7Gb-%j~MWYOLIcBg<%|764kvPQk zlEB>P@R+Oax=nMXseD~gOTbZV`%T|dL^EVFpf7p$$qQ>C8UZ7c9RBjMFTM$rx zU1mWTvW27Sgqm+8ld;*@yH(_=);^lpyAU1Ju%VC`t1?XGiX^D4;F>|SW=Z%L61j*D zBPfzgb=jL%7n67$r0Od>BUL+FzpuQ(xJcf7>AwCXC$+A87aNeD`&&VFvA~mO0$!RZ zG%ryA6dkNe_ID`-wxKpUde@i&^spe~?uccb(8CzmVB8zlAqKCb6JFOX260C})4^2P z5TaeJ-HbxOQ!T#-p6Vnw>V~!t>D~9!C<)rb-r##i-h3%HeY7pGYMDvd{~})9rWB|d z82x-2;;5dhqFR@?itGLGpfB<`K|RYsNGh#;qOY~PPxHF}+o|9%lX%fC)JCwbBF~sE z;+rOvy~VfWxROU$rQZFld8BFnY|d1IpMRh)hF)*9vbYF%=r0=oAD-2)umMN}KKj!v zHzxp#QD6E_i?C-Pkpw|5(#xNPsYU_}<_7i~y1%%j&&$TFf<_YYSmAd2QaieZI?VyJ zw>7ME1aNiC=7F?g65o(x|HN0gqRk$+^1|kwFI@*kD@^8!ceGpZw&t!MyWH?3k@k~_ zX%T+q@Ww)phNYh^bIYn#Sqq~8arjL*0B?>Nc;2fhO^8^|=CTB7MnHt5(`Am&`V4y& zttJun*gxj|yS!A_z&T34l!RXu3nk#@Q25JYSwS{C*%Uc0W7$;6;uLy+n$)&Nx)M_J z{d2m_pAbLbezc0my+OwL`E_g)W}H8bP@Mr0L+2Fu#0{Ng4iHO?E#7(x_2^bBN!K~9 z@!@bpVRO_&q=h5wLzi0!Z@WlLO5)yJ#>3x;}AHU-!P?I%gz%Xs{Ov~PMAoluo^8RUPm5V8YW`TCEFREy$%IumTDNl^ub zG0iq!yXw3E^wS!HhZbhb#K1;^s_caL>yNB}zg-HFBCfaVUdos~j~yQxNmjmdo1V+~ zW$HI}#b|Dx0?Sunsu<2MN_UL~P;oamgWD?HekvFT8-MT~yqpSMN~nWM%61|#QNRWS z)D2LqX_!#8zP&feSL@6l&r(;8)v{yLt+D@9AS)J{zKG`zy+J0u5!o=w3Y}oHPAJKz zTU}q~=&Osp1z3Ab0OOqnE>r;DXyZhW0$w3Kbel7|J1Rqaw=WHvFmzp1)(;RLX)4o` z1??Pbp~Xjp{=Gm0!m;bLEJ~f~l)>R|#~M*(cb{g82FNgLDVR>-3AX~!&AiSR;TQ>i z92aagUXoWP5-C&?=67v;dfCz2EF7tj>-kn6S023odp)f(3s^0WmafRr*{w13RHv1l z1JjnjqQ!az;y6&H6j8(kOAN2__H!70&^%;KHD2HE^;Eu2nus@WEY12#^$W@%vcnN+ zirOKQ?8Y}80l%Eg&9~*lz5_qS5BRz@MMb{R-a_ zsdHTc;bLEF?bkm2)-JgpbU{m}3-#4>F|ugXYuRS!J|~*JGfMxPyS7o{$3cq*4VGf{ur(2u3t$fCAU1 zA1dQWH5RwG2sm1?>7~%h&7=PoO3N~h_Eq7?(3_!1kBF*x0qPiJ6vJD7M8)^%eGwg( zUQ;foAh1rEy>zOA_c(Ewd{0z+pN0@M68=zrMmAb7#v*cxpss5ei9Np9b&C1Lp5C`QvTm|GJ_oH>y=j~tdU~0Z+a%_YUK8n1l{4>E+)TVP{#* z@cO~>)a#$QVpH3&@XtX7=t`Un?zJn)AvSgwg`J;nqd^)VkF%SXt|fJoGN3txardCj zY{N*UiSusclI#Az!4%JQYt2HHi0zg&_NRw_PYH&<75UMrGy^Kbq%kWBWl4Gr9Rn2` zDkSbBgaFwUabm}J>R(4|FHt%C{O@ozawyrl%IofLoCLQyu-X3Jj_BlA`PQgqITAwo zkwA~%oFWx6`csxOAb0fo2(%zy%-|61I_vNM`@*T+Mvlk6WY;S@sxAb^Zu`7{H_MQP z|BlJhp4k6A-r#%(ywKqz#!bJAgNeV&b(l}nih4oPK+ZNF7CfqQ0IIXQd-)R>@|@0l zh4@;qCSWO@nVk7GV62n|vH^}MBY}-9B?K>I-IQQwm@FzvL^Pc>2Ee9gvnRU!q?ubh zM@sq%n37++S7os8jnWEO3xVvHxoP-as3Ou1Djh?8GjU0o9ivuvX2wpc9b0(i$^D;W z4Ul}5$$1)w|5ox*nWMKB#FFabq9Kb#fpYjDI?$r6qqJ|+BP*pl9B4o$7a$PvixL^F z05oMTE@SN7|f zzmW`4D_(>N8@TSw%YPteU0JDf#KduU=#siPkaB6Nww~*$YXB(Wrl;jc>FU8=Q+C=4 zfI4U8=G&ORNmEsU*|Q=0CH1}xS)5=!o`7ogDb4YEyLjRh4HPIK2qlS!s}0@jen~DQ zHrx%IbRFH_;V9QZHZ1@WPlu>ojxYl?Vx$IiBwf@2GUlV7U!t}t1FzY-=LFNgMv@dZ z%O-jp9zDu>pBg$O>U|XRlq<3lod-DMxBM^WGtM02p^~wvrKKyHNK* zELI7En*X^QtOtbuTj>)^9!q>oMG>UTqDuw(V0x%5-7Q=uqP%ciPDB8?RfXjMpXJ;i z{;kDwxDyk8fXtZbO$P{9c>dFVEK6810V$iTN}+wm-y3=tQu@!$HcQAQ!wo$f;w|)^ zrw`Kdz})xjdtu8YAT0tQ2@v=V%+q6oI z&0kzu;}ZS$&VBOJp0*?!C9o`3BGO!YVUu(+?|!hRg?Z6L)j{@B?TI)JvJZ}E#Ddg) ztzLZ7&H;1@(6tOIr}64k8Xz1>o}Nmcm^ZDdYVFUO+6Juq@sV#&NMuuLq#l~Y!vErkZtwE0}QF*6CA;T!D_F&e#( zHD^{&ZbS&S+FS4fWx_U&6st(vyGO$Fj%$fv!k(bCvCUi!y{{Us7!{nnX#QG7_Wa3@ zUGVvr1?=3Hc;QyGTVHV@yENnN2_|=8Wof7j0ZiEEp;Ym;K|xHI&D4UHmMehU70xae z8(iDNFsLIm1ZV^vl^ggER(-i_!`?f>PI!1y6Jv6CH-h)%_YPnpcG$$jye%!kdwSOg zJ5unK*%pq}Wx@l@VV8EvKUrauN0RZt#@fCXM{}3h&r2HSw%nqh!|fl-(iZ$!;iX#U z>VCp5ZsTo&_T?ATy004Dc=sItStPWvK9XfO>ToD{x@+<0M&o3m`qxYSn{V?}&X1Te zZujr~N9O2fo&OBVuMm3{)C6F2$HlejbI2MmFzokiMA!}EFf0GzU}pQ)!Wm$6d9=Q9 z@|QTR6p{`+Rsmq6AA9OUmjcIAI$DrN_|gI{$r{sfZz2LU^&h3u~~3; zjK&31%e~f~cdI4?mVHHT*%>3@A|N+N7Pl_Vs(Ifet=)7XXgQucC@NTE|EGbmd-_Me zw_c!*OMiq2&o!HK{`hOVLC2;l$IdNLHDRwBT+yGW)w^Hvz8><_DZQYUwYfBMc+0}@2}llI(ZR+zO~34tu{f0 zOuN(J(?!;thhMV2=%m2*2M6LrO#`kyB&`(p0xk3|yCD=ktT&Iakl`=2(|K@gcr1mP 
zz)wJabkB^DtC%h)v}bE4iR4hb{nqz(9GJD>J^V=-%@L2;hd>3rJ0YANNeZyQF z!r__M=Ic5oeXo9tkS!8+f3&twaB%xOqUKJQp4G2w+C2UpC58e~Nhd!hOX;7^lu-itD|) zV|i0L{`-HBIuJOd4_z)L8gI4Fj4B*Au@|rGqQ{2}+)@ zny&0Gmj8;Mcr#o{SoGC=5+SuPo35`8NN1iF0r^7TZK4bH1fA;r76Tv90n)l2n1*aA z>4~(~?cO$dM`V48*1ss!tXi0*6d0lfPsUYBGZF}0moYBXxxk>Ta7|ax7NxU$evW5B$%D@}T!n`^`=%WC&0bU)X)ObSu`cpLdv~3m4iM z&w4Xqr804eW7bX){$3^o41l7d40u2MSmJ!&w5Z9LAJA?FjiH6K;&YUbWvV*OR$Njl zZ6Bxx-eC2Sv^0CIk;QW8Ck1vQ=)Ib)wWTAHCytI0E@nhqqhHG2>FCQs)(i8zhJ?3L zyL!BZ3~=e*g`>pNe{9re<7`TU2zlB|oa?(p)oA*=yiV7sa^OLtt%|%t8B`kA*L&xK zPtIOlQ#Gg$lEmG$Ub}1!kMRlFVaKiEksf)M(=*pKCkq9eNEn@fuPiSpPv(f0s7)7_ zEo#xHw>xWyJcQ2Xq9LbA`wah3;Vl95a+0v;E1XOm!keu?us^G$;B1B6+h;KfV}&0c zcaQjyzjXX+mQL3nYUoQF9_sSw`Y#&lr&Y_dl4X-_vKN0|fMH6L@BuQbT7goj(6&8AD{>(o=;vU0jl6 zWvyT98_Qe0)}ICmynlgDDMo7COBnqG>F~)_cOQ2BU(5KCOryeShdk+PTe{^BNgR*3 z_k#vs_^q`kRp!cJ;&>}HUO}>1GkkKJ*LJSZPN=}0|Tn_@7g(N%RxN3z(>(VS45~r{e7i|-dv!$6F4|s z(B*@i%7S2e%_MaKEJ(+ri=&7&l9Bz^y&HzjtuMYo6e%d)WaA2XVkr4rhj!MNmYToO z(6xzbKH2m}G)TYEtCwE@Jr32~_vnCnIKykXMZ7PHiydYE-oEo?PuI_Dr@BvDm@K4v@A7AA8=UWR&$7u~j(Z*dBEnJHitmrzrWeoGX_=2dz5|3+U@%nDSt@rOz|Bu1o$V$j&k)>$P2UuJ@ zUfXxcodr_<;PdMzeg%Eh$)cw?`kFa9zv4ubNWsP29IXNS_8jW3aynvp4BL+wlG6e7 zQ?z^;SvyA8&}u^SO3z-VW#{cdP9<7_KH^>6LSx|Yz_K zQ$yUM)bCK4VoqlSI5_)D_5BJ%G)!Rswb135fI*nY=(j_4YmqJ#owHtQFIR#?sne7I zL+@kyj_>w?w1UO#tV`#&heUXPuek=y{3@O`4rk5R9*@C8h_%nAkFnsAAI6PnLXgMX?RT>hmkC7MI#2( zb?ovfnM%q&w5HZo(h5NvaFpJ}Q3>?L>Deoz7}<1lb3KYlAX_JRS?t{pI&{$$60ijC zPPEf1%J9x-9rk>hhy*c z!C@yJfr)x;j=I2I(it0*bpD-R&l{B==}#dy<=HlDiYOjYV_GKr;9CO1>u~B!+?a*> zbDw=pS0i-624e@sU8B9fy73esMw4GeyPLkY6pC)mDrnX{`2;K(9BqRUiHrL6js}l5#*6o0U^k6u zkoSiZ-qAYG@}yO4>8-&JoS4CD1`hi3oSNW6wM0!PO7zrQGw!Z!t>5>nAZx3GMO4LE z9B)UBeyMB1Z6pzdjwxHJ&B{c1s?A*E*R)^@%&l8N15Unmgr6lZz$D!BgxJf6W zlghRNgwfb>z$}kOa!XQZ4sFB-*wifzSDe17GQpncTye1f|Jxbs72hVmMyAiU$r(VMGu^UdDu z&Dqd!!`)__-`qOJE3mjV(ktxtc~WfrfAo7`br_*Dae~E7QmWPuAYLZjUx_oeM67o? 
zt^nW31R{Z>S?UZeX4_?gn@k)!KUxf~9w6Q^l=bv0ApIp7HdK!^duFzSJU{wR9lexQ+c{@6eSP}aJWr6*>>#y}HL@xGV6 z!TJqnCk@-Q*R(vCx*1V~fXV+xC<}S_`dm~6MPN9Z(e?1bfqsGK9jLG@c3Vx<@|Kmssh1x_#I&=i95nn#IiA$Uoj_ z1)HkAX7|GPac%@Xvk>JS-w4!RD#_z}O^7xe`bd=$df2Dzz+Oj zcA1x*TO4ul371S!dgc=^Xs;*AgK%t%xT_0eMa%{9m+1_biLtK_{tUpU$hrTy(Z?y@ zw}>);fvKDm#`4Ewe4*B6aRqCFt`VL;)8W+uf|VjvJ4e+!Uh)pA za5?jW183$oY(32z``Qt2JrDTaRWRa*#`XZOer~EwWMVaT>#&!scL~E=BV%=%{t{+B zxtM3m;_S%9IfB26Fz7f=R@H1;W5!5kN}lspqysnns7N1tqeug?M$OF>wxiT#j7B~i zt+2b}WfrHh;(8n^@dopNoEhCeJrp06kn5NavDOWM(Dd89-1Ic}+2E8Uv~gg4rj`3v zy_3Q!{<+O-gwV0AN4ZkZ{alOfg3K9>ma9G%Xzzy)Q)RwYHq-Jpy3S@4zy1RScWybr zHc98mb}rk0yw}Sf2a%N(>@(be#)Ar|nT9!?!mht%9{hYArX*;;e0RVN8v_7G%Zw7` z(7(|nUtNDJ`4Cioe?weG`S6NXsLwx6+B*EXq|P{|T``-kGIWj8PFMKo97t%XmM{5U zC3_Kzs^i;`gBhD6GJF*CSg=Y2<}izj)Z-6W356@SoH(o)v??1*AK!dOAYk;xelz!d zSL=wApZeU5@~Q%evklGYN=Fs`E2?NJnb|-d6AH@$J>!%h2AA7Yn>EDI@t+7m15pwz zEM$RUp;nbxN)Y(eE=xGn>*JA98P!fbxifCt=zH$c(bpZy{i|gs;5y`lkN~`YOwss4 z1nva&1n4kDa~5y8Y`M8}-4_cakG;~~(ZWjKf_=O@Z_f0ZR_hx+X8-j%y6<>3Ii=$t zaps(m>G$bkdk<1vtaOc1LsIG0E7iZ`!(R#Lo$woNLuOrrTY||sxt#HEarMZ!UNTm@ z(;D+w0!)@g#+$}wV)e35UK0%LuAs2_MmrXFe!>Z%Cn-DceLQ|LuKVE84z2}@IAYf| z5G6j|j2UY*$M1}2kvw73ux}%nN~kVMSdY)vilQoS_~fzlQ(+;|cY!v)EET8jcz;E% z8tecF;%YRJ506t24q|oI0n(G_c$x%$&_Oi9v$o%XAPf)fiMkY=DK>1(5;!oMpo!Yf z{eYvih2QeZ^zNW1x1&}RU;aVTYPEVChDXA0E%DG4H{C=c@m|IV=LW%@Mq4)d>fBnX z8Y<__Tr87)vv(-fc1R;#Ol10fXvXu@!{n3*ASndJ>&DX35hx z$7h-xf;v37XVety%8Z>Ly-o~D5j}hTF<=0cS3y+z#0u6+zuyUl>h0^bbsj@eM6kI| z*>OK?*sxV*{akd2&+kGu*RN59qX^?`poTT@7cu$SkO<4yT zob(qu_zQc2qu5+kI!)zK<6iDwEN2*CzTGzJfpvHdrSjpsYyO#o7*+#) zZR9P;stzUp%yugoJJ$7~fc-m4&ULxY2yX@znoli%IRH&CxB2!(+~N3~7`03xryzGA zglZLFs^I5{1XHFmD2>3m+;l$EP4oY7wQf0G%IVXzZ~r3j+a>mo!w#bng?k(XTMVTi zTj8p&Ewb+)^p2clb;^HTkb z@mKUK?lqgL_Ct|yWyf9dmo|5=mnV*0xN&}8s)`a!`YHA{w=R=U{&BQ zWmH&)!xt6|L_^(QTn`uu%K;-F%EW0eYlc7~Ogl+8f?kG=QWVY#b`HajHaiM8uzrQK zyoB@Yeg=rJCX}SgQ?EIBw^R()2Fx$ z1^Y9q4c!i<0}^s?R%#z$G^uWTQ+v-AVkgZ3TaCBQF>2cS1)o^>PSO`5NYiJctDQ$S z?{@a2Wu%(@8A^OhK9zj&?ay9rz8E<+nF^*Ah~_q3Cq6tMV*%z>j{@H8-2plbF-!2P zt54n?{E@I#`n0bed2jE~p9GGvNCU`MRbqt>Bdo;G$-sBH{Op#cjN1wba6YN4F2d~7X{ z3&NpZa}Pi--m{L-!)A(yg3&BkE0WIZ#giaw_+`iQY}PcEY0G8TSPtXW^|4+LY7FoX z%#R)W3YqV0kJg_!i5GT(`bx808}64?5NL$|UbYh}b8am57IDawU7_^wXl1Is#^#sS6AY zS+SfZxpgO4AT=5umaP;Mff2;lUr%(q-GQ>D&aHiwbr{WIR-l}fsr^zgCjt;mtYPJp z5l^3!_E&&B-4`_d+)$*QPV+=QwrgO4@Ye|-3qzLqHuY(qxNANzB1w_;5A2LFY%-4N$e1 z7QLi6YM9Xb1&o@Kfc#h#uBJESh6~=F4!ThK#-FRo5Zut>41?b%H-#SGZw7Bv3IxC1 z%o(9D*^d87`+;6>A^@*me`gbSEh--?1P=}^;-A2w)#8!$0pZ}#lMVLqUQmst3XgPMSOdc!0p|u_m0iTXqXY1E{ zT<02Yt`?m`sK^z^o;2^wE%Gnj*`7_+*OZMAzBwRl_p?A4uk#q+b{uj?ma3qf792rD z`XceDOmFY3GUE%aC}$H(#~U`fDMT@AXxfBa%pj%bd{W9H_%v*t0-0n@QYTqp@9TB!T$%lChXGungJ{F%N0>zhr<6R6qHYK$W%okgq-urZ^i!udd^U3nzxHwrt4SWP zzQNF%KvuWK=YrpPKm9YtnwhQC%pL|bMNCoBL=^u8g%mFH9fNeRIk!i02=&7#fh4!oMA8+L+rq)mox)mfkWsrrcHH-SI z(>D`_rLpPN#D0?@0YsafZMfZznSu_JOcm=B#mLw+sUT@!6iR%3eguY z^U@NRc6)9~w1_(!cH#NXyD%fdi4wo@!8}$E<5XGkS)X65EGYNn2Ft2m2lT@ zoNRgdJ@49?s$3sIBANyK(+C0x0k0QLG_0#C;9mOnXO<`WYIKeaE5c2dZ%+|CbN)X3 zbPL+y+`Y6EC@|j#?+%B7H1}$2J6i3PKy>&X0D;W|Rb%)B!37Gz2a|vtK1%WP!S2CX zCK7p}^P4G<_U!olh{cJQC~zcCxFdurY$uu7lu|IGC((-T=J=u;?z}!uUQsBgV792$ zuXrT0zAu~hnrM&0IbmGX0o33BMik5Bh%(D;F(whVHmWM{8y2|)Rh;y3yDu^8J%81l z1LVtUFJ=#l8@)>u(zVOt%tkYwnXA7ryIKkPSpInIFmt2V!SOScI&YNND}3y% zq$zVmD0ZYw*I>?cIN6sdXdC7twlF94HPQ&}E7&HJc{$L0wO}5}svZ4}SvRPG(XguQ zJh5-GcC1Q)Ay2oSggre#oXoE;5JJ1$lFByVc#LB-=>-2VTkjoXs*>uV9yOnRxf~&M zS6s&J-V^yg(?5Glok(|`Ng*8PV{KzeY_1^9YMuYwz#Zq|J@UHHwcTk9|mQxw5ATjAud z66>2XW>CC>zT*Y5$?z~U(;lt7!9F~b`kNC&FJ)r}Q?X~oJC~zvn#=xo>JMifyuVbq 
zfJ;6XQqoGB9T@s-f^`R%0T$kbg6jbbauCBP%^#!R$Y|g3RaV(%%0b3iECz$Eg;r0B&{CO zzsKw4XYz=6Udz`YqMQo6?FXJZJg@<7^YH@x#q6uYF!iMLEi9_fb&RfM_w!ga7tbl` zU}K8AKdRD&f1j;3ylixrTOoRjw@Yt}P|gBGGobf?M^Y$uQdu8C!u9Tzjax_^4WhVY z%jJYYiALAStRocKX@bT&1Kn(dscP=q=eP5%--c4x)~en&Rf$326;(}?(1LXxjk*k+ z6?W5}(fcAghr}MnLSQrTIrtlQ3Z-K|;Z@&t3TOo9QMEFsv z0Ubrqg&b>OH9n?2?X~+s7R+C0PzVu{mWyVC3kGYn3|*@}UpMbhk_MDcT4LC5(AD5y z`LVg2GA}eXvAyTYu=4IIjS`(Z!36p~EPSJjqilwyv03tHUu3eF+|RF38RNwUI(QSd zhWy1~cS#kZAbq}9KR8@xD&r*UezGMiBA ze0)F2^#F`>R}K&Qn5bJ6l7v6mgFXy_7c_em7IF0^^+mbr>bk{P?wAjkS=S50FsL+( z=Ce%QYM9Do*@KBVN5^4V(3uJy0|8CMnvGWTm-Fkp$iucX|AXssL-!h!w#_Eng@tgX z*i)h>iixq?bB&U{8f4A*Whp|1{{o)4?`<9Ie5*6xGnr6vR z262WNM18fu{2S552{JYM6AE@Viq;$C% zN#qIZiDJF;4%@gAV=uW8(4R?{<^2p0mG#*#bG?l(7acIXBMtCvwP>PFk~maizN{!)V5xgbD6F(F0Uf- zIz#UT#vCQ44&zhpZqMrWd>5icsM$PRt}PN?9}c=OosMK;x#Tf@e7U>ivJJNz&%GDx z5zJeL#Peo_jzt|@t59h(;3EnvEa^3iwA$4mPBsZ~-WCC=awX}{twz;tGM0KY!v3^! z+}QaVN|)ZwWF9&*)6wp{%4oLZE%u%obaeu46qPgQb6|cwUWvkIe3^0y5eN7DHN&Ih zpKsXDZBHHuIcq)EzeRdJ_2QyY{lxF0V4r&BPulSGBT00*ert;&;qZ&vp*6omjaix=A5=Gkl9-f3ESUN1WZdWzTvhzNBz zEyWsgmkQvoHIwNZOA3pcYzwsPE;2O1-ZAZX|?h(qM*oG9$5 z_qdv4HbUiICmJ$2suC z41fF+<)wiBwf_y>Doy`Gq-9BvE;Zze!=_*N3P|=`cyd{4BOM6Lrz+_o&8`6A;*RBn zwSuN~KK(}GEPAR{J%(;xlikqn4HcadiIMoiY>P0gZ)Kxb=!!VSzaQneu+1G?&!s%9x_- zcn#++5@{|L%OjrR;6AhsVR7D>%i37yHyd5718d`*P%@{UJm}@G$*>gh7``isl%2_P z2I2J?F|5h~S~(fteB^2CN>dS9+}Mo3>_vs;P|^oC0U^?A$~#Db;ESQ zv$hmTrDRy}}2CNJoYv0fnX-zL0SK?tg{^%` z&5mk)y$vT6-9EL*{3{Gp!V0W$9|%2kO)hF+C5X_7)g23-z}wI%uxG6G^?vWrFIm&orwaY4;_`*`Y1#*o>2jhLMhi`^`9?&{ zsm+^WCj5vJ))*{nND=21Msio4k-`EF*;HINPi8r+h||k2m*%~UGPtFwsLsyEKC`NRA=wB`(C`D=QmZu z0KzSq{Bbtt#If{p+E`DYI=ercxvbL?IL-EWG9o`i8@bB|09%D_PvAPz7-NW4HVOb< zVqHI7bbB5&tGw0o+!f2h`<*W+r`mQHi7zK5Ep$Q5gQvf;8{Sl0f1oZZHthzb>Pf$sq!LJdg+ zs(AMPGdDtr{lv}Q6r=esV1HLIv;o!j)1jx7pjXaq>dD)WpBu&@X1qqP?_VSFw((I9 zy(eODur)5^LfBUoIOe;1=7wG1xs0Ay=n){PNXTYNoGCuUaf-cDXHQ z%OriUL&z2C0S}~8YJUrq!2M4k*iLZTGoOamvfi0*#k-TUg9Sl zep3%t36gwGi^DaY%%tdS;oEH#USc!%_Pg^$=QRfS7s)ALW@5Vq`OKGV!)6O7E$AO% zG2th^6^}hW?p_q=R)5$#;mI9VA#ifcFPMje4rx?B<_j4P10|z< zdCCk5xmgPE-y*$IXb_^NG-EjiF;zPgS?incKbp3HWxP%lQQta0_vVBUH+qxEX^NJB z|9pU0$h0RII7XIkfTz-yVL$F{_65b4u%241;hU56#Mm#u{g+l{yI&99Qmhc#7~HUu zJh8vO{iDed^>CinM!oOK{-?fc#diZ(%O!|B?Ms|hVm>sD?-?#)rn=}-cqbBg%OEi8 zOht&(5IAx(65FPifebgCl{0=w#2uBbG$$%~XI8Vjs6Cf6P->XW;!?fb?O{F#PSdnj zocRSEw&CH^Vg0(iyYjst$hIf1AT-hK&=>oId&ui`vgkeZXiGvvX&J3nRy-pv@rC_Q zs^%_8iGN9;Q*SVq*G>_#M3z0eRk5%K=Uos0sSxX58IIc_X0Ao;BZ3fIX#p}((f7L^@rsH9VOL-ZVNR*ox2u2%e zdrMFEK5GK+W69{?KT@^}(RpvasBcddIwT&!N*v}vUh0o&W%#ugX&G33^q_W60j}pH zX+m_se70OR&1QHjC))D_(q<3|6@sHy%>G4UE=y6)O%GdYnNt!aaeejQPc}d|G&VG# zk^VIP80g`oD}tWPMkVyB$|fP#W&?TA=jKScN5dw4<37>|_|N#mgWxrOaj*EIM7>1p zg_~Kt0viqPT2#~(KQOENn28@#s3nil(wRH$OeFvze~?VIZtC}9C`bEE1h+`t(V{v1 z-FIZTIB%SGQQm6|LPoSv3c6m4ms#P>;Db5eOvx4hr%$hq+wtMExPi{bA}~&%v4pjQ?UnuCt2@aT&|C<-V=e zTIrGuWHaBw!j8Jkaglem)}~e*nu_!$We+<#@k<{wn7M8jdg%@Uf6Mk8%;ytFUuKtN zM;Yi?=43jxr2BhBaZHrj69x7K!JMf%6nml$B+zW*F_k=j?IPSyq3bOLgqw07wJBeX z{EJ21vNnoF38Jw?{D|`MHc645BI`nKho_HhW-5}3Z<;w$Z*pwz5#P%$MdTtr*8;GLYPy&vnG|ykI^!c`{d$*ejrvH3fXt z-QWt`1hu9!iSQ9fYv!f#7~~E!m9?4JW+W3g2h~n%dmztd?~sR0H0z{#Qd8z&>fFE- z7fTpeu*GAwn#p$;%Tvh>So$m?>O?j9-KotxH^lmBdkWZk+3nC+Up69xT}ZN4H@d6| zd+kg;_rwnb_Pmgd6Aanq(M}UsBN%m|c6&^F(!pMavw!FKt`jF6 z!0$BHJ`wJS)eMyVB+Z{77NB!^E4KOfZ*x%rN=Ck-cY6Bqb24MxSv)+07Md zck}JJ@*s1AiJK=sk_fM2xFi@@&9EJJ#uoh#ECx?De}z3{j~hssX!bn$={b zgP`puE(?V=qSIm#Kq2_2wHjKpWN6V@HAt{`C+=DZ8K3W3j#Bx?jUvyZb#)&EwA@2V zyg(zwjmPUZIYh8VlEJVXy*{}1jYUvkwd7mz1A-UvYbB7rL>#PHKGnde54zp+s=BAm%AMn~SGc6NvWt&aIx{LtSXOuLr5%N3??o 
z;@fE7xI=v*Z^fYKzH}n0t^Ph9a1vNC@L|4I%A|?ngU$!xeyVcwy0j4r#J2lwYqI0o zt+|6sc|8_v74z84amB`zc2)EuL*@ zcW;I}3zUeFw@%QON;4EZc4ZoDi@b^1wyBh$M0oUXTjA$hIob~zKQUy13l`uYVtpqg zZo$j&HMkDdPG-Aldfy&x&V_;Ihe87H?Jq^zBKKlN#$}t5!UaG~ z6@C2dGVk5$EsYiI^S9uFarQeYY4!GCX18pBglY1ge7W;Yvodj=ieqj1`IhKZDEKaa zmCtbeQ!#Ehb%bn2H=AA1*2ZY#TU2lX(mie7%>J6exrCAYBg ziJQ~5fK+G+r9?fiJ`g5M)AXzG^g7@)1Zvmr@xTg_Zovi9M?95UtnIBI_(V|%|70HM zAI>lT$n9nfeEvh86xQSb^eg6CB9fPO+ z-hp)!<`Sno1k9K@WAbCbvZX!KUt4(8M{qWWNJBMkx!19Fe?KPnX>PuW)lKHr;m8_;3-FyFXydN}{%b9OIR z`QKDcoVI%x=CA*y*8_iV#n(3b<^C>C`|t3?|43Z>J^$xs<$wI6{~ImeX_$ZisKauX z6a}|gDmc;=_>Wl6!v-{-0jlDQT5zp-I8ZGK>Wn!6Yr#@X|K|tBeVSBZO7!^UXL0TF zciEeh_89-4$h8ylGeLns2tR1Eui>9JkufoM`jb$>=)sM5=SNWMoYYWXTbpr0ge(hQZrhip1fAN6AFq>_QeZ`wY7S)HoP z=dtMs3*~mR-MPKOG6J4>a-}N;dHy7jzyVhx8t6U;K(zn}Ro8q!@H{(ovI z_@^?!Z&rV8>0VrRcF)H()q9^Go%xOUqZ-{V<2!<(+a3%!&RkOXt3Fqfj5?z^;1@v; zI*WuDd1mrRH=oS$%xJ-DIp7e7gpfa<7;x9(wDsA!v{Ray4zENA6EllOvjJ({zn$+( zy5`Ndn&#Iw;(YktoHG=WGY6<0-~CVtM?<05wI-BmfBhU|qbC9O1c(>*uwxtMw zRdqKkq~q31&|L0yqubdxP&vt3<{v9lDDHKDIxT}+2PV~ww5K7uwt*X8zI%@pOpaW1 zjlU|sM(d1G1bCA^mDdt<+f1o*3Q%n-TKi;8mb?G4;kj+&Fa5b24N8gpb{73pQGhkk zI+HR>IjH?NgnfR!Y^DoldPgp zg8KH25AAY41^qd%{UskT_op_F?dAil_v1A{@>8f&k$t*HI=K9PUTDdMk?&}o_qS8? zb=l^5(ba(Vm>u9N7Ore`992;4!-S^EzI?7YZ?1Zy3Ja2wRidgtRc(;^l=-6DX|^Y7 zL&A*j9-8;-sQ&m&a$4Uy3lu9|LnHj3X7^8(zH#bNi)Q!Xtcix7M!6-pWxCiR6h%I$>m6_aXKTA?oALGx+o zQi@*O*Pd2OOb0##Yy*~>Oyx*$i{cN`^(9o_u$eq(skE67zTOo4W&39&dqDoqLu8f{ zG(k1ygN?Ow@B8Mf5-OkaeaF||{r%Q?6lmJY9&O5prXRcRE;KET zh;`;HF6Dsxp-`CDnHHEd|Jws^rs{gg1z}BTf2PBi(K9AH^!IhSks)W>itO*M_7NUH zW`2+5YaSlo8-l@3t5<4`6pW^Y{KM4AE~hb?zi(q!oz*^z?_GJm!!X5g5Up9g_~Za~ z;Q7tMpUXi&awl{t!Sn77nnAFlCqZg!D2SMnl^zw$( z(vRdRz@kuQTHO*cs8faJCVwVys#@XM$;%vG%yIPQ1bcOcd~iqZD^v&qwm=iT{pY75 ziuc&a*ppBmk}`ph3?(7Cs+pON8=50vz>_1ForFYFHubNKFq}<&UCBSKt#a%==5gqF zyk0!)ClOu*-ur?nnuF}Z_s;Pe{jadP-U3~Aj(&ooEMzf?@Y0FR)jz5@;KFJmg(>@q z!3-|aq)vEz?VFuV0Abu}4ABN7IM2X%4-2STR4LZ?XQ=egC&U?gq6#y5tSn85J%s+y zv2?M%{qc(R8elrg#WXltVud}mkXcXtT-|JvmHzL^qzq0l)ywo=Dlk4EiRbrXj(Vo= zyY&m4k3r23=^8PXQ339lH|D1JBU!I^_$58UxXl&89uaUQ8n*|em-J*&LgO^*VA5}* zWgfZwA0W{!^=Pw~s!@~wLlf)e+UaSv750tvqtb)mq{4r-flTJ7>tk}%ce%t{GIYkK z)HuA2S8lvC2_!_?gKtkJ)ectp?;rDdj|>CK-XUfAJ{V}4?tfX5O>6t~bttYpo5`$C zD^FM~1>Cj^B2T|Dt5aG|*WHU*q5=xSP}0m-5wg#F{w4)$OzcmR^pV<<4jKD3RIyQLv4W93|{}ThXcm;39h8a-GGtUGj?k zHB$9V#kfAUtL2-C^hAh|&2N0Ed${(Qgg&eMJKlb=dOF)YEre+f`aYEAnV=Y7h`rfk znXhSy(IpWh_&Id9uA;7i)}P~uPINBsl(d7}a7cT|6Zr4=k&q>_u5 z`5F-b4p0_7LZLHj->~Me=*PhEru50qyr~GDmIk@29~~jG{tppSB4#+NyJaT^V2h5T zRQj6sTywWjKyA^{EOAx4UY#Fe+s~(|XZjwWz7Yrz{}>T3!?!UuOl;6m?Rw-~)Nuc) zJo%ICUJpG6d>l#QgB*|cE|ie$I8h&b9wG?+giJ{^ajLR%^DUYoKl95xk_FyK8~VsQ zM(Q8nRZ#jK5({5ME5WvEwi17DJd~oXh>kJ-sX`qwP;e+laXk;S?1T;HqHCi~oxx+9 znJa|i$?L>mSw;%!FdBaliyx>WwdWaLQAncq7OdEiWs1BM!-T?f*v(u+xu}_df3(?{ z$j~rf+7rT?EPPb0N;Wu&qZ{g$<~A08;o(u8w`7!0D70=ELnt7s>Z-1>c5j_O?XKPc z7X3UOtt4dd9^q9hEB|;ajy1PRHi}2r*fa*n^Hue^mVKPz&-k1+5&un+ct6~2oKRmM z7w#Cq)(Bi7gJX8$=I#aXZbCgxa3XD5ZeJRjEad!a;JJ(0&)ez~Nsqb+(T;jiERqA@ zR>f}Vhw6!}5}J6^^z7~NX1{Ke#3X?62PHGVuZp9sc4)>Rz1@z~eoZ%Q+JPfN<@)+4 zjfiSIcZuq(9?R?`B^a!APKRCOYZSef`g%Y6*1gUlke~G6IPF+(GCQNBcazUFmwJE6 zZe;<)A|6}dle8xg&SGxxl~jP)abqZHAQj>R3v6`vGf*=>?$g zRRKD|nCrGf`!odgfcSSukPKJ>UUIOQmmvFtM8}!q=-1dPHtOE0>h27sT}0KD)268R z9>eul_I}WI41v#RWKs6AxbSAOGRj0gk%1;sR@Lu1c2iS$ySM z;r>jYmv|L8JJ<({v?>=(;q689)vAmu{^Di;7m%JTM`bkB9vt-PB}yW6GTzv2*92~- z6Ut~Dxr5-XXwF7{-uYr`+4Ff1p>N&^;1TrKYsscn9z?S(?`4%|kQ)kLt>D$`U)&cD zq0V8nVp*k2(z_=UrN2uGTkBJ|eS=fY0sgaSURzZO_afm~=epjagy$JmJ=?dPyNJV- zu#bwQw9RdkvlMhxEKSG%Q7WFbWYJyEo=N54J8n^~#zhrtWIIXrq*{Lu8?Zv^5?)mF 
zI3U=JnFj3h^a{1Bzx~P+TQz=kRK%x#^@0!1b;vWoIjx&5*TM?(Q4DD5V_O@(Q(!IB zqnCX=>pFt!EIHo=XklVa`Np#-5(>S|13`= z-?xB!3%Ky8=c@R(eY&IcB+7)_^0ayr7b{rr(z46q2*&HgI90v%0y7D5xIur)(Z-`u zFfp@xZ9n)5Rk;w<>bXibR@3<{56l#8sXd7k3&5REKAwv)NHyCM6@6M7&-Qk%bK3d} zoT~(z;EJ0-bqQiEzv*(ZuxagxB`qUZYb~+RM;3 zEOMUn#o+eEQk>{;oAE4esSwH#9E6Fqk{aYKWv8~oZX#$irS?Qw?H+t(3M2{*(JxrO z1MEi@2mVjfOESqBz>9AjjMHNa!djA*N0g!)FYX_Khc;f71El5E=@U%VH6 zDCXrM8^b2Dp;KG-KwuE&GZ4kBmT4e@zx!%#bJ8K#p6Ba4iCVEn!ub(R0h|en|N3rC zNswZUts!f3Jn#=jFMTdJa#;vm;4{0xVoMGL34IE1_0E+6srDg=7w3GdWkb6 zjgeM?1a%B5TzO09sJ6kFsSO+~zp1s4ibR|PQ$PYL=iY6`X_c2NSj+2QqW}|iyg8TL z6I1*W$kl(FxM<=I(3#(uGUki-2Ydy8ubsy&!s%!Ah^~=pf zFq3TEW1RPrlo1*k6z@BQ$jQ&IR~8~zQSddcJ+Q(De0xH0P^$-{mSWI9tQSOhq)1CJ&@Zv`SEnJ+O4mMaNSrp zo8mCpM-3Y{{iKb}z$j|CE6w*_83OapK>-=@C8jaGEszM%ju z*-OPjp;H+HB&Z%$Wm`lWeDukD#cQj3CvtQ+eH=s-kN=9H%E0mW;Up(Boy<$bDL?+U z-E_kqK#>o(=C5O>dGi=t{aZDM(to}=^ZwFwzHe21^aWfw{b@=rgz@6Fe6e1!M?t13 zY~PO=3`fZJkib9i@KCxu`XWd z+Wf=C+YB-=0DN-E?}{BlbMW9&*oyg6*xd*yZ+)L3M-WI^wxKI;;s(!c^QGV)ZOtQO z{(6i1%qRUT-W5C5ICJNZ@jcEOpxqLReXoMfVmC+5Y^G4M>I91@A_qS&E?IPpo(UhA z{MlUdfb@T935VQqn^ zl~Co09Dz=y8AavRE(NNzw0eANy4nI$_ZIA0`~x68jL46(9*5iLHW@Q=o>Lc} zo6BpuDX!V1`RTZQ@FZh+A+-LT2C{J|6R1*l2`Tk(E)7749G|6>Cp)T9atf$Zd&{Ka zIq%`@EHF#@Y+%HG?`FG|I<<^U)Xi6i^Ygh0ZM~fx_109H$_+Y0Xn(qMDqdVsea-BV z2Pus+VUCd}&|{-plilu)>R+3OeJ)ppR=u4EyeS%})8<{x9TqePAW8(tO}9YFPa#yF zCk0&arharok8ZZ~4T@b&bmMem9E9BPr%G|F{2O=H(Llv!hdzy`7bga#q6hH9w~bt~5#!5R`Ro|*mN%GraXzLLc@QnM^+RfXg+2v4`pc7jb?T&gLuXQTPDDQ7YMWv{$hQhTBt9obNSSk(HJd~mBxr0Q zpl<}5yYlowEH}k)RbDdt-nx)8ATShT2JM7^eG#0y++`Tq}H#2mNJfq5``$Zc~^R~Dyx&OqVopm2xB{Qm}pz+-TxYC~A2>0IJ z{R{T(i9)NZpWA(CyjIVI4;oYVgAm)9Ei6JGshJB0sV z0HI97uz|iYpGh9?ok8oXhDWl)-j9B^JM53$!N_NL7SW|_a#;;a%F#E{p8_}88vNbX z+s&i9cY{UdN(d>a$%4Z_-TMVRVvfza6_!U_qB}jMCV}tAx3|{$LM9iBwbR*lQ13Em zTM2NHiwnlDIrtg+#jN^K@C?N`X@NjY)_Pu?$&Zzwj?JLXr+K3#M>%*^WTQ-8-N>(k z)v)$+2^oQz#!blf0d;FW04|5l4p66wBsH>0GkiHFg>rLCU)Wl|{2+L@`O}YQ8U?+^ z=U5-_B(av*#Ve0vGZ5~bzr-;ocdw$WvA&>COjiUw*k{;~m`D?t&(PvTST8}{`2a9j zVpoy<{&WXbD_`qp`n9CsJ4tM7js@=HIk6Gy+4^%7m@KUUT(WAj6Kvw}mvDL`?W=TG z@3&ekoz%qO?Blhbd$=APIG(nfH;?0qhL2$a&Si*ytbg0H%~skUd^VDzi^_38o7%~0 zruVKdWw14DEKOwoq#nkg+HiVHK>W|`=(88ux-GVbSw4P?? z%hd$$lm5(@9i3g;k4Qmv%Xn0{oL?==saqdm z%O<-|3pj9_J~E*P8%wGjIO|7N6GA6P>H63@G~jl$Z3WjC1vnLz6(+$(*9{h>8GI<1##JiY ze1j46)&<69oBqH!iN|CsQj9C7E&*@3_}zx8d#>v0AQQ}GC{V1IjKT9-pU>37s7FM zN2@K|KREKqw#9K1aE-zwx z_+KR_1;w!^@45JW2O~+fK}w8HpLQgwp57&)hTi10cXsSX2>X~-dKa~y)ycLaX$*7 z`w!>)8&&FWt6PB7sgLZ-d)&c8=c#8KJunE>pi8{NRStJtsZ}*5uQHt1Fg5uW4MVhckGq7-y6ldvhH9BMbL-^{U#)ll@xl&6QH{b z05d6uCNuR*2hl7@o^}pi$UoWCJ5&g3cYJOYBY@w7R>W_WrJ=U}erb(~zw6k5d+5Af zb$?^?=h>A(;!vr#E55*Y{2)9o9k0d-QpqSDK;#pBh}HaJVv6l`CGiA>FYsfp>FY}F zeEIFLo6BiHS%I^fKisFKCM@O{qr+pmx9%tA2~3UeadxWao99&%V)uClk%5_?BM4AW zOUOE$YvWX||)aMlWeR;YkHRo9YuYVlw-80DO zm)OyPC(>utj%UEm^AYNQ!;Ow=IMK|!{!lW6X>g7lFy&3>3Y=ykKr&3qZ9Stj@?J?9{b1oZ2V`(p4C21PU$;JN`MwC*t^+R5FM*r`LkIp!uu9sm z_92tSd5CsJ79#?Y6bc=%!e$trBq1* zZW(HiU{vqd0VL@Dyk>{VVjqrmtKA|l3AUT_(W~btaFX>hEhi*Xk==}S%Hum8m^U)@ zfLj=zAw#qm6d{-qo_iDuwU|iDpgoycl)CSRt-;>->4w(xWZO0owDP1`)B$?W{3z}Y zI|z3Rl=-A(;9;~?ReXa><|x!cOTQRMPAe2`j7^oWY2!_x?pmSDhypU<8*Q_PTWWWytBA&}i@gZ~D364w@+V^ruHZG;l4L3#c; zwN59~q+gL#Lf%3^CER}KfsS_ldS1}8QoBImT!wHMUqlBq;~&q}dv?}XQSOE^Gy&Fx z)xfj&(VtLWNQw-UYO|!M1I0HYg-w$VhgjrsWXUPnx^Bh82Gl9gnqk$s)12|Eu&E#u z83bmayLc~_z6~I^H=!l7WQSN%S)f&W_{HAkAaj<%?|s%$T1J{A)&DVnmJV!JR+mj8{1`_Fhej>Med+F4j6|L2S z!xO0lZRaz=U7~tS(U45=x>EQrl3$Y!Ev8b+REK@XK#?|H}P@cMz4S|5vAy36gNscynu3bRlu-ePK z9^!U=!zCXmSGupaznv~4d+_D`;`+gTr5T%bgmqRl7}RE~dzER2J(fb*b%FhbSpgP! 
zzeu;fg_eUBa-7B-ktlh7=TxD+j}d)ce0$aG#mtXOk+35jRp$HoWk#b<6XrsdXfS4n z=sLpOwwG@cPDYYEy0?WrI7Kj9tTm!hVKgC7>5kgYUJr0rBh=OZSzYev6Mv3R_*m`x zTQ`TpYOG95!m{lLY>8jmasGsi_U_clr zH5z2rz-ZbYx18`kGYLW%W>c9G|9UgZvX0U0T{!sHG2$58!=%rp(>BdD3LT*$L1BUe zSINKk+ zsnKydh2@Fjq~89Yg!Bh2Ru>>)=Flw(2A$d`wq9OI4kN?u%Yll8< z*H?cFbr2M%A?Oz_*tt~8uQ$!EtK~&s(kP%zp?Z27V3>lE=%fK+ZRSj&*pQO0&nE$h zqIGw1Jmvcn&-gI4oSI6tN*#DeA8992E*c@GY|&=!f`)($Jh* z3_|XXt~Hf(88zUbp0u|Hapyvi{eDxmnMMPpc7h=-hDSYFy=*sc7HsJXJGkyyQk9tHu zmgnWnLE`#6Lq0c7Wti&QK6gUA0DQ#MnG&fHg1D%WCHb1>6#4aMNdL5t>EHh80#}XI z4*cDo@L1~*cE%Ar=R6P-dedV>Jzen zo`1EuJ-o)zIJn9dU%gBM$H_Zu4>PHVkBUcnwRh0%kX~)~wbZS!HKZn%xI=lh5_h^r z*3P%P?80ez&Dl%`qu#%0QFOS@s$F^vzi?fVE8wb*_G*7)8*}>Bq(a%Nj}A#^=dB1o ztf-l0PzP9_K5k+M6Su9;@oPmfUmH4gxA4yvg%p0{apa|!s}HNUH|qTKqP3A~D~8zg zHdPwkIp`OC<#WUyxTjrVEKctVPfgSDFAJ>2@CeRQ`aGX9cQpv~K}ptFqm-_Db6KT& z$}5({51y^eDh&MD@0?dDoZmUdzr>8=)LVoW|91SVn6dl_;6}jNF8Ml=P7Ii?3Kx$= zDAAM;XP?JcSS#WFDHP*Udc2$Y44FI5j4mKy2%C2T9VMVSo`B!h2u6j!Pl``_YADkr zkp}9kQtxvvQJf|W8*6|C+&uo_NO~_lAGDj?RZ?*4?--(>?d<+Dr4(ogYL9!{htKHO z;`1@oY;$L8-hGFi)SjldhC^=%b~YK~1oE|~Y*_}jG>a`jw9ve+h67!=u~ugON?;)otPi?+zu$3 z48>EJHV}9&IW*;+f9#>Qpsz91|8~M!2iN^{5Axwx+X3VX4AhtVnEqVfuN24rbcp42 z`jo}D7g}0foTyV)z!&hN-y&|l$p+KQs$e@L=W_9m2&HGfSxUC1YoVY>UUVZ+a5H(P zMW>Ud^elW6omdLTv!6#cRJX#z_t)Oc)!jz^a(|3q+8+0*6>KLbpN>k%cR1zJq>94N z<@ZkEh^qez-2Q9g_7mg%%5Nc6=d!nWBdh(9lntM&`x5Z@DohLa)(xc2S?qUKnqF%B zzMVOXmCE{B0L-dvLI=p>P#5;ML^>(=&IRKfZOv60ADO^nVr3%`U(A~S&dH(z*LOWg ztA%``;lI@Hme0o)vP za&~?`69w8UpSg;Zc&fWWky0NB+dN4OYTj($I<=MT3E$8C!lZ!T6UBI+<86bGqyo*` z16v?nw>y3qM>KcdT-N+|4p;0~+o=y1V#ak3^53|5kbZa{kZ_y6;4g7iQB+X;StAGow*06*?|v`F%Dm*@W@ z)w@FkEZhyYNF_Jm>_ue!6F^S*i_^JB^M|Zwcg*#lxr+=i?ZlguN^y@t^fceOv<;i2GNlAoF2yz_-D3--7yBi?n6@g*s<*64wr5rZ< zzQ^LN018qLG&gXm#Y(d(p9;5buQxRwxywJ!CTogw+#@_)cL$@vVN!`dZ!8_;R!6`z zB8^!f*Zi7rQQ?~`=Kizs+RQT^1!pkU~wHa?HMR zHw8JtAFsO0B5a=wq=@CDnkl7&BHf{@6ICqR+eDyVl1H*b=m=Py_NJj)mOWqj>y~Fc z1#G9D0hQp2YoS^`YS#1E%Rtwsa<6Y{Up!RHE;t5kAE4-Boo+CqPDTGat-61eB6J}L z_{8BLXTB@6MXqy!XRZrWT5&xwoXY7k@4qkno|FfVs0?s;5Q1HzU40Q$MwPhu^|ct# zkyN?x%%IZ;fyA`y2D=Or|X5c&V|s z0|0GeQUM>Bi_qJ$9}59_|Dk)%14fNL?Rn<4+x>ytWG@~S2nRtCAn0f>kCh8#4whlm7-F6-TM8BHy=1i2fq098QMfCC_%O5!oTI(KuSkF&FbJ(5u7*|s!u9UH z`Dj|=asd|>bgV!%@@0;a!sm=9YeuTypJdI~-+tK>lWV>;-ha*2uv{&;csb57Gtx&H z`lIm+lH*2e7C`633wq*-Wb8>Vc0>enIOnF%u`9?y4%xZn-->apwv~Ux;gz->zU!kU zJx5L!Blex?5LV4RB@S^*88}zcvV>O$kN&#B{|UI#pvYYaJFaEY)8bPL(7eAx4jXTJ zj$z+;af#Uz%_1>Z<2f^hZKe)$-f+&cNtTV#oP*dKp6%_>FE@a2ZVoM^(gCh@WNLjr z8%`~F4TXcgi2CI(R!Y&eBS4>1%e1rK94!sWV=Era?x(&^3KWHBBYkp+iK4eQ~~q}#1N>)bu>0QB)<8UQ3z0f9W(o%j_+%O0GT~*vP=Wq5%>3oR>NaCH+Hukxg2V1 zx_ShTdDkC=ipzrRFo_rIxkk!9$i#N$EH?MXbG@R2x_IHzU4QekvL*8E75`1+|3+^&^1UqsU4jD`uBn^WSD%ySseb&G^+-Lh!U@TUOt?e% z45*d_BC!bm__0Hl!}#Chl4gK&oElhGUb*{7hD6dhB^}VxzRi~fS_mSL$UK?tjo|Yc zAz^y{7@zF^2aI>Gie-kr%PwbaA_JI}Xw5>)G_pMR_DMvX%zF|T`Px4aMFWIxE8KbS z6VW~v-qT33WHY}c5$~3bpk?j6nA}J5zcOD*37{?`E>oen_DOn6>UP%VaejUl~ zmR#-C_zuSPhdvT_(wfahGe}9ZCNyFti+O$m<@w&d6Oc}=-TQS+HPH82EZ$?@u{Ptd zyJ=fzXLf>hfau}oU=bTI)dc|N>?R=H{}1^w@SK9j>uCIHN>(U%XjO~XEDLGJhSAh$ z;Zq3fl}fAQzizPz^x`=@1yVO97T8=}_VZW}PA}ZJy4j^g&=wV{)rr5yBKl%8Tiwyw zH@GS+sZtD*%rSD^U5#FM2)7H=%TvA#DKUlK_bLc@4lg%Twz!HYr1?>#Q!d^fMik2V zP^_J=+@yc0&$mlLuOZ-iu8Vq|ujkf^aaWsUJaZRO1R`IvI2Y{oVz`ujlrW8rvI*Ar zAL(Wrf(z{h97AyzFDVrvO@$+ji&jirMxc# zlmPJt(4g=t)cVT(jqhdKv*YaxzN%ATqvNi-Z@YUfnr}3*H={+41Tgs*N9Q2cEJMlJ zo`TA37LZCm1&99(FiCdkx^9w#Key2n+dEpPrgkHn!h2$f$eML~|KYrQwV*kY!^DrJ0U$C9!3rG8KRE|V1-sWNJElS7~LHpth1 z8$~wZO8wcf9tGw1vHTOMS7TF5t%xNbfOyM$5pl-1sgog^t>Ln`Q&$DZuxQTS0+oW* 
z8fJScw%NleZkBKg?E1|L&xgh3oFMKj@Q#kmd>{gwdJ4l};(y}_(X194zGr2>`p`4~9bc$unq{fwea0u! z{Nb-Z*{Y3jPeeu3(`QQPn@-+n&UGFQ@(B7aSzTIRk;M`a#Oyrk78oySHeDrpJ*CP= zDF9>|bP_&_NFO-7ErK+}n!@V-d}m+fvith6sMiWgf09`E3Est5-~!KSNElNCiubz@ z6ra^Nu4SZVD=ju-2}K^FIJrBvF)7b&fY{f@h)Jemo}zAzSK=quZv*0 zGt>fIV9Ik-DeZ{V50q{|qv-k`?W0k?<&JRW5cN7LcL2!Yl;E2Qdz`MbvrzeMBXI~O zEfy+JoiCw3=>j$4RW}O;wB6NrIkcwQxZeUJ*zk0@uesBXmi>xmU;n(*-{kCUDX22g z*G~HtP^_JG16}B+a})USXTEA&@Vv(%Q+HXdEZ50o6C5O;M1k67KKp$BW|EKXEjHLr z`65+9yUeTYCmuYR9%FPWZP^qkp=l%MHim1HOgf zyc^Uo0V30ys{zMZNE$*z(s&)VfF|2;d8P+FsPf}fGRVG)LM>|Lk8%$<$Bw1(o^cJP z=^wTX5h^tj5qF2})TP`ZIe+Cx$M6rb9?xj?4ziZ|n`1D6{<{5HPaFp(=t^kvuOF!) zg%k2@8H_lcIQT%aGW;34(q<-;rce#$Nx|SJzV9pZ=uL&WD0)uzJG{RfXPjOt zIowBpEGQP}S$Pe*iw%c0dBKtZd$F|>&VO?;LGJ~_!-xX6lTz+TrW$OR4S%$$CP;Kh z>OvYz_xJjpo&gZA;{qBQYIg8* zX8UEP#R!^K#`cp0>m{I4J}fXhYoIyCrWRm_2x1K~qP+T{&*Q?4)U*1JS6Gaw270{M z*dvy^sGF)oo{;!0Mx}*SKE9Ta1c}<*Acski!kR*&Xlq5~Jg!dY+WfIGWbO5Yd{~q& zcY=aP66A#=L&*l5buEP^upXYOpGh*<_g|83w*gyA5+9o0xwTKM#Ce3MT-syjJSVXD ztKmOjcxJu+a5!k{-8xOPRqPx7tzpMi5~I`Gel(VkT{y+ z6&J7`{UXhVN$RgW_}$&JCxM3#6HN!*tt&&Qbb0WYQo>%?FU8;#9gT#5ZeDYMOB(56 z2UCx*i6x!h*!(A|u`tHT03rkuw_DF!#E~`Z0kG!{MUsen$DJuU1wbNy-F`v04?tf6E-q?20{iqFgrmveQ{+ATfB`X znC3P0iN?IimgqxY0%J7mQI=4YHW&Z^SaXa5Eih)Oi<_|R-Z%NKxmwT*+u|7-#z{*QkCATBfRmTlab?4t$y2A8zs zo|FtV$EygA<4{(5*{N3cCNOC_#K|P#bdPZ`2>&^|W+*QO=o|OM=P=L;RmAUL9ztbi zY@j4o0T>^vq4y^P7UNa33EQNEVsP0j%5~2F~9iFd+6C{f} zekvZCf~?%?d3jGA0VdvC1xP!I^vio3*-t(khT;>lj|DHB_e=}wMRf*SA+67WbWw0d z>4%UvX%qvq7e@ngH0&LVKTaA85NYBTuN!mU*|vR8HG5&;J-Fes)bjMgn}K+e!l^g2zGdHNOrWH)Y^^Zit_jxy?uC@JJet3x@#J;YRq?NGI5#?Yb9zkZt1ilAt06(jU& zx3b9K^^Rk=B?$5pC#a}SM8XQCe{$*#WAK@pS5 zq1{aX!t~s6=GNu+dTlXQdMvX^S70Gd}HSQ(lxfwF*FiEdrxX20x+dD{wNC5mvW#qfM7`!nOqgBLvw zG^QogmABWgg>0vTMYz!^PTt&8XGtd}4ZIZ=VLiF>Jwk*yS&yPGj@=zceVnhX50tT; z%}%##++EzYV)qhXiTRyMIO%D^{BsawAPNcqnF|5ei-L1d_vl`EFK0dWPOsk}Je3%^ zWUF;?a~t)f%Bg{A@TbI}?0b4zK^yJLo!35)Hx(30$p>uQ~Tm3 zv_EpHjYM@a4{cKu2{LPzrPzUVsl@C4h1aDxaMCi>_-l4p8Am7q6aV_DQ3rm;0dYfi z?#N%3a}Q`zln$YJ`>pXe~h}U+Ki`*3Y zWU6~UbLN9l4Ly&9mAb!VEiPl}>SI593@e4hBBNP^%(`FKvz@{>=85@OTn=4-Hw|r# z)$!e-7fx{b$1iU8>2JaN^05!U7XyS&mV0XP%kv{Mj!m_ylpXiuT3WC)4u8qQJx?TN zj!@q_c`ocmc|jB4Iua9&ej$Y~Xgjw8L-EsRNy6(hyj+`6W$y)im!lu@X?1{FG~Mtg zI!rEsN=lpH?VuwZ?S176hqoM5gih7{ngTT4rHa;rpw>7(Pa7}>&ZCq;3LZc3F%Uf! zu6JWbEd=X9RfH?s1G68@MvO-A%tOxh$L&>lhi)WWOv|8u2Gt zOtXJ|fg@7%fG!vMx56C8Vzf}4qnt9_(th;%b`&uhDV8zXI{G|)t^L*M3xud|Gdm&z zDnqw>?G4epa!=)ia?u`6$A(1ivfbEpn>YjX?E#NaPzf8+{U<9XoJ4Qk@>l#&)X(jR z9s>J^=T0c@JZ=cV)jQ)?zu%uH^pkZDdRe!9x6^>N@I)RUqNAL4!La7KNp92fwMbc>-Kub~Mey8f)%>5p9oi zM@GP?pUBQ!bZ4Y`P48+oRpl<)%qi%e|8OXOp`)3wFh=ThR}8R>1Q4|_fPkJvfOK+h zMQ57jbJHRQAwILUmzPj8-|?*%-@L6Mam9D2IoZL0?w7_4HMBC+!M7NnxbI_$VCl4d z@I{|wa$004Zlsv)jMOO`R>c_QRxoedw|{K3=`|h+#(s~4C+~;Z$mL)LWEW{yyGd=p z-p1XR+{fCpOe^21tsTLc^KK2g(^}pTl{(mJ(o0@elqN%&FvS$Z7Q;1KhqdxcZ^C2Z zVdTS5S@)nh{!>i;Ixb1?28Y#`>hq~WTaGm7pr5!m->|I3x&G_}8NbNMrnny@1qID! 
zT}bMpNpG!B>s+YGp>#477u_$8^G$E^4S=3qq>1}m5x#nA2DPL$&%QcXW?E_T9xo8C zZjdw+h%0~ZQ~RqZX=@mlYWz%RnqtERGa%yT_VHpZ-YFfb0=koc^0f`li*KUw5K%bS z^*$X|SO_k4oHOO?v8NjDMt)a@%!_LrJ`hus6n_U7;1E~h1Lp<4x*;OI8+dm?YqSrQ z@^0L6n|CPcXY%O^> zs~(Tnw~Je{z|?~PQ_$v{74#3KTAp~(*IC=Tx<^zbjwNe2mmyCyf)D-Yw`UqBaus~H zFOnfG`SZ6; zI!V$wSL=%@yw-QCcJ^(Gb|7_#IN+y2nzKNE$m`{7z2`Stx^3%nI|S?I+?MZlkX#sH z_d#eHYQzmP(Hws4-ea%}5V%Mfe!M{Qhvyjvz5o|=sEQBFQ%%l|cUmnfTDDxh&tQqV z6AYAS3KW#Hc~3nX_FNZ~vfU1d%Vd^&K*~CK;;*1~7^TDK`&TYF`fZT7FfDG^Tm9e4 zlfb~WQo_^Qixu>u)sa+!R!R~+5`l<;d95Y|buatt!MV9PM(~zAJQwbZf$>O6RVuTN zNFJRDgM`RJ>Sb*mHR1@eH}8E#=Lw&$x}p<3@H+9Htao*F+3f>m9avw|Bh8|_{a^XU zR{ch@wn*>}`}i7@{9<7g4{z1u9S?k>eQQ^t?|@OqT++K-MT_bhhe0ajR$4Yz4=XvO zCVn%?WE{|JKK!8DqI0jfIJ-li03u|^oU#$-9PB$sBkmQ2`QZokHha@zxxT+NyV-fG zWe~yvK3@-U+8mYWfJkf``?e}?U5L>H2@TvyNLPEIQu+D*(>seF7Uv?BQ)+z(20mVA zEA7As@%o00LdF=x*xNtuRC!XFh`KG>!H5yzfXCx^`k~VMx{YW#S>g(Rd+lRd`*X5= z9U*@7cwi*rrB`}iZgx$}Xa-BVmt?zIq*oF!<9(0(NhDoz^36FdJBeR8D)5G4(y&e5 z1giHQ!P(*EMkN`T(NIpb4T6FMIboq81-mGht>Jh*<;!;gY_t^;LMpW7v#$|Vnx?3iI*IWQFY83JKFJj zjo8iCU`7Ir(XkVc=t_J1Fx#HxZq8JX)TOnUJ&zPF=J@)PXZIXcEBO9#TQ_l$PhSyN zSw6MjT+UvVl^lTZYr5+hXrEvW z4wi5j?U0NFRhLIsThZR=&u?Rvjp=cPR3%3A(ETtMS zu#r7Fb7a!wyoPhp7G&qrSSQtBf+YkaN@|k3s|uT$BN#|D2IEd9nkfiGeb=@DY;T8~ zINv^rtJ2_Ex~Ilupm`)qcyc-b U0oLMRDxIpGt56J9zcYULmIs9}QH%7AHypr|m zpqF-Kz^?oFz!bWgi6<6bl@y_;7$7o_mU2zPC^mn09*-APP{1&gl1E}di~pG&Wt&w*udc|B#t`<)s z8vMR=tijKtpIZ~!kT=j)q5cH^%SWF26#PYxv7LVW$^jl$!(oM2&H!n?%o*)MSxFqZ?{8 zw+l4~P`#8xbmk-pPB-`yfZmR_i4vFl!k-VZ&((x4zpd!cciW(Ra7~ego909)hs8_C z*L~PPUGUjn?8c2fO3R&K><9EhzwZ(^e-pfVPl?gR-bPbif37%RlAM?8f9B3(+2 zJ<#Yhg?1t;sC|hCBNu9hIN5YZr%B|NJ(-jUYVkz;5{_paJ_<_K9qu;YoxO1D#GMvS zvYRaZ#-ygvl4vniZj#fNSYCF&ATdu*DB|)2;m6Y4RIw4QmFg{2Ac0JffmzKKh=9M+ z!p5Z%#AzcAFsndYPRbG9zvu}dLmH_k+oSje@d2S}nfdvSqk_!63|uI@=u^C|jnY*H zEvAc$^P~B$EYg#*M|ckvvD!aS2D;)Jx)QaRwroUT%MdHNeQ#_whK?28U*xl%xm}XN5wSKb-HziuGY)F@qJ_EQ!Af3qn+stCExWP z!f5`Sfoqw-kmI$I;N7y!k)8Fz7J(F6dhJuF@IX{ZEBmobd~c z<>PJi?PwVfUTj<0=8H_6B)fQ*6%7n#QNK)!3}kn2$>rF(Ge2#hv_CF@_sD4g=f+>B zTZ4D0aBde@g!1R|i^wX!X#3ya=xkAGPV~pAkDG!%8m+fH{q4vnW{@M16q61=&=~CC z`n`4#M|j<3{6{6ZM}lnDHV4)?KxAseNEi*D`0S7fF)ote;@E*lV{CBpo3ytR;>WA- z5qEBX6$!iSMJv20L0g|sX?PAv&dV5$v5N+i7t2a^qWLTc5ih^i7pBirKezH2IE%pK zL#aE@%ITaKW|^cH41aRcsO+Lo{AzzbbY0W?ND~G@<-h+LucUX7Gc%(7-_c=TEcu+c7mjiui7tc`Y&IeNtLhQa$s4Q#Q z2@q@wS$xU7W_M%W?yxKSMP7$PC+gK?!D##XJspLRBU;XT$6t1*DU@gg?kz!+fQvM# zT4?keF$cZ$`~^={N%hgmHHts8IR8MP4Y@1lbBrBMCF~yoa)DT%-|L`%40A+to*cWA zR2sDawlJ7>9cYRo$GgIQd7AmHdqlqomZVi1?BYayj-znMkMpV5hkyXJT)Q?tSa2OCch94~)&O*Pcg(K!(pKHAd{mdL2Z{-k&e?|gqFJGKt|hIC zaxMiFJ_E)9<#j4+dfqn|mhfT5HhSi+zS`y*3r8~!1Gq3uje64egC!B#3Tk>gmu_v> zp7V}sj>L8kVLHiDSO=%JDCXuJB|9(WoEcTJa~hY>L~kxKbp|fU3D@bL)Jc{?X}^DK zAAS*Wrcx*OxvBY>Q_FMkK%t3K=QzaFll^680&C+lC+U+_m&&Rln6r0|PHMpxpVNj2 z>V^UDW4nh6wCl?ZTqP!byloeOLVE)rIV>@m`-#T+Yu`?KEw>0I~C61e2Te3_*sxn%&Y5!jLG3~PYOR|VPwe|n#CrwGB@JZ+kL)T7+&=X!9%po z$<`276|^YF{Ktagmg9$x!uH$4X-}>tUKB7<{ATR# z=%=2X3O1uW^w;|!Ti`c2N_xW7u`9XfJ98HLX#~sYoj7=nIpuinK_$TNS zIh%#{fs7{nh`jF?_J>E&ruOwMLs0Vr?TIvE_b_5Xx+QjTHim&olfbc3u-{)roddwk zhl&Z@$7At6jield5j+LoLzc12Lia6%wr->A`ITfPWXhzdkiDR)a^oP$VM(fREO^%d zlva>L^eTR#J)$Bk$dg%ra0tm;eO3R)T8W%ksqSq1S91-vG|r2}$KPZg+#2GGF!17@^ZWB0h$YShHgWWP%=vhdhupQ&|R8m&phXoOlYXE zokQ$;DPG;D**-2N-wz7Nar}iA_!Jf3q|%pU1JEhmTWnjj zPUZ+K!|TSu;m5thJ#w-?;N#1RJJ%;WdQ`_Z zGA-5N4@3Da8<&oL?ibKUo(H*psM(tg*iD64Qko2W4a`Ahk(E$-2P0Y0}|g_ zAa4mP$=tr^b;UnwOc-rd>Tj|pojiG};jC6?>OFR5wkRUldbUKP?N~-A0tH=j(W!t+ zdWw7}RdS)33a8w|q}1_3NOcN2vEo?%_^+tU!E)1!uXUO{HI*j4%0(i3(=~zOKjk1d>1;CVS=55IgF(v7_H!Vo)90*PkZcpuAoIa|^81gsL6&V70e@D! 
zO#kGs>w*V2B;9Ja26seg3!<6ooa`%kVWToB`&=ocxDrm4RXWLAU|Z@bKLH31&S))P zPIve}r1Cwiu3F6s)ldW6AuW8rWU$h0;#D#-Td2~#zQ%AW!=R=*<=D1lp9~E5pBOZ9 zIX9%<V5Mw%as8-(v_eO>@|@6y=}SZ{njjebr+l}nwxU1Sya#T^}0 zkZ*$7){BIrZTlTNL*F}A;*xWASkKeWx%Jx2?(Km0R=MR8D!~bOv^NJivTt|$iCtWF zI=bByG=nvkIj)Q5&R4A?stCO?PHJ9nv;p{@P*3sx8dkKB7Au^`8H9@Gxw-LR$-+j^ zqX?4Rm+!xh@hYWse!H$rec9^v^l{+`)p9Jub(Mc9rGTQ49NgyCTIfLbp(Hs3BLL?% zH5St<-KQRmU|v{EOH(g(K}vVL*-v!*m=Js|x6@-vpw@yk;nT4t z#Sk3^C6g)9k}F9G8-|c+?A^YFSEAeCAq9w8OQb1tp3dC6>szvC1+O_oj(s!C;YABV zNeL{A7G=a(&#%~ zACHVHeEmei2CJHnnAi<^=w3TR8RftKz|2lJEyD9;ryy2sySCclvPTo2A zbxu1yjG5Cfo@H%CLnqhC*_=$a73%hHB}32eHJ%p41nsD;nS2KjsW1vOPH-BNjGfm1bPD{rw(zB_xRNiuqCW11=$!&1fos*$GM26C{ z@K)#J-t9BD-kF1{=mWCh4z%}aJwao8mm>3rSscq+Nb$2lV_?9pptH%kF?97&qpc_O zR)M%t4qIW@*GKwqPlH?K3I~P|T>6(ICcJ8dK`P;jrXn)nNy>QMmBPVNYzpe#Hc>7( zSOLQO4)ofdGGO}rhjdL3bY`n{34Khm=Ms;vna>Uy!}T_CccD=s5HZ0rne?(*z4K50 ze2+Jr`sbqkSb{yh`_FSFw<87;%SLhf!K~hRw|hB~{w+YlPSndiZHrE6`<_lZ4{xFU zAWqTrGGarSfLzRUJIz)Y^sL-u7_u)w4mr$qo`}uG%446wSmwS+d-suB^a6D3TE==? z*LT#72`fMlaMIrUOqPhh)_~&t=CIo((!f|wy06!i7PU_LB3b++@H@g?RqgOJXzw4x z+ZEsz9oxklfK<~#yMSFJ0wCDP?-qZt#TAq}N64_9Ls(nmvn&#l7eUTN_w*CR$4cu3 zX~#7s0)FB|$&R<_AyKTYuJ+JahK7ZU&IY@k4vyt9U*eHeP6*hGV2yRb6wScfzd_+a zgMr(9$Fw7U1w^Jm)NKku<(|2dfb`G@~`pME#-|IRS{^`97D z-HZNHQ2yVU1MqRsb7Sz=M*g%*wAo%YJu&y5K3~?61UrbQAd|>9kNux74L*=wM_v$r z{SX0I%$Qpf)=vOMm$zs#P`+Kc!%Jzo^Tl`I>QJq=TMC1C0Fah6aDHVCFe_$%ruUY< zg-ar^d;S^Xs{57Sn-~arc{`nh9qyMM6B?o@gT>A{N7=afe^S7o-y#;&X ztJ3TzA^~=7un9r`a##BzjO_`Scv!+#AXa1^&mT!Zrd9W%ndN2cTz*S|eHxO{eaO#3 z!ul#qDMeH%G`?@SQOwg-6(mB#>c&^P5OhH79p^KDj(&4|MDW-W_9~^q>Gy6ZoNFBv zvH`_h2GM%eG+XZ)4_?eWC~jBb+p32zGrKw<|GeP8E@>>Cf3BQ88u@7EjJ#OOLAllr zRgh@{qYQl{EK1tTMfA50$=Bp%&uBx5fY%aulGS=haxcV~Me;TCvx?*v&k_`q8ugjQ zkYmk|EX`EdW3M*M4#@Y((&gbDNL9G&IB30ue61=_&Y$pV-2>BbGC*pNwLb^2Awz!6 z^=Q{yA4?4QP4yDEBU}emC|*E`PiL zU>vOYilzfU_JLaP%TFYaHu8&EhIeL8>qO?X5dDU=z|V5B!7n69p|gLKO>Z$UFM%}n zj6yBgmh=Aodhq|bRhz@X7tMNIYItywh%}GxG$IUV55mFiqgE%7Y|#nZ0R?TGd^+SMvVftUQS3y-D6;1!T zGWJt5B@r!Pl8J})Wp#pmdn7rR?mlHAh=8o=e3i;F{y3cEk}`V>++X1K&jNO572xK! 
z*!abNE$G={*pqla{s8+sK(eWn8fN8|^r!(y^2M9s%(^+SARgUaeoZV zZB;BJGUc%@u%X;+l>K8vVP`BIM1?@=lf#Zz>ry_Dafm2u7{+f9P>+K(b7TG|fRRTd zemXw>vp)ZIvlC)4Rxtdt?(aP$pqqu43qV{fRu8rZZsD4#&EWS##Y6wkvPSjK66+Q+ zv?C74K)3g9lm7kX{#r_c20#7vr-7m`CMg~%|L39f_XqQ&|6Hd3T6n;Y;-A0hzpiSm z07BN!-QdX>94#@zE=PQ6FF+nI~~ zq3Ha|#7#k;Me;aB@%1Ck%6m7^1G(j7G4OoL-W=OhsW4aF=o)IY07zY=T%sWaI~@#n zt9+JjKfei1^-W9lL8Ryb#51efdxzMEij#H~TLTLg?Si2j?V;oyIudu8o-2YwdDan7 zm4Hxkc>d{PRc}m@B$%uFZaq^)p?UM@bs84avnapuLOpA3`(X9jYTaTRBc$13;ccA_ z)6TU~h`nZH5r~8If35hw(j7el0)!Tb0IlPJ+-R?Xh^lx#&`7}3a%3nTyiD$?*UxTt zxn=|!nXjrfk#%xvLGgY)A+|1Sbfr2QaIS6ukKq(nH~=~yo`6cpqA!;&R1*2n0ekwT zGo@TWAv5Vt3P)1qo{wlEHv=Fk6Mq!w)t_|p(_T*b|Hs)||3%&PUElPGl!{7&=pi5= zDLI6+0umx1APo}I3`2KGBOy72ba!``bTQka`X9sNA%L7qhI9+ zKVGlT{}n9Y@Y-oDnz#JJrGz0ibC z7pYWy_1F`3Isd-J0&d$Gm|#y(UH(|URt(sINXo>puAcJVj1Ct?`#x#h%oy^KW1U`> zA>+qxQf$)Fk7u(kUvY61GiLS=4_B1mAx$ZW=jZ$f#k&frC3`zaaA|B++$XIQp6q$; z|FJxK`24ym?ZT=T*0UuOJ zjq5Khx8_2tbnniRP;zeL-cstRBs)`9>~T4^;c1#$w@E@KjXqW99!AYbckc$QIrG&f z5S75eep$g@K%Y1zqXPOFL)Hd@%Qm@1UrCqYmc2l#pBxgVEqAI5Vh{y6_XC1cdVM}PGSv%#}D6NRQ6^7g0T z>CvWn;EX}&C)9v50tjx|3B4NN3JHOVv+K2|!_E1J=cfCN_nUQrdqrp^RInC?ei&*r z)#T1I-{cNT$aSWNPuLQ$U5>N~~r zTHHjcWVgd7YpSI-*PUEpp|nttj?=by=`<5l1gg=(H5&10_J(X`rY`l0)41-|o+hX*k?5hH4#;=w8aiM<@AwDyDMmv8snxv4I!5a z7@-4xA>R~`h2{csV;ALd{16k%0i+?zdzFe@NH=gX%3X4gf%P#jDzB?rX z@klj~Yp3e5Y>0^%Cddg{<0iZyq!IEmoyBx$$uD|Fz#QtMjpM$rA7{>&-q$jay!85( z`{Jf*P4(XOzuf_Nnp(tA`%Q9mZ`c`{?6#iXHOv$4ReszSs2?Ng@4GOa@}Ku7WTEYx zyF!WnA_o#gxO!82cl#464GU2JaJBM_qmV~Q^m_;I){uWo1?zB zD1z;lI*!qFIT^+0QmH`QJ}fVM`!;XVtu4@sF$*_u}2=0cf$@2jL)ed-vIkR6QqtEa-kuygdklYp;GUO;EkRAh)AY% z4CCNp7o$Wzojno^p<&2f&4Dm_ql{=-U||xC?xzAZB<=08U~yyxaN8agdINN>O@p2x zuL@_v03VZ}4hB*i%na`=w%|XzeRuQ0rZN(ERLFhcNvI;Z2b#zfWA(zx#~kS#5M+J9pKfb2l~7JAH_giCz!*V2tG zopsM`C<9N{v4sZq2;L<&m>{6Y0aMlFnOyM}~{lTE4Ghct{8en6t}6U0u=k8-i^i6pn+5+tre!9I{Mv2Mu3lVlxi z-cP=64*%_+sczg#r&RF!Z03Uu$U^;UUhfrA^#kz_%kWrQSGoaWx@T4dyssck_0ca^ ziGyTi9X;pQLL*@Xo09Ztz_n{2xVhjqlq9>2;$?DqC1&qvy=N~I&5(yG>U3GeI@Cy0 zr&>WQTD%_1s@sOPymu==+2+-KW$Wk+d*38d zKYiI+5(GE%Lbln~R=pzDM9k_cBd2h^xdST$xr44~Z-I1au3hfuzN`Fon)3YA37PNx z#~D`3ZASwF+bE6FvBe}iJyu#_erOAEHo)?wZrSe)O{*`=$TSc5*?uJKkFOuJvFK&p zAJ@W28W~?BiOiA;yY3%^P>`L9T20Bo_0J@4`DZ@`?wkaU*T1rf{{ecD|OSRP_SIh_*m1NsXWB!e77o5T$A|Z8tzSE3SJImj# zq=jYHxm;q542P&GhE4sjaO-T)>UTP#lblvgCMMocn6NAIu+MDfiueqQ8={$~l0vAUnkne-k^&NMn;GTdH&{9o zS@dcn(U$MZZPUYq$OgR@#p`?q-W;v55Q&&0X$@`TcX}gt2xR%h8!@(s$}BJ^DTaC) z)mV#5tb~hQU)S@sN`#R)#~!GI)@khFYjs$ybC~2rC7MBdu>F@_??%G0%`g z>*0>pty48MKI&m+nps@n+^ZMmeotbD&Tk-1R1Rne7%&x$KeMhNXQRsmo5%h8O~4$w zim(Og0cl9nrSt2st@SQ~t&Nz1gmdQ<075WxXl6&#d0=O@7)?mU|BgMH{cL|3bEeju zb8*xf_)JjdUGO_6sdC{b1O$HW>d``p6HNL*Z=BC6f^H}yNO=!iKIg1kK^29!p!G){Y;s+y}MUUre zf~cfp)&cCG{N{4J5VsN#^%OIM<2E4CF@35m2V6_9G&RZiKTHyXe4kWG)1}+K$`6TB zzt}b}CHP?YlvqGVpNR=M{U~YZ^RK$2XUXwxiQdbpeV_c6gP*VTpTcL}{`AIJ<*`}q z{zAWdRjE%w#akT)Y6q;)hd%7@lbC&TkFrb^?4pwd97Ta`NtcWvr&my;a(g>lI++`D zXYPUm$*12bJBGR@lp!dPstXwUFNw2XY~xtnl1%lG(>*WwLtz)P-(hDusfy#`;Ff8(SWr5K z`PJ>9-uh2_DoA&-b~L{*Ov}5;FaB16tn6(7AFn6P&!-o#xp*owOe@opdXvZQ0fRzpSksNxasNa@A{aBtXW`*7oi^ z^w_(1@<*Egh3!3rpU7E}G)wWj)^&!47N+?6XCksF@%mAg5m(W>QF zu590MvNQa!T~ab568J);0v^%*y}_584vJyfr|}?stu_x8172a2Ez-XojCY%_@V#Yiw*DQe-jmpxZ5R zX`Y_S(YJqzN5W@0MTOm^e!jo_Z}gWNMKKj*UeC+OZ#!A(&|)UE`M+no=haWu!{mX&!%lyqbTopUQYcepAt)wcaBdrwO$L`jOz1@ydilpmbnwoVkea{{9*dJ zCU@=W*W+s3&(e0e0lu;q`3H#FP;yFHcDqB~fqCrKNdgxRKaRjR6h(+a933`^wAF+> zCyF(e{UPu&%Tj8|MQ?;b^%%!~)23-TmI!3nY(X96Z;Gv6cCDiAyyX^q!b3FD14Qr!E z;wDhyLLqX8TOXXqrWg3HJp)E*Zz)oRXk6y4?cXrf#!txQW+zwa#!v%~cVUt-GX^Y(V{J(4r%e 
z2AV{m0%}UeJ+SOn5}n>GoP#0 zRvSZX(FQ~P&0VCxoDYjjfb4!(?Y3NPr6|4lUO2~fy6|(Fvzb^NJCvBSI4Tk1q%$)% zKK40hSa#WGUk14GHqQCn9%A4L9lnFpQRbF}dhVxsYO~pj7%&0TngM0;Kfmeqw_r?>TB#|a6ksLGf^3yCg#sg~KP7Gyq!`wK> z?)l>vF)ursJ(R6E-xSU*ud>*n#d~t)jfdOBaVI_?S(L}JZOMXJ zXys6EfW=(pQC`(t77eu9zDgx>r%2K?Q!BjvDwQ+;f?6s3xa%h22ffr2LZ2}}Ow0WO+Xa?QuZgf$Fe)Au zC}g~hc&Dzhs`|wt#`i7O;WVHdhJkH989c^^Y@P2nWmj{o+l<3sJ5irWvDMv!)hRH0 zgK{+Bnn8xYq{3eQBW@M^oA48h@L7EvUHbWzI~h2shKdc~*JHMj9kGb7nNs%+y_4K3 z7i%v&g7uZMCH2LyV5v8_MbsHdK3xat^s=d5Ka}T6vmvK0FIk<|T_ZbYVfMuxua7cB zeofi-y~zigL3x~t88+(!IVbB&^93rW#QFn~VRuv2%B$Ot?-GK*@_=j#g0LaL*LH|e zl%G&(#NrZ6IPJX-0wio|uqe8012kKY=4D;RRzGk-&S<27X9}q3S~h3ub$ieb17ETc zykJ05hF(rNWB*-DA4*pd$5ITw^FOuGizBkaj9Vs3s*Xyd_QG@!52~ZXXi>zZ>pKE8 z5r?F_urMw;1!@pe|M*e3K4Ls2DdwIaU!n3UW=p!#5FEjv#S{I$*aX>337{%INoLOI zKCYvUDU$=D%y&S{cSC#+c55eZiB+rY_pjg|a>R!-diXv0h@>J$_Zc+ng)Wbq9OaYT zk*O7PrNCU>U^$-nom_hA1A`P@$j-ZL>u8P}=WsQU`ETyXkVy!cC`E7|uloYA`;v-y98_gH>|Pvn06!H@hDR zl)D4N>Li@9Q`GltRvyoc$|Q0JJbGgFL9B0fJ3YY%zg!W?Z4MCQx(r~3lXX8ENHvvd z?^MKT{XGdQ$g$~241WWo?~W#8nXM;Ysf*^cde)D8qrI*06+)ZcT_x*nd_z21Fw5_K z|5Ck<;6sn1n`Dw?3_phj&$P-Pz0-4=?2lzv<#uu66$>b!q03qA+^28*hsk9{Yiqaw zgW&_562$WU&J|Q*0(WEuyD^+v1|6>t?Uf4)BsuqguW9{NSRH@;+ZgnFSmS!_?Kd*x z#A0|7t<7qB!GA_TBVQVDhz7gktxDqlM_u)0V+fw13=Nmz)BN4+4>BWSQ1VAE%G;dCbUXF;AGwDYQuHyY=!xq|F_LD((;eE4c)TMPTF%1beY@;e%O%)$3KPABvPE}N;UwDrSf z%5uF)m;SU89FY>HQroUlB8`M8i;=lH*oHf)Tcg!fiH&cyV!tEvcL2bo0*97N(OkmB zG^RFaK>o!~P)J>Y^W)98RN>(McJ?f%@7oruUE($N`?iFg3y)EC56bbAN@y`i*}Rt8 ze@KIV!Q%BoDD^QwScZ^YYlb5m?$T3*5hk>h>*IS6t#4XoSfw&-jtrKm06y~|(O>?E z3(Wk<=c=r!=FsU8U^_nk^^d8J6Dvd-%5n%>s~2|t27o4Qb)SkSd@+;H zZPkEpW5oMmjZA;Ht50=2!liP>0rU349ek27s)4#5!)w!aBn)*dB2yzZ*+x4;o{@(c?fH>5p2uGcT(HYGz{Q1+qYUwnsbF~n)L^zeDL3cCx1IEl?UAFp&J zUx2MHpUS-tKbu#HpJ?cu#cL(^1k%=?)u3=K~oaQbz@caCDp;ljBCvMMfyFZu9Ab)UifsyKxGe}qU z#CKbqyxLfNbABBn)jEEnbh0+Mi1?iBz2n)}wzAIU|Kr;_q(ux0Fh(4{LoqM?6l@yF2L)`qQWuL=DGOp4z6`*e{h1|=-8U81`N93l(zP`nj z-&^9HSl-EZv$8raJSk-P`|$qv8JXZ;`9jAokAopdf@ZZod3s04Tuxe`c3WV@Vd{9b zV^mBhB={bT*)|WCMIQ#a#uHT(31^%R*hsjJq%Y}}0_vV47CrKru<0YjTLI*S5cX>k z#=@#bry$PY;YaO!ttR7p#Nd>=AXyCv)NK0#=cWoK43tiWc$neQ%_WOU!y+Z{mjaqe z^2#*cYMdY*`^T=jyTpnQsJ$jP= zC<{Hful0x8wEP#~EQ!uiw>_QS9xnn$FGyc)U@GH56AVLjpjm5u_qPunca7bqB$zNV z7H)Vj9ba^%#WPI2$2sgP;BD!27@Tl^FISm$!d>^_j* zO(wh2c{3>#<@+55tjgfX{S9VvSD;5}o&MoLKX!-dNm6u*6-K^Z4kV06FKt(}QLrX!&iAa9z7y8E)p;Dw@Z{@u1P_koDPEw&Amz2U|48a(kI`O!MmVJdFFPg|xlLw%?XDp|zjDW1B)E=dg6_#kv`o@)v(voH z!-g`C5bS*SOE90Kv-b2SuiKT6A;>G$7$%cw*1&SYYT4KUp5rR5xv!klhn+*7wp<6y zw-_2C)bP!|JRTi;P-)x0?(>T#+#nYh* zzG<8cXB;Mz>hyOY;J`S`f5E7-oS-4NwP$tTk+%Jm46syKY(Gu%} zqmA3~tZc4K9j5OzNco$p*QFs8lq~k%4(}>6Y#+UR&ZmL!05Q7|Yvj$}7sqdnne-00 zoo(8RC2$9d-6Pf)>CRsL$_*H{{4Ej77?>&)m^=K8KHAD^nPGe4z`uCCU!$2MjH>3r z^ddN~SQa%5y1yj`n}0QGcRb?xUmmX4CD0%3{@I;2jXqs1$ZLrTc-^lr7LnzJ@@)7( zTUS}*U_8&jeHoYDlH|+c`!=d%p?Axu{Vrz7TS^p>uY_NvEb-ve)Il6_rHhj ddBoIvM3uv*GQy#$9dO{!M`0` and {ref}`KubeRay Clusters `. +If you need to inspect logs after the clusters stop or terminate, you need to store and persist the logs. See the instructions for how to process and export logs for {ref}`Log persistence ` and {ref}`KubeRay Clusters `. (logging-directory-structure)= ## Log files in logging directory @@ -131,12 +131,12 @@ ray.get([task.remote() for _ in range(100)]) The output is as follows: ```bash -2023-03-27 15:08:34,195 INFO worker.py:1603 -- Started a local Ray instance. View the dashboard at http://127.0.0.1:8265 +2023-03-27 15:08:34,195 INFO worker.py:1603 -- Started a local Ray instance. 
View the dashboard at http://127.0.0.1:8265
+2023-03-27 15:08:34,195 INFO worker.py:1603 -- Started a local Ray instance. View the dashboard at http://127.0.0.1:8265
 (task pid=534172) Hello there, I am a task 0.20583517821231412
 (task pid=534174) Hello there, I am a task 0.17536720316370757 [repeated 99x across cluster] (Ray deduplicates logs by default. Set RAY_DEDUP_LOGS=0 to disable log deduplication)
 ```
 
-This feature is useful when importing libraries such as `tensorflow` or `numpy`, which may emit many verbose warning messages when you import them.
+This feature is useful when importing libraries such as `tensorflow` or `numpy`, which may emit many verbose warning messages when you import them. Configure the following environment variables on the driver process **before importing Ray** to customize log deduplication:
 
@@ -247,8 +247,8 @@ ray_tune_logger.addHandler(logging.FileHandler("extra_ray_tune_log.log"))
 Implement structured logging to enable downstream users and applications to consume the logs efficiently.
 
 ### Application logs
-A Ray applications include both driver and worker processes. For Python applications, use Python loggers to format and structure your logs.
-As a result, Python loggers need to be set up for both driver and worker processes.
+A Ray app includes both driver and worker processes. For Python apps, use Python loggers to format and structure your logs.
+As a result, you need to set up Python loggers for both driver and worker processes.
 
 ::::{tab-set}
 
@@ -472,4 +472,4 @@ The max size of a log file, including its backup, is `RAY_ROTATION_MAX_BYTES * R
 
 ## Log persistence
 
-To process and export logs to external stroage or management systems, view {ref}`log persistence on Kubernetes ` and {ref}`log persistence on VMs ` for more details.
+To process and export logs to external storage or management systems, see {ref}`log persistence on Kubernetes ` and {ref}`log persistence on VMs ` for more details.
diff --git a/doc/source/serve/production-guide/kubernetes.md b/doc/source/serve/production-guide/kubernetes.md
index eab728781a77..5a4914699772 100644
--- a/doc/source/serve/production-guide/kubernetes.md
+++ b/doc/source/serve/production-guide/kubernetes.md
@@ -238,7 +238,7 @@ Monitor your Serve application using the Ray Dashboard.
 - Learn more about how to configure and manage Dashboard [here](observability-configure-manage-dashboard).
 - Learn about the Ray Serve Dashboard [here](serve-monitoring).
 - Learn how to set up [Prometheus](prometheus-setup) and [Grafana](grafana) for Dashboard.
-- Learn about the [Ray Serve logs](serve-logging) and how to [persistent logs](kuberay-logging) on Kubernetes.
+- Learn about the [Ray Serve logs](serve-logging) and how to [persist logs](persist-kuberay-custom-resource-logs) on Kubernetes.
 
 :::{note}
   - To troubleshoot application deployment failures in Serve, you can check the KubeRay operator logs by running `kubectl logs -f ` (e.g., `kubectl logs -f kuberay-operator-7447d85d58-lv7pf`). The KubeRay operator logs contain information about the Serve application deployment event and Serve application health checks.
From 415ee19d2ae1006bb11fbdd13ce831ad7547728d Mon Sep 17 00:00:00 2001
From: simonsays1980 
Date: Mon, 25 Nov 2024 16:31:08 +0100
Subject: [PATCH 081/107] [RLlib; Offline RL] Allow incomplete `SampleBatch` data and fully compressed observations. 
(#48699) --- rllib/offline/offline_prelearner.py | 124 +++++++++++++++-------- rllib/offline/tests/test_offline_data.py | 2 +- 2 files changed, 81 insertions(+), 45 deletions(-) diff --git a/rllib/offline/offline_prelearner.py b/rllib/offline/offline_prelearner.py index b000f2c965fc..f5ffca03e75a 100644 --- a/rllib/offline/offline_prelearner.py +++ b/rllib/offline/offline_prelearner.py @@ -1,10 +1,10 @@ import gymnasium as gym import logging import numpy as np -import random +import uuid + from typing import Any, Dict, List, Optional, Union, Set, Tuple, TYPE_CHECKING -import ray from ray.actor import ActorHandle from ray.rllib.core.columns import Columns from ray.rllib.core.learner import Learner @@ -86,8 +86,8 @@ def __init__( self, config: "AlgorithmConfig", learner: Union[Learner, list[ActorHandle]], + locality_hints: Optional[List[str]] = None, spaces: Optional[Tuple[gym.Space, gym.Space]] = None, - locality_hints: Optional[list] = None, module_spec: Optional[MultiRLModuleSpec] = None, module_state: Optional[Dict[ModuleID, Any]] = None, ): @@ -103,24 +103,6 @@ def __init__( self._module = self._learner._module # Otherwise we have remote `Learner`s. else: - # TODO (simon): Check with the data team how to get at - # initialization the data block location. - node_id = ray.get_runtime_context().get_node_id() - # Shuffle indices such that not each data block syncs weights - # with the same learner in case there are multiple learners - # on the same node like the `PreLearner`. - indices = list(range(len(locality_hints))) - random.shuffle(indices) - locality_hints = [locality_hints[i] for i in indices] - learner = [learner[i] for i in indices] - # Choose a learner from the same node. - for i, hint in enumerate(locality_hints): - if hint == node_id: - self._learner = learner[i] - # If no learner has been chosen, there is none on the same node. - if not self._learner: - # Then choose a learner randomly. - self._learner = learner[random.randint(0, len(learner) - 1)] self.learner_is_remote = True # Build the module from spec. Note, this will be a MultiRLModule. self._module = module_spec.build() @@ -525,21 +507,83 @@ def _map_sample_batch_to_episode( # TODO (simon): Add support for multi-agent episodes. NotImplementedError else: - # Unpack observations, if needed. - obs = ( - unpack_if_needed(obs.tolist()) - if schema[Columns.OBS] in input_compress_columns - else obs.tolist() - ) - # Append the last `new_obs` to get the correct length of observations. - obs.append( - unpack_if_needed(batch[schema[Columns.NEXT_OBS]][i][-1]) - if schema[Columns.OBS] in input_compress_columns - else batch[schema[Columns.NEXT_OBS]][i][-1] - ) + # Unpack observations, if needed. Note, observations could + # be either compressed by their entirety (the complete batch + # column) or individually (each column entry). + if isinstance(obs, str): + # Decompress the observations if we have a string, i.e. + # observations are compressed in their entirety. + obs = unpack_if_needed(obs) + # Convert to a list of arrays. This is needed as input by + # the `SingleAgentEpisode`. + obs = [obs[i, ...] for i in range(obs.shape[0])] + # Otherwise observations are only compressed inside of the + # batch column (if at all). + elif isinstance(obs, np.ndarray): + # Unpack observations, if they are compressed otherwise we + # simply convert to a list, which is needed by the + # `SingleAgentEpisode`. 
+ obs = ( + unpack_if_needed(obs.tolist()) + if schema[Columns.OBS] in input_compress_columns + else obs.tolist() + ) + else: + raise TypeError( + f"Unknown observation type: {type(obs)}. When mapping " + "from old recorded `SampleBatches` batched " + "observations should be either of type `np.array` " + "or - if the column is compressed - of `str` type." + ) + + if schema[Columns.NEXT_OBS] in batch: + # Append the last `new_obs` to get the correct length of + # observations. + obs.append( + unpack_if_needed(batch[schema[Columns.NEXT_OBS]][i][-1]) + if schema[Columns.OBS] in input_compress_columns + else batch[schema[Columns.NEXT_OBS]][i][-1] + ) + else: + # Otherwise we duplicate the last observation. + obs.append(obs[-1]) + + # Check, if we have `done`, `truncated`, or `terminated`s in + # the batch. + if ( + schema[Columns.TRUNCATEDS] in batch + and schema[Columns.TERMINATEDS] in batch + ): + truncated = batch[schema[Columns.TRUNCATEDS]][i][-1] + terminated = batch[schema[Columns.TERMINATEDS]][i][-1] + elif ( + schema[Columns.TRUNCATEDS] in batch + and schema[Columns.TERMINATEDS] not in batch + ): + truncated = batch[schema[Columns.TRUNCATEDS]][i][-1] + terminated = False + elif ( + schema[Columns.TRUNCATEDS] not in batch + and schema[Columns.TERMINATEDS] in batch + ): + terminated = batch[schema[Columns.TERMINATEDS]][i][-1] + truncated = False + elif "done" in batch: + terminated = batch["done"][i][-1] + truncated = False + # Otherwise, if no `terminated`, nor `truncated` nor `done` + # is given, we consider the episode as terminated. + else: + terminated = True + truncated = False + # Create a `SingleAgentEpisode`. episode = SingleAgentEpisode( - id_=str(batch[schema[Columns.EPS_ID]][i][0]), + # If the recorded episode has an ID we use this ID, + # otherwise we generate a new one. + id_=str(batch[schema[Columns.EPS_ID]][i][0]) + if schema[Columns.EPS_ID] in batch + else uuid.uuid4().hex, agent_id=agent_id, observations=obs, infos=( @@ -554,16 +598,8 @@ def _map_sample_batch_to_episode( else batch[schema[Columns.ACTIONS]][i] ), rewards=batch[schema[Columns.REWARDS]][i], - terminated=( - any(batch[schema[Columns.TERMINATEDS]][i]) - if schema[Columns.TERMINATEDS] in batch - else any(batch["dones"][i]) - ), - truncated=( - any(batch[schema[Columns.TRUNCATEDS]][i]) - if schema[Columns.TRUNCATEDS] in batch - else False - ), + terminated=terminated, + truncated=truncated, # TODO (simon): Results in zero-length episodes in connector. # t_started=batch[Columns.T if Columns.T in batch else # "unroll_id"][i][0], diff --git a/rllib/offline/tests/test_offline_data.py b/rllib/offline/tests/test_offline_data.py index fad307cc7745..038e9cef383f 100644 --- a/rllib/offline/tests/test_offline_data.py +++ b/rllib/offline/tests/test_offline_data.py @@ -124,7 +124,7 @@ def test_sample_multiple_learners(self): num_samples=10, return_iterator=2, num_shards=2 ) self.assertIsInstance(batch, list) - # Ensure we have indeed two such `SStreamSplitDataIterator` instances. + # Ensure we have indeed two such `StreamSplitDataIterator` instances. self.assertEqual(len(batch), 2) from ray.data._internal.iterator.stream_split_iterator import ( StreamSplitDataIterator, From 93da6b7190ce80d344d45360da03c7d1f15a3cd2 Mon Sep 17 00:00:00 2001 From: Kristian Hartikainen Date: Mon, 25 Nov 2024 11:34:36 -0500 Subject: [PATCH 082/107] Move `shutdown` to upper level to avoid issue with `tmux` (#48786) Fixes #48109 ## Why are these changes needed? See #48109. ## Related issue number Fixes #48109. 
## Checks - [x] I've signed off every commit(by using the -s flag, i.e., `git commit -s`) in this PR. - [x] I've run `scripts/format.sh` to lint the changes in this PR. - [x] I've included any doc changes needed for https://docs.ray.io/en/master/. - [x] I've added any new APIs to the API Reference. For example, if I added a method in Tune, I've added it in `doc/source/tune/api/` under the corresponding `.rst` file. - [ ] I've made sure the tests are passing. Note that there might be a few flaky tests, see the recent failures at https://flakey-tests.ray.io/ - Testing Strategy - [ ] Unit tests - [ ] Release tests - [ ] This PR is not tested :( ## Notes I've tested this PR manually and verified that it resolved the issue I reported in #48109. I spent quite a bit of time trying to get the cluster tests to run but couldn't figure out how to do that. ```sh $ ray exec \ --verbose \ --start \ --stop \ --tmux \ --no-config-cache \ ./cluster-config.yml \ 'echo "start" && sleep 10 && echo "done"' [...] Running `tmux new -d bash -c 'echo "start" && sleep 10 && echo "done"; ray stop; ray teardown ~/ray_bootstrap_config.yaml --yes --workers-only; sudo shutdown -h now; exec bash'` bash: warning: setlocale: LC_ALL: cannot change locale (en_US.UTF-8) bash: warning: setlocale: LC_ALL: cannot change locale (en_US.UTF-8) Shared connection to 35.196.24.227 closed. Run `ray attach ./cluster-config.yml --tmux` to check command status. ``` Signed-off-by: Kristian Hartikainen --- python/ray/autoscaler/_private/commands.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/python/ray/autoscaler/_private/commands.py b/python/ray/autoscaler/_private/commands.py index 3c03738854f7..9a9b9d91cc2f 100644 --- a/python/ray/autoscaler/_private/commands.py +++ b/python/ray/autoscaler/_private/commands.py @@ -1153,16 +1153,15 @@ def exec_cluster( }, docker_config=config.get("docker"), ) - shutdown_after_run = False if cmd and stop: cmd = "; ".join( [ cmd, "ray stop", "ray teardown ~/ray_bootstrap_config.yaml --yes --workers-only", + "sudo shutdown -h now", ] ) - shutdown_after_run = True result = _exec( updater, @@ -1172,7 +1171,7 @@ def exec_cluster( port_forward=port_forward, with_output=with_output, run_env=run_env, - shutdown_after_run=shutdown_after_run, + shutdown_after_run=False, extra_screen_args=extra_screen_args, ) if tmux or screen: From 2ae9aa7e3b198ca3dbe5d65f8077e38d537dbe11 Mon Sep 17 00:00:00 2001 From: dentiny Date: Mon, 25 Nov 2024 10:00:20 -0800 Subject: [PATCH 083/107] [core] Minor update on runtime env manager (#48916) Signed-off-by: hjiang --- src/ray/common/runtime_env_manager.cc | 24 ++++++++++++++---------- src/ray/common/runtime_env_manager.h | 8 +++++--- 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/src/ray/common/runtime_env_manager.cc b/src/ray/common/runtime_env_manager.cc index d765dbc4f1de..08187cf29932 100644 --- a/src/ray/common/runtime_env_manager.cc +++ b/src/ray/common/runtime_env_manager.cc @@ -31,12 +31,13 @@ void RuntimeEnvManager::AddURIReference(const std::string &hex_id, if (!uris.working_dir_uri().empty()) { const auto &uri = uris.working_dir_uri(); uri_reference_[uri]++; - id_to_uris_[hex_id].push_back(uri); + id_to_uris_[hex_id].emplace_back(uri); RAY_LOG(DEBUG) << "[working_dir] Added URI Reference " << uri << " for id " << hex_id; } + for (const auto &uri : uris.py_modules_uris()) { uri_reference_[uri]++; - id_to_uris_[hex_id].push_back(uri); + id_to_uris_[hex_id].emplace_back(uri); RAY_LOG(DEBUG) << "[py_modules] Added URI Reference " << 
uri << " for id " << hex_id; } PrintDebugString(); @@ -51,21 +52,24 @@ const std::vector &RuntimeEnvManager::GetReferences( void RuntimeEnvManager::RemoveURIReference(const std::string &hex_id) { RAY_LOG(DEBUG) << "Subtracting 1 from URI Reference for id " << hex_id; - if (!id_to_uris_.count(hex_id)) { + auto iter = id_to_uris_.find(hex_id); + if (iter == id_to_uris_.end()) { return; } - for (const auto &uri : id_to_uris_[hex_id]) { - --uri_reference_[uri]; - auto ref_count = uri_reference_[uri]; - RAY_CHECK(ref_count >= 0); - if (ref_count == 0) { - uri_reference_.erase(uri); + for (const auto &uri : iter->second) { + auto uri_ref_iter = uri_reference_.find(uri); + RAY_CHECK(uri_ref_iter != uri_reference_.end()); + --uri_ref_iter->second; + const auto new_ref_count = uri_ref_iter->second; + RAY_CHECK_GE(new_ref_count, 0); + if (new_ref_count == 0) { + uri_reference_.erase(uri_ref_iter); RAY_LOG(DEBUG) << "Deleting URI Reference " << uri; deleter_(uri, [](bool success) {}); } } - id_to_uris_.erase(hex_id); + id_to_uris_.erase(iter); PrintDebugString(); } diff --git a/src/ray/common/runtime_env_manager.h b/src/ray/common/runtime_env_manager.h index a6b282863307..ab58409d8d91 100644 --- a/src/ray/common/runtime_env_manager.h +++ b/src/ray/common/runtime_env_manager.h @@ -12,7 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. #pragma once + #include +#include #include "absl/container/flat_hash_map.h" #include "ray/common/id.h" @@ -32,9 +34,9 @@ class RuntimeEnvManager { public: using DeleteFunc = std::function)>; - explicit RuntimeEnvManager(DeleteFunc deleter) : deleter_(deleter) {} + explicit RuntimeEnvManager(DeleteFunc deleter) : deleter_(std::move(deleter)) {} - /// Increase the reference of URI by job or actor ID and runtime_env. + /// Increase the reference count of URI by job or actor ID and runtime_env. /// /// \param[in] hex_id The id of the runtime env. It can be an actor or job id. /// \param[in] runtime_env_info The runtime env used by the id. @@ -53,7 +55,7 @@ class RuntimeEnvManager { /// \return The URIs referenced by the id. const std::vector &GetReferences(const std::string &hex_id) const; - /// Decrease the reference of URI by job_id + /// Decrease the reference count of URI by job_id /// \param[in] hex_id The id of the runtime env. void RemoveURIReference(const std::string &hex_id); From 61b033d7c0cfba03a0f0a44d40c12930c1aec5b7 Mon Sep 17 00:00:00 2001 From: Kai-Hsun Chen Date: Mon, 25 Nov 2024 11:05:38 -0800 Subject: [PATCH 084/107] [core][compiled-graphs] Minimize the overhead of shared memory in NCCL benchmark (#48860) exec_ray_dag_gpu_nccl_static_shape_direct_return had 3079 executions/sec before this PR, and it increased to 5737 executions/sec after this PR. 
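
The speedup comes from shrinking what `recv` returns: each timed iteration ends with a `ray.get` on the DAG output, so anything beyond a tiny payload travels back through the shared-memory object store and pollutes the NCCL measurement. A minimal sketch of the pattern, separate from the benchmark itself (the `Receiver` actor and the tensor shape here are illustrative assumptions, not benchmark code):

```python
import ray
import torch


@ray.remote
class Receiver:
    def recv(self, t: torch.Tensor) -> bytes:
        # Validate on the worker, then return a one-byte ack so the
        # driver's ray.get() pays almost nothing for object-store transfer.
        assert t.shape == (4,)
        return b"x"


receiver = Receiver.remote()
ack = ray.get(receiver.recv.remote(torch.ones(4)))
assert ack == b"x"
```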
--- .../accelerated_dag_gpu_microbenchmark.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/release/microbenchmark/experimental/accelerated_dag_gpu_microbenchmark.py b/release/microbenchmark/experimental/accelerated_dag_gpu_microbenchmark.py index f440e72752fb..895d43bdcdab 100644 --- a/release/microbenchmark/experimental/accelerated_dag_gpu_microbenchmark.py +++ b/release/microbenchmark/experimental/accelerated_dag_gpu_microbenchmark.py @@ -58,13 +58,16 @@ class TorchTensorWorker: def __init__(self): self.device = torch_utils.get_devices()[0] - def send(self, shape, dtype, value: int): - t = torch.ones(shape, dtype=dtype, device=self.device) * value + def send(self, shape, dtype, _): + t = torch.ones(shape, dtype=dtype, device=self.device) * 1 return t def recv(self, tensor): + # This benchmark tests the overhead of sending a tensor between + # actors. To minimize the overhead of shared memory transfer, + # we return only a byte string. assert tensor.device == self.device - return (tensor[0].item(), tensor.shape, tensor.dtype) + return b"x" @ray.remote(num_gpus=1) @@ -139,17 +142,15 @@ def exec_ray_dag( dag = dag.experimental_compile() def _run(): - i = np.random.randint(100) - ref = dag.execute(i) + ref = dag.execute(b"x") result = ray.get(ref) - assert result == (i, SHAPE, DTYPE) + assert result == b"x" else: def _run(): - i = np.random.randint(100) - result = ray.get(dag.execute(i)) - assert result == (i, SHAPE, DTYPE) + result = ray.get(dag.execute(b"x")) + assert result == b"x" results = timeit(label, _run) From 2514aff0353f6c746c5bcb8c85d8f844120e4037 Mon Sep 17 00:00:00 2001 From: Balaji Veeramani Date: Mon, 25 Nov 2024 13:35:30 -0600 Subject: [PATCH 085/107] [Data] Relax type check in `add_column` (#48918) Previously, you could add a column with a list like this: ``` ds.add_column("zeros", lambda batch: [0] * len(batch)) ``` However, after https://github.com/ray-project/ray/pull/48140, this behavior isn't supported. To avoid breaking tests and user code, this PR re-adds support for lists. 
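A minimal sketch of the relaxed behavior, assuming the default pandas batch format (the column names below are arbitrary): the function passed to `add_column` may now return either a pandas `Series` or a plain sequence with one entry per batch row.

```python
import pandas as pd
import ray

ds = ray.data.range(3)

# A plain Python list is accepted again after this change.
with_list = ds.add_column("zeros", lambda batch: [0] * len(batch))

# A pandas Series was already accepted before.
with_series = ds.add_column("ones", lambda batch: pd.Series([1] * len(batch)))

print(with_list.take(3))  # e.g. [{'id': 0, 'zeros': 0}, ...]
```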
--------- Signed-off-by: Balaji Veeramani --- python/ray/data/dataset.py | 20 +++++++++----------- python/ray/data/tests/test_map.py | 16 +++++++++------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/python/ray/data/dataset.py b/python/ray/data/dataset.py index 779a5bd3295e..496e29a8dea4 100644 --- a/python/ray/data/dataset.py +++ b/python/ray/data/dataset.py @@ -5,6 +5,7 @@ import logging import time import warnings +from collections.abc import Sequence from typing import ( TYPE_CHECKING, Any, @@ -764,19 +765,20 @@ def add_column( f"got: {batch_format}" ) + def _raise_duplicate_column_error(col: str): + raise ValueError(f"Trying to add an existing column with name {col!r}") + def add_column(batch: DataBatch) -> DataBatch: column = fn(batch) if batch_format == "pandas": import pandas as pd - assert isinstance(column, pd.Series), ( + assert isinstance(column, (pd.Series, Sequence)), ( f"For pandas batch format, the function must return a pandas " - f"Series, got: {type(column)}" + f"Series or sequence, got: {type(column)}" ) if col in batch: - raise ValueError( - f"Trying to add an existing column with name" f" {col}" - ) + _raise_duplicate_column_error(col) batch.loc[:, col] = column return batch elif batch_format == "pyarrow": @@ -798,9 +800,7 @@ def add_column(batch: DataBatch) -> DataBatch: # Append the column to the table return batch.append_column(col, column) else: - raise ValueError( - f"Trying to add an existing column with name {col}" - ) + _raise_duplicate_column_error(col) else: # batch format is assumed to be numpy since we checked at the @@ -810,9 +810,7 @@ def add_column(batch: DataBatch) -> DataBatch: f"numpy.ndarray, got: {type(column)}" ) if col in batch: - raise ValueError( - f"Trying to add an existing column with name" f" {col}" - ) + _raise_duplicate_column_error(col) batch[col] = column return batch diff --git a/python/ray/data/tests/test_map.py b/python/ray/data/tests/test_map.py index d4e7e2c374de..41100f4b8a2c 100644 --- a/python/ray/data/tests/test_map.py +++ b/python/ray/data/tests/test_map.py @@ -353,7 +353,7 @@ def test_add_column(ray_start_regular_shared): # Adding a column that is already there should result in an error with pytest.raises( ray.exceptions.UserCodeException, - match="Trying to add an existing column with name id", + match="Trying to add an existing column with name 'id'", ): ds = ray.data.range(5).add_column( "id", lambda x: pc.add(x["id"], 1), batch_format="pyarrow" @@ -362,7 +362,7 @@ def test_add_column(ray_start_regular_shared): # Adding a column in the wrong format should result in an error with pytest.raises( - ray.exceptions.UserCodeException, match="For pyarrow batch " "format" + ray.exceptions.UserCodeException, match="For pyarrow batch format" ): ds = ray.data.range(5).add_column("id", lambda x: [1], batch_format="pyarrow") assert ds.take(2) == [{"id": 1}, {"id": 2}] @@ -381,7 +381,7 @@ def test_add_column(ray_start_regular_shared): # Adding a column that is already there should result in an error with pytest.raises( ray.exceptions.UserCodeException, - match="Trying to add an existing column with name id", + match="Trying to add an existing column with name 'id'", ): ds = ray.data.range(5).add_column( "id", lambda x: np.add(x["id"], 1), batch_format="numpy" @@ -390,7 +390,7 @@ def test_add_column(ray_start_regular_shared): # Adding a column in the wrong format should result in an error with pytest.raises( - ray.exceptions.UserCodeException, match="For numpy batch " "format" + ray.exceptions.UserCodeException, 
match="For numpy batch format" ): ds = ray.data.range(5).add_column("id", lambda x: [1], batch_format="numpy") assert ds.take(2) == [{"id": 1}, {"id": 2}] @@ -405,16 +405,18 @@ def test_add_column(ray_start_regular_shared): # Adding a column that is already there should result in an error with pytest.raises( ray.exceptions.UserCodeException, - match="Trying to add an existing column with name id", + match="Trying to add an existing column with name 'id'", ): ds = ray.data.range(5).add_column("id", lambda x: x["id"] + 1) assert ds.take(2) == [{"id": 1}, {"id": 2}] # Adding a column in the wrong format should result in an error with pytest.raises( - ray.exceptions.UserCodeException, match="For pandas batch " "format" + ray.exceptions.UserCodeException, match="For pandas batch format" ): - ds = ray.data.range(5).add_column("id", lambda x: [1], batch_format="pandas") + ds = ray.data.range(5).add_column( + "id", lambda x: np.array([1]), batch_format="pandas" + ) assert ds.take(2) == [{"id": 1}, {"id": 2}] with pytest.raises(ValueError): From 8dbc1230a0cc787fe7b47d7e5e8086a8a3db9a6f Mon Sep 17 00:00:00 2001 From: Jiajun Yao Date: Mon, 25 Nov 2024 11:52:22 -0800 Subject: [PATCH 086/107] [Core] Only publish WORKER_OBJECT_EVICTION when the object is out of scope or manually freed (#47990) Signed-off-by: Jiajun Yao --- src/mock/ray/core_worker/reference_count.h | 2 +- src/ray/core_worker/actor_manager.cc | 2 +- src/ray/core_worker/core_worker.cc | 2 +- src/ray/core_worker/reference_count.cc | 37 +++++++++---------- src/ray/core_worker/reference_count.h | 19 ++++++---- .../core_worker/test/actor_manager_test.cc | 6 +-- .../core_worker/test/reference_count_test.cc | 28 +++++++------- .../transport/actor_task_submitter.cc | 2 +- 8 files changed, 50 insertions(+), 48 deletions(-) diff --git a/src/mock/ray/core_worker/reference_count.h b/src/mock/ray/core_worker/reference_count.h index c0679dec135f..c9f7a1d0b415 100644 --- a/src/mock/ray/core_worker/reference_count.h +++ b/src/mock/ray/core_worker/reference_count.h @@ -41,7 +41,7 @@ class MockReferenceCounter : public ReferenceCounterInterface { bool add_local_ref, const absl::optional &pinned_at_raylet_id)); - MOCK_METHOD2(AddObjectPrimaryCopyDeleteCallback, + MOCK_METHOD2(AddObjectOutOfScopeOrFreedCallback, bool(const ObjectID &object_id, const std::function callback)); diff --git a/src/ray/core_worker/actor_manager.cc b/src/ray/core_worker/actor_manager.cc index 02a89a7c65c9..a31c402fae11 100644 --- a/src/ray/core_worker/actor_manager.cc +++ b/src/ray/core_worker/actor_manager.cc @@ -172,7 +172,7 @@ bool ActorManager::AddActorHandle(std::unique_ptr actor_handle, } if (inserted && owned) { - RAY_CHECK(reference_counter_->AddObjectPrimaryCopyDeleteCallback( + RAY_CHECK(reference_counter_->AddObjectOutOfScopeOrFreedCallback( actor_creation_return_id, [this, actor_id](const ObjectID &object_id) { MarkActorKilledOrOutOfScope(GetActorHandle(actor_id)); })); diff --git a/src/ray/core_worker/core_worker.cc b/src/ray/core_worker/core_worker.cc index 5dcd799b8936..e04ade96a0fe 100644 --- a/src/ray/core_worker/core_worker.cc +++ b/src/ray/core_worker/core_worker.cc @@ -3860,7 +3860,7 @@ void CoreWorker::ProcessSubscribeForObjectEviction( // Returns true if the object was present and the callback was added. It might have // already been evicted by the time we get this request, in which case we should // respond immediately so the raylet unpins the object. 
- if (!reference_counter_->AddObjectPrimaryCopyDeleteCallback(object_id, unpin_object)) { + if (!reference_counter_->AddObjectOutOfScopeOrFreedCallback(object_id, unpin_object)) { // If the object is already evicted (callback cannot be set), unregister the // subscription & publish the message so that the subscriber knows it. unpin_object(object_id); diff --git a/src/ray/core_worker/reference_count.cc b/src/ray/core_worker/reference_count.cc index eb5abe2ea387..6dd4c8bf6b7d 100644 --- a/src/ray/core_worker/reference_count.cc +++ b/src/ray/core_worker/reference_count.cc @@ -543,7 +543,7 @@ int64_t ReferenceCounter::ReleaseLineageReferences(ReferenceTable::iterator ref) RAY_LOG(DEBUG) << "Releasing lineage internal for argument " << argument_id; arg_it->second.lineage_ref_count--; if (arg_it->second.OutOfScope(lineage_pinning_enabled_)) { - DeleteObjectPrimaryCopy(arg_it); + OnObjectOutOfScopeOrFreed(arg_it); } if (arg_it->second.ShouldDelete(lineage_pinning_enabled_)) { RAY_CHECK(arg_it->second.on_ref_removed == nullptr); @@ -663,7 +663,7 @@ void ReferenceCounter::FreePlasmaObjects(const std::vector &object_ids } // Free only the plasma value. We must keep the reference around so that we // have the ownership information. - DeleteObjectPrimaryCopy(it); + OnObjectOutOfScopeOrFreed(it); } } @@ -700,8 +700,7 @@ void ReferenceCounter::DeleteReferenceInternal(ReferenceTable::iterator it, DeleteReferenceInternal(inner_it, deleted); } } - // Perform the deletion. - DeleteObjectPrimaryCopy(it); + OnObjectOutOfScopeOrFreed(it); if (deleted) { deleted->push_back(id); } @@ -764,20 +763,20 @@ int64_t ReferenceCounter::EvictLineage(int64_t min_bytes_to_evict) { return lineage_bytes_evicted; } -void ReferenceCounter::DeleteObjectPrimaryCopy(ReferenceTable::iterator it) { - RAY_LOG(DEBUG) << "Calling on_object_primary_copy_delete for object " << it->first - << " num callbacks: " - << it->second.on_object_primary_copy_delete_callbacks.size(); - for (const auto &callback : it->second.on_object_primary_copy_delete_callbacks) { +void ReferenceCounter::OnObjectOutOfScopeOrFreed(ReferenceTable::iterator it) { + RAY_LOG(DEBUG) << "Calling on_object_out_of_scope_or_freed_callbacks for object " + << it->first << " num callbacks: " + << it->second.on_object_out_of_scope_or_freed_callbacks.size(); + for (const auto &callback : it->second.on_object_out_of_scope_or_freed_callbacks) { callback(it->first); } - it->second.on_object_primary_copy_delete_callbacks.clear(); + it->second.on_object_out_of_scope_or_freed_callbacks.clear(); + UnsetObjectPrimaryCopy(it); +} + +void ReferenceCounter::UnsetObjectPrimaryCopy(ReferenceTable::iterator it) { it->second.pinned_at_raylet_id.reset(); if (it->second.spilled && !it->second.spilled_node_id.IsNil()) { - // The spilled copy of the object should get deleted during the - // on_object_primary_copy_delete callback, so reset the spill location metadata here. - // NOTE(swang): Spilled copies in cloud storage are not GCed, so we do not - // reset the spilled metadata. 
it->second.spilled = false; it->second.spilled_url = ""; it->second.spilled_node_id = NodeID::Nil(); @@ -795,7 +794,7 @@ bool ReferenceCounter::SetObjectRefDeletedCallback( return true; } -bool ReferenceCounter::AddObjectPrimaryCopyDeleteCallback( +bool ReferenceCounter::AddObjectOutOfScopeOrFreedCallback( const ObjectID &object_id, const std::function callback) { absl::MutexLock lock(&mutex_); auto it = object_id_refs_.find(object_id); @@ -812,7 +811,7 @@ bool ReferenceCounter::AddObjectPrimaryCopyDeleteCallback( return false; } - it->second.on_object_primary_copy_delete_callbacks.emplace_back(callback); + it->second.on_object_out_of_scope_or_freed_callbacks.emplace_back(callback); return true; } @@ -822,7 +821,7 @@ void ReferenceCounter::ResetObjectsOnRemovedNode(const NodeID &raylet_id) { const auto &object_id = it->first; if (it->second.pinned_at_raylet_id.value_or(NodeID::Nil()) == raylet_id || it->second.spilled_node_id == raylet_id) { - DeleteObjectPrimaryCopy(it); + UnsetObjectPrimaryCopy(it); if (!it->second.OutOfScope(lineage_pinning_enabled_)) { objects_to_recover_.push_back(object_id); } @@ -862,7 +861,7 @@ void ReferenceCounter::UpdateObjectPinnedAtRaylet(const ObjectID &object_id, if (check_node_alive_(raylet_id)) { it->second.pinned_at_raylet_id = raylet_id; } else { - DeleteObjectPrimaryCopy(it); + UnsetObjectPrimaryCopy(it); objects_to_recover_.push_back(object_id); } } @@ -1429,7 +1428,7 @@ bool ReferenceCounter::HandleObjectSpilled(const ObjectID &object_id, } else { RAY_LOG(DEBUG).WithField(spilled_node_id).WithField(object_id) << "Object spilled to dead node "; - DeleteObjectPrimaryCopy(it); + UnsetObjectPrimaryCopy(it); objects_to_recover_.push_back(object_id); } return true; diff --git a/src/ray/core_worker/reference_count.h b/src/ray/core_worker/reference_count.h index 4ef6e14a0016..5eb228301c34 100644 --- a/src/ray/core_worker/reference_count.h +++ b/src/ray/core_worker/reference_count.h @@ -49,7 +49,7 @@ class ReferenceCounterInterface { bool is_reconstructable, bool add_local_ref, const absl::optional &pinned_at_raylet_id = absl::optional()) = 0; - virtual bool AddObjectPrimaryCopyDeleteCallback( + virtual bool AddObjectOutOfScopeOrFreedCallback( const ObjectID &object_id, const std::function callback) = 0; virtual bool SetObjectRefDeletedCallback( @@ -320,7 +320,7 @@ class ReferenceCounter : public ReferenceCounterInterface, /// Adds the callback that will be run when the object goes out of scope /// (Reference.OutOfScope() returns true). /// Returns true if the object was in scope and the callback was added, else false. - bool AddObjectPrimaryCopyDeleteCallback( + bool AddObjectOutOfScopeOrFreedCallback( const ObjectID &object_id, const std::function callback) ABSL_LOCKS_EXCLUDED(mutex_); @@ -783,13 +783,13 @@ class ReferenceCounter : public ReferenceCounterInterface, /// Metadata related to borrowing. std::unique_ptr borrow_info; - /// Callback that will be called when this Object's primary copy - /// should be deleted: out of scope or internal_api.free + /// Callback that will be called when this object + /// is out of scope or manually freed. /// Note: when an object is out of scope, it can still /// have lineage ref count and on_object_ref_delete /// will be called when lineage ref count is also 0. std::vector> - on_object_primary_copy_delete_callbacks; + on_object_out_of_scope_or_freed_callbacks; /// Callback that will be called when the object ref is deleted /// from the reference table (all refs including lineage ref count go to 0). 
std::function on_object_ref_delete; @@ -847,9 +847,12 @@ class ReferenceCounter : public ReferenceCounterInterface, rpc::Address *owner_address = nullptr) const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - /// Delete the object primary copy, if any. Also unsets the raylet address - /// that the object was pinned at, if the address was set. - void DeleteObjectPrimaryCopy(ReferenceTable::iterator it); + /// Unsets the raylet address + /// that the object was pinned at or spilled at, if the address was set. + void UnsetObjectPrimaryCopy(ReferenceTable::iterator it); + + /// This should be called whenever the object is out of scope or manually freed. + void OnObjectOutOfScopeOrFreed(ReferenceTable::iterator it); /// Shutdown if all references have gone out of scope and shutdown /// is scheduled. diff --git a/src/ray/core_worker/test/actor_manager_test.cc b/src/ray/core_worker/test/actor_manager_test.cc index b5c938c6c0ce..8f68f28cd845 100644 --- a/src/ray/core_worker/test/actor_manager_test.cc +++ b/src/ray/core_worker/test/actor_manager_test.cc @@ -168,7 +168,7 @@ class ActorManagerTest : public ::testing::Test { ray_namespace, -1, false); - EXPECT_CALL(*reference_counter_, AddObjectPrimaryCopyDeleteCallback(_, _)) + EXPECT_CALL(*reference_counter_, AddObjectOutOfScopeOrFreedCallback(_, _)) .WillRepeatedly(testing::Return(true)); actor_manager_->AddNewActorHandle(std::move(actor_handle), call_site, @@ -207,7 +207,7 @@ TEST_F(ActorManagerTest, TestAddAndGetActorHandleEndToEnd) { "", -1, false); - EXPECT_CALL(*reference_counter_, AddObjectPrimaryCopyDeleteCallback(_, _)) + EXPECT_CALL(*reference_counter_, AddObjectOutOfScopeOrFreedCallback(_, _)) .WillRepeatedly(testing::Return(true)); // Add an actor handle. @@ -284,7 +284,7 @@ TEST_F(ActorManagerTest, RegisterActorHandles) { "", -1, false); - EXPECT_CALL(*reference_counter_, AddObjectPrimaryCopyDeleteCallback(_, _)) + EXPECT_CALL(*reference_counter_, AddObjectOutOfScopeOrFreedCallback(_, _)) .WillRepeatedly(testing::Return(true)); ObjectID outer_object_id = ObjectID::Nil(); diff --git a/src/ray/core_worker/test/reference_count_test.cc b/src/ray/core_worker/test/reference_count_test.cc index 4351692284e2..67423a3ed75a 100644 --- a/src/ray/core_worker/test/reference_count_test.cc +++ b/src/ray/core_worker/test/reference_count_test.cc @@ -572,9 +572,9 @@ TEST_F(ReferenceCountTest, TestUnreconstructableObjectOutOfScope) { // The object goes out of scope once it has no more refs. std::vector out; - ASSERT_FALSE(rc->AddObjectPrimaryCopyDeleteCallback(id, callback)); + ASSERT_FALSE(rc->AddObjectOutOfScopeOrFreedCallback(id, callback)); rc->AddOwnedObject(id, {}, address, "", 0, false, /*add_local_ref=*/true); - ASSERT_TRUE(rc->AddObjectPrimaryCopyDeleteCallback(id, callback)); + ASSERT_TRUE(rc->AddObjectOutOfScopeOrFreedCallback(id, callback)); ASSERT_FALSE(*out_of_scope); rc->RemoveLocalReference(id, &out); ASSERT_TRUE(*out_of_scope); @@ -582,9 +582,9 @@ TEST_F(ReferenceCountTest, TestUnreconstructableObjectOutOfScope) { // Unreconstructable objects go out of scope even if they have a nonzero // lineage ref count. 
*out_of_scope = false; - ASSERT_FALSE(rc->AddObjectPrimaryCopyDeleteCallback(id, callback)); + ASSERT_FALSE(rc->AddObjectOutOfScopeOrFreedCallback(id, callback)); rc->AddOwnedObject(id, {}, address, "", 0, false, /*add_local_ref=*/false); - ASSERT_TRUE(rc->AddObjectPrimaryCopyDeleteCallback(id, callback)); + ASSERT_TRUE(rc->AddObjectOutOfScopeOrFreedCallback(id, callback)); rc->UpdateSubmittedTaskReferences({}, {id}); ASSERT_FALSE(*out_of_scope); rc->UpdateFinishedTaskReferences({}, {id}, false, empty_borrower, empty_refs, &out); @@ -2437,9 +2437,9 @@ TEST_F(ReferenceCountLineageEnabledTest, TestUnreconstructableObjectOutOfScope) // The object goes out of scope once it has no more refs. std::vector out; - ASSERT_FALSE(rc->AddObjectPrimaryCopyDeleteCallback(id, callback)); + ASSERT_FALSE(rc->AddObjectOutOfScopeOrFreedCallback(id, callback)); rc->AddOwnedObject(id, {}, address, "", 0, false, /*add_local_ref=*/true); - ASSERT_TRUE(rc->AddObjectPrimaryCopyDeleteCallback(id, callback)); + ASSERT_TRUE(rc->AddObjectOutOfScopeOrFreedCallback(id, callback)); ASSERT_FALSE(*out_of_scope); ASSERT_FALSE(*out_of_scope); rc->RemoveLocalReference(id, &out); @@ -2450,9 +2450,9 @@ TEST_F(ReferenceCountLineageEnabledTest, TestUnreconstructableObjectOutOfScope) // Unreconstructable objects stay in scope if they have a nonzero lineage ref // count. *out_of_scope = false; - ASSERT_FALSE(rc->AddObjectPrimaryCopyDeleteCallback(id, callback)); + ASSERT_FALSE(rc->AddObjectOutOfScopeOrFreedCallback(id, callback)); rc->AddOwnedObject(id, {}, address, "", 0, false, /*add_local_ref=*/false); - ASSERT_TRUE(rc->AddObjectPrimaryCopyDeleteCallback(id, callback)); + ASSERT_TRUE(rc->AddObjectOutOfScopeOrFreedCallback(id, callback)); rc->UpdateSubmittedTaskReferences({return_id}, {id}); ASSERT_TRUE(rc->IsObjectPendingCreation(return_id)); ASSERT_FALSE(*out_of_scope); @@ -2541,7 +2541,7 @@ TEST_F(ReferenceCountLineageEnabledTest, TestPinLineageRecursive) { rc->UpdateFinishedTaskReferences({}, {id}, false, empty_borrower, empty_refs, &out); // We should fail to set the deletion callback because the object has // already gone out of scope. 
- ASSERT_FALSE(rc->AddObjectPrimaryCopyDeleteCallback( + ASSERT_FALSE(rc->AddObjectOutOfScopeOrFreedCallback( id, [&](const ObjectID &object_id) { ASSERT_FALSE(true); })); ASSERT_EQ(out.size(), 1); @@ -2658,7 +2658,7 @@ TEST_F(ReferenceCountLineageEnabledTest, TestPlasmaLocation) { ObjectID id = ObjectID::FromRandom(); NodeID node_id = NodeID::FromRandom(); rc->AddOwnedObject(id, {}, rpc::Address(), "", 0, true, /*add_local_ref=*/true); - ASSERT_TRUE(rc->AddObjectPrimaryCopyDeleteCallback(id, callback)); + ASSERT_TRUE(rc->AddObjectOutOfScopeOrFreedCallback(id, callback)); ASSERT_TRUE(rc->IsPlasmaObjectPinnedOrSpilled(id, &owned_by_us, &pinned_at, &spilled)); ASSERT_TRUE(owned_by_us); ASSERT_TRUE(pinned_at.IsNil()); @@ -2674,7 +2674,7 @@ TEST_F(ReferenceCountLineageEnabledTest, TestPlasmaLocation) { deleted->clear(); rc->AddOwnedObject(id, {}, rpc::Address(), "", 0, true, /*add_local_ref=*/true); - ASSERT_TRUE(rc->AddObjectPrimaryCopyDeleteCallback(id, callback)); + ASSERT_TRUE(rc->AddObjectOutOfScopeOrFreedCallback(id, callback)); rc->UpdateObjectPinnedAtRaylet(id, node_id); rc->ResetObjectsOnRemovedNode(node_id); auto objects = rc->FlushObjectsToRecover(); @@ -2683,7 +2683,7 @@ TEST_F(ReferenceCountLineageEnabledTest, TestPlasmaLocation) { ASSERT_TRUE(rc->IsPlasmaObjectPinnedOrSpilled(id, &owned_by_us, &pinned_at, &spilled)); ASSERT_TRUE(owned_by_us); ASSERT_TRUE(pinned_at.IsNil()); - ASSERT_TRUE(deleted->count(id) > 0); + ASSERT_TRUE(deleted->empty()); deleted->clear(); } @@ -2699,7 +2699,7 @@ TEST_F(ReferenceCountTest, TestFree) { ASSERT_FALSE(rc->IsPlasmaObjectFreed(id)); rc->FreePlasmaObjects({id}); ASSERT_TRUE(rc->IsPlasmaObjectFreed(id)); - ASSERT_FALSE(rc->AddObjectPrimaryCopyDeleteCallback(id, callback)); + ASSERT_FALSE(rc->AddObjectOutOfScopeOrFreedCallback(id, callback)); ASSERT_EQ(deleted->count(id), 0); rc->UpdateObjectPinnedAtRaylet(id, node_id); bool owned_by_us; @@ -2714,7 +2714,7 @@ TEST_F(ReferenceCountTest, TestFree) { // Test free after receiving information about where the object is pinned. 
rc->AddOwnedObject(id, {}, rpc::Address(), "", 0, true, /*add_local_ref=*/true); - ASSERT_TRUE(rc->AddObjectPrimaryCopyDeleteCallback(id, callback)); + ASSERT_TRUE(rc->AddObjectOutOfScopeOrFreedCallback(id, callback)); rc->UpdateObjectPinnedAtRaylet(id, node_id); ASSERT_FALSE(rc->IsPlasmaObjectFreed(id)); rc->FreePlasmaObjects({id}); diff --git a/src/ray/core_worker/transport/actor_task_submitter.cc b/src/ray/core_worker/transport/actor_task_submitter.cc index c54b9deb16ec..af2600e0a6b8 100644 --- a/src/ray/core_worker/transport/actor_task_submitter.cc +++ b/src/ray/core_worker/transport/actor_task_submitter.cc @@ -44,7 +44,7 @@ void ActorTaskSubmitter::NotifyGCSWhenActorOutOfScope( })); }; - if (!reference_counter_->AddObjectPrimaryCopyDeleteCallback( + if (!reference_counter_->AddObjectOutOfScopeOrFreedCallback( actor_creation_return_id, [actor_out_of_scope_callback](const ObjectID &object_id) { actor_out_of_scope_callback(object_id); From cb384454b7943e42b63684c765e0a2cda6631ea0 Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Mon, 25 Nov 2024 16:38:37 -0800 Subject: [PATCH 087/107] [release tests] update byod dependencies (#48896) - upgrade cryptography and pyopenssl - remove dataset Signed-off-by: Lonnie Liu --- .../byod/requirements_byod_3.9.txt | 69 +- .../byod/requirements_ml_byod_3.9.in | 1 - .../byod/requirements_ml_byod_3.9.txt | 1132 ++++++++--------- 3 files changed, 596 insertions(+), 606 deletions(-) diff --git a/release/ray_release/byod/requirements_byod_3.9.txt b/release/ray_release/byod/requirements_byod_3.9.txt index adff2b611647..c5b98cebe695 100644 --- a/release/ray_release/byod/requirements_byod_3.9.txt +++ b/release/ray_release/byod/requirements_byod_3.9.txt @@ -144,6 +144,7 @@ ale-py==0.10.1 \ # via # -c release/ray_release/byod/requirements_compiled.txt # -r release/ray_release/byod/requirements_byod_3.9.in + # gymnasium annotated-types==0.6.0 \ --hash=sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43 \ --hash=sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d @@ -574,33 +575,39 @@ crcmod==1.7 \ # via # -c release/ray_release/byod/requirements_compiled.txt # gsutil -cryptography==38.0.1 \ - --hash=sha256:0297ffc478bdd237f5ca3a7dc96fc0d315670bfa099c04dc3a4a2172008a405a \ - --hash=sha256:10d1f29d6292fc95acb597bacefd5b9e812099d75a6469004fd38ba5471a977f \ - --hash=sha256:16fa61e7481f4b77ef53991075de29fc5bacb582a1244046d2e8b4bb72ef66d0 \ - --hash=sha256:194044c6b89a2f9f169df475cc167f6157eb9151cc69af8a2a163481d45cc407 \ - --hash=sha256:1db3d807a14931fa317f96435695d9ec386be7b84b618cc61cfa5d08b0ae33d7 \ - --hash=sha256:3261725c0ef84e7592597606f6583385fed2a5ec3909f43bc475ade9729a41d6 \ - --hash=sha256:3b72c360427889b40f36dc214630e688c2fe03e16c162ef0aa41da7ab1455153 \ - --hash=sha256:3e3a2599e640927089f932295a9a247fc40a5bdf69b0484532f530471a382750 \ - --hash=sha256:3fc26e22840b77326a764ceb5f02ca2d342305fba08f002a8c1f139540cdfaad \ - --hash=sha256:5067ee7f2bce36b11d0e334abcd1ccf8c541fc0bbdaf57cdd511fdee53e879b6 \ - --hash=sha256:52e7bee800ec869b4031093875279f1ff2ed12c1e2f74923e8f49c916afd1d3b \ - --hash=sha256:64760ba5331e3f1794d0bcaabc0d0c39e8c60bf67d09c93dc0e54189dfd7cfe5 \ - --hash=sha256:765fa194a0f3372d83005ab83ab35d7c5526c4e22951e46059b8ac678b44fa5a \ - --hash=sha256:79473cf8a5cbc471979bd9378c9f425384980fcf2ab6534b18ed7d0d9843987d \ - --hash=sha256:896dd3a66959d3a5ddcfc140a53391f69ff1e8f25d93f0e2e7830c6de90ceb9d \ - 
--hash=sha256:89ed49784ba88c221756ff4d4755dbc03b3c8d2c5103f6d6b4f83a0fb1e85294 \ - --hash=sha256:ac7e48f7e7261207d750fa7e55eac2d45f720027d5703cd9007e9b37bbb59ac0 \ - --hash=sha256:ad7353f6ddf285aeadfaf79e5a6829110106ff8189391704c1d8801aa0bae45a \ - --hash=sha256:b0163a849b6f315bf52815e238bc2b2346604413fa7c1601eea84bcddb5fb9ac \ - --hash=sha256:b6c9b706316d7b5a137c35e14f4103e2115b088c412140fdbd5f87c73284df61 \ - --hash=sha256:c2e5856248a416767322c8668ef1845ad46ee62629266f84a8f007a317141013 \ - --hash=sha256:ca9f6784ea96b55ff41708b92c3f6aeaebde4c560308e5fbbd3173fbc466e94e \ - --hash=sha256:d1a5bd52d684e49a36582193e0b89ff267704cd4025abefb9e26803adeb3e5fb \ - --hash=sha256:d3971e2749a723e9084dd507584e2a2761f78ad2c638aa31e80bc7a15c9db4f9 \ - --hash=sha256:d4ef6cc305394ed669d4d9eebf10d3a101059bdcf2669c366ec1d14e4fb227bd \ - --hash=sha256:d9e69ae01f99abe6ad646947bba8941e896cb3aa805be2597a0400e0764b5818 +cryptography==42.0.5 \ + --hash=sha256:0270572b8bd2c833c3981724b8ee9747b3ec96f699a9665470018594301439ee \ + --hash=sha256:111a0d8553afcf8eb02a4fea6ca4f59d48ddb34497aa8706a6cf536f1a5ec576 \ + --hash=sha256:16a48c23a62a2f4a285699dba2e4ff2d1cff3115b9df052cdd976a18856d8e3d \ + --hash=sha256:1b95b98b0d2af784078fa69f637135e3c317091b615cd0905f8b8a087e86fa30 \ + --hash=sha256:1f71c10d1e88467126f0efd484bd44bca5e14c664ec2ede64c32f20875c0d413 \ + --hash=sha256:2424ff4c4ac7f6b8177b53c17ed5d8fa74ae5955656867f5a8affaca36a27abb \ + --hash=sha256:2bce03af1ce5a5567ab89bd90d11e7bbdff56b8af3acbbec1faded8f44cb06da \ + --hash=sha256:329906dcc7b20ff3cad13c069a78124ed8247adcac44b10bea1130e36caae0b4 \ + --hash=sha256:37dd623507659e08be98eec89323469e8c7b4c1407c85112634ae3dbdb926fdd \ + --hash=sha256:3eaafe47ec0d0ffcc9349e1708be2aaea4c6dd4978d76bf6eb0cb2c13636c6fc \ + --hash=sha256:5e6275c09d2badf57aea3afa80d975444f4be8d3bc58f7f80d2a484c6f9485c8 \ + --hash=sha256:6fe07eec95dfd477eb9530aef5bead34fec819b3aaf6c5bd6d20565da607bfe1 \ + --hash=sha256:7367d7b2eca6513681127ebad53b2582911d1736dc2ffc19f2c3ae49997496bc \ + --hash=sha256:7cde5f38e614f55e28d831754e8a3bacf9ace5d1566235e39d91b35502d6936e \ + --hash=sha256:9481ffe3cf013b71b2428b905c4f7a9a4f76ec03065b05ff499bb5682a8d9ad8 \ + --hash=sha256:98d8dc6d012b82287f2c3d26ce1d2dd130ec200c8679b6213b3c73c08b2b7940 \ + --hash=sha256:a011a644f6d7d03736214d38832e030d8268bcff4a41f728e6030325fea3e400 \ + --hash=sha256:a2913c5375154b6ef2e91c10b5720ea6e21007412f6437504ffea2109b5a33d7 \ + --hash=sha256:a30596bae9403a342c978fb47d9b0ee277699fa53bbafad14706af51fe543d16 \ + --hash=sha256:b03c2ae5d2f0fc05f9a2c0c997e1bc18c8229f392234e8a0194f202169ccd278 \ + --hash=sha256:b6cd2203306b63e41acdf39aa93b86fb566049aeb6dc489b70e34bcd07adca74 \ + --hash=sha256:b7ffe927ee6531c78f81aa17e684e2ff617daeba7f189f911065b2ea2d526dec \ + --hash=sha256:b8cac287fafc4ad485b8a9b67d0ee80c66bf3574f655d3b97ef2e1082360faf1 \ + --hash=sha256:ba334e6e4b1d92442b75ddacc615c5476d4ad55cc29b15d590cc6b86efa487e2 \ + --hash=sha256:ba3e4a42397c25b7ff88cdec6e2a16c2be18720f317506ee25210f6d31925f9c \ + --hash=sha256:c41fb5e6a5fe9ebcd58ca3abfeb51dffb5d83d6775405305bfa8715b76521922 \ + --hash=sha256:cd2030f6650c089aeb304cf093f3244d34745ce0cfcc39f20c6fbfe030102e2a \ + --hash=sha256:cd65d75953847815962c84a4654a84850b2bb4aed3f26fadcc1c13892e1e29f6 \ + --hash=sha256:e4985a790f921508f36f81831817cbc03b102d643b5fcb81cd33df3fa291a1a1 \ + --hash=sha256:e807b3188f9eb0eaa7bbb579b462c5ace579f1cedb28107ce8b48a9f7ad3679e \ + --hash=sha256:f12764b8fffc7a123f641d7d049d382b73f96a34117e0b637b80643169cec8ac \ + 
--hash=sha256:f8837fe1d6ac4a8052a9a8ddab256bc006242696f03368a4009be7ee3075cdb7 # via # -c release/ray_release/byod/requirements_compiled.txt # pyopenssl @@ -1264,7 +1271,7 @@ gsutil==5.27 \ # via # -c release/ray_release/byod/requirements_compiled.txt # -r release/ray_release/byod/requirements_byod_3.9.in -gymnasium==1.0.0 \ +gymnasium[atari]==1.0.0 \ --hash=sha256:9d2b66f30c1b34fe3c2ce7fae65ecf365d0e9982d2b3d860235e773328a3b403 \ --hash=sha256:b6f40e1e24c5bd419361e1a5b86a9117d2499baecc3a660d44dfff4c465393ad # via @@ -2203,9 +2210,9 @@ pygments==2.18.0 \ # via # -c release/ray_release/byod/requirements_compiled.txt # rich -pyopenssl==23.0.0 \ - --hash=sha256:c1cc5f86bcacefc84dada7d31175cae1b1518d5f60d3d0bb595a67822a868a6f \ - --hash=sha256:df5fc28af899e74e19fccb5510df423581047e10ab6f1f4ba1763ff5fde844c0 +pyopenssl==24.2.1 \ + --hash=sha256:4247f0dbe3748d560dcbb2ff3ea01af0f9a1a001ef5f7c4c647956ed8cbf0e95 \ + --hash=sha256:967d5719b12b243588573f39b0c677637145c7a1ffedcd495a487e58177fbb8d # via # -c release/ray_release/byod/requirements_compiled.txt # gcs-oauth2-boto-plugin diff --git a/release/ray_release/byod/requirements_ml_byod_3.9.in b/release/ray_release/byod/requirements_ml_byod_3.9.in index 7ef915a8a698..69ca2cc2c734 100644 --- a/release/ray_release/byod/requirements_ml_byod_3.9.in +++ b/release/ray_release/byod/requirements_ml_byod_3.9.in @@ -6,7 +6,6 @@ bitsandbytes boto3 cmake crc32c -dataset datasets decord deepspeed>=0.12.3 diff --git a/release/ray_release/byod/requirements_ml_byod_3.9.txt b/release/ray_release/byod/requirements_ml_byod_3.9.txt index 7eeec76c2886..8d9e5a044f47 100644 --- a/release/ray_release/byod/requirements_ml_byod_3.9.txt +++ b/release/ray_release/byod/requirements_ml_byod_3.9.txt @@ -1,4 +1,3 @@ - # # This file is autogenerated by pip-compile with python 3.9 # To update, run: @@ -118,12 +117,6 @@ aiosignal==1.3.1 \ # via # -c release/ray_release/byod/requirements_compiled.txt # aiohttp -alembic==1.12.1 \ - --hash=sha256:47d52e3dfb03666ed945becb723d6482e52190917fdb47071440cfdba05d92cb \ - --hash=sha256:bca5877e9678b454706347bc10b97cb7d67f300320fa5c3a94423e8266e2823f - # via - # -c release/ray_release/byod/requirements_compiled.txt - # dataset annotated-types==0.6.0 \ --hash=sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43 \ --hash=sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d @@ -175,17 +168,12 @@ attrs==21.4.0 \ # aiohttp # jsonlines # jsonschema - # markdown-it-py backcall==0.2.0 \ --hash=sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e \ --hash=sha256:fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255 # via # -c release/ray_release/byod/requirements_compiled.txt # ipython -banal==1.0.6 \ - --hash=sha256:2fe02c9305f53168441948f4a03dfbfa2eacc73db30db4a93309083cb0e250a5 \ - --hash=sha256:877aacb16b17f8fa4fd29a7c44515c5a23dc1a7b26078bc41dd34829117d85e1 - # via dataset bitsandbytes==0.43.1 \ --hash=sha256:52c1c7189a6ca006555a9663e544e75f40520a97a26e075411f9f9aca0771fcd \ --hash=sha256:a81c826d576d6d691c7b4a7491c8fdc0f37f769795d6ca2e54afa605d2c260a3 @@ -510,12 +498,6 @@ comm==0.2.0 \ # via # -c release/ray_release/byod/requirements_compiled.txt # ipywidgets -commonmark==0.9.1 \ - --hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \ - --hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9 - # via - # -c release/ray_release/byod/requirements_compiled.txt - # rich configargparse==1.7 \ 
--hash=sha256:d249da6591465c6c26df64a9f73d2536e743be2f244eb3ebe61114af2f94f86b \ --hash=sha256:e7067471884de5478c58a511e529f0f9bd1c66bfef1dea90935438d6c23306d1 @@ -652,33 +634,39 @@ crcmod==1.7 \ # via # -c release/ray_release/byod/requirements_compiled.txt # gsutil -cryptography==38.0.1 \ - --hash=sha256:0297ffc478bdd237f5ca3a7dc96fc0d315670bfa099c04dc3a4a2172008a405a \ - --hash=sha256:10d1f29d6292fc95acb597bacefd5b9e812099d75a6469004fd38ba5471a977f \ - --hash=sha256:16fa61e7481f4b77ef53991075de29fc5bacb582a1244046d2e8b4bb72ef66d0 \ - --hash=sha256:194044c6b89a2f9f169df475cc167f6157eb9151cc69af8a2a163481d45cc407 \ - --hash=sha256:1db3d807a14931fa317f96435695d9ec386be7b84b618cc61cfa5d08b0ae33d7 \ - --hash=sha256:3261725c0ef84e7592597606f6583385fed2a5ec3909f43bc475ade9729a41d6 \ - --hash=sha256:3b72c360427889b40f36dc214630e688c2fe03e16c162ef0aa41da7ab1455153 \ - --hash=sha256:3e3a2599e640927089f932295a9a247fc40a5bdf69b0484532f530471a382750 \ - --hash=sha256:3fc26e22840b77326a764ceb5f02ca2d342305fba08f002a8c1f139540cdfaad \ - --hash=sha256:5067ee7f2bce36b11d0e334abcd1ccf8c541fc0bbdaf57cdd511fdee53e879b6 \ - --hash=sha256:52e7bee800ec869b4031093875279f1ff2ed12c1e2f74923e8f49c916afd1d3b \ - --hash=sha256:64760ba5331e3f1794d0bcaabc0d0c39e8c60bf67d09c93dc0e54189dfd7cfe5 \ - --hash=sha256:765fa194a0f3372d83005ab83ab35d7c5526c4e22951e46059b8ac678b44fa5a \ - --hash=sha256:79473cf8a5cbc471979bd9378c9f425384980fcf2ab6534b18ed7d0d9843987d \ - --hash=sha256:896dd3a66959d3a5ddcfc140a53391f69ff1e8f25d93f0e2e7830c6de90ceb9d \ - --hash=sha256:89ed49784ba88c221756ff4d4755dbc03b3c8d2c5103f6d6b4f83a0fb1e85294 \ - --hash=sha256:ac7e48f7e7261207d750fa7e55eac2d45f720027d5703cd9007e9b37bbb59ac0 \ - --hash=sha256:ad7353f6ddf285aeadfaf79e5a6829110106ff8189391704c1d8801aa0bae45a \ - --hash=sha256:b0163a849b6f315bf52815e238bc2b2346604413fa7c1601eea84bcddb5fb9ac \ - --hash=sha256:b6c9b706316d7b5a137c35e14f4103e2115b088c412140fdbd5f87c73284df61 \ - --hash=sha256:c2e5856248a416767322c8668ef1845ad46ee62629266f84a8f007a317141013 \ - --hash=sha256:ca9f6784ea96b55ff41708b92c3f6aeaebde4c560308e5fbbd3173fbc466e94e \ - --hash=sha256:d1a5bd52d684e49a36582193e0b89ff267704cd4025abefb9e26803adeb3e5fb \ - --hash=sha256:d3971e2749a723e9084dd507584e2a2761f78ad2c638aa31e80bc7a15c9db4f9 \ - --hash=sha256:d4ef6cc305394ed669d4d9eebf10d3a101059bdcf2669c366ec1d14e4fb227bd \ - --hash=sha256:d9e69ae01f99abe6ad646947bba8941e896cb3aa805be2597a0400e0764b5818 +cryptography==42.0.5 \ + --hash=sha256:0270572b8bd2c833c3981724b8ee9747b3ec96f699a9665470018594301439ee \ + --hash=sha256:111a0d8553afcf8eb02a4fea6ca4f59d48ddb34497aa8706a6cf536f1a5ec576 \ + --hash=sha256:16a48c23a62a2f4a285699dba2e4ff2d1cff3115b9df052cdd976a18856d8e3d \ + --hash=sha256:1b95b98b0d2af784078fa69f637135e3c317091b615cd0905f8b8a087e86fa30 \ + --hash=sha256:1f71c10d1e88467126f0efd484bd44bca5e14c664ec2ede64c32f20875c0d413 \ + --hash=sha256:2424ff4c4ac7f6b8177b53c17ed5d8fa74ae5955656867f5a8affaca36a27abb \ + --hash=sha256:2bce03af1ce5a5567ab89bd90d11e7bbdff56b8af3acbbec1faded8f44cb06da \ + --hash=sha256:329906dcc7b20ff3cad13c069a78124ed8247adcac44b10bea1130e36caae0b4 \ + --hash=sha256:37dd623507659e08be98eec89323469e8c7b4c1407c85112634ae3dbdb926fdd \ + --hash=sha256:3eaafe47ec0d0ffcc9349e1708be2aaea4c6dd4978d76bf6eb0cb2c13636c6fc \ + --hash=sha256:5e6275c09d2badf57aea3afa80d975444f4be8d3bc58f7f80d2a484c6f9485c8 \ + --hash=sha256:6fe07eec95dfd477eb9530aef5bead34fec819b3aaf6c5bd6d20565da607bfe1 \ + --hash=sha256:7367d7b2eca6513681127ebad53b2582911d1736dc2ffc19f2c3ae49997496bc \ + 
--hash=sha256:7cde5f38e614f55e28d831754e8a3bacf9ace5d1566235e39d91b35502d6936e \ + --hash=sha256:9481ffe3cf013b71b2428b905c4f7a9a4f76ec03065b05ff499bb5682a8d9ad8 \ + --hash=sha256:98d8dc6d012b82287f2c3d26ce1d2dd130ec200c8679b6213b3c73c08b2b7940 \ + --hash=sha256:a011a644f6d7d03736214d38832e030d8268bcff4a41f728e6030325fea3e400 \ + --hash=sha256:a2913c5375154b6ef2e91c10b5720ea6e21007412f6437504ffea2109b5a33d7 \ + --hash=sha256:a30596bae9403a342c978fb47d9b0ee277699fa53bbafad14706af51fe543d16 \ + --hash=sha256:b03c2ae5d2f0fc05f9a2c0c997e1bc18c8229f392234e8a0194f202169ccd278 \ + --hash=sha256:b6cd2203306b63e41acdf39aa93b86fb566049aeb6dc489b70e34bcd07adca74 \ + --hash=sha256:b7ffe927ee6531c78f81aa17e684e2ff617daeba7f189f911065b2ea2d526dec \ + --hash=sha256:b8cac287fafc4ad485b8a9b67d0ee80c66bf3574f655d3b97ef2e1082360faf1 \ + --hash=sha256:ba334e6e4b1d92442b75ddacc615c5476d4ad55cc29b15d590cc6b86efa487e2 \ + --hash=sha256:ba3e4a42397c25b7ff88cdec6e2a16c2be18720f317506ee25210f6d31925f9c \ + --hash=sha256:c41fb5e6a5fe9ebcd58ca3abfeb51dffb5d83d6775405305bfa8715b76521922 \ + --hash=sha256:cd2030f6650c089aeb304cf093f3244d34745ce0cfcc39f20c6fbfe030102e2a \ + --hash=sha256:cd65d75953847815962c84a4654a84850b2bb4aed3f26fadcc1c13892e1e29f6 \ + --hash=sha256:e4985a790f921508f36f81831817cbc03b102d643b5fcb81cd33df3fa291a1a1 \ + --hash=sha256:e807b3188f9eb0eaa7bbb579b462c5ace579f1cedb28107ce8b48a9f7ad3679e \ + --hash=sha256:f12764b8fffc7a123f641d7d049d382b73f96a34117e0b637b80643169cec8ac \ + --hash=sha256:f8837fe1d6ac4a8052a9a8ddab256bc006242696f03368a4009be7ee3075cdb7 # via # -c release/ray_release/byod/requirements_compiled.txt # pyopenssl @@ -694,10 +682,6 @@ dataproperty==1.0.1 \ # via # pytablewriter # tabledata -dataset==1.6.2 \ - --hash=sha256:77d362118f67a8cbb4848dbd30ab362b9fa7cfebdbfaf426c9c500cb38969a99 \ - --hash=sha256:dcca9ba7658473d3082b1adf87a650252a1cd665705b73fa7d4ee32116a107b9 - # via -r release/ray_release/byod/requirements_ml_byod_3.9.in datasets==2.14.0 \ --hash=sha256:1bb3d1c992a593949a8d3e445b358ac1db4ead00e6619ea2e5e7b6dfc222dde1 \ --hash=sha256:93081cc3d9d0ce860c81f950a3ba23d24704da2eacbe2722092ef4f6ae0ada96 @@ -867,68 +851,84 @@ fonttools==4.45.1 \ # via # -c release/ray_release/byod/requirements_compiled.txt # matplotlib -frozenlist==1.4.0 \ - --hash=sha256:007df07a6e3eb3e33e9a1fe6a9db7af152bbd8a185f9aaa6ece10a3529e3e1c6 \ - --hash=sha256:008eb8b31b3ea6896da16c38c1b136cb9fec9e249e77f6211d479db79a4eaf01 \ - --hash=sha256:09163bdf0b2907454042edb19f887c6d33806adc71fbd54afc14908bfdc22251 \ - --hash=sha256:0c7c1b47859ee2cac3846fde1c1dc0f15da6cec5a0e5c72d101e0f83dcb67ff9 \ - --hash=sha256:0e5c8764c7829343d919cc2dfc587a8db01c4f70a4ebbc49abde5d4b158b007b \ - --hash=sha256:10ff5faaa22786315ef57097a279b833ecab1a0bfb07d604c9cbb1c4cdc2ed87 \ - --hash=sha256:17ae5cd0f333f94f2e03aaf140bb762c64783935cc764ff9c82dff626089bebf \ - --hash=sha256:19488c57c12d4e8095a922f328df3f179c820c212940a498623ed39160bc3c2f \ - --hash=sha256:1a0848b52815006ea6596c395f87449f693dc419061cc21e970f139d466dc0a0 \ - --hash=sha256:1e78fb68cf9c1a6aa4a9a12e960a5c9dfbdb89b3695197aa7064705662515de2 \ - --hash=sha256:261b9f5d17cac914531331ff1b1d452125bf5daa05faf73b71d935485b0c510b \ - --hash=sha256:2b8bcf994563466db019fab287ff390fffbfdb4f905fc77bc1c1d604b1c689cc \ - --hash=sha256:38461d02d66de17455072c9ba981d35f1d2a73024bee7790ac2f9e361ef1cd0c \ - --hash=sha256:490132667476f6781b4c9458298b0c1cddf237488abd228b0b3650e5ecba7467 \ - --hash=sha256:491e014f5c43656da08958808588cc6c016847b4360e327a62cb308c791bd2d9 \ - 
--hash=sha256:515e1abc578dd3b275d6a5114030b1330ba044ffba03f94091842852f806f1c1 \ - --hash=sha256:556de4430ce324c836789fa4560ca62d1591d2538b8ceb0b4f68fb7b2384a27a \ - --hash=sha256:5833593c25ac59ede40ed4de6d67eb42928cca97f26feea219f21d0ed0959b79 \ - --hash=sha256:6221d84d463fb110bdd7619b69cb43878a11d51cbb9394ae3105d082d5199167 \ - --hash=sha256:6918d49b1f90821e93069682c06ffde41829c346c66b721e65a5c62b4bab0300 \ - --hash=sha256:6c38721585f285203e4b4132a352eb3daa19121a035f3182e08e437cface44bf \ - --hash=sha256:71932b597f9895f011f47f17d6428252fc728ba2ae6024e13c3398a087c2cdea \ - --hash=sha256:7211ef110a9194b6042449431e08c4d80c0481e5891e58d429df5899690511c2 \ - --hash=sha256:764226ceef3125e53ea2cb275000e309c0aa5464d43bd72abd661e27fffc26ab \ - --hash=sha256:7645a8e814a3ee34a89c4a372011dcd817964ce8cb273c8ed6119d706e9613e3 \ - --hash=sha256:76d4711f6f6d08551a7e9ef28c722f4a50dd0fc204c56b4bcd95c6cc05ce6fbb \ - --hash=sha256:7f4f399d28478d1f604c2ff9119907af9726aed73680e5ed1ca634d377abb087 \ - --hash=sha256:88f7bc0fcca81f985f78dd0fa68d2c75abf8272b1f5c323ea4a01a4d7a614efc \ - --hash=sha256:8d0edd6b1c7fb94922bf569c9b092ee187a83f03fb1a63076e7774b60f9481a8 \ - --hash=sha256:901289d524fdd571be1c7be054f48b1f88ce8dddcbdf1ec698b27d4b8b9e5d62 \ - --hash=sha256:93ea75c050c5bb3d98016b4ba2497851eadf0ac154d88a67d7a6816206f6fa7f \ - --hash=sha256:981b9ab5a0a3178ff413bca62526bb784249421c24ad7381e39d67981be2c326 \ - --hash=sha256:9ac08e601308e41eb533f232dbf6b7e4cea762f9f84f6357136eed926c15d12c \ - --hash=sha256:a02eb8ab2b8f200179b5f62b59757685ae9987996ae549ccf30f983f40602431 \ - --hash=sha256:a0c6da9aee33ff0b1a451e867da0c1f47408112b3391dd43133838339e410963 \ - --hash=sha256:a6c8097e01886188e5be3e6b14e94ab365f384736aa1fca6a0b9e35bd4a30bc7 \ - --hash=sha256:aa384489fefeb62321b238e64c07ef48398fe80f9e1e6afeff22e140e0850eef \ - --hash=sha256:ad2a9eb6d9839ae241701d0918f54c51365a51407fd80f6b8289e2dfca977cc3 \ - --hash=sha256:b206646d176a007466358aa21d85cd8600a415c67c9bd15403336c331a10d956 \ - --hash=sha256:b826d97e4276750beca7c8f0f1a4938892697a6bcd8ec8217b3312dad6982781 \ - --hash=sha256:b89ac9768b82205936771f8d2eb3ce88503b1556324c9f903e7156669f521472 \ - --hash=sha256:bd7bd3b3830247580de99c99ea2a01416dfc3c34471ca1298bccabf86d0ff4dc \ - --hash=sha256:bdf1847068c362f16b353163391210269e4f0569a3c166bc6a9f74ccbfc7e839 \ - --hash=sha256:c11b0746f5d946fecf750428a95f3e9ebe792c1ee3b1e96eeba145dc631a9672 \ - --hash=sha256:c5374b80521d3d3f2ec5572e05adc94601985cc526fb276d0c8574a6d749f1b3 \ - --hash=sha256:ca265542ca427bf97aed183c1676e2a9c66942e822b14dc6e5f42e038f92a503 \ - --hash=sha256:ce31ae3e19f3c902de379cf1323d90c649425b86de7bbdf82871b8a2a0615f3d \ - --hash=sha256:ceb6ec0a10c65540421e20ebd29083c50e6d1143278746a4ef6bcf6153171eb8 \ - --hash=sha256:d081f13b095d74b67d550de04df1c756831f3b83dc9881c38985834387487f1b \ - --hash=sha256:d5655a942f5f5d2c9ed93d72148226d75369b4f6952680211972a33e59b1dfdc \ - --hash=sha256:d5a32087d720c608f42caed0ef36d2b3ea61a9d09ee59a5142d6070da9041b8f \ - --hash=sha256:d6484756b12f40003c6128bfcc3fa9f0d49a687e171186c2d85ec82e3758c559 \ - --hash=sha256:dd65632acaf0d47608190a71bfe46b209719bf2beb59507db08ccdbe712f969b \ - --hash=sha256:de343e75f40e972bae1ef6090267f8260c1446a1695e77096db6cfa25e759a95 \ - --hash=sha256:e29cda763f752553fa14c68fb2195150bfab22b352572cb36c43c47bedba70eb \ - --hash=sha256:e41f3de4df3e80de75845d3e743b3f1c4c8613c3997a912dbf0229fc61a8b963 \ - --hash=sha256:e66d2a64d44d50d2543405fb183a21f76b3b5fd16f130f5c99187c3fb4e64919 \ - 
--hash=sha256:e74b0506fa5aa5598ac6a975a12aa8928cbb58e1f5ac8360792ef15de1aa848f \ - --hash=sha256:f0ed05f5079c708fe74bf9027e95125334b6978bf07fd5ab923e9e55e5fbb9d3 \ - --hash=sha256:f61e2dc5ad442c52b4887f1fdc112f97caeff4d9e6ebe78879364ac59f1663e1 \ - --hash=sha256:fec520865f42e5c7f050c2a79038897b1c7d1595e907a9e08e3353293ffc948e +frozenlist==1.4.1 \ + --hash=sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7 \ + --hash=sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98 \ + --hash=sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad \ + --hash=sha256:0c250a29735d4f15321007fb02865f0e6b6a41a6b88f1f523ca1596ab5f50bd5 \ + --hash=sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae \ + --hash=sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e \ + --hash=sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a \ + --hash=sha256:1d0ce09d36d53bbbe566fe296965b23b961764c0bcf3ce2fa45f463745c04701 \ + --hash=sha256:20b51fa3f588ff2fe658663db52a41a4f7aa6c04f6201449c6c7c476bd255c0d \ + --hash=sha256:23b2d7679b73fe0e5a4560b672a39f98dfc6f60df63823b0a9970525325b95f6 \ + --hash=sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6 \ + --hash=sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106 \ + --hash=sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75 \ + --hash=sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868 \ + --hash=sha256:32453c1de775c889eb4e22f1197fe3bdfe457d16476ea407472b9442e6295f7a \ + --hash=sha256:3a670dc61eb0d0eb7080890c13de3066790f9049b47b0de04007090807c776b0 \ + --hash=sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1 \ + --hash=sha256:410478a0c562d1a5bcc2f7ea448359fcb050ed48b3c6f6f4f18c313a9bdb1826 \ + --hash=sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec \ + --hash=sha256:48f6a4533887e189dae092f1cf981f2e3885175f7a0f33c91fb5b7b682b6bab6 \ + --hash=sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950 \ + --hash=sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19 \ + --hash=sha256:55fdc093b5a3cb41d420884cdaf37a1e74c3c37a31f46e66286d9145d2063bd0 \ + --hash=sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8 \ + --hash=sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a \ + --hash=sha256:5a7d70357e7cee13f470c7883a063aae5fe209a493c57d86eb7f5a6f910fae09 \ + --hash=sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86 \ + --hash=sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c \ + --hash=sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5 \ + --hash=sha256:693945278a31f2086d9bf3df0fe8254bbeaef1fe71e1351c3bd730aa7d31c41b \ + --hash=sha256:6db4667b187a6742b33afbbaf05a7bc551ffcf1ced0000a571aedbb4aa42fc7b \ + --hash=sha256:6eb73fa5426ea69ee0e012fb59cdc76a15b1283d6e32e4f8dc4482ec67d1194d \ + --hash=sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0 \ + --hash=sha256:7268252af60904bf52c26173cbadc3a071cece75f873705419c8681f24d3edea \ + --hash=sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776 \ + --hash=sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a \ + --hash=sha256:82e8211d69a4f4bc360ea22cd6555f8e61a1bd211d1d5d39d3d228b48c83a897 \ + --hash=sha256:89aa2c2eeb20957be2d950b85974b30a01a762f3308cd02bb15e1ad632e22dc7 \ + 
--hash=sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09 \ + --hash=sha256:96ec70beabbd3b10e8bfe52616a13561e58fe84c0101dd031dc78f250d5128b9 \ + --hash=sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe \ + --hash=sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd \ + --hash=sha256:9d3e0c25a2350080e9319724dede4f31f43a6c9779be48021a7f4ebde8b2d742 \ + --hash=sha256:a06339f38e9ed3a64e4c4e43aec7f59084033647f908e4259d279a52d3757d09 \ + --hash=sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0 \ + --hash=sha256:a7496bfe1da7fb1a4e1cc23bb67c58fab69311cc7d32b5a99c2007b4b2a0e932 \ + --hash=sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1 \ + --hash=sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a \ + --hash=sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49 \ + --hash=sha256:b7e3ed87d4138356775346e6845cccbe66cd9e207f3cd11d2f0b9fd13681359d \ + --hash=sha256:b7f2f9f912dca3934c1baec2e4585a674ef16fe00218d833856408c48d5beee7 \ + --hash=sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480 \ + --hash=sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89 \ + --hash=sha256:bfa4a17e17ce9abf47a74ae02f32d014c5e9404b6d9ac7f729e01562bbee601e \ + --hash=sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b \ + --hash=sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82 \ + --hash=sha256:c6321c9efe29975232da3bd0af0ad216800a47e93d763ce64f291917a381b8eb \ + --hash=sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068 \ + --hash=sha256:c99169d4ff810155ca50b4da3b075cbde79752443117d89429595c2e8e37fed8 \ + --hash=sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b \ + --hash=sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb \ + --hash=sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2 \ + --hash=sha256:dca69045298ce5c11fd539682cff879cc1e664c245d1c64da929813e54241d11 \ + --hash=sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b \ + --hash=sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc \ + --hash=sha256:e6a20a581f9ce92d389a8c7d7c3dd47c81fd5d6e655c8dddf341e14aa48659d0 \ + --hash=sha256:e7004be74cbb7d9f34553a5ce5fb08be14fb33bc86f332fb71cbe5216362a497 \ + --hash=sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17 \ + --hash=sha256:edb678da49d9f72c9f6c609fbe41a5dfb9a9282f9e6a2253d5a91e0fc382d7c0 \ + --hash=sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2 \ + --hash=sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439 \ + --hash=sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5 \ + --hash=sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac \ + --hash=sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825 \ + --hash=sha256:fbeb989b5cc29e8daf7f976b421c220f1b8c731cbf22b9130d8815418ea45887 \ + --hash=sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced \ + --hash=sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74 # via # -c release/ray_release/byod/requirements_compiled.txt # aiohttp @@ -964,8 +964,9 @@ fugue-sql-antlr==0.2.0 \ # via # -c release/ray_release/byod/requirements_compiled.txt # fugue -future==0.18.3 \ - --hash=sha256:34a17436ed1e96697a86f9de3d15a3b0be01d8bc8de9c1dffd59fb8234ed5307 +future==1.0.0 \ + 
--hash=sha256:929292d34f5872e70396626ef385ec22355a1fae8ad29e1a734c3e43f9fbc216 \ + --hash=sha256:bd2968309307861edae1458a4f8a4f3598c03be43b97521076aebf5d94c07b05 # via # -c release/ray_release/byod/requirements_compiled.txt # petastorm @@ -1295,7 +1296,6 @@ greenlet==3.0.1 \ # via # -c release/ray_release/byod/requirements_compiled.txt # gevent - # sqlalchemy gsutil==5.27 \ --hash=sha256:681a2d844acdf05fac989da6dd406944ae11cb27a4cf3c9edef74d2585ab5f05 # via @@ -1431,9 +1431,9 @@ jupyterlab-widgets==3.0.11 \ # via # -c release/ray_release/byod/requirements_compiled.txt # ipywidgets -jupytext==1.13.6 \ - --hash=sha256:2160774e30587fb427213231f0267ed070ba4ede41cf6121dbb2b14225eb83ba \ - --hash=sha256:c6c25918ddb6403d0d8504e08d35f6efc447baf0dbeb6a28b73adf39e866a0c4 +jupytext==1.16.3 \ + --hash=sha256:1ebac990461dd9f477ff7feec9e3003fa1acc89f3c16ba01b73f79fd76f01a98 \ + --hash=sha256:870e0d7a716dcb1303df6ad1cec65e3315a20daedd808a55cb3dae2d56e4ed20 # via # -c release/ray_release/byod/requirements_compiled.txt # -r release/ray_release/byod/requirements_ml_byod_3.9.in @@ -1680,19 +1680,14 @@ lxml==4.9.4 \ # via # -c release/ray_release/byod/requirements_compiled.txt # sacrebleu -mako==1.3.0 \ - --hash=sha256:57d4e997349f1a92035aa25c17ace371a4213f2ca42f99bee9a602500cfd54d9 \ - --hash=sha256:e3a9d388fd00e87043edbe8792f45880ac0114e9c4adc69f6e9bfb2c55e3b11b - # via - # -c release/ray_release/byod/requirements_compiled.txt - # alembic -markdown-it-py==1.1.0 \ - --hash=sha256:36be6bb3ad987bfdb839f5ba78ddf094552ca38ccbd784ae4f74a4e1419fc6e3 \ - --hash=sha256:98080fc0bc34c4f2bcf0846a096a9429acbd9d5d8e67ed34026c03c61c464389 +markdown-it-py==2.2.0 \ + --hash=sha256:5a35f8d1870171d9acc47b99612dc146129b631baf04970128b568f190d0cc30 \ + --hash=sha256:7c9a5e412688bc771c67432cbfebcdd686c93ce6484913dccf06cb5a0bea35a1 # via # -c release/ray_release/byod/requirements_compiled.txt # jupytext # mdit-py-plugins + # rich markupsafe==2.1.3 \ --hash=sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e \ --hash=sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e \ @@ -1757,7 +1752,6 @@ markupsafe==2.1.3 \ # via # -c release/ray_release/byod/requirements_compiled.txt # jinja2 - # mako # werkzeug matplotlib==3.7.4 \ --hash=sha256:0037d066cca1f4bda626c507cddeb6f7da8283bc6a214da2db13ff2162933c52 \ @@ -1829,6 +1823,12 @@ mdit-py-plugins==0.3.5 \ # via # -c release/ray_release/byod/requirements_compiled.txt # jupytext +mdurl==0.1.2 \ + --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ + --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba + # via + # -c release/ray_release/byod/requirements_compiled.txt + # markdown-it-py memray==1.10.0 ; platform_system != "Windows" and sys_platform != "darwin" and platform_machine != "aarch64" \ --hash=sha256:0a21745fb516b7a6efcd40aa7487c59e9313fcfc782d0193fcfcf00b48426874 \ --hash=sha256:22f2a47871c172a0539bd72737bb6b294fc10c510464066b825d90fcd3bb4916 \ @@ -1868,7 +1868,7 @@ memray==1.10.0 ; platform_system != "Windows" and sys_platform != "darwin" and p # via # -c release/ray_release/byod/requirements_compiled.txt # -r release/ray_release/byod/requirements_ml_byod_3.9.in -modin==0.22.2 \ +modin==0.22.2 ; python_version < "3.12" \ --hash=sha256:532fe0bfb2dcf06c0ad2d467721ef489fd58bb3ef7150bcf4a7ddd1069be1e4d \ --hash=sha256:fa897dc59d5b9a8496be044185689fdd337b9f26cc81c4144b217a2a94d029bc # via @@ -1952,81 +1952,97 @@ msgpack==1.0.7 \ # via # -c release/ray_release/byod/requirements_compiled.txt # 
locust -multidict==6.0.4 \ - --hash=sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9 \ - --hash=sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8 \ - --hash=sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03 \ - --hash=sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710 \ - --hash=sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161 \ - --hash=sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664 \ - --hash=sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569 \ - --hash=sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067 \ - --hash=sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313 \ - --hash=sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706 \ - --hash=sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2 \ - --hash=sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636 \ - --hash=sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49 \ - --hash=sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93 \ - --hash=sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603 \ - --hash=sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0 \ - --hash=sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60 \ - --hash=sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4 \ - --hash=sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e \ - --hash=sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1 \ - --hash=sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60 \ - --hash=sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951 \ - --hash=sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc \ - --hash=sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe \ - --hash=sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95 \ - --hash=sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d \ - --hash=sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8 \ - --hash=sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed \ - --hash=sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2 \ - --hash=sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775 \ - --hash=sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87 \ - --hash=sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c \ - --hash=sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2 \ - --hash=sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98 \ - --hash=sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3 \ - --hash=sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe \ - --hash=sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78 \ - --hash=sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660 \ - --hash=sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176 \ - --hash=sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e \ - --hash=sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988 \ - --hash=sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c \ - 
--hash=sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c \ - --hash=sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0 \ - --hash=sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449 \ - --hash=sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f \ - --hash=sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde \ - --hash=sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5 \ - --hash=sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d \ - --hash=sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac \ - --hash=sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a \ - --hash=sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9 \ - --hash=sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca \ - --hash=sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11 \ - --hash=sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35 \ - --hash=sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063 \ - --hash=sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b \ - --hash=sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982 \ - --hash=sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258 \ - --hash=sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1 \ - --hash=sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52 \ - --hash=sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480 \ - --hash=sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7 \ - --hash=sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461 \ - --hash=sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d \ - --hash=sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc \ - --hash=sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779 \ - --hash=sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a \ - --hash=sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547 \ - --hash=sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0 \ - --hash=sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171 \ - --hash=sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf \ - --hash=sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d \ - --hash=sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba +multidict==6.0.5 \ + --hash=sha256:01265f5e40f5a17f8241d52656ed27192be03bfa8764d88e8220141d1e4b3556 \ + --hash=sha256:0275e35209c27a3f7951e1ce7aaf93ce0d163b28948444bec61dd7badc6d3f8c \ + --hash=sha256:04bde7a7b3de05732a4eb39c94574db1ec99abb56162d6c520ad26f83267de29 \ + --hash=sha256:04da1bb8c8dbadf2a18a452639771951c662c5ad03aefe4884775454be322c9b \ + --hash=sha256:09a892e4a9fb47331da06948690ae38eaa2426de97b4ccbfafbdcbe5c8f37ff8 \ + --hash=sha256:0d63c74e3d7ab26de115c49bffc92cc77ed23395303d496eae515d4204a625e7 \ + --hash=sha256:107c0cdefe028703fb5dafe640a409cb146d44a6ae201e55b35a4af8e95457dd \ + --hash=sha256:141b43360bfd3bdd75f15ed811850763555a251e38b2405967f8e25fb43f7d40 \ + --hash=sha256:14c2976aa9038c2629efa2c148022ed5eb4cb939e15ec7aace7ca932f48f9ba6 \ + --hash=sha256:19fe01cea168585ba0f678cad6f58133db2aa14eccaf22f88e4a6dccadfad8b3 \ + 
--hash=sha256:1d147090048129ce3c453f0292e7697d333db95e52616b3793922945804a433c \ + --hash=sha256:1d9ea7a7e779d7a3561aade7d596649fbecfa5c08a7674b11b423783217933f9 \ + --hash=sha256:215ed703caf15f578dca76ee6f6b21b7603791ae090fbf1ef9d865571039ade5 \ + --hash=sha256:21fd81c4ebdb4f214161be351eb5bcf385426bf023041da2fd9e60681f3cebae \ + --hash=sha256:220dd781e3f7af2c2c1053da9fa96d9cf3072ca58f057f4c5adaaa1cab8fc442 \ + --hash=sha256:228b644ae063c10e7f324ab1ab6b548bdf6f8b47f3ec234fef1093bc2735e5f9 \ + --hash=sha256:29bfeb0dff5cb5fdab2023a7a9947b3b4af63e9c47cae2a10ad58394b517fddc \ + --hash=sha256:2f4848aa3baa109e6ab81fe2006c77ed4d3cd1e0ac2c1fbddb7b1277c168788c \ + --hash=sha256:2faa5ae9376faba05f630d7e5e6be05be22913782b927b19d12b8145968a85ea \ + --hash=sha256:2ffc42c922dbfddb4a4c3b438eb056828719f07608af27d163191cb3e3aa6cc5 \ + --hash=sha256:37b15024f864916b4951adb95d3a80c9431299080341ab9544ed148091b53f50 \ + --hash=sha256:3cc2ad10255f903656017363cd59436f2111443a76f996584d1077e43ee51182 \ + --hash=sha256:3d25f19500588cbc47dc19081d78131c32637c25804df8414463ec908631e453 \ + --hash=sha256:403c0911cd5d5791605808b942c88a8155c2592e05332d2bf78f18697a5fa15e \ + --hash=sha256:411bf8515f3be9813d06004cac41ccf7d1cd46dfe233705933dd163b60e37600 \ + --hash=sha256:425bf820055005bfc8aa9a0b99ccb52cc2f4070153e34b701acc98d201693733 \ + --hash=sha256:435a0984199d81ca178b9ae2c26ec3d49692d20ee29bc4c11a2a8d4514c67eda \ + --hash=sha256:4a6a4f196f08c58c59e0b8ef8ec441d12aee4125a7d4f4fef000ccb22f8d7241 \ + --hash=sha256:4cc0ef8b962ac7a5e62b9e826bd0cd5040e7d401bc45a6835910ed699037a461 \ + --hash=sha256:51d035609b86722963404f711db441cf7134f1889107fb171a970c9701f92e1e \ + --hash=sha256:53689bb4e102200a4fafa9de9c7c3c212ab40a7ab2c8e474491914d2305f187e \ + --hash=sha256:55205d03e8a598cfc688c71ca8ea5f66447164efff8869517f175ea632c7cb7b \ + --hash=sha256:5c0631926c4f58e9a5ccce555ad7747d9a9f8b10619621f22f9635f069f6233e \ + --hash=sha256:5cb241881eefd96b46f89b1a056187ea8e9ba14ab88ba632e68d7a2ecb7aadf7 \ + --hash=sha256:60d698e8179a42ec85172d12f50b1668254628425a6bd611aba022257cac1386 \ + --hash=sha256:612d1156111ae11d14afaf3a0669ebf6c170dbb735e510a7438ffe2369a847fd \ + --hash=sha256:6214c5a5571802c33f80e6c84713b2c79e024995b9c5897f794b43e714daeec9 \ + --hash=sha256:6939c95381e003f54cd4c5516740faba40cf5ad3eeff460c3ad1d3e0ea2549bf \ + --hash=sha256:69db76c09796b313331bb7048229e3bee7928eb62bab5e071e9f7fcc4879caee \ + --hash=sha256:6bf7a982604375a8d49b6cc1b781c1747f243d91b81035a9b43a2126c04766f5 \ + --hash=sha256:766c8f7511df26d9f11cd3a8be623e59cca73d44643abab3f8c8c07620524e4a \ + --hash=sha256:76c0de87358b192de7ea9649beb392f107dcad9ad27276324c24c91774ca5271 \ + --hash=sha256:76f067f5121dcecf0d63a67f29080b26c43c71a98b10c701b0677e4a065fbd54 \ + --hash=sha256:7901c05ead4b3fb75113fb1dd33eb1253c6d3ee37ce93305acd9d38e0b5f21a4 \ + --hash=sha256:79660376075cfd4b2c80f295528aa6beb2058fd289f4c9252f986751a4cd0496 \ + --hash=sha256:79a6d2ba910adb2cbafc95dad936f8b9386e77c84c35bc0add315b856d7c3abb \ + --hash=sha256:7afcdd1fc07befad18ec4523a782cde4e93e0a2bf71239894b8d61ee578c1319 \ + --hash=sha256:7be7047bd08accdb7487737631d25735c9a04327911de89ff1b26b81745bd4e3 \ + --hash=sha256:7c6390cf87ff6234643428991b7359b5f59cc15155695deb4eda5c777d2b880f \ + --hash=sha256:7df704ca8cf4a073334e0427ae2345323613e4df18cc224f647f251e5e75a527 \ + --hash=sha256:85f67aed7bb647f93e7520633d8f51d3cbc6ab96957c71272b286b2f30dc70ed \ + --hash=sha256:896ebdcf62683551312c30e20614305f53125750803b614e9e6ce74a96232604 \ + 
--hash=sha256:92d16a3e275e38293623ebf639c471d3e03bb20b8ebb845237e0d3664914caef \ + --hash=sha256:99f60d34c048c5c2fabc766108c103612344c46e35d4ed9ae0673d33c8fb26e8 \ + --hash=sha256:9fe7b0653ba3d9d65cbe7698cca585bf0f8c83dbbcc710db9c90f478e175f2d5 \ + --hash=sha256:a3145cb08d8625b2d3fee1b2d596a8766352979c9bffe5d7833e0503d0f0b5e5 \ + --hash=sha256:aeaf541ddbad8311a87dd695ed9642401131ea39ad7bc8cf3ef3967fd093b626 \ + --hash=sha256:b55358304d7a73d7bdf5de62494aaf70bd33015831ffd98bc498b433dfe5b10c \ + --hash=sha256:b82cc8ace10ab5bd93235dfaab2021c70637005e1ac787031f4d1da63d493c1d \ + --hash=sha256:c0868d64af83169e4d4152ec612637a543f7a336e4a307b119e98042e852ad9c \ + --hash=sha256:c1c1496e73051918fcd4f58ff2e0f2f3066d1c76a0c6aeffd9b45d53243702cc \ + --hash=sha256:c9bf56195c6bbd293340ea82eafd0071cb3d450c703d2c93afb89f93b8386ccc \ + --hash=sha256:cbebcd5bcaf1eaf302617c114aa67569dd3f090dd0ce8ba9e35e9985b41ac35b \ + --hash=sha256:cd6c8fca38178e12c00418de737aef1261576bd1b6e8c6134d3e729a4e858b38 \ + --hash=sha256:ceb3b7e6a0135e092de86110c5a74e46bda4bd4fbfeeb3a3bcec79c0f861e450 \ + --hash=sha256:cf590b134eb70629e350691ecca88eac3e3b8b3c86992042fb82e3cb1830d5e1 \ + --hash=sha256:d3eb1ceec286eba8220c26f3b0096cf189aea7057b6e7b7a2e60ed36b373b77f \ + --hash=sha256:d65f25da8e248202bd47445cec78e0025c0fe7582b23ec69c3b27a640dd7a8e3 \ + --hash=sha256:d6f6d4f185481c9669b9447bf9d9cf3b95a0e9df9d169bbc17e363b7d5487755 \ + --hash=sha256:d84a5c3a5f7ce6db1f999fb9438f686bc2e09d38143f2d93d8406ed2dd6b9226 \ + --hash=sha256:d946b0a9eb8aaa590df1fe082cee553ceab173e6cb5b03239716338629c50c7a \ + --hash=sha256:dce1c6912ab9ff5f179eaf6efe7365c1f425ed690b03341911bf4939ef2f3046 \ + --hash=sha256:de170c7b4fe6859beb8926e84f7d7d6c693dfe8e27372ce3b76f01c46e489fcf \ + --hash=sha256:e02021f87a5b6932fa6ce916ca004c4d441509d33bbdbeca70d05dff5e9d2479 \ + --hash=sha256:e030047e85cbcedbfc073f71836d62dd5dadfbe7531cae27789ff66bc551bd5e \ + --hash=sha256:e0e79d91e71b9867c73323a3444724d496c037e578a0e1755ae159ba14f4f3d1 \ + --hash=sha256:e4428b29611e989719874670fd152b6625500ad6c686d464e99f5aaeeaca175a \ + --hash=sha256:e4972624066095e52b569e02b5ca97dbd7a7ddd4294bf4e7247d52635630dd83 \ + --hash=sha256:e7be68734bd8c9a513f2b0cfd508802d6609da068f40dc57d4e3494cefc92929 \ + --hash=sha256:e8e94e6912639a02ce173341ff62cc1201232ab86b8a8fcc05572741a5dc7d93 \ + --hash=sha256:ea1456df2a27c73ce51120fa2f519f1bea2f4a03a917f4a43c8707cf4cbbae1a \ + --hash=sha256:ebd8d160f91a764652d3e51ce0d2956b38efe37c9231cd82cfc0bed2e40b581c \ + --hash=sha256:eca2e9d0cc5a889850e9bbd68e98314ada174ff6ccd1129500103df7a94a7a44 \ + --hash=sha256:edd08e6f2f1a390bf137080507e44ccc086353c8e98c657e666c017718561b89 \ + --hash=sha256:f285e862d2f153a70586579c15c44656f888806ed0e5b56b64489afe4a2dbfba \ + --hash=sha256:f2a1dee728b52b33eebff5072817176c172050d44d67befd681609b4746e1c2e \ + --hash=sha256:f7e301075edaf50500f0b341543c41194d8df3ae5caf4702f2095f3ca73dd8da \ + --hash=sha256:fb616be3538599e797a2017cccca78e354c767165e8858ab5116813146041a24 \ + --hash=sha256:fce28b3c8a81b6b36dfac9feb1de115bab619b3c13905b419ec71d03a3fc1423 \ + --hash=sha256:fe5d7785250541f7f5019ab9cba2c71169dc7d74d0f45253f8313f436458a4ef # via # -c release/ray_release/byod/requirements_compiled.txt # aiohttp @@ -2219,6 +2235,12 @@ numpy==1.26.4 \ # triad # utilsforecast # xgboost +nvidia-nccl-cu12==2.20.5 \ + --hash=sha256:057f6bf9685f75215d0c53bf3ac4a10b3e6578351de307abad9e18a99182af56 \ + --hash=sha256:1fc150d5c3250b170b29410ba682384b14581db722b2531b0d8d33c595f33d01 + # via + # -c release/ray_release/byod/requirements_compiled.txt + 
# xgboost oauth2client==4.1.3 \ --hash=sha256:b8a81cc5d60e2d364f0b1b98f958dbd472887acaf1a5b05e21c28c31a2d6d3ac \ --hash=sha256:d486741e451287f69568a4d26d70d9acd73a2bbfa275746c535b4209891cccc6 @@ -2250,6 +2272,7 @@ packaging==23.0 \ # evaluate # fugue-sql-antlr # huggingface-hub + # jupytext # lightning-utilities # matplotlib # modin @@ -2263,7 +2286,7 @@ packaging==23.0 \ # transformers # typepy # utilsforecast -pandas==1.5.3 \ +pandas==1.5.3 ; python_version < "3.12" \ --hash=sha256:14e45300521902689a81f3f41386dc86f19b8ba8dd5ac5a3c7010ef8d2932813 \ --hash=sha256:26d9c71772c7afb9d5046e6e9cf42d83dd147b5cf5bcb9d97252077118543792 \ --hash=sha256:3749077d86e3a2f0ed51367f30bf5b82e131cc0f14260c4d3e499186fccc4406 \ @@ -2710,9 +2733,9 @@ pynvml==11.5.0 \ # via # -c release/ray_release/byod/requirements_compiled.txt # deepspeed -pyopenssl==23.0.0 \ - --hash=sha256:c1cc5f86bcacefc84dada7d31175cae1b1518d5f60d3d0bb595a67822a868a6f \ - --hash=sha256:df5fc28af899e74e19fccb5510df423581047e10ab6f1f4ba1763ff5fde844c0 +pyopenssl==24.2.1 \ + --hash=sha256:4247f0dbe3748d560dcbb2ff3ea01af0f9a1a001ef5f7c4c647956ed8cbf0e95 \ + --hash=sha256:967d5719b12b243588573f39b0c677637145c7a1ffedcd495a487e58177fbb8d # via # -c release/ray_release/byod/requirements_compiled.txt # gcs-oauth2-boto-plugin @@ -2963,95 +2986,86 @@ qpd==0.4.4 \ # via # -c release/ray_release/byod/requirements_compiled.txt # fugue -regex==2023.10.3 \ - --hash=sha256:00ba3c9818e33f1fa974693fb55d24cdc8ebafcb2e4207680669d8f8d7cca79a \ - --hash=sha256:00e871d83a45eee2f8688d7e6849609c2ca2a04a6d48fba3dff4deef35d14f07 \ - --hash=sha256:06e9abc0e4c9ab4779c74ad99c3fc10d3967d03114449acc2c2762ad4472b8ca \ - --hash=sha256:0b9ac09853b2a3e0d0082104036579809679e7715671cfbf89d83c1cb2a30f58 \ - --hash=sha256:0d47840dc05e0ba04fe2e26f15126de7c755496d5a8aae4a08bda4dd8d646c54 \ - --hash=sha256:0f649fa32fe734c4abdfd4edbb8381c74abf5f34bc0b3271ce687b23729299ed \ - --hash=sha256:107ac60d1bfdc3edb53be75e2a52aff7481b92817cfdddd9b4519ccf0e54a6ff \ - --hash=sha256:11175910f62b2b8c055f2b089e0fedd694fe2be3941b3e2633653bc51064c528 \ - --hash=sha256:12bd4bc2c632742c7ce20db48e0d99afdc05e03f0b4c1af90542e05b809a03d9 \ - --hash=sha256:16f8740eb6dbacc7113e3097b0a36065a02e37b47c936b551805d40340fb9971 \ - --hash=sha256:1c0e8fae5b27caa34177bdfa5a960c46ff2f78ee2d45c6db15ae3f64ecadde14 \ - --hash=sha256:2c54e23836650bdf2c18222c87f6f840d4943944146ca479858404fedeb9f9af \ - --hash=sha256:3367007ad1951fde612bf65b0dffc8fd681a4ab98ac86957d16491400d661302 \ - --hash=sha256:36362386b813fa6c9146da6149a001b7bd063dabc4d49522a1f7aa65b725c7ec \ - --hash=sha256:39807cbcbe406efca2a233884e169d056c35aa7e9f343d4e78665246a332f597 \ - --hash=sha256:39cdf8d141d6d44e8d5a12a8569d5a227f645c87df4f92179bd06e2e2705e76b \ - --hash=sha256:3b2c3502603fab52d7619b882c25a6850b766ebd1b18de3df23b2f939360e1bd \ - --hash=sha256:3ccf2716add72f80714b9a63899b67fa711b654be3fcdd34fa391d2d274ce767 \ - --hash=sha256:3fef4f844d2290ee0ba57addcec17eec9e3df73f10a2748485dfd6a3a188cc0f \ - --hash=sha256:4023e2efc35a30e66e938de5aef42b520c20e7eda7bb5fb12c35e5d09a4c43f6 \ - --hash=sha256:4a3ee019a9befe84fa3e917a2dd378807e423d013377a884c1970a3c2792d293 \ - --hash=sha256:4a8bf76e3182797c6b1afa5b822d1d5802ff30284abe4599e1247be4fd6b03be \ - --hash=sha256:4a992f702c9be9c72fa46f01ca6e18d131906a7180950958f766c2aa294d4b41 \ - --hash=sha256:4c34d4f73ea738223a094d8e0ffd6d2c1a1b4c175da34d6b0de3d8d69bee6bcc \ - --hash=sha256:4cd1bccf99d3ef1ab6ba835308ad85be040e6a11b0977ef7ea8c8005f01a3c29 \ - 
--hash=sha256:4ef80829117a8061f974b2fda8ec799717242353bff55f8a29411794d635d964 \ - --hash=sha256:58837f9d221744d4c92d2cf7201c6acd19623b50c643b56992cbd2b745485d3d \ - --hash=sha256:5a8f91c64f390ecee09ff793319f30a0f32492e99f5dc1c72bc361f23ccd0a9a \ - --hash=sha256:5addc9d0209a9afca5fc070f93b726bf7003bd63a427f65ef797a931782e7edc \ - --hash=sha256:6239d4e2e0b52c8bd38c51b760cd870069f0bdf99700a62cd509d7a031749a55 \ - --hash=sha256:66e2fe786ef28da2b28e222c89502b2af984858091675044d93cb50e6f46d7af \ - --hash=sha256:69c0771ca5653c7d4b65203cbfc5e66db9375f1078689459fe196fe08b7b4930 \ - --hash=sha256:6ac965a998e1388e6ff2e9781f499ad1eaa41e962a40d11c7823c9952c77123e \ - --hash=sha256:6c56c3d47da04f921b73ff9415fbaa939f684d47293f071aa9cbb13c94afc17d \ - --hash=sha256:6f85739e80d13644b981a88f529d79c5bdf646b460ba190bffcaf6d57b2a9863 \ - --hash=sha256:706e7b739fdd17cb89e1fbf712d9dc21311fc2333f6d435eac2d4ee81985098c \ - --hash=sha256:741ba2f511cc9626b7561a440f87d658aabb3d6b744a86a3c025f866b4d19e7f \ - --hash=sha256:7434a61b158be563c1362d9071358f8ab91b8d928728cd2882af060481244c9e \ - --hash=sha256:76066d7ff61ba6bf3cb5efe2428fc82aac91802844c022d849a1f0f53820502d \ - --hash=sha256:7979b834ec7a33aafae34a90aad9f914c41fd6eaa8474e66953f3f6f7cbd4368 \ - --hash=sha256:7eece6fbd3eae4a92d7c748ae825cbc1ee41a89bb1c3db05b5578ed3cfcfd7cb \ - --hash=sha256:7ef1e014eed78ab650bef9a6a9cbe50b052c0aebe553fb2881e0453717573f52 \ - --hash=sha256:81dce2ddc9f6e8f543d94b05d56e70d03a0774d32f6cca53e978dc01e4fc75b8 \ - --hash=sha256:82fcc1f1cc3ff1ab8a57ba619b149b907072e750815c5ba63e7aa2e1163384a4 \ - --hash=sha256:8d1f21af4c1539051049796a0f50aa342f9a27cde57318f2fc41ed50b0dbc4ac \ - --hash=sha256:90a79bce019c442604662d17bf69df99090e24cdc6ad95b18b6725c2988a490e \ - --hash=sha256:9145f092b5d1977ec8c0ab46e7b3381b2fd069957b9862a43bd383e5c01d18c2 \ - --hash=sha256:91dc1d531f80c862441d7b66c4505cd6ea9d312f01fb2f4654f40c6fdf5cc37a \ - --hash=sha256:979c24cbefaf2420c4e377ecd1f165ea08cc3d1fbb44bdc51bccbbf7c66a2cb4 \ - --hash=sha256:994645a46c6a740ee8ce8df7911d4aee458d9b1bc5639bc968226763d07f00fa \ - --hash=sha256:9b98b7681a9437262947f41c7fac567c7e1f6eddd94b0483596d320092004533 \ - --hash=sha256:9c6b4d23c04831e3ab61717a707a5d763b300213db49ca680edf8bf13ab5d91b \ - --hash=sha256:9c6d0ced3c06d0f183b73d3c5920727268d2201aa0fe6d55c60d68c792ff3588 \ - --hash=sha256:9fd88f373cb71e6b59b7fa597e47e518282455c2734fd4306a05ca219a1991b0 \ - --hash=sha256:a8f4e49fc3ce020f65411432183e6775f24e02dff617281094ba6ab079ef0915 \ - --hash=sha256:a9e908ef5889cda4de038892b9accc36d33d72fb3e12c747e2799a0e806ec841 \ - --hash=sha256:ad08a69728ff3c79866d729b095872afe1e0557251da4abb2c5faff15a91d19a \ - --hash=sha256:adbccd17dcaff65704c856bd29951c58a1bd4b2b0f8ad6b826dbd543fe740988 \ - --hash=sha256:b0c7d2f698e83f15228ba41c135501cfe7d5740181d5903e250e47f617eb4292 \ - --hash=sha256:b3ab05a182c7937fb374f7e946f04fb23a0c0699c0450e9fb02ef567412d2fa3 \ - --hash=sha256:b6104f9a46bd8743e4f738afef69b153c4b8b592d35ae46db07fc28ae3d5fb7c \ - --hash=sha256:ba7cd6dc4d585ea544c1412019921570ebd8a597fabf475acc4528210d7c4a6f \ - --hash=sha256:bc72c231f5449d86d6c7d9cc7cd819b6eb30134bb770b8cfdc0765e48ef9c420 \ - --hash=sha256:bce8814b076f0ce5766dc87d5a056b0e9437b8e0cd351b9a6c4e1134a7dfbda9 \ - --hash=sha256:be5e22bbb67924dea15039c3282fa4cc6cdfbe0cbbd1c0515f9223186fc2ec5f \ - --hash=sha256:be6b7b8d42d3090b6c80793524fa66c57ad7ee3fe9722b258aec6d0672543fd0 \ - --hash=sha256:bfe50b61bab1b1ec260fa7cd91106fa9fece57e6beba05630afe27c71259c59b \ - 
--hash=sha256:bff507ae210371d4b1fe316d03433ac099f184d570a1a611e541923f78f05037 \ - --hash=sha256:c148bec483cc4b421562b4bcedb8e28a3b84fcc8f0aa4418e10898f3c2c0eb9b \ - --hash=sha256:c15ad0aee158a15e17e0495e1e18741573d04eb6da06d8b84af726cfc1ed02ee \ - --hash=sha256:c2169b2dcabf4e608416f7f9468737583ce5f0a6e8677c4efbf795ce81109d7c \ - --hash=sha256:c55853684fe08d4897c37dfc5faeff70607a5f1806c8be148f1695be4a63414b \ - --hash=sha256:c65a3b5330b54103e7d21cac3f6bf3900d46f6d50138d73343d9e5b2900b2353 \ - --hash=sha256:c7964c2183c3e6cce3f497e3a9f49d182e969f2dc3aeeadfa18945ff7bdd7051 \ - --hash=sha256:cc3f1c053b73f20c7ad88b0d1d23be7e7b3901229ce89f5000a8399746a6e039 \ - --hash=sha256:ce615c92d90df8373d9e13acddd154152645c0dc060871abf6bd43809673d20a \ - --hash=sha256:d29338556a59423d9ff7b6eb0cb89ead2b0875e08fe522f3e068b955c3e7b59b \ - --hash=sha256:d8a993c0a0ffd5f2d3bda23d0cd75e7086736f8f8268de8a82fbc4bd0ac6791e \ - --hash=sha256:d9c727bbcf0065cbb20f39d2b4f932f8fa1631c3e01fcedc979bd4f51fe051c5 \ - --hash=sha256:dac37cf08fcf2094159922edc7a2784cfcc5c70f8354469f79ed085f0328ebdf \ - --hash=sha256:dd829712de97753367153ed84f2de752b86cd1f7a88b55a3a775eb52eafe8a94 \ - --hash=sha256:e54ddd0bb8fb626aa1f9ba7b36629564544954fff9669b15da3610c22b9a0991 \ - --hash=sha256:e77c90ab5997e85901da85131fd36acd0ed2221368199b65f0d11bca44549711 \ - --hash=sha256:ebedc192abbc7fd13c5ee800e83a6df252bec691eb2c4bedc9f8b2e2903f5e2a \ - --hash=sha256:ef71561f82a89af6cfcbee47f0fabfdb6e63788a9258e913955d89fdd96902ab \ - --hash=sha256:f0a47efb1dbef13af9c9a54a94a0b814902e547b7f21acb29434504d18f36e3a \ - --hash=sha256:f4f2ca6df64cbdd27f27b34f35adb640b5d2d77264228554e68deda54456eb11 \ - --hash=sha256:fb02e4257376ae25c6dd95a5aec377f9b18c09be6ebdefa7ad209b9137b73d48 +regex==2024.5.15 \ + --hash=sha256:0721931ad5fe0dda45d07f9820b90b2148ccdd8e45bb9e9b42a146cb4f695649 \ + --hash=sha256:10002e86e6068d9e1c91eae8295ef690f02f913c57db120b58fdd35a6bb1af35 \ + --hash=sha256:10e4ce0dca9ae7a66e6089bb29355d4432caed736acae36fef0fdd7879f0b0cb \ + --hash=sha256:119af6e56dce35e8dfb5222573b50c89e5508d94d55713c75126b753f834de68 \ + --hash=sha256:1337b7dbef9b2f71121cdbf1e97e40de33ff114801263b275aafd75303bd62b5 \ + --hash=sha256:13cdaf31bed30a1e1c2453ef6015aa0983e1366fad2667657dbcac7b02f67133 \ + --hash=sha256:1595f2d10dff3d805e054ebdc41c124753631b6a471b976963c7b28543cf13b0 \ + --hash=sha256:16093f563098448ff6b1fa68170e4acbef94e6b6a4e25e10eae8598bb1694b5d \ + --hash=sha256:1878b8301ed011704aea4c806a3cadbd76f84dece1ec09cc9e4dc934cfa5d4da \ + --hash=sha256:19068a6a79cf99a19ccefa44610491e9ca02c2be3305c7760d3831d38a467a6f \ + --hash=sha256:19dfb1c504781a136a80ecd1fff9f16dddf5bb43cec6871778c8a907a085bb3d \ + --hash=sha256:1b5269484f6126eee5e687785e83c6b60aad7663dafe842b34691157e5083e53 \ + --hash=sha256:1c1c174d6ec38d6c8a7504087358ce9213d4332f6293a94fbf5249992ba54efa \ + --hash=sha256:2431b9e263af1953c55abbd3e2efca67ca80a3de8a0437cb58e2421f8184717a \ + --hash=sha256:287eb7f54fc81546346207c533ad3c2c51a8d61075127d7f6d79aaf96cdee890 \ + --hash=sha256:2b4c884767504c0e2401babe8b5b7aea9148680d2e157fa28f01529d1f7fcf67 \ + --hash=sha256:35cb514e137cb3488bce23352af3e12fb0dbedd1ee6e60da053c69fb1b29cc6c \ + --hash=sha256:391d7f7f1e409d192dba8bcd42d3e4cf9e598f3979cdaed6ab11288da88cb9f2 \ + --hash=sha256:3ad070b823ca5890cab606c940522d05d3d22395d432f4aaaf9d5b1653e47ced \ + --hash=sha256:3cd7874d57f13bf70078f1ff02b8b0aa48d5b9ed25fc48547516c6aba36f5741 \ + --hash=sha256:3e507ff1e74373c4d3038195fdd2af30d297b4f0950eeda6f515ae3d84a1770f \ + 
--hash=sha256:455705d34b4154a80ead722f4f185b04c4237e8e8e33f265cd0798d0e44825fa \ + --hash=sha256:4a605586358893b483976cffc1723fb0f83e526e8f14c6e6614e75919d9862cf \ + --hash=sha256:4babf07ad476aaf7830d77000874d7611704a7fcf68c9c2ad151f5d94ae4bfc4 \ + --hash=sha256:4eee78a04e6c67e8391edd4dad3279828dd66ac4b79570ec998e2155d2e59fd5 \ + --hash=sha256:5397de3219a8b08ae9540c48f602996aa6b0b65d5a61683e233af8605c42b0f2 \ + --hash=sha256:5b5467acbfc153847d5adb21e21e29847bcb5870e65c94c9206d20eb4e99a384 \ + --hash=sha256:5eaa7ddaf517aa095fa8da0b5015c44d03da83f5bd49c87961e3c997daed0de7 \ + --hash=sha256:632b01153e5248c134007209b5c6348a544ce96c46005d8456de1d552455b014 \ + --hash=sha256:64c65783e96e563103d641760664125e91bd85d8e49566ee560ded4da0d3e704 \ + --hash=sha256:64f18a9a3513a99c4bef0e3efd4c4a5b11228b48aa80743be822b71e132ae4f5 \ + --hash=sha256:673b5a6da4557b975c6c90198588181029c60793835ce02f497ea817ff647cb2 \ + --hash=sha256:68811ab14087b2f6e0fc0c2bae9ad689ea3584cad6917fc57be6a48bbd012c49 \ + --hash=sha256:6e8d717bca3a6e2064fc3a08df5cbe366369f4b052dcd21b7416e6d71620dca1 \ + --hash=sha256:71a455a3c584a88f654b64feccc1e25876066c4f5ef26cd6dd711308aa538694 \ + --hash=sha256:72d7a99cd6b8f958e85fc6ca5b37c4303294954eac1376535b03c2a43eb72629 \ + --hash=sha256:7b59138b219ffa8979013be7bc85bb60c6f7b7575df3d56dc1e403a438c7a3f6 \ + --hash=sha256:7dbe2467273b875ea2de38ded4eba86cbcbc9a1a6d0aa11dcf7bd2e67859c435 \ + --hash=sha256:833616ddc75ad595dee848ad984d067f2f31be645d603e4d158bba656bbf516c \ + --hash=sha256:87e2a9c29e672fc65523fb47a90d429b70ef72b901b4e4b1bd42387caf0d6835 \ + --hash=sha256:8fe45aa3f4aa57faabbc9cb46a93363edd6197cbc43523daea044e9ff2fea83e \ + --hash=sha256:9e717956dcfd656f5055cc70996ee2cc82ac5149517fc8e1b60261b907740201 \ + --hash=sha256:9efa1a32ad3a3ea112224897cdaeb6aa00381627f567179c0314f7b65d354c62 \ + --hash=sha256:9ff11639a8d98969c863d4617595eb5425fd12f7c5ef6621a4b74b71ed8726d5 \ + --hash=sha256:a094801d379ab20c2135529948cb84d417a2169b9bdceda2a36f5f10977ebc16 \ + --hash=sha256:a0981022dccabca811e8171f913de05720590c915b033b7e601f35ce4ea7019f \ + --hash=sha256:a0bd000c6e266927cb7a1bc39d55be95c4b4f65c5be53e659537537e019232b1 \ + --hash=sha256:a32b96f15c8ab2e7d27655969a23895eb799de3665fa94349f3b2fbfd547236f \ + --hash=sha256:a81e3cfbae20378d75185171587cbf756015ccb14840702944f014e0d93ea09f \ + --hash=sha256:ac394ff680fc46b97487941f5e6ae49a9f30ea41c6c6804832063f14b2a5a145 \ + --hash=sha256:ada150c5adfa8fbcbf321c30c751dc67d2f12f15bd183ffe4ec7cde351d945b3 \ + --hash=sha256:b2b6f1b3bb6f640c1a92be3bbfbcb18657b125b99ecf141fb3310b5282c7d4ed \ + --hash=sha256:b802512f3e1f480f41ab5f2cfc0e2f761f08a1f41092d6718868082fc0d27143 \ + --hash=sha256:ba68168daedb2c0bab7fd7e00ced5ba90aebf91024dea3c88ad5063c2a562cca \ + --hash=sha256:bfc4f82cabe54f1e7f206fd3d30fda143f84a63fe7d64a81558d6e5f2e5aaba9 \ + --hash=sha256:c0c18345010870e58238790a6779a1219b4d97bd2e77e1140e8ee5d14df071aa \ + --hash=sha256:c3bea0ba8b73b71b37ac833a7f3fd53825924165da6a924aec78c13032f20850 \ + --hash=sha256:c486b4106066d502495b3025a0a7251bf37ea9540433940a23419461ab9f2a80 \ + --hash=sha256:c49e15eac7c149f3670b3e27f1f28a2c1ddeccd3a2812cba953e01be2ab9b5fe \ + --hash=sha256:c6a2b494a76983df8e3d3feea9b9ffdd558b247e60b92f877f93a1ff43d26656 \ + --hash=sha256:cab12877a9bdafde5500206d1020a584355a97884dfd388af3699e9137bf7388 \ + --hash=sha256:cac27dcaa821ca271855a32188aa61d12decb6fe45ffe3e722401fe61e323cd1 \ + --hash=sha256:cdd09d47c0b2efee9378679f8510ee6955d329424c659ab3c5e3a6edea696294 \ + 
--hash=sha256:cf2430df4148b08fb4324b848672514b1385ae3807651f3567871f130a728cc3 \ + --hash=sha256:d0a3d8d6acf0c78a1fff0e210d224b821081330b8524e3e2bc5a68ef6ab5803d \ + --hash=sha256:d0c0c0003c10f54a591d220997dd27d953cd9ccc1a7294b40a4be5312be8797b \ + --hash=sha256:d1f059a4d795e646e1c37665b9d06062c62d0e8cc3c511fe01315973a6542e40 \ + --hash=sha256:d347a741ea871c2e278fde6c48f85136c96b8659b632fb57a7d1ce1872547600 \ + --hash=sha256:d3ee02d9e5f482cc8309134a91eeaacbdd2261ba111b0fef3748eeb4913e6a2c \ + --hash=sha256:d99ceffa25ac45d150e30bd9ed14ec6039f2aad0ffa6bb87a5936f5782fc1569 \ + --hash=sha256:e38a7d4e8f633a33b4c7350fbd8bad3b70bf81439ac67ac38916c4a86b465456 \ + --hash=sha256:e4682f5ba31f475d58884045c1a97a860a007d44938c4c0895f41d64481edbc9 \ + --hash=sha256:e5bb9425fe881d578aeca0b2b4b3d314ec88738706f66f219c194d67179337cb \ + --hash=sha256:e64198f6b856d48192bf921421fdd8ad8eb35e179086e99e99f711957ffedd6e \ + --hash=sha256:e6662686aeb633ad65be2a42b4cb00178b3fbf7b91878f9446075c404ada552f \ + --hash=sha256:ec54d5afa89c19c6dd8541a133be51ee1017a38b412b1321ccb8d6ddbeb4cf7d \ + --hash=sha256:f5b1dff3ad008dccf18e652283f5e5339d70bf8ba7c98bf848ac33db10f7bc7a \ + --hash=sha256:f8ec0c2fea1e886a19c3bee0cd19d862b3aa75dcdfb42ebe8ed30708df64687a \ + --hash=sha256:f9ebd0a36102fcad2f03696e8af4ae682793a5d30b46c647eaf280d6cfb32796 # via # -c release/ray_release/byod/requirements_compiled.txt # diffusers @@ -3080,9 +3094,9 @@ requests==2.31.0 \ # torchtext # transformers # wandb -requests-oauthlib==1.3.1 \ - --hash=sha256:2577c501a2fb8d05a304c09d090d6e47c306fef15809d102b327cf8364bddab5 \ - --hash=sha256:75beac4a47881eeb94d5ea5d6ad31ef88856affe2332b9aafb52c6452ccf0d7a +requests-oauthlib==2.0.0 \ + --hash=sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36 \ + --hash=sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9 # via # -c release/ray_release/byod/requirements_compiled.txt # google-auth-oauthlib @@ -3098,12 +3112,13 @@ retry-decorator==1.1.1 \ # -c release/ray_release/byod/requirements_compiled.txt # gcs-oauth2-boto-plugin # gsutil -rich==12.6.0 \ - --hash=sha256:a4eb26484f2c82589bd9a17c73d32a010b1e29d89f1604cd9bf3a2097b81bb5e \ - --hash=sha256:ba3a3775974105c221d31141f2c116f4fd65c5ceb0698657a11e9f295ec93fd0 +rich==13.3.2 \ + --hash=sha256:91954fe80cfb7985727a467ca98a7618e5dd15178cc2da10f553b36a93859001 \ + --hash=sha256:a104f37270bf677148d8acb07d33be1569eeee87e2d1beb286a4e9113caf6f2f # via # -c release/ray_release/byod/requirements_compiled.txt # memray + # typer rouge-score==0.1.2 \ --hash=sha256:c7d4da2683e68c9abf0135ef915d63a46643666f848e558a1b9f7ead17ff0f04 # via lm-eval @@ -3127,105 +3142,107 @@ s3transfer==0.6.2 \ sacrebleu==2.4.2 \ --hash=sha256:611a581d205828912f0b05f806b110180087184d3be2dc650fda7a729d6ecb89 # via lm-eval -safetensors==0.4.1 \ - --hash=sha256:04157d008385bea66d12fe90844a80d4a76dc25ec5230b5bd9a630496d1b7c03 \ - --hash=sha256:04dd14f53f5500eb4c4149674216ba1000670efbcf4b1b5c2643eb244e7882ea \ - --hash=sha256:097e9af2efa8778cd2f0cba451784253e62fa7cc9fc73c0744d27212f7294e25 \ - --hash=sha256:0bd0afd95c1e497f520e680ea01e0397c0868a3a3030e128438cf6e9e3fcd671 \ - --hash=sha256:0ddd050e01f3e843aa8c1c27bf68675b8a08e385d0045487af4d70418c3cb356 \ - --hash=sha256:16d8bbb7344e39cb9d4762e85c21df94ebeb03edac923dd94bb9ed8c10eac070 \ - --hash=sha256:1a45dbf03e8334d3a5dc93687d98b6dc422f5d04c7d519dac09b84a3c87dd7c6 \ - --hash=sha256:1d568628e9c43ca15eb96c217da73737c9ccb07520fafd8a1eba3f2750614105 \ - 
--hash=sha256:1faf5111c66a6ba91f85dff2e36edaaf36e6966172703159daeef330de4ddc7b \ - --hash=sha256:2297b359d91126c0f9d4fd17bae3cfa2fe3a048a6971b8db07db746ad92f850c \ - --hash=sha256:2304658e6ada81a5223225b4efe84748e760c46079bffedf7e321763cafb36c9 \ - --hash=sha256:2536b11ce665834201072e9397404170f93f3be10cca9995b909f023a04501ee \ - --hash=sha256:257d59e40a1b367cb544122e7451243d65b33c3f34d822a347f4eea6fdf97fdf \ - --hash=sha256:25a043cbb59d4f75e9dd87fdf5c009dd8830105a2c57ace49b72167dd9808111 \ - --hash=sha256:270b99885ec14abfd56c1d7f28ada81740a9220b4bae960c3de1c6fe84af9e4d \ - --hash=sha256:285b52a481e7ba93e29ad4ec5841ef2c4479ef0a6c633c4e2629e0508453577b \ - --hash=sha256:2b6a2814278b6660261aa9a9aae524616de9f1ec364e3716d219b6ed8f91801f \ - --hash=sha256:2d54c2f1826e790d1eb2d2512bfd0ee443f0206b423d6f27095057c7f18a0687 \ - --hash=sha256:2d87d993eaefe6611a9c241a8bd364a5f1ffed5771c74840363a6c4ed8d868f6 \ - --hash=sha256:2fe6926110e3d425c4b684a4379b7796fdc26ad7d16922ea1696c8e6ea7e920f \ - --hash=sha256:303d2c0415cf15a28f8d7f17379ea3c34c2b466119118a34edd9965983a1a8a6 \ - --hash=sha256:313e8472197bde54e3ec54a62df184c414582979da8f3916981b6a7954910a1b \ - --hash=sha256:35803201d980efcf964b75a0a2aee97fe5e9ecc5f3ad676b38fafdfe98e0620d \ - --hash=sha256:39d36f1d88468a87c437a1bc27c502e71b6ca44c385a9117a9f9ba03a75cc9c6 \ - --hash=sha256:3b0b7b2d5976fbed8a05e2bbdce5816a59e6902e9e7c7e07dc723637ed539787 \ - --hash=sha256:3b30abd0cddfe959d1daedf92edcd1b445521ebf7ddefc20860ed01486b33c90 \ - --hash=sha256:3c1b1d510c7aba71504ece87bf393ea82638df56303e371e5e2cf09d18977dd7 \ - --hash=sha256:3cfd1ca35eacc635f0eaa894e5c5ed83ffebd0f95cac298fd430014fa7323631 \ - --hash=sha256:3f6a520af7f2717c5ecba112041f2c8af1ca6480b97bf957aba81ed9642e654c \ - --hash=sha256:413e1f6ac248f7d1b755199a06635e70c3515493d3b41ba46063dec33aa2ebb7 \ - --hash=sha256:4177b456c6b0c722d82429127b5beebdaf07149d265748e97e0a34ff0b3694c8 \ - --hash=sha256:42c3710cec7e5c764c7999697516370bee39067de0aa089b7e2cfb97ac8c6b20 \ - --hash=sha256:44e230fbbe120de564b64f63ef3a8e6ff02840fa02849d9c443d56252a1646d4 \ - --hash=sha256:48901bd540f8a3c1791314bc5c8a170927bf7f6acddb75bf0a263d081a3637d4 \ - --hash=sha256:53134226053e56bd56e73f7db42596e7908ed79f3c9a1016e4c1dade593ac8e5 \ - --hash=sha256:573b6023a55a2f28085fc0a84e196c779b6cbef4d9e73acea14c8094fee7686f \ - --hash=sha256:5d95ea4d8b32233910734a904123bdd3979c137c461b905a5ed32511defc075f \ - --hash=sha256:5f25297148ec665f0deb8bd67e9564634d8d6841041ab5393ccfe203379ea88b \ - --hash=sha256:645b3f1138fce6e818e79d4128afa28f0657430764cc045419c1d069ff93f732 \ - --hash=sha256:660ca1d8bff6c7bc7c6b30b9b32df74ef3ab668f5df42cefd7588f0d40feadcb \ - --hash=sha256:6ace9e66a40f98a216ad661245782483cf79cf56eb2b112650bb904b0baa9db5 \ - --hash=sha256:6fd80f7794554091836d4d613d33a7d006e2b8d6ba014d06f97cebdfda744f64 \ - --hash=sha256:780dc21eb3fd32ddd0e8c904bdb0290f2454f4ac21ae71e94f9ce72db1900a5a \ - --hash=sha256:791edc10a3c359a2f5f52d5cddab0df8a45107d91027d86c3d44e57162e5d934 \ - --hash=sha256:7a8f6f679d97ea0135c7935c202feefbd042c149aa70ee759855e890c01c7814 \ - --hash=sha256:7ef010e9afcb4057fb6be3d0a0cfa07aac04fe97ef73fe4a23138d8522ba7c17 \ - --hash=sha256:7ff8a36e0396776d3ed9a106fc9a9d7c55d4439ca9a056a24bf66d343041d3e6 \ - --hash=sha256:82571d20288c975c1b30b08deb9b1c3550f36b31191e1e81fae87669a92217d0 \ - --hash=sha256:82cbb8f4d022f2e94498cbefca900698b8ded3d4f85212f47da614001ff06652 \ - --hash=sha256:83c2cfbe8c6304f0891e7bb378d56f66d2148972eeb5f747cd8a2246886f0d8c \ - 
--hash=sha256:845be0aafabf2a60c2d482d4e93023fecffe5e5443d801d7a7741bae9de41233 \ - --hash=sha256:88b4653059c903015284a9722f9a46838c654257173b279c8f6f46dbe80b612d \ - --hash=sha256:8b58ba13a9e82b4bc3fc221914f6ef237fe6c2adb13cede3ace64d1aacf49610 \ - --hash=sha256:8f69903ff49cb30b9227fb5d029bea276ea20d04b06803877a420c5b1b74c689 \ - --hash=sha256:8ff8e41c8037db17de0ea2a23bc684f43eaf623be7d34906fe1ac10985b8365e \ - --hash=sha256:911b48dc09e321a194def3a7431662ff4f03646832f3a8915bbf0f449b8a5fcb \ - --hash=sha256:998fbac99ca956c3a09fe07cc0b35fac26a521fa8865a690686d889f0ff4e4a6 \ - --hash=sha256:9a82bc2bd7a9a0e08239bdd6d7774d64121f136add93dfa344a2f1a6d7ef35fa \ - --hash=sha256:9d16b3b2fcc6fca012c74bd01b5619c655194d3e3c13e4d4d0e446eefa39a463 \ - --hash=sha256:a257de175c254d39ccd6a21341cd62eb7373b05c1e618a78096a56a857e0c316 \ - --hash=sha256:a79e16222106b2f5edbca1b8185661477d8971b659a3c814cc6f15181a9b34c8 \ - --hash=sha256:ae2d5a31cfb8a973a318f7c4d2cffe0bd1fe753cdf7bb41a1939d45a0a06f964 \ - --hash=sha256:ae2f67f04ed0bb2e56fd380a8bd3eef03f609df53f88b6f5c7e89c08e52aae00 \ - --hash=sha256:ae5497adc68669db2fed7cb2dad81e6a6106e79c9a132da3efdb6af1db1014fa \ - --hash=sha256:b287304f2b2220d51ccb51fd857761e78bcffbeabe7b0238f8dc36f2edfd9542 \ - --hash=sha256:b2f8877990a72ff595507b80f4b69036a9a1986a641f8681adf3425d97d3d2a5 \ - --hash=sha256:bb4cb3e37a9b961ddd68e873b29fe9ab4a081e3703412e34aedd2b7a8e9cafd9 \ - --hash=sha256:bbc2ce1f5ae5143a7fb72b71fa71db6a42b4f6cf912aa3acdc6b914084778e68 \ - --hash=sha256:bda3d98e2bcece388232cfc551ebf063b55bdb98f65ab54df397da30efc7dcc5 \ - --hash=sha256:bdc0d039e44a727824639824090bd8869535f729878fa248addd3dc01db30eae \ - --hash=sha256:bfa2e20342b81921b98edba52f8deb68843fa9c95250739a56b52ceda5ea5c61 \ - --hash=sha256:c3807ac3b16288dffebb3474b555b56fe466baa677dfc16290dcd02dca1ab228 \ - --hash=sha256:c3c9f0ca510e0de95abd6424789dcbc879942a3a4e29b0dfa99d9427bf1da75c \ - --hash=sha256:c8ed5d2c04cdc1afc6b3c28d59580448ac07732c50d94c15e14670f9c473a2ce \ - --hash=sha256:cba01c6b76e01ec453933b3b3c0157c59b52881c83eaa0f7666244e71aa75fd1 \ - --hash=sha256:ce7a28bc8af685a69d7e869d09d3e180a275e3281e29cf5f1c7319e231932cc7 \ - --hash=sha256:d10a9f7bae608ccfdc009351f01dc3d8535ff57f9488a58a4c38e45bf954fe93 \ - --hash=sha256:d3ac139377cfe71ba04573f1cda66e663b7c3e95be850e9e6c2dd4b5984bd513 \ - --hash=sha256:d5b3defa74f3723a388bfde2f5d488742bc4879682bd93267c09a3bcdf8f869b \ - --hash=sha256:d784938534e255473155e4d9f276ee69eb85455b6af1292172c731409bf9adee \ - --hash=sha256:d784a98c492c751f228a4a894c3b8a092ff08b24e73b5568938c28b8c0e8f8df \ - --hash=sha256:d8a85e3e47e0d4eebfaf9a58b40aa94f977a56050cb5598ad5396a9ee7c087c6 \ - --hash=sha256:d93321eea0dd7e81b283e47a1d20dee6069165cc158286316d0d06d340de8fe8 \ - --hash=sha256:da52ee0dc8ba03348ffceab767bd8230842fdf78f8a996e2a16445747143a778 \ - --hash=sha256:dab431699b5d45e0ca043bc580651ce9583dda594e62e245b7497adb32e99809 \ - --hash=sha256:dac4bb42f8679aadc59bd91a4c5a1784a758ad49d0912995945cd674089f628e \ - --hash=sha256:e056fb9e22d118cc546107f97dc28b449d88274207dd28872bd668c86216e4f6 \ - --hash=sha256:e09000b2599e1836314430f81a3884c66a5cbabdff5d9f175b5d560d4de38d78 \ - --hash=sha256:e0ccb5aa0f3be2727117e5631200fbb3a5b3a2b3757545a92647d6dd8be6658f \ - --hash=sha256:e57a5ab08b0ec7a7caf30d2ac79bb30c89168431aca4f8854464bb9461686925 \ - --hash=sha256:e9a7ffb1e551c6df51d267f5a751f042b183df22690f6feceac8d27364fd51d7 \ - --hash=sha256:e9c80ce0001efa16066358d2dd77993adc25f5a6c61850e4ad096a2232930bce \ - 
--hash=sha256:eb2c1da1cc39509d1a55620a5f4d14f8911c47a89c926a96e6f4876e864375a3 \ - --hash=sha256:edcf3121890b5f0616aa5a54683b1a5d2332037b970e507d6bb7841a3a596556 \ - --hash=sha256:f603bdd8deac6726d39f41688ed353c532dd53935234405d79e9eb53f152fbfb \ - --hash=sha256:f8934bdfd202ebd0697040a3dff40dd77bc4c5bbf3527ede0532f5e7fb4d970f \ - --hash=sha256:fdb4adb76e21bad318210310590de61c9f4adcef77ee49b4a234f9dc48867869 \ - --hash=sha256:fdb58dee173ef33634c3016c459d671ca12d11e6acf9db008261cbe58107e579 +safetensors==0.4.3 \ + --hash=sha256:018b691383026a2436a22b648873ed11444a364324e7088b99cd2503dd828400 \ + --hash=sha256:01e4b22e3284cd866edeabe4f4d896229495da457229408d2e1e4810c5187121 \ + --hash=sha256:01feb3089e5932d7e662eda77c3ecc389f97c0883c4a12b5cfdc32b589a811c3 \ + --hash=sha256:02318f01e332cc23ffb4f6716e05a492c5f18b1d13e343c49265149396284a44 \ + --hash=sha256:02ef3a24face643456020536591fbd3c717c5abaa2737ec428ccbbc86dffa7a4 \ + --hash=sha256:03a4447c784917c9bf01d8f2ac5080bc15c41692202cd5f406afba16629e84d6 \ + --hash=sha256:084fc436e317f83f7071fc6a62ca1c513b2103db325cd09952914b50f51cf78f \ + --hash=sha256:0bf4f9d6323d9f86eef5567eabd88f070691cf031d4c0df27a40d3b4aaee755b \ + --hash=sha256:0d52c958dc210265157573f81d34adf54e255bc2b59ded6218500c9b15a750eb \ + --hash=sha256:0d5ffc6a80f715c30af253e0e288ad1cd97a3d0086c9c87995e5093ebc075e50 \ + --hash=sha256:0d9cd8e1560dfc514b6d7859247dc6a86ad2f83151a62c577428d5102d872721 \ + --hash=sha256:0dd37306546b58d3043eb044c8103a02792cc024b51d1dd16bd3dd1f334cb3ed \ + --hash=sha256:1139eb436fd201c133d03c81209d39ac57e129f5e74e34bb9ab60f8d9b726270 \ + --hash=sha256:19bbdf95de2cf64f25cd614c5236c8b06eb2cfa47cbf64311f4b5d80224623a3 \ + --hash=sha256:1ab6527a20586d94291c96e00a668fa03f86189b8a9defa2cdd34a1a01acc7d5 \ + --hash=sha256:1b89381517891a7bb7d1405d828b2bf5d75528299f8231e9346b8eba092227f9 \ + --hash=sha256:1f598b713cc1a4eb31d3b3203557ac308acf21c8f41104cdd74bf640c6e538e3 \ + --hash=sha256:22d21760dc6ebae42e9c058d75aa9907d9f35e38f896e3c69ba0e7b213033856 \ + --hash=sha256:22f3b5d65e440cec0de8edaa672efa888030802e11c09b3d6203bff60ebff05a \ + --hash=sha256:2a0deb16a1d3ea90c244ceb42d2c6c276059616be21a19ac7101aa97da448faf \ + --hash=sha256:2a1f4430cc0c9d6afa01214a4b3919d0a029637df8e09675ceef1ca3f0dfa0df \ + --hash=sha256:2d603846a8585b9432a0fd415db1d4c57c0f860eb4aea21f92559ff9902bae4d \ + --hash=sha256:2f85fc50c4e07a21e95c24e07460fe6f7e2859d0ce88092838352b798ce711c2 \ + --hash=sha256:309b10dbcab63269ecbf0e2ca10ce59223bb756ca5d431ce9c9eeabd446569da \ + --hash=sha256:3615a96dd2dcc30eb66d82bc76cda2565f4f7bfa89fcb0e31ba3cea8a1a9ecbb \ + --hash=sha256:38e2a8666178224a51cca61d3cb4c88704f696eac8f72a49a598a93bbd8a4af9 \ + --hash=sha256:393e6e391467d1b2b829c77e47d726f3b9b93630e6a045b1d1fca67dc78bf632 \ + --hash=sha256:3f9cdca09052f585e62328c1c2923c70f46814715c795be65f0b93f57ec98a02 \ + --hash=sha256:41a727a7f5e6ad9f1db6951adee21bbdadc632363d79dc434876369a17de6ad6 \ + --hash=sha256:420a98f593ff9930f5822560d14c395ccbc57342ddff3b463bc0b3d6b1951550 \ + --hash=sha256:446e9fe52c051aeab12aac63d1017e0f68a02a92a027b901c4f8e931b24e5397 \ + --hash=sha256:455d538aa1aae4a8b279344a08136d3f16334247907b18a5c3c7fa88ef0d3c46 \ + --hash=sha256:4f9bac020faba7f5dc481e881b14b6425265feabb5bfc552551d21189c0eddc3 \ + --hash=sha256:53c4879b9c6bd7cd25d114ee0ef95420e2812e676314300624594940a8d6a91f \ + --hash=sha256:5757e4688f20df083e233b47de43845d1adb7e17b6cf7da5f8444416fc53828d \ + --hash=sha256:585c9ae13a205807b63bef8a37994f30c917ff800ab8a1ca9c9b5d73024f97ee \ + 
--hash=sha256:5d07cbca5b99babb692d76d8151bec46f461f8ad8daafbfd96b2fca40cadae65 \ + --hash=sha256:5fc6775529fb9f0ce2266edd3e5d3f10aab068e49f765e11f6f2a63b5367021d \ + --hash=sha256:622afd28968ef3e9786562d352659a37de4481a4070f4ebac883f98c5836563e \ + --hash=sha256:6f9568f380f513a60139971169c4a358b8731509cc19112369902eddb33faa4d \ + --hash=sha256:70a5319ef409e7f88686a46607cbc3c428271069d8b770076feaf913664a07ac \ + --hash=sha256:74707624b81f1b7f2b93f5619d4a9f00934d5948005a03f2c1845ffbfff42212 \ + --hash=sha256:7c4fa560ebd4522adddb71dcd25d09bf211b5634003f015a4b815b7647d62ebe \ + --hash=sha256:7de32d0d34b6623bb56ca278f90db081f85fb9c5d327e3c18fd23ac64f465768 \ + --hash=sha256:840b7ac0eff5633e1d053cc9db12fdf56b566e9403b4950b2dc85393d9b88d67 \ + --hash=sha256:840caf38d86aa7014fe37ade5d0d84e23dcfbc798b8078015831996ecbc206a3 \ + --hash=sha256:8651c7299cbd8b4161a36cd6a322fa07d39cd23535b144d02f1c1972d0c62f3c \ + --hash=sha256:868ad1b6fc41209ab6bd12f63923e8baeb1a086814cb2e81a65ed3d497e0cf8f \ + --hash=sha256:88887f69f7a00cf02b954cdc3034ffb383b2303bc0ab481d4716e2da51ddc10e \ + --hash=sha256:89f9f17b0dacb913ed87d57afbc8aad85ea42c1085bd5de2f20d83d13e9fc4b2 \ + --hash=sha256:8c496c5401c1b9c46d41a7688e8ff5b0310a3b9bae31ce0f0ae870e1ea2b8caf \ + --hash=sha256:8cf18888606dad030455d18f6c381720e57fc6a4170ee1966adb7ebc98d4d6a3 \ + --hash=sha256:8d22c1a10dff3f64d0d68abb8298a3fd88ccff79f408a3e15b3e7f637ef5c980 \ + --hash=sha256:90964917f5b0fa0fa07e9a051fbef100250c04d150b7026ccbf87a34a54012e0 \ + --hash=sha256:9bfb92f82574d9e58401d79c70c716985dc049b635fef6eecbb024c79b2c46ad \ + --hash=sha256:9c6ad011c1b4e3acff058d6b090f1da8e55a332fbf84695cf3100c649cc452d1 \ + --hash=sha256:a11c374eb63a9c16c5ed146457241182f310902bd2a9c18255781bb832b6748b \ + --hash=sha256:a7cef55929dcbef24af3eb40bedec35d82c3c2fa46338bb13ecf3c5720af8a61 \ + --hash=sha256:a844cdb5d7cbc22f5f16c7e2a0271170750763c4db08381b7f696dbd2c78a361 \ + --hash=sha256:ae7613a119a71a497d012ccc83775c308b9c1dab454806291427f84397d852fd \ + --hash=sha256:b1648568667f820b8c48317c7006221dc40aced1869908c187f493838a1362bc \ + --hash=sha256:b1e31be7945f66be23f4ec1682bb47faa3df34cb89fc68527de6554d3c4258a4 \ + --hash=sha256:b277482120df46e27a58082df06a15aebda4481e30a1c21eefd0921ae7e03f65 \ + --hash=sha256:b7ffba80aa49bd09195145a7fd233a7781173b422eeb995096f2b30591639517 \ + --hash=sha256:b852e47eb08475c2c1bd8131207b405793bfc20d6f45aff893d3baaad449ed14 \ + --hash=sha256:bb4f8c5d0358a31e9a08daeebb68f5e161cdd4018855426d3f0c23bb51087055 \ + --hash=sha256:bbae3b4b9d997971431c346edbfe6e41e98424a097860ee872721e176040a893 \ + --hash=sha256:befdf0167ad626f22f6aac6163477fcefa342224a22f11fdd05abb3995c1783c \ + --hash=sha256:c0acbe31340ab150423347e5b9cc595867d814244ac14218932a5cf1dd38eb39 \ + --hash=sha256:c41e1893d1206aa7054029681778d9a58b3529d4c807002c156d58426c225173 \ + --hash=sha256:c59d51f182c729f47e841510b70b967b0752039f79f1de23bcdd86462a9b09ee \ + --hash=sha256:cd6fff9e56df398abc5866b19a32124815b656613c1c5ec0f9350906fd798aac \ + --hash=sha256:cdd0a3b5da66e7f377474599814dbf5cbf135ff059cc73694de129b58a5e8a2c \ + --hash=sha256:cf476bca34e1340ee3294ef13e2c625833f83d096cfdf69a5342475602004f95 \ + --hash=sha256:d0dd4a1db09db2dba0f94d15addc7e7cd3a7b0d393aa4c7518c39ae7374623c3 \ + --hash=sha256:d1456f814655b224d4bf6e7915c51ce74e389b413be791203092b7ff78c936dd \ + --hash=sha256:d14d30c25897b2bf19b6fb5ff7e26cc40006ad53fd4a88244fdf26517d852dd7 \ + --hash=sha256:d244bcafeb1bc06d47cfee71727e775bca88a8efda77a13e7306aae3813fa7e4 \ + 
--hash=sha256:d8815b5e1dac85fc534a97fd339e12404db557878c090f90442247e87c8aeaea \ + --hash=sha256:d88b33980222085dd6001ae2cad87c6068e0991d4f5ccf44975d216db3b57376 \ + --hash=sha256:d8c5093206ef4b198600ae484230402af6713dab1bd5b8e231905d754022bec7 \ + --hash=sha256:d9c289f140a9ae4853fc2236a2ffc9a9f2d5eae0cb673167e0f1b8c18c0961ac \ + --hash=sha256:dcf5705cab159ce0130cd56057f5f3425023c407e170bca60b4868048bae64fd \ + --hash=sha256:e011cc162503c19f4b1fd63dfcddf73739c7a243a17dac09b78e57a00983ab35 \ + --hash=sha256:e066e8861eef6387b7c772344d1fe1f9a72800e04ee9a54239d460c400c72aab \ + --hash=sha256:e0b2104df1579d6ba9052c0ae0e3137c9698b2d85b0645507e6fd1813b70931a \ + --hash=sha256:e375d975159ac534c7161269de24ddcd490df2157b55c1a6eeace6cbb56903f0 \ + --hash=sha256:e4119532cd10dba04b423e0f86aecb96cfa5a602238c0aa012f70c3a40c44b50 \ + --hash=sha256:e7dbbde64b6c534548696808a0e01276d28ea5773bc9a2dfb97a88cd3dffe3df \ + --hash=sha256:e9afd5358719f1b2cf425fad638fc3c887997d6782da317096877e5b15b2ce93 \ + --hash=sha256:ec4b52ce9a396260eb9731eb6aea41a7320de22ed73a1042c2230af0212758ce \ + --hash=sha256:edb5698a7bc282089f64c96c477846950358a46ede85a1c040e0230344fdde10 \ + --hash=sha256:ee463219d9ec6c2be1d331ab13a8e0cd50d2f32240a81d498266d77d07b7e71e \ + --hash=sha256:efcc860be094b8d19ac61b452ec635c7acb9afa77beb218b1d7784c6d41fe8ad \ + --hash=sha256:f5e6883af9a68c0028f70a4c19d5a6ab6238a379be36ad300a22318316c00cb0 \ + --hash=sha256:f9650713b2cfa9537a2baf7dd9fee458b24a0aaaa6cafcea8bdd5fb2b8efdc34 \ + --hash=sha256:faefeb3b81bdfb4e5a55b9bbdf3d8d8753f65506e1d67d03f5c851a6c87150e9 \ + --hash=sha256:fb9c65bd82f9ef3ce4970dc19ee86be5f6f93d032159acf35e663c6bea02b237 \ + --hash=sha256:fe746d03ed8d193674a26105e4f0fe6c726f5bb602ffc695b409eaf02f04763d \ + --hash=sha256:fef5d70683643618244a4f5221053567ca3e77c2531e42ad48ae05fae909f542 # via # -c release/ray_release/byod/requirements_compiled.txt # accelerate @@ -3346,9 +3363,9 @@ sentencepiece==0.1.96 \ # via # -c release/ray_release/byod/requirements_compiled.txt # -r release/ray_release/byod/requirements_ml_byod_3.9.in -sentry-sdk==1.37.1 \ - --hash=sha256:7cd324dd2877fdc861f75cba4242bce23a58272a6fea581fcb218bb718bd9cc5 \ - --hash=sha256:a249c7364827ee89daaa078bb8b56ece0b3d52d9130961bef2302b79bdf7fe70 +sentry-sdk==2.10.0 \ + --hash=sha256:545fcc6e36c335faa6d6cda84669b6e17025f31efbf3b2211ec14efe008b75d1 \ + --hash=sha256:87b3d413c87d8e7f816cc9334bff255a83d8b577db2b22042651c30c19c09190 # via # -c release/ray_release/byod/requirements_compiled.txt # wandb @@ -3444,6 +3461,12 @@ setproctitle==1.3.3 \ # via # -c release/ray_release/byod/requirements_compiled.txt # wandb +shellingham==1.5.4 \ + --hash=sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686 \ + --hash=sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de + # via + # -c release/ray_release/byod/requirements_compiled.txt + # typer six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 @@ -3470,50 +3493,15 @@ smmap==5.0.1 \ # via # -c release/ray_release/byod/requirements_compiled.txt # gitdb -sniffio==1.3.0 \ - --hash=sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101 \ - --hash=sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384 +sniffio==1.3.1 \ + --hash=sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2 \ + --hash=sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc # via # -c 
release/ray_release/byod/requirements_compiled.txt # anyio -sqlalchemy==1.4.17 \ - --hash=sha256:196fb6bb2733834e506c925d7532f8eabad9d2304deef738a40846e54c31e236 \ - --hash=sha256:1dd77acbc19bee9c0ba858ff5e4e5d5c60895495c83b4df9bcdf4ad5e9b74f21 \ - --hash=sha256:216ff28fe803885ceb5b131dcee6507d28d255808dd5bcffcb3b5fa75be2e102 \ - --hash=sha256:461a4ea803ce0834822f372617a68ac97f9fa1281f2a984624554c651d7c3ae1 \ - --hash=sha256:4b09191ed22af149c07a880f309b7740f3f782ff13325bae5c6168a6aa57e715 \ - --hash=sha256:4c5e20666b33b03bf7f14953f0deb93007bf8c1342e985bd7c7cf25f46fac579 \ - --hash=sha256:4d93b62e98248e3e1ac1e91c2e6ee1e7316f704be1f734338b350b6951e6c175 \ - --hash=sha256:5732858e56d32fa7e02468f4fd2d8f01ddf709e5b93d035c637762890f8ed8b6 \ - --hash=sha256:58c02d1771bb0e61bc9ced8f3b36b5714d9ece8fd4bdbe2a44a892574c3bbc3c \ - --hash=sha256:651cdb3adcee13624ba22d5ff3e96f91e16a115d2ca489ddc16a8e4c217e8509 \ - --hash=sha256:6fe1c8dc26bc0005439cb78ebc78772a22cccc773f5a0e67cb3002d791f53f0f \ - --hash=sha256:7222f3236c280fab3a2d76f903b493171f0ffc29667538cc388a5d5dd0216a88 \ - --hash=sha256:7dc3d3285fb682316d580d84e6e0840fdd8ffdc05cb696db74b9dd746c729908 \ - --hash=sha256:7e45043fe11d503e1c3f9dcf5b42f92d122a814237cd9af68a11dae46ecfcae1 \ - --hash=sha256:7eb55d5583076c03aaf1510473fad2a61288490809049cb31028af56af7068ee \ - --hash=sha256:82922a320d38d7d6aa3a8130523ec7e8c70fa95f7ca7d0fd6ec114b626e4b10b \ - --hash=sha256:8e133e2551fa99c75849848a4ac08efb79930561eb629dd7d2dc9b7ee05256e6 \ - --hash=sha256:949ac299903d2ed8419086f81847381184e2264f3431a33af4679546dcc87f01 \ - --hash=sha256:a2d225c8863a76d15468896dc5af36f1e196b403eb9c7e0151e77ffab9e7df57 \ - --hash=sha256:a5f00a2be7d777119e15ccfb5ba0b2a92e8a193959281089d79821a001095f80 \ - --hash=sha256:b0ad951a6e590bbcfbfeadc5748ef5ec8ede505a8119a71b235f7481cc08371c \ - --hash=sha256:b59b2c0a3b1d93027f6b6b8379a50c354483fe1ebe796c6740e157bb2e06d39a \ - --hash=sha256:bc89e37c359dcd4d75b744e5e81af128ba678aa2ecea4be957e80e6e958a1612 \ - --hash=sha256:bde055c019e6e449ebc4ec61abd3e08690abeb028c7ada2a3b95d8e352b7b514 \ - --hash=sha256:c367ed95d41df584f412a9419b5ece85b0d6c2a08a51ae13ae47ef74ff9a9349 \ - --hash=sha256:dde05ae0987e43ec84e64d6722ce66305eda2a5e2b7d6fda004b37aabdfbb909 \ - --hash=sha256:ee6e7ca09ff274c55d19a1e15ee6f884fa0230c0d9b8d22a456e249d08dee5bf \ - --hash=sha256:f1c68f7bd4a57ffdb85eab489362828dddf6cd565a4c18eda4c446c1d5d3059d \ - --hash=sha256:f63e1f531a8bf52184e2afb53648511f3f8534decb7575b483a583d3cd8d13ed \ - --hash=sha256:fdad4a33140b77df61d456922b7974c1f1bb2c35238f6809f078003a620c4734 - # via - # -c release/ray_release/byod/requirements_compiled.txt - # alembic - # dataset -sqlglot==20.4.0 \ - --hash=sha256:401a2933298cf66901704cf2029272d8243ee72ac47b9fd8784254401b43ee43 \ - --hash=sha256:9a42135d0530de8150a2c5106e0c52abd3396d92501ebe97df7b371d20de5dc9 +sqlglot==25.6.1 \ + --hash=sha256:c1fcbaa00429979f16fb8cea20279a8b3f5312e76d97abb8f8c6a9b21be450d7 \ + --hash=sha256:ea40f3bf8452e2c1a696fe120163190bd67e49b346336e7db6d34400b57b7601 # via # -c release/ray_release/byod/requirements_compiled.txt # fugue @@ -3571,9 +3559,9 @@ statsmodels==0.14.0 \ # via # -c release/ray_release/byod/requirements_compiled.txt # statsforecast -sympy==1.12 \ - --hash=sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5 \ - --hash=sha256:ebf595c8dac3e0fdc4152c51878b498396ec7f30e7a914d6071e674d49420fb8 +sympy==1.13.1 \ + --hash=sha256:9cebf7e04ff162015ce31c9c6c9144daa34a93bd082f54fd8f12deca4f47515f \ + 
--hash=sha256:db36cdc64bf61b9b24578b6f7bab1ecdd2452cf008f34faa33776680c26d66f8 # via # -c release/ray_release/byod/requirements_compiled.txt # torch @@ -3764,17 +3752,12 @@ tokenizers==0.15.2 \ # via # -c release/ray_release/byod/requirements_compiled.txt # transformers -toml==0.10.2 \ - --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \ - --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f - # via - # -c release/ray_release/byod/requirements_compiled.txt - # jupytext tomli==2.0.1 \ --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \ --hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f # via # -c release/ray_release/byod/requirements_compiled.txt + # jupytext # pytest torch==2.3.0 \ --hash=sha256:09c81c5859a5b819956c6925a405ef1cdda393c9d8a01ce3851453f699d3358c \ @@ -3881,9 +3864,9 @@ tqdm-multiprocess==0.0.11 \ --hash=sha256:3ebdf03e7a675150fa0bbceaa9c3c64b8cb556e9ffafa4fe6c078e51820524aa \ --hash=sha256:a74002a1222ea9cbe8cdc9bd460108c6009be359621fbee9b92d0515d4d180f7 # via lm-eval -traitlets==5.14.0 \ - --hash=sha256:f14949d23829023013c47df20b4a76ccd1a85effb786dc060f34de7948361b33 \ - --hash=sha256:fcdaa8ac49c04dfa0ed3ee3384ef6dfdb5d6f3741502be247279407679296772 +traitlets==5.14.3 \ + --hash=sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7 \ + --hash=sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f # via # -c release/ray_release/byod/requirements_compiled.txt # comm @@ -3900,9 +3883,9 @@ transformers==4.36.2 \ # -r release/ray_release/byod/requirements_ml_byod_3.9.in # lm-eval # peft -triad==0.9.3 \ - --hash=sha256:1862b5a78deb9d475c7747b605f2b32457e96c6719f8cbc4e7e95147f34f6f64 \ - --hash=sha256:e4dff41ffbb98bad4d9741c9dd632890cdfe0b873f23d76d2b5f9ca41d4440a7 +triad==0.9.8 \ + --hash=sha256:2c0ba7d83977c6d4e7b59e3cc70727f858014ef7676c62d184aa8e63f7bef5de \ + --hash=sha256:5b67673124891981daf8afbab44b2e6358932ca35ef3ff38a25bc3e0f6f03f17 # via # -c release/ray_release/byod/requirements_compiled.txt # adagio @@ -3930,9 +3913,9 @@ typepy[datetime]==1.3.2 \ # dataproperty # pytablewriter # tabledata -typer==0.9.0 \ - --hash=sha256:50922fd79aea2f4751a8e0408ff10d2662bd0c8bbfa84755a699f3bada2978b2 \ - --hash=sha256:5d96d986a21493606a358cae4461bd8cdf83cbf33a5aa950ae629ca3b51467ee +typer==0.12.3 \ + --hash=sha256:070d7ca53f785acbccba8e7d28b08dcd88f79f1fbda035ade0aecec71ca5c914 \ + --hash=sha256:49e73131481d804288ef62598d97a1ceef3058905aa536a1134f90891ba35482 # via # -c release/ray_release/byod/requirements_compiled.txt # -r release/ray_release/byod/requirements_ml_byod_3.9.in @@ -3942,7 +3925,6 @@ typing-extensions==4.8.0 \ # via # -c release/ray_release/byod/requirements_compiled.txt # -r release/ray_release/byod/requirements_ml_byod_3.9.in - # alembic # fastapi # huggingface-hub # ipython @@ -3954,9 +3936,9 @@ typing-extensions==4.8.0 \ # torch # typer # wandb -urllib3==1.26.18 \ - --hash=sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07 \ - --hash=sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0 +urllib3==1.26.19 \ + --hash=sha256:37a0344459b199fce0e80b0d3569837ec6b6937435c5244e7fd73fa6006830f3 \ + --hash=sha256:3e3d753a8618b86d7de333b4223005f68720bcd6a7d2bcb9fbd2229ec7c1e429 # via # -c release/ray_release/byod/requirements_compiled.txt # -r release/ray_release/byod/requirements_ml_byod_3.9.in @@ -3965,9 +3947,9 @@ urllib3==1.26.18 \ # requests # responses # sentry-sdk -utilsforecast==0.0.23 \ - 
--hash=sha256:188daa121c528965e26a3a38f409b66a15f9eef2b44684cc9426f3ddb1146841 \ - --hash=sha256:290882da47ebc7887663c05c46c67e19bc63898220be444ca6173d0a5fdeee4a +utilsforecast==0.2.0 \ + --hash=sha256:3db4245da4e361f26c8eaeef216c2d1206b20defbb033bf11d3e66ce2b1d6ef8 \ + --hash=sha256:a4825bf8da547e3dc552f9b9a7a8159341a118c3a5d122191f09bc3683cba433 # via # -c release/ray_release/byod/requirements_compiled.txt # statsforecast @@ -3992,9 +3974,9 @@ wandb==0.17.0 \ # via # -c release/ray_release/byod/requirements_compiled.txt # -r release/ray_release/byod/requirements_ml_byod_3.9.in -wcwidth==0.2.12 \ - --hash=sha256:f01c104efdf57971bcb756f054dd58ddec5204dd15fa31d6503ea57947d97c02 \ - --hash=sha256:f26ec43d96c8cbfed76a5075dac87680124fa84e0855195a6184da9c187f133c +wcwidth==0.2.13 \ + --hash=sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859 \ + --hash=sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5 # via # -c release/ray_release/byod/requirements_compiled.txt # prompt-toolkit @@ -4011,13 +3993,15 @@ widgetsnbextension==4.0.11 \ # via # -c release/ray_release/byod/requirements_compiled.txt # ipywidgets -xgboost==1.7.6 \ - --hash=sha256:127cf1f5e2ec25cd41429394c6719b87af1456ce583e89f0bffd35d02ad18bcb \ - --hash=sha256:1c527554a400445e0c38186039ba1a00425dcdb4e40b37eed0e74cb39a159c47 \ - --hash=sha256:281c3c6f4fbed2d36bf95cd02a641afa95e72e9abde70064056da5e76233e8df \ - --hash=sha256:4c34675b4d2678c624ddde5d45361e7e16046923e362e4e609b88353e6b87124 \ - --hash=sha256:59b4b366d2cafc7f645e87d897983a5b59be02876194b1d213bd8d8b811d8ce8 \ - --hash=sha256:b1d5db49b199152d62bd9217c98760207d3de86d2b9d243260c573ffe638f80a +xgboost==2.1.0 \ + --hash=sha256:19d145eb847b070c32342b1bf2d7331c102783e07a484f8b13b7d759d707c6b0 \ + --hash=sha256:43b16205689249d7509daf7a6ab00ad0e6c570b3a9c263cb32b26e39d9477bb3 \ + --hash=sha256:7144980923e76ce741c7b03a14d3bd7514db6de5c7cabe96ba95b229d274f5ca \ + --hash=sha256:73673c9bb85927db7fe2e3aed6df6d35dba708cfd6767cc63d4ea11dda2dede5 \ + --hash=sha256:74904b91c42524a6c32147fe5718569e78fb65911ff4499b053f81d0964514d4 \ + --hash=sha256:840a0c6e2119d8c8f260a5dace996ea064a267f62b301a25d7d452488a7ac860 \ + --hash=sha256:b2a456eb0f3d3e8fd8ab37e44ac288292bf8ea8744c294be9fd88713d27af810 \ + --hash=sha256:cedc2e386e686795735448fd4597533acacc5ba6fb47dd910c204c468b80bb96 # via # -c release/ray_release/byod/requirements_compiled.txt # -r release/ray_release/byod/requirements_ml_byod_3.9.in @@ -4134,103 +4118,103 @@ xxhash==3.4.1 \ # -c release/ray_release/byod/requirements_compiled.txt # datasets # evaluate -yarl==1.9.3 \ - --hash=sha256:09c19e5f4404574fcfb736efecf75844ffe8610606f3fccc35a1515b8b6712c4 \ - --hash=sha256:0ab5baaea8450f4a3e241ef17e3d129b2143e38a685036b075976b9c415ea3eb \ - --hash=sha256:0d155a092bf0ebf4a9f6f3b7a650dc5d9a5bbb585ef83a52ed36ba46f55cc39d \ - --hash=sha256:126638ab961633f0940a06e1c9d59919003ef212a15869708dcb7305f91a6732 \ - --hash=sha256:1a0a4f3aaa18580038cfa52a7183c8ffbbe7d727fe581300817efc1e96d1b0e9 \ - --hash=sha256:1d93461e2cf76c4796355494f15ffcb50a3c198cc2d601ad8d6a96219a10c363 \ - --hash=sha256:26a1a8443091c7fbc17b84a0d9f38de34b8423b459fb853e6c8cdfab0eacf613 \ - --hash=sha256:271d63396460b6607b588555ea27a1a02b717ca2e3f2cf53bdde4013d7790929 \ - --hash=sha256:28a108cb92ce6cf867690a962372996ca332d8cda0210c5ad487fe996e76b8bb \ - --hash=sha256:29beac86f33d6c7ab1d79bd0213aa7aed2d2f555386856bb3056d5fdd9dab279 \ - --hash=sha256:2c757f64afe53a422e45e3e399e1e3cf82b7a2f244796ce80d8ca53e16a49b9f \ - 
--hash=sha256:2dad8166d41ebd1f76ce107cf6a31e39801aee3844a54a90af23278b072f1ccf \ - --hash=sha256:2dc72e891672343b99db6d497024bf8b985537ad6c393359dc5227ef653b2f17 \ - --hash=sha256:2f3c8822bc8fb4a347a192dd6a28a25d7f0ea3262e826d7d4ef9cc99cd06d07e \ - --hash=sha256:32435d134414e01d937cd9d6cc56e8413a8d4741dea36af5840c7750f04d16ab \ - --hash=sha256:3cfa4dbe17b2e6fca1414e9c3bcc216f6930cb18ea7646e7d0d52792ac196808 \ - --hash=sha256:3d5434b34100b504aabae75f0622ebb85defffe7b64ad8f52b8b30ec6ef6e4b9 \ - --hash=sha256:4003f380dac50328c85e85416aca6985536812c082387255c35292cb4b41707e \ - --hash=sha256:44e91a669c43f03964f672c5a234ae0d7a4d49c9b85d1baa93dec28afa28ffbd \ - --hash=sha256:4a14907b597ec55740f63e52d7fee0e9ee09d5b9d57a4f399a7423268e457b57 \ - --hash=sha256:4ce77d289f8d40905c054b63f29851ecbfd026ef4ba5c371a158cfe6f623663e \ - --hash=sha256:4d6d74a97e898c1c2df80339aa423234ad9ea2052f66366cef1e80448798c13d \ - --hash=sha256:51382c72dd5377861b573bd55dcf680df54cea84147c8648b15ac507fbef984d \ - --hash=sha256:525cd69eff44833b01f8ef39aa33a9cc53a99ff7f9d76a6ef6a9fb758f54d0ff \ - --hash=sha256:53ec65f7eee8655bebb1f6f1607760d123c3c115a324b443df4f916383482a67 \ - --hash=sha256:5f74b015c99a5eac5ae589de27a1201418a5d9d460e89ccb3366015c6153e60a \ - --hash=sha256:6280353940f7e5e2efaaabd686193e61351e966cc02f401761c4d87f48c89ea4 \ - --hash=sha256:632c7aeb99df718765adf58eacb9acb9cbc555e075da849c1378ef4d18bf536a \ - --hash=sha256:6465d36381af057d0fab4e0f24ef0e80ba61f03fe43e6eeccbe0056e74aadc70 \ - --hash=sha256:66a6dbf6ca7d2db03cc61cafe1ee6be838ce0fbc97781881a22a58a7c5efef42 \ - --hash=sha256:6d350388ba1129bc867c6af1cd17da2b197dff0d2801036d2d7d83c2d771a682 \ - --hash=sha256:7217234b10c64b52cc39a8d82550342ae2e45be34f5bff02b890b8c452eb48d7 \ - --hash=sha256:721ee3fc292f0d069a04016ef2c3a25595d48c5b8ddc6029be46f6158d129c92 \ - --hash=sha256:72a57b41a0920b9a220125081c1e191b88a4cdec13bf9d0649e382a822705c65 \ - --hash=sha256:73cc83f918b69110813a7d95024266072d987b903a623ecae673d1e71579d566 \ - --hash=sha256:778df71c8d0c8c9f1b378624b26431ca80041660d7be7c3f724b2c7a6e65d0d6 \ - --hash=sha256:79e1df60f7c2b148722fb6cafebffe1acd95fd8b5fd77795f56247edaf326752 \ - --hash=sha256:7c86d0d0919952d05df880a1889a4f0aeb6868e98961c090e335671dea5c0361 \ - --hash=sha256:7eaf13af79950142ab2bbb8362f8d8d935be9aaf8df1df89c86c3231e4ff238a \ - --hash=sha256:828235a2a169160ee73a2fcfb8a000709edf09d7511fccf203465c3d5acc59e4 \ - --hash=sha256:8535e111a064f3bdd94c0ed443105934d6f005adad68dd13ce50a488a0ad1bf3 \ - --hash=sha256:88d2c3cc4b2f46d1ba73d81c51ec0e486f59cc51165ea4f789677f91a303a9a7 \ - --hash=sha256:8a2538806be846ea25e90c28786136932ec385c7ff3bc1148e45125984783dc6 \ - --hash=sha256:8dab30b21bd6fb17c3f4684868c7e6a9e8468078db00f599fb1c14e324b10fca \ - --hash=sha256:8f18a7832ff85dfcd77871fe677b169b1bc60c021978c90c3bb14f727596e0ae \ - --hash=sha256:946db4511b2d815979d733ac6a961f47e20a29c297be0d55b6d4b77ee4b298f6 \ - --hash=sha256:96758e56dceb8a70f8a5cff1e452daaeff07d1cc9f11e9b0c951330f0a2396a7 \ - --hash=sha256:9a172c3d5447b7da1680a1a2d6ecdf6f87a319d21d52729f45ec938a7006d5d8 \ - --hash=sha256:9a5211de242754b5e612557bca701f39f8b1a9408dff73c6db623f22d20f470e \ - --hash=sha256:9df9a0d4c5624790a0dea2e02e3b1b3c69aed14bcb8650e19606d9df3719e87d \ - --hash=sha256:aa4643635f26052401750bd54db911b6342eb1a9ac3e74f0f8b58a25d61dfe41 \ - --hash=sha256:aed37db837ecb5962469fad448aaae0f0ee94ffce2062cf2eb9aed13328b5196 \ - --hash=sha256:af52725c7c39b0ee655befbbab5b9a1b209e01bb39128dce0db226a10014aacc \ - 
--hash=sha256:b0b8c06afcf2bac5a50b37f64efbde978b7f9dc88842ce9729c020dc71fae4ce \ - --hash=sha256:b61e64b06c3640feab73fa4ff9cb64bd8182de52e5dc13038e01cfe674ebc321 \ - --hash=sha256:b7831566595fe88ba17ea80e4b61c0eb599f84c85acaa14bf04dd90319a45b90 \ - --hash=sha256:b8bc5b87a65a4e64bc83385c05145ea901b613d0d3a434d434b55511b6ab0067 \ - --hash=sha256:b8d51817cf4b8d545963ec65ff06c1b92e5765aa98831678d0e2240b6e9fd281 \ - --hash=sha256:b9f9cafaf031c34d95c1528c16b2fa07b710e6056b3c4e2e34e9317072da5d1a \ - --hash=sha256:bb72d2a94481e7dc7a0c522673db288f31849800d6ce2435317376a345728225 \ - --hash=sha256:c25ec06e4241e162f5d1f57c370f4078797ade95c9208bd0c60f484834f09c96 \ - --hash=sha256:c405d482c320a88ab53dcbd98d6d6f32ada074f2d965d6e9bf2d823158fa97de \ - --hash=sha256:c4472fe53ebf541113e533971bd8c32728debc4c6d8cc177f2bff31d011ec17e \ - --hash=sha256:c4b1efb11a8acd13246ffb0bee888dd0e8eb057f8bf30112e3e21e421eb82d4a \ - --hash=sha256:c5f3faeb8100a43adf3e7925d556801d14b5816a0ac9e75e22948e787feec642 \ - --hash=sha256:c6f034386e5550b5dc8ded90b5e2ff7db21f0f5c7de37b6efc5dac046eb19c10 \ - --hash=sha256:c99ddaddb2fbe04953b84d1651149a0d85214780e4d0ee824e610ab549d98d92 \ - --hash=sha256:ca6b66f69e30f6e180d52f14d91ac854b8119553b524e0e28d5291a724f0f423 \ - --hash=sha256:cccdc02e46d2bd7cb5f38f8cc3d9db0d24951abd082b2f242c9e9f59c0ab2af3 \ - --hash=sha256:cd49a908cb6d387fc26acee8b7d9fcc9bbf8e1aca890c0b2fdfd706057546080 \ - --hash=sha256:cf7a4e8de7f1092829caef66fd90eaf3710bc5efd322a816d5677b7664893c93 \ - --hash=sha256:cfd77e8e5cafba3fb584e0f4b935a59216f352b73d4987be3af51f43a862c403 \ - --hash=sha256:d34c4f80956227f2686ddea5b3585e109c2733e2d4ef12eb1b8b4e84f09a2ab6 \ - --hash=sha256:d61a0ca95503867d4d627517bcfdc28a8468c3f1b0b06c626f30dd759d3999fd \ - --hash=sha256:d81657b23e0edb84b37167e98aefb04ae16cbc5352770057893bd222cdc6e45f \ - --hash=sha256:d92d897cb4b4bf915fbeb5e604c7911021a8456f0964f3b8ebbe7f9188b9eabb \ - --hash=sha256:dd318e6b75ca80bff0b22b302f83a8ee41c62b8ac662ddb49f67ec97e799885d \ - --hash=sha256:dd952b9c64f3b21aedd09b8fe958e4931864dba69926d8a90c90d36ac4e28c9a \ - --hash=sha256:e0e7e83f31e23c5d00ff618045ddc5e916f9e613d33c5a5823bc0b0a0feb522f \ - --hash=sha256:e0f17d1df951336a02afc8270c03c0c6e60d1f9996fcbd43a4ce6be81de0bd9d \ - --hash=sha256:e2a16ef5fa2382af83bef4a18c1b3bcb4284c4732906aa69422cf09df9c59f1f \ - --hash=sha256:e36021db54b8a0475805acc1d6c4bca5d9f52c3825ad29ae2d398a9d530ddb88 \ - --hash=sha256:e73db54c967eb75037c178a54445c5a4e7461b5203b27c45ef656a81787c0c1b \ - --hash=sha256:e741bd48e6a417bdfbae02e088f60018286d6c141639359fb8df017a3b69415a \ - --hash=sha256:f7271d6bd8838c49ba8ae647fc06469137e1c161a7ef97d778b72904d9b68696 \ - --hash=sha256:fc391e3941045fd0987c77484b2799adffd08e4b6735c4ee5f054366a2e1551d \ - --hash=sha256:fc94441bcf9cb8c59f51f23193316afefbf3ff858460cb47b5758bf66a14d130 \ - --hash=sha256:fe34befb8c765b8ce562f0200afda3578f8abb159c76de3ab354c80b72244c41 \ - --hash=sha256:fe8080b4f25dfc44a86bedd14bc4f9d469dfc6456e6f3c5d9077e81a5fedfba7 \ - --hash=sha256:ff34cb09a332832d1cf38acd0f604c068665192c6107a439a92abfd8acf90fe2 +yarl==1.9.4 \ + --hash=sha256:008d3e808d03ef28542372d01057fd09168419cdc8f848efe2804f894ae03e51 \ + --hash=sha256:03caa9507d3d3c83bca08650678e25364e1843b484f19986a527630ca376ecce \ + --hash=sha256:07574b007ee20e5c375a8fe4a0789fad26db905f9813be0f9fef5a68080de559 \ + --hash=sha256:09efe4615ada057ba2d30df871d2f668af661e971dfeedf0c159927d48bbeff0 \ + --hash=sha256:0d2454f0aef65ea81037759be5ca9947539667eecebca092733b2eb43c965a81 \ + 
--hash=sha256:0e9d124c191d5b881060a9e5060627694c3bdd1fe24c5eecc8d5d7d0eb6faabc \ + --hash=sha256:18580f672e44ce1238b82f7fb87d727c4a131f3a9d33a5e0e82b793362bf18b4 \ + --hash=sha256:1f23e4fe1e8794f74b6027d7cf19dc25f8b63af1483d91d595d4a07eca1fb26c \ + --hash=sha256:206a55215e6d05dbc6c98ce598a59e6fbd0c493e2de4ea6cc2f4934d5a18d130 \ + --hash=sha256:23d32a2594cb5d565d358a92e151315d1b2268bc10f4610d098f96b147370136 \ + --hash=sha256:26a1dc6285e03f3cc9e839a2da83bcbf31dcb0d004c72d0730e755b33466c30e \ + --hash=sha256:29e0f83f37610f173eb7e7b5562dd71467993495e568e708d99e9d1944f561ec \ + --hash=sha256:2b134fd795e2322b7684155b7855cc99409d10b2e408056db2b93b51a52accc7 \ + --hash=sha256:2d47552b6e52c3319fede1b60b3de120fe83bde9b7bddad11a69fb0af7db32f1 \ + --hash=sha256:357495293086c5b6d34ca9616a43d329317feab7917518bc97a08f9e55648455 \ + --hash=sha256:35a2b9396879ce32754bd457d31a51ff0a9d426fd9e0e3c33394bf4b9036b099 \ + --hash=sha256:3777ce5536d17989c91696db1d459574e9a9bd37660ea7ee4d3344579bb6f129 \ + --hash=sha256:3986b6f41ad22988e53d5778f91855dc0399b043fc8946d4f2e68af22ee9ff10 \ + --hash=sha256:44d8ffbb9c06e5a7f529f38f53eda23e50d1ed33c6c869e01481d3fafa6b8142 \ + --hash=sha256:49a180c2e0743d5d6e0b4d1a9e5f633c62eca3f8a86ba5dd3c471060e352ca98 \ + --hash=sha256:4aa9741085f635934f3a2583e16fcf62ba835719a8b2b28fb2917bb0537c1dfa \ + --hash=sha256:4b21516d181cd77ebd06ce160ef8cc2a5e9ad35fb1c5930882baff5ac865eee7 \ + --hash=sha256:4b3c1ffe10069f655ea2d731808e76e0f452fc6c749bea04781daf18e6039525 \ + --hash=sha256:4c7d56b293cc071e82532f70adcbd8b61909eec973ae9d2d1f9b233f3d943f2c \ + --hash=sha256:4e9035df8d0880b2f1c7f5031f33f69e071dfe72ee9310cfc76f7b605958ceb9 \ + --hash=sha256:54525ae423d7b7a8ee81ba189f131054defdb122cde31ff17477951464c1691c \ + --hash=sha256:549d19c84c55d11687ddbd47eeb348a89df9cb30e1993f1b128f4685cd0ebbf8 \ + --hash=sha256:54beabb809ffcacbd9d28ac57b0db46e42a6e341a030293fb3185c409e626b8b \ + --hash=sha256:566db86717cf8080b99b58b083b773a908ae40f06681e87e589a976faf8246bf \ + --hash=sha256:5a2e2433eb9344a163aced6a5f6c9222c0786e5a9e9cac2c89f0b28433f56e23 \ + --hash=sha256:5aef935237d60a51a62b86249839b51345f47564208c6ee615ed2a40878dccdd \ + --hash=sha256:604f31d97fa493083ea21bd9b92c419012531c4e17ea6da0f65cacdcf5d0bd27 \ + --hash=sha256:63b20738b5aac74e239622d2fe30df4fca4942a86e31bf47a81a0e94c14df94f \ + --hash=sha256:686a0c2f85f83463272ddffd4deb5e591c98aac1897d65e92319f729c320eece \ + --hash=sha256:6a962e04b8f91f8c4e5917e518d17958e3bdee71fd1d8b88cdce74dd0ebbf434 \ + --hash=sha256:6ad6d10ed9b67a382b45f29ea028f92d25bc0bc1daf6c5b801b90b5aa70fb9ec \ + --hash=sha256:6f5cb257bc2ec58f437da2b37a8cd48f666db96d47b8a3115c29f316313654ff \ + --hash=sha256:6fe79f998a4052d79e1c30eeb7d6c1c1056ad33300f682465e1b4e9b5a188b78 \ + --hash=sha256:7855426dfbddac81896b6e533ebefc0af2f132d4a47340cee6d22cac7190022d \ + --hash=sha256:7d5aaac37d19b2904bb9dfe12cdb08c8443e7ba7d2852894ad448d4b8f442863 \ + --hash=sha256:801e9264d19643548651b9db361ce3287176671fb0117f96b5ac0ee1c3530d53 \ + --hash=sha256:81eb57278deb6098a5b62e88ad8281b2ba09f2f1147c4767522353eaa6260b31 \ + --hash=sha256:824d6c50492add5da9374875ce72db7a0733b29c2394890aef23d533106e2b15 \ + --hash=sha256:8397a3817d7dcdd14bb266283cd1d6fc7264a48c186b986f32e86d86d35fbac5 \ + --hash=sha256:848cd2a1df56ddbffeb375535fb62c9d1645dde33ca4d51341378b3f5954429b \ + --hash=sha256:84fc30f71689d7fc9168b92788abc977dc8cefa806909565fc2951d02f6b7d57 \ + --hash=sha256:8619d6915b3b0b34420cf9b2bb6d81ef59d984cb0fde7544e9ece32b4b3043c3 \ + 
--hash=sha256:8a854227cf581330ffa2c4824d96e52ee621dd571078a252c25e3a3b3d94a1b1 \ + --hash=sha256:8be9e837ea9113676e5754b43b940b50cce76d9ed7d2461df1af39a8ee674d9f \ + --hash=sha256:928cecb0ef9d5a7946eb6ff58417ad2fe9375762382f1bf5c55e61645f2c43ad \ + --hash=sha256:957b4774373cf6f709359e5c8c4a0af9f6d7875db657adb0feaf8d6cb3c3964c \ + --hash=sha256:992f18e0ea248ee03b5a6e8b3b4738850ae7dbb172cc41c966462801cbf62cf7 \ + --hash=sha256:9fc5fc1eeb029757349ad26bbc5880557389a03fa6ada41703db5e068881e5f2 \ + --hash=sha256:a00862fb23195b6b8322f7d781b0dc1d82cb3bcac346d1e38689370cc1cc398b \ + --hash=sha256:a3a6ed1d525bfb91b3fc9b690c5a21bb52de28c018530ad85093cc488bee2dd2 \ + --hash=sha256:a6327976c7c2f4ee6816eff196e25385ccc02cb81427952414a64811037bbc8b \ + --hash=sha256:a7409f968456111140c1c95301cadf071bd30a81cbd7ab829169fb9e3d72eae9 \ + --hash=sha256:a825ec844298c791fd28ed14ed1bffc56a98d15b8c58a20e0e08c1f5f2bea1be \ + --hash=sha256:a8c1df72eb746f4136fe9a2e72b0c9dc1da1cbd23b5372f94b5820ff8ae30e0e \ + --hash=sha256:a9bd00dc3bc395a662900f33f74feb3e757429e545d831eef5bb280252631984 \ + --hash=sha256:aa102d6d280a5455ad6a0f9e6d769989638718e938a6a0a2ff3f4a7ff8c62cc4 \ + --hash=sha256:aaaea1e536f98754a6e5c56091baa1b6ce2f2700cc4a00b0d49eca8dea471074 \ + --hash=sha256:ad4d7a90a92e528aadf4965d685c17dacff3df282db1121136c382dc0b6014d2 \ + --hash=sha256:b8477c1ee4bd47c57d49621a062121c3023609f7a13b8a46953eb6c9716ca392 \ + --hash=sha256:ba6f52cbc7809cd8d74604cce9c14868306ae4aa0282016b641c661f981a6e91 \ + --hash=sha256:bac8d525a8dbc2a1507ec731d2867025d11ceadcb4dd421423a5d42c56818541 \ + --hash=sha256:bef596fdaa8f26e3d66af846bbe77057237cb6e8efff8cd7cc8dff9a62278bbf \ + --hash=sha256:c0ec0ed476f77db9fb29bca17f0a8fcc7bc97ad4c6c1d8959c507decb22e8572 \ + --hash=sha256:c38c9ddb6103ceae4e4498f9c08fac9b590c5c71b0370f98714768e22ac6fa66 \ + --hash=sha256:c7224cab95645c7ab53791022ae77a4509472613e839dab722a72abe5a684575 \ + --hash=sha256:c74018551e31269d56fab81a728f683667e7c28c04e807ba08f8c9e3bba32f14 \ + --hash=sha256:ca06675212f94e7a610e85ca36948bb8fc023e458dd6c63ef71abfd482481aa5 \ + --hash=sha256:d1d2532b340b692880261c15aee4dc94dd22ca5d61b9db9a8a361953d36410b1 \ + --hash=sha256:d25039a474c4c72a5ad4b52495056f843a7ff07b632c1b92ea9043a3d9950f6e \ + --hash=sha256:d5ff2c858f5f6a42c2a8e751100f237c5e869cbde669a724f2062d4c4ef93551 \ + --hash=sha256:d7d7f7de27b8944f1fee2c26a88b4dabc2409d2fea7a9ed3df79b67277644e17 \ + --hash=sha256:d7eeb6d22331e2fd42fce928a81c697c9ee2d51400bd1a28803965883e13cead \ + --hash=sha256:d8a1c6c0be645c745a081c192e747c5de06e944a0d21245f4cf7c05e457c36e0 \ + --hash=sha256:d8b889777de69897406c9fb0b76cdf2fd0f31267861ae7501d93003d55f54fbe \ + --hash=sha256:d9e09c9d74f4566e905a0b8fa668c58109f7624db96a2171f21747abc7524234 \ + --hash=sha256:db8e58b9d79200c76956cefd14d5c90af54416ff5353c5bfd7cbe58818e26ef0 \ + --hash=sha256:ddb2a5c08a4eaaba605340fdee8fc08e406c56617566d9643ad8bf6852778fc7 \ + --hash=sha256:e0381b4ce23ff92f8170080c97678040fc5b08da85e9e292292aba67fdac6c34 \ + --hash=sha256:e23a6d84d9d1738dbc6e38167776107e63307dfc8ad108e580548d1f2c587f42 \ + --hash=sha256:e516dc8baf7b380e6c1c26792610230f37147bb754d6426462ab115a02944385 \ + --hash=sha256:ea65804b5dc88dacd4a40279af0cdadcfe74b3e5b4c897aa0d81cf86927fee78 \ + --hash=sha256:ec61d826d80fc293ed46c9dd26995921e3a82146feacd952ef0757236fc137be \ + --hash=sha256:ee04010f26d5102399bd17f8df8bc38dc7ccd7701dc77f4a68c5b8d733406958 \ + --hash=sha256:f3bc6af6e2b8f92eced34ef6a96ffb248e863af20ef4fde9448cc8c9b858b749 \ + 
--hash=sha256:f7d6b36dd2e029b6bcb8a13cf19664c7b8e19ab3a58e0fefbb5b8461447ed5ec
  # via
  #   -c release/ray_release/byod/requirements_compiled.txt
  #   aiohttp
-zipp==3.17.0 \
-    --hash=sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31 \
-    --hash=sha256:84e64a1c28cf7e91ed2078bb8cc8c259cb19b76942096c8d7b84947690cabaf0
+zipp==3.19.2 \
+    --hash=sha256:bf1dcf6450f873a13e952a29504887c89e6de7506209e5b1bcc3460135d4de19 \
+    --hash=sha256:f091755f667055f2d02b32c53771a7a6c8b47e1fdbc4b72a8b9072b3eef8015c
  # via
  #   -c release/ray_release/byod/requirements_compiled.txt
  #   importlib-metadata

From d0ae2d31dbe4dafb1d0d89f108a82d5cef08a199 Mon Sep 17 00:00:00 2001
From: dentiny
Date: Mon, 25 Nov 2024 16:54:38 -0800
Subject: [PATCH 088/107] [core] Fix gcs server using shared pointer (#48888)

The current use of shared pointers in the GCS server is a mess: a shared
pointer should **ONLY** be used where an object's lifetime genuinely cannot
be determined up front, for example across async operations or in
multi-threaded cases. Abusing it can leak memory (e.g., via circular
references) and makes ownership hard to reason about.

---------

Signed-off-by: hjiang
---
 BUILD.bazel                                   | 40 +++++++++++++++++--
 .../ray/gcs/gcs_server/gcs_node_manager.h     |  6 ++-
 src/ray/gcs/gcs_server/gcs_actor_manager.h    |  2 +-
 src/ray/gcs/gcs_server/gcs_actor_scheduler.cc | 31 +++++++-------
 src/ray/gcs/gcs_server/gcs_actor_scheduler.h  | 10 ++---
 .../gcs_autoscaler_state_manager.cc           |  6 +--
 .../gcs_server/gcs_autoscaler_state_manager.h | 13 +++---
 src/ray/gcs/gcs_server/gcs_node_manager.cc    | 11 +++--
 src/ray/gcs/gcs_server/gcs_node_manager.h     |  4 +-
 .../gcs_server/gcs_placement_group_manager.cc |  8 ++--
 .../gcs_server/gcs_placement_group_manager.h  |  6 +--
 .../gcs_placement_group_scheduler.cc          |  6 +--
 .../gcs_placement_group_scheduler.h           | 13 +++---
 src/ray/gcs/gcs_server/gcs_resource_manager.h |  2 +-
 src/ray/gcs/gcs_server/gcs_server.cc          | 36 ++++++++---------
 src/ray/gcs/gcs_server/gcs_server.h           | 18 +++++----
 .../gcs_node_manager_export_event_test.cc     |  8 ++--
 .../test/gcs_actor_scheduler_mock_test.cc     | 14 ++++---
 .../test/gcs_actor_scheduler_test.cc          |  9 +++--
 .../test/gcs_autoscaler_state_manager_test.cc |  4 +-
 .../gcs_server/test/gcs_node_manager_test.cc  |  8 ++--
 .../gcs_placement_group_manager_mock_test.cc  |  2 +-
 .../test/gcs_placement_group_manager_test.cc  |  2 +-
 .../gcs_placement_group_scheduler_test.cc     |  8 ++--
 .../raylet/scheduling/cluster_task_manager.h  |  1 +
 25 files changed, 155 insertions(+), 113 deletions(-)

diff --git a/BUILD.bazel b/BUILD.bazel
index dd31e976d6c2..f30037472d39 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -2403,11 +2403,43 @@ ray_cc_test(
 )

 ray_cc_test(
-    name = "gcs_export_event_test",
+    name = "gcs_job_manager_export_event_test",
     size = "small",
-    srcs = glob([
-        "src/ray/gcs/gcs_server/test/export_api/*.cc",
-    ]),
+    srcs = ["src/ray/gcs/gcs_server/test/export_api/gcs_job_manager_export_event_test.cc"],
     tags = [
         "no_windows",
         "team:core"
     ],
     deps = [
         ":gcs_server_lib",
         ":gcs_server_test_util",
         ":gcs_test_util_lib",
         ":ray_mock",
         "@com_google_googletest//:gtest_main",
     ],
 )

 ray_cc_test(
     name = "gcs_actor_manager_export_event_test",
     size = "small",
     srcs = ["src/ray/gcs/gcs_server/test/export_api/gcs_actor_manager_export_event_test.cc"],
     tags = [
         "no_windows",
         "team:core"
     ],
     deps = [
         ":gcs_server_lib",
         ":gcs_server_test_util",
         ":gcs_test_util_lib",
         ":ray_mock",
         "@com_google_googletest//:gtest_main",
     ],
 )

 ray_cc_test(
     name = "gcs_node_manager_export_event_test",
     size = "small",
     srcs = 
["src/ray/gcs/gcs_server/test/export_api/gcs_node_manager_export_event_test.cc"], tags = [ "no_windows", "team:core" diff --git a/src/mock/ray/gcs/gcs_server/gcs_node_manager.h b/src/mock/ray/gcs/gcs_server/gcs_node_manager.h index 3a8f22949fae..7a3efe197529 100644 --- a/src/mock/ray/gcs/gcs_server/gcs_node_manager.h +++ b/src/mock/ray/gcs/gcs_server/gcs_node_manager.h @@ -18,7 +18,11 @@ namespace gcs { class MockGcsNodeManager : public GcsNodeManager { public: - MockGcsNodeManager() : GcsNodeManager(nullptr, nullptr, nullptr, ClusterID::Nil()) {} + MockGcsNodeManager() + : GcsNodeManager(/*gcs_publisher=*/nullptr, + /*gcs_table_storage=*/nullptr, + /*raylet_client_pool=*/nullptr, + /*cluster_id=*/ClusterID::Nil()) {} MOCK_METHOD(void, HandleRegisterNode, (rpc::RegisterNodeRequest request, diff --git a/src/ray/gcs/gcs_server/gcs_actor_manager.h b/src/ray/gcs/gcs_server/gcs_actor_manager.h index dadb90498379..afdf55be80d6 100644 --- a/src/ray/gcs/gcs_server/gcs_actor_manager.h +++ b/src/ray/gcs/gcs_server/gcs_actor_manager.h @@ -321,7 +321,7 @@ class GcsActorManager : public rpc::ActorInfoHandler { std::function destroy_owned_placement_group_if_needed, const rpc::CoreWorkerClientFactoryFn &worker_client_factory = nullptr); - ~GcsActorManager() = default; + ~GcsActorManager() override = default; void HandleRegisterActor(rpc::RegisterActorRequest request, rpc::RegisterActorReply *reply, diff --git a/src/ray/gcs/gcs_server/gcs_actor_scheduler.cc b/src/ray/gcs/gcs_server/gcs_actor_scheduler.cc index f562c0f9034e..5daa1a992257 100644 --- a/src/ray/gcs/gcs_server/gcs_actor_scheduler.cc +++ b/src/ray/gcs/gcs_server/gcs_actor_scheduler.cc @@ -27,17 +27,17 @@ GcsActorScheduler::GcsActorScheduler( instrumented_io_context &io_context, GcsActorTable &gcs_actor_table, const GcsNodeManager &gcs_node_manager, - std::shared_ptr cluster_task_manager, + ClusterTaskManager &cluster_task_manager, GcsActorSchedulerFailureCallback schedule_failure_handler, GcsActorSchedulerSuccessCallback schedule_success_handler, - std::shared_ptr raylet_client_pool, + rpc::NodeManagerClientPool &raylet_client_pool, rpc::CoreWorkerClientFactoryFn client_factory, std::function normal_task_resources_changed_callback) : io_context_(io_context), gcs_actor_table_(gcs_actor_table), gcs_node_manager_(gcs_node_manager), - cluster_task_manager_(std::move(cluster_task_manager)), + cluster_task_manager_(cluster_task_manager), schedule_failure_handler_(std::move(schedule_failure_handler)), schedule_success_handler_(std::move(schedule_success_handler)), raylet_client_pool_(raylet_client_pool), @@ -97,11 +97,11 @@ void GcsActorScheduler::ScheduleByGcs(std::shared_ptr actor) { const auto &owner_node = gcs_node_manager_.GetAliveNode(actor->GetOwnerNodeID()); RayTask task(actor->GetCreationTaskSpecification(), owner_node.has_value() ? 
actor->GetOwnerNodeID().Binary() : std::string()); - cluster_task_manager_->QueueAndScheduleTask(task, - /*grant_or_reject*/ false, - /*is_selected_based_on_locality*/ false, - /*reply*/ reply.get(), - send_reply_callback); + cluster_task_manager_.QueueAndScheduleTask(task, + /*grant_or_reject*/ false, + /*is_selected_based_on_locality*/ false, + /*reply*/ reply.get(), + send_reply_callback); } void GcsActorScheduler::ScheduleByRaylet(std::shared_ptr actor) { @@ -218,7 +218,7 @@ std::vector GcsActorScheduler::CancelOnNode(const NodeID &node_id) { } } - raylet_client_pool_->Disconnect(node_id); + raylet_client_pool_.Disconnect(node_id); return actor_ids; } @@ -531,7 +531,7 @@ void GcsActorScheduler::DoRetryCreatingActorOnWorker( std::shared_ptr GcsActorScheduler::GetOrConnectLeaseClient( const rpc::Address &raylet_address) { - return raylet_client_pool_->GetOrConnectByAddress(raylet_address); + return raylet_client_pool_.GetOrConnectByAddress(raylet_address); } bool GcsActorScheduler::KillActorOnWorker(const rpc::Address &worker_address, @@ -664,13 +664,13 @@ void GcsActorScheduler::HandleWorkerLeaseRejectedReply( void GcsActorScheduler::OnActorDestruction(std::shared_ptr actor) { if (!actor->GetAcquiredResources().IsEmpty()) { ReturnActorAcquiredResources(actor); - cluster_task_manager_->ScheduleAndDispatchTasks(); + cluster_task_manager_.ScheduleAndDispatchTasks(); } } void GcsActorScheduler::ReturnActorAcquiredResources(std::shared_ptr actor) { auto &cluster_resource_manager = - cluster_task_manager_->GetClusterResourceScheduler()->GetClusterResourceManager(); + cluster_task_manager_.GetClusterResourceScheduler()->GetClusterResourceManager(); cluster_resource_manager.AddNodeAvailableResources( scheduling::NodeID(actor->GetNodeID().Binary()), actor->GetAcquiredResources().GetResourceSet()); @@ -678,14 +678,13 @@ void GcsActorScheduler::ReturnActorAcquiredResources(std::shared_ptr a } size_t GcsActorScheduler::GetPendingActorsCount() const { - return cluster_task_manager_->GetInfeasibleQueueSize() + - cluster_task_manager_->GetPendingQueueSize(); + return cluster_task_manager_.GetInfeasibleQueueSize() + + cluster_task_manager_.GetPendingQueueSize(); } bool GcsActorScheduler::CancelInFlightActorScheduling( const std::shared_ptr &actor) { - return cluster_task_manager_->CancelTask( - actor->GetCreationTaskSpecification().TaskId()); + return cluster_task_manager_.CancelTask(actor->GetCreationTaskSpecification().TaskId()); } } // namespace gcs diff --git a/src/ray/gcs/gcs_server/gcs_actor_scheduler.h b/src/ray/gcs/gcs_server/gcs_actor_scheduler.h index 1ea66d0ddbe0..048d1da8939c 100644 --- a/src/ray/gcs/gcs_server/gcs_actor_scheduler.h +++ b/src/ray/gcs/gcs_server/gcs_actor_scheduler.h @@ -129,14 +129,14 @@ class GcsActorScheduler : public GcsActorSchedulerInterface { instrumented_io_context &io_context, GcsActorTable &gcs_actor_table, const GcsNodeManager &gcs_node_manager, - std::shared_ptr cluster_task_manager_, + ClusterTaskManager &cluster_task_manager_, GcsActorSchedulerFailureCallback schedule_failure_handler, GcsActorSchedulerSuccessCallback schedule_success_handler, - std::shared_ptr raylet_client_pool, + rpc::NodeManagerClientPool &raylet_client_pool, rpc::CoreWorkerClientFactoryFn client_factory = nullptr, std::function normal_task_resources_changed_callback = nullptr); - virtual ~GcsActorScheduler() = default; + ~GcsActorScheduler() override = default; /// Schedule the specified actor. 
/// If there is no available nodes then the actor would be queued in the @@ -377,7 +377,7 @@ class GcsActorScheduler : public GcsActorSchedulerInterface { /// Reference of GcsNodeManager. const GcsNodeManager &gcs_node_manager_; /// The cluster task manager. - std::shared_ptr cluster_task_manager_; + ClusterTaskManager &cluster_task_manager_; /// The handler to handle the scheduling failures. GcsActorSchedulerFailureCallback schedule_failure_handler_; /// The handler to handle the successful scheduling. @@ -385,7 +385,7 @@ class GcsActorScheduler : public GcsActorSchedulerInterface { /// The nodes which are releasing unused workers. absl::flat_hash_set nodes_of_releasing_unused_workers_; /// The cached raylet clients used to communicate with raylet. - std::shared_ptr raylet_client_pool_; + rpc::NodeManagerClientPool &raylet_client_pool_; /// The cached core worker clients which are used to communicate with leased worker. rpc::CoreWorkerClientPool core_worker_clients_; diff --git a/src/ray/gcs/gcs_server/gcs_autoscaler_state_manager.cc b/src/ray/gcs/gcs_server/gcs_autoscaler_state_manager.cc index e4ef13371b53..c166dbbf6398 100644 --- a/src/ray/gcs/gcs_server/gcs_autoscaler_state_manager.cc +++ b/src/ray/gcs/gcs_server/gcs_autoscaler_state_manager.cc @@ -28,12 +28,12 @@ GcsAutoscalerStateManager::GcsAutoscalerStateManager( GcsNodeManager &gcs_node_manager, GcsActorManager &gcs_actor_manager, const GcsPlacementGroupManager &gcs_placement_group_manager, - std::shared_ptr raylet_client_pool) + rpc::NodeManagerClientPool &raylet_client_pool) : session_name_(session_name), gcs_node_manager_(gcs_node_manager), gcs_actor_manager_(gcs_actor_manager), gcs_placement_group_manager_(gcs_placement_group_manager), - raylet_client_pool_(std::move(raylet_client_pool)), + raylet_client_pool_(raylet_client_pool), last_cluster_resource_state_version_(0), last_seen_autoscaler_state_version_(0) {} @@ -396,7 +396,7 @@ void GcsAutoscalerStateManager::HandleDrainNode( raylet_address.set_ip_address(node->node_manager_address()); raylet_address.set_port(node->node_manager_port()); - const auto raylet_client = raylet_client_pool_->GetOrConnectByAddress(raylet_address); + const auto raylet_client = raylet_client_pool_.GetOrConnectByAddress(raylet_address); raylet_client->DrainRaylet( request.reason(), request.reason_message(), diff --git a/src/ray/gcs/gcs_server/gcs_autoscaler_state_manager.h b/src/ray/gcs/gcs_server/gcs_autoscaler_state_manager.h index c00d8d465202..c592a7a484d6 100644 --- a/src/ray/gcs/gcs_server/gcs_autoscaler_state_manager.h +++ b/src/ray/gcs/gcs_server/gcs_autoscaler_state_manager.h @@ -29,12 +29,11 @@ class GcsResourceManager; class GcsAutoscalerStateManager : public rpc::autoscaler::AutoscalerStateHandler { public: - GcsAutoscalerStateManager( - const std::string &session_name, - GcsNodeManager &gcs_node_manager, - GcsActorManager &gcs_actor_manager, - const GcsPlacementGroupManager &gcs_placement_group_manager, - std::shared_ptr raylet_client_pool); + GcsAutoscalerStateManager(const std::string &session_name, + GcsNodeManager &gcs_node_manager, + GcsActorManager &gcs_actor_manager, + const GcsPlacementGroupManager &gcs_placement_group_manager, + rpc::NodeManagerClientPool &raylet_client_pool); void HandleGetClusterResourceState( rpc::autoscaler::GetClusterResourceStateRequest request, @@ -152,7 +151,7 @@ class GcsAutoscalerStateManager : public rpc::autoscaler::AutoscalerStateHandler const GcsPlacementGroupManager &gcs_placement_group_manager_; /// Raylet client pool. 
- std::shared_ptr raylet_client_pool_; + rpc::NodeManagerClientPool &raylet_client_pool_; // The default value of the last seen version for the request is 0, which indicates // no version has been reported. So the first reported version should be 1. diff --git a/src/ray/gcs/gcs_server/gcs_node_manager.cc b/src/ray/gcs/gcs_server/gcs_node_manager.cc index a232ecf10903..c57e8ea2fd08 100644 --- a/src/ray/gcs/gcs_server/gcs_node_manager.cc +++ b/src/ray/gcs/gcs_server/gcs_node_manager.cc @@ -29,14 +29,13 @@ namespace ray { namespace gcs { ////////////////////////////////////////////////////////////////////////////////////////// -GcsNodeManager::GcsNodeManager( - std::shared_ptr gcs_publisher, - std::shared_ptr gcs_table_storage, - std::shared_ptr raylet_client_pool, - const ClusterID &cluster_id) +GcsNodeManager::GcsNodeManager(std::shared_ptr gcs_publisher, + std::shared_ptr gcs_table_storage, + rpc::NodeManagerClientPool *raylet_client_pool, + const ClusterID &cluster_id) : gcs_publisher_(std::move(gcs_publisher)), gcs_table_storage_(std::move(gcs_table_storage)), - raylet_client_pool_(std::move(raylet_client_pool)), + raylet_client_pool_(raylet_client_pool), cluster_id_(cluster_id) {} void GcsNodeManager::WriteNodeExportEvent(rpc::GcsNodeInfo node_info) const { diff --git a/src/ray/gcs/gcs_server/gcs_node_manager.h b/src/ray/gcs/gcs_server/gcs_node_manager.h index db258d4cb00c..b924fec264c9 100644 --- a/src/ray/gcs/gcs_server/gcs_node_manager.h +++ b/src/ray/gcs/gcs_server/gcs_node_manager.h @@ -50,7 +50,7 @@ class GcsNodeManager : public rpc::NodeInfoHandler { /// \param gcs_table_storage GCS table external storage accessor. explicit GcsNodeManager(std::shared_ptr gcs_publisher, std::shared_ptr gcs_table_storage, - std::shared_ptr raylet_client_pool, + rpc::NodeManagerClientPool *raylet_client_pool, const ClusterID &cluster_id); /// Handle register rpc request come from raylet. @@ -248,7 +248,7 @@ class GcsNodeManager : public rpc::NodeInfoHandler { /// Storage for GCS tables. std::shared_ptr gcs_table_storage_; /// Raylet client pool. - std::shared_ptr raylet_client_pool_; + rpc::NodeManagerClientPool *raylet_client_pool_ = nullptr; /// Cluster ID to be shared with clients when connecting. 
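The signature and member changes above all apply the ownership rule from the
commit message: the server owns each component outright, required
collaborators are handed out by reference (e.g., `rpc::NodeManagerClientPool &`),
and optional or test-injectable ones are handed out as non-owning raw pointers
(e.g., `rpc::NodeManagerClientPool *`, defaulted to `nullptr`). A minimal
sketch of that pattern, using stand-in types rather than the real GCS classes:

```cpp
#include <memory>

// Stand-ins for illustration only; these are not the actual Ray GCS types.
class ClientPool {};

// Required dependency: a reference documents "non-owning, never null,
// must outlive me".
class StateManager {
 public:
  explicit StateManager(ClientPool &pool) : pool_(pool) {}

 private:
  ClientPool &pool_;
};

// Optional dependency: a raw pointer documents "non-owning, may be null"
// (e.g., mocks can construct the manager without a pool).
class NodeManager {
 public:
  explicit NodeManager(ClientPool *pool) : pool_(pool) {}

 private:
  ClientPool *pool_ = nullptr;
};

// Single owner: unique_ptr members make the owner, and therefore the
// lifetime guarantees handed out above, explicit.
class Server {
 public:
  Server()
      : pool_(std::make_unique<ClientPool>()),
        state_manager_(std::make_unique<StateManager>(*pool_)),
        node_manager_(std::make_unique<NodeManager>(pool_.get())) {}

 private:
  std::unique_ptr<ClientPool> pool_;  // declared first, destroyed last
  std::unique_ptr<StateManager> state_manager_;
  std::unique_ptr<NodeManager> node_manager_;
};

int main() {
  Server server;  // components may use the pool for the server's whole life
  return 0;
}
```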
const ClusterID cluster_id_; diff --git a/src/ray/gcs/gcs_server/gcs_placement_group_manager.cc b/src/ray/gcs/gcs_server/gcs_placement_group_manager.cc index c60bcd43cc45..1aec60e9603c 100644 --- a/src/ray/gcs/gcs_server/gcs_placement_group_manager.cc +++ b/src/ray/gcs/gcs_server/gcs_placement_group_manager.cc @@ -14,6 +14,8 @@ #include "ray/gcs/gcs_server/gcs_placement_group_manager.h" +#include + #include "ray/common/asio/asio_util.h" #include "ray/common/asio/instrumented_io_context.h" #include "ray/common/ray_config.h" @@ -181,15 +183,15 @@ rpc::PlacementGroupStats *GcsPlacementGroup::GetMutableStats() { GcsPlacementGroupManager::GcsPlacementGroupManager( instrumented_io_context &io_context, - std::shared_ptr scheduler, + GcsPlacementGroupSchedulerInterface *scheduler, std::shared_ptr gcs_table_storage, GcsResourceManager &gcs_resource_manager, std::function get_ray_namespace) : io_context_(io_context), - gcs_placement_group_scheduler_(std::move(scheduler)), + gcs_placement_group_scheduler_(scheduler), gcs_table_storage_(std::move(gcs_table_storage)), gcs_resource_manager_(gcs_resource_manager), - get_ray_namespace_(get_ray_namespace) { + get_ray_namespace_(std::move(get_ray_namespace)) { placement_group_state_counter_.reset( new CounterMap()); placement_group_state_counter_->SetOnChangeCallback( diff --git a/src/ray/gcs/gcs_server/gcs_placement_group_manager.h b/src/ray/gcs/gcs_server/gcs_placement_group_manager.h index d90fdccf3a8a..a7d91388e264 100644 --- a/src/ray/gcs/gcs_server/gcs_placement_group_manager.h +++ b/src/ray/gcs/gcs_server/gcs_placement_group_manager.h @@ -237,7 +237,7 @@ class GcsPlacementGroupManager : public rpc::PlacementGroupInfoHandler { /// \param gcs_resource_manager Reference of GcsResourceManager. /// \param get_ray_namespace A callback to get the ray namespace. GcsPlacementGroupManager(instrumented_io_context &io_context, - std::shared_ptr scheduler, + GcsPlacementGroupSchedulerInterface *scheduler, std::shared_ptr gcs_table_storage, GcsResourceManager &gcs_resource_manager, std::function get_ray_namespace); @@ -480,8 +480,8 @@ class GcsPlacementGroupManager : public rpc::PlacementGroupInfoHandler { std::deque> infeasible_placement_groups_; /// The scheduler to schedule all registered placement_groups. - std::shared_ptr - gcs_placement_group_scheduler_; + /// Scheduler's lifecycle lies in [GcsServer]. + gcs::GcsPlacementGroupSchedulerInterface *gcs_placement_group_scheduler_ = nullptr; /// Used to update placement group information upon creation, deletion, etc. 
std::shared_ptr gcs_table_storage_; diff --git a/src/ray/gcs/gcs_server/gcs_placement_group_scheduler.cc b/src/ray/gcs/gcs_server/gcs_placement_group_scheduler.cc index 6bc2737c14a6..85a94f863598 100644 --- a/src/ray/gcs/gcs_server/gcs_placement_group_scheduler.cc +++ b/src/ray/gcs/gcs_server/gcs_placement_group_scheduler.cc @@ -26,13 +26,13 @@ GcsPlacementGroupScheduler::GcsPlacementGroupScheduler( std::shared_ptr gcs_table_storage, const gcs::GcsNodeManager &gcs_node_manager, ClusterResourceScheduler &cluster_resource_scheduler, - std::shared_ptr raylet_client_pool) + rpc::NodeManagerClientPool &raylet_client_pool) : io_context_(io_context), return_timer_(io_context), gcs_table_storage_(std::move(gcs_table_storage)), gcs_node_manager_(gcs_node_manager), cluster_resource_scheduler_(cluster_resource_scheduler), - raylet_client_pool_(std::move(raylet_client_pool)) {} + raylet_client_pool_(raylet_client_pool) {} void GcsPlacementGroupScheduler::ScheduleUnplacedBundles( const SchedulePgRequest &request) { @@ -279,7 +279,7 @@ void GcsPlacementGroupScheduler::CancelResourceReserve( std::shared_ptr GcsPlacementGroupScheduler::GetOrConnectLeaseClient(const rpc::Address &raylet_address) { - return raylet_client_pool_->GetOrConnectByAddress(raylet_address); + return raylet_client_pool_.GetOrConnectByAddress(raylet_address); } std::shared_ptr diff --git a/src/ray/gcs/gcs_server/gcs_placement_group_scheduler.h b/src/ray/gcs/gcs_server/gcs_placement_group_scheduler.h index ec7ac53941bd..df16f025d082 100644 --- a/src/ray/gcs/gcs_server/gcs_placement_group_scheduler.h +++ b/src/ray/gcs/gcs_server/gcs_placement_group_scheduler.h @@ -290,12 +290,11 @@ class GcsPlacementGroupScheduler : public GcsPlacementGroupSchedulerInterface { /// \param cluster_resource_scheduler The resource scheduler which is used when /// scheduling. /// \param lease_client_factory Factory to create remote lease client. - GcsPlacementGroupScheduler( - instrumented_io_context &io_context, - std::shared_ptr gcs_table_storage, - const GcsNodeManager &gcs_node_manager, - ClusterResourceScheduler &cluster_resource_scheduler, - std::shared_ptr raylet_client_pool); + GcsPlacementGroupScheduler(instrumented_io_context &io_context, + std::shared_ptr gcs_table_storage, + const GcsNodeManager &gcs_node_manager, + ClusterResourceScheduler &cluster_resource_scheduler, + rpc::NodeManagerClientPool &raylet_client_pool); virtual ~GcsPlacementGroupScheduler() = default; @@ -502,7 +501,7 @@ class GcsPlacementGroupScheduler : public GcsPlacementGroupSchedulerInterface { placement_group_leasing_in_progress_; /// The cached raylet clients used to communicate with raylets. - std::shared_ptr raylet_client_pool_; + rpc::NodeManagerClientPool &raylet_client_pool_; /// The nodes which are releasing unused bundles. absl::flat_hash_set nodes_of_releasing_unused_bundles_; diff --git a/src/ray/gcs/gcs_server/gcs_resource_manager.h b/src/ray/gcs/gcs_server/gcs_resource_manager.h index dff95380cd21..47ecf9fff3a3 100644 --- a/src/ray/gcs/gcs_server/gcs_resource_manager.h +++ b/src/ray/gcs/gcs_server/gcs_resource_manager.h @@ -67,7 +67,7 @@ class GcsResourceManager : public rpc::NodeResourceInfoHandler, NodeID local_node_id, std::shared_ptr cluster_task_manager = nullptr); - virtual ~GcsResourceManager() {} + virtual ~GcsResourceManager() = default; /// Handle the resource update. 
void ConsumeSyncMessage(std::shared_ptr message) override; diff --git a/src/ray/gcs/gcs_server/gcs_server.cc b/src/ray/gcs/gcs_server/gcs_server.cc index c51c14bbb0dd..a8ad05ff3c9d 100644 --- a/src/ray/gcs/gcs_server/gcs_server.cc +++ b/src/ray/gcs/gcs_server/gcs_server.cc @@ -64,7 +64,7 @@ GcsServer::GcsServer(const ray::gcs::GcsServerConfig &config, ClusterID::Nil(), RayConfig::instance().gcs_server_rpc_client_thread_num()), raylet_client_pool_( - std::make_shared(client_call_manager_)), + std::make_unique(client_call_manager_)), pubsub_periodical_runner_(io_context_provider_.GetIOContext()), periodical_runner_(io_context_provider_.GetDefaultIOContext()), is_started_(false), @@ -289,7 +289,7 @@ void GcsServer::InitGcsNodeManager(const GcsInitData &gcs_init_data) { RAY_CHECK(gcs_table_storage_ && gcs_publisher_); gcs_node_manager_ = std::make_unique(gcs_publisher_, gcs_table_storage_, - raylet_client_pool_, + raylet_client_pool_.get(), rpc_server_.GetClusterId()); // Initialize by gcs tables data. gcs_node_manager_->Initialize(gcs_init_data); @@ -323,7 +323,7 @@ void GcsServer::InitGcsHealthCheckManager(const GcsInitData &gcs_init_data) { void GcsServer::InitGcsResourceManager(const GcsInitData &gcs_init_data) { RAY_CHECK(cluster_resource_scheduler_ && cluster_task_manager_); - gcs_resource_manager_ = std::make_shared( + gcs_resource_manager_ = std::make_unique( io_context_provider_.GetDefaultIOContext(), cluster_resource_scheduler_->GetClusterResourceManager(), *gcs_node_manager_, @@ -446,25 +446,25 @@ void GcsServer::InitGcsActorManager(const GcsInitData &gcs_init_data) { const rpc::PushTaskReply &reply) { gcs_actor_manager_->OnActorCreationSuccess(std::move(actor), reply); }; - auto client_factory = [this](const rpc::Address &address) { - return std::make_shared(address, client_call_manager_); - }; RAY_CHECK(gcs_resource_manager_ && cluster_task_manager_); scheduler = std::make_unique( io_context_provider_.GetDefaultIOContext(), gcs_table_storage_->ActorTable(), *gcs_node_manager_, - cluster_task_manager_, + *cluster_task_manager_, schedule_failure_handler, schedule_success_handler, - raylet_client_pool_, - client_factory, + *raylet_client_pool_, + /*factory=*/ + [this](const rpc::Address &address) { + return std::make_shared(address, client_call_manager_); + }, /*normal_task_resources_changed_callback=*/ [this](const NodeID &node_id, const rpc::ResourcesData &resources) { gcs_resource_manager_->UpdateNodeNormalTaskResources(node_id, resources); }); - gcs_actor_manager_ = std::make_shared( + gcs_actor_manager_ = std::make_unique( std::move(scheduler), gcs_table_storage_, gcs_publisher_, @@ -480,23 +480,23 @@ void GcsServer::InitGcsActorManager(const GcsInitData &gcs_init_data) { // Initialize by gcs tables data. gcs_actor_manager_->Initialize(gcs_init_data); // Register service. 
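Right below, `reset(new ...)` becomes `std::make_unique`. A small sketch of
the difference, with a hypothetical `Service` type standing in for the gRPC
service classes:

```cpp
#include <memory>

// Hypothetical stand-in for a gRPC service class.
struct Service {
  Service(int io_context_id, int handler_id) {}
};

int main() {
  std::unique_ptr<Service> service;

  // Before: a raw `new` handed to reset(). Correct here, but the bare new
  // is noisier to read and easy to misuse in larger expressions.
  service.reset(new Service(/*io_context_id=*/1, /*handler_id=*/2));

  // After: allocation and ownership transfer happen in a single call,
  // with no raw pointer ever visible.
  service = std::make_unique<Service>(/*io_context_id=*/1, /*handler_id=*/2);
  return 0;
}
```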
- actor_info_service_.reset(new rpc::ActorInfoGrpcService( - io_context_provider_.GetDefaultIOContext(), *gcs_actor_manager_)); + actor_info_service_ = std::make_unique( + io_context_provider_.GetDefaultIOContext(), *gcs_actor_manager_); rpc_server_.RegisterService(*actor_info_service_); } void GcsServer::InitGcsPlacementGroupManager(const GcsInitData &gcs_init_data) { RAY_CHECK(gcs_table_storage_ && gcs_node_manager_); - gcs_placement_group_scheduler_ = std::make_shared( + gcs_placement_group_scheduler_ = std::make_unique( io_context_provider_.GetDefaultIOContext(), gcs_table_storage_, *gcs_node_manager_, *cluster_resource_scheduler_, - raylet_client_pool_); + *raylet_client_pool_); - gcs_placement_group_manager_ = std::make_shared( + gcs_placement_group_manager_ = std::make_unique( io_context_provider_.GetDefaultIOContext(), - gcs_placement_group_scheduler_, + gcs_placement_group_scheduler_.get(), gcs_table_storage_, *gcs_resource_manager_, [this](const JobID &job_id) { @@ -671,7 +671,7 @@ void GcsServer::InitGcsAutoscalerStateManager(const GcsInitData &gcs_init_data) *gcs_node_manager_, *gcs_actor_manager_, *gcs_placement_group_manager_, - raylet_client_pool_); + *raylet_client_pool_); gcs_autoscaler_state_manager_->Initialize(gcs_init_data); autoscaler_state_service_.reset(new rpc::autoscaler::AutoscalerStateGrpcService( @@ -826,7 +826,7 @@ std::shared_ptr GcsServer::GetOrConnectRedis() { RAY_CHECK(status.ok()) << "Failed to init redis gcs client as " << status; // Init redis failure detector. - gcs_redis_failure_detector_ = std::make_shared( + gcs_redis_failure_detector_ = std::make_unique( io_context_provider_.GetDefaultIOContext(), redis_client_, []() { RAY_LOG(FATAL) << "Redis connection failed. Shutdown GCS."; }); diff --git a/src/ray/gcs/gcs_server/gcs_server.h b/src/ray/gcs/gcs_server/gcs_server.h index 22ece4dda229..6c37e9d8210c 100644 --- a/src/ray/gcs/gcs_server/gcs_server.h +++ b/src/ray/gcs/gcs_server/gcs_server.h @@ -14,6 +14,8 @@ #pragma once +#include + #include "ray/common/asio/asio_util.h" #include "ray/common/asio/instrumented_io_context.h" #include "ray/common/ray_syncer/ray_syncer.h" @@ -218,9 +220,9 @@ class GcsServer { /// The `ClientCallManager` object that is shared by all `NodeManagerWorkerClient`s. rpc::ClientCallManager client_call_manager_; /// Node manager client pool. - std::shared_ptr raylet_client_pool_; + std::unique_ptr raylet_client_pool_; /// The gcs resource manager. - std::shared_ptr gcs_resource_manager_; + std::unique_ptr gcs_resource_manager_; /// The cluster resource scheduler. std::shared_ptr cluster_resource_scheduler_; /// The cluster task manager. @@ -230,15 +232,17 @@ class GcsServer { /// The gcs node manager. std::unique_ptr gcs_node_manager_; /// The health check manager. - std::shared_ptr gcs_healthcheck_manager_; + std::unique_ptr gcs_healthcheck_manager_; /// The gcs redis failure detector. - std::shared_ptr gcs_redis_failure_detector_; + std::unique_ptr gcs_redis_failure_detector_; /// The gcs actor manager. - std::shared_ptr gcs_actor_manager_; + std::unique_ptr gcs_actor_manager_; /// The gcs placement group scheduler. - std::shared_ptr gcs_placement_group_scheduler_; + /// [gcs_placement_group_scheduler_] depends on [raylet_client_pool_]. + std::unique_ptr gcs_placement_group_scheduler_; /// The gcs placement group manager. - std::shared_ptr gcs_placement_group_manager_; + /// [gcs_placement_group_manager_] depends on [gcs_placement_group_scheduler_]. 
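The `[A] depends on [B]` comments above lean on a language guarantee worth
spelling out: non-static data members are destroyed in reverse order of
declaration, so declaring the pool before the scheduler, and the scheduler
before the manager, ensures each dependent is torn down before the thing it
points at. A self-contained demonstration (the member names are illustrative):

```cpp
#include <iostream>

struct Tracer {
  const char *name;
  explicit Tracer(const char *n) : name(n) {}
  ~Tracer() { std::cout << "destroy " << name << "\n"; }
};

// Declaration order mirrors the dependency order used above: the client
// pool first, then the scheduler that uses it, then the manager that uses
// the scheduler.
struct Owner {
  Tracer pool{"raylet_client_pool_"};
  Tracer scheduler{"placement_group_scheduler_"};
  Tracer manager{"placement_group_manager_"};
};

int main() {
  Owner owner;
  // On scope exit this prints, in order:
  //   destroy placement_group_manager_
  //   destroy placement_group_scheduler_
  //   destroy raylet_client_pool_
  // i.e., dependents go away before their dependencies.
  return 0;
}
```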
+ std::unique_ptr gcs_placement_group_manager_; /// Job info handler and service. std::unique_ptr gcs_job_manager_; std::unique_ptr job_info_service_; diff --git a/src/ray/gcs/gcs_server/test/export_api/gcs_node_manager_export_event_test.cc b/src/ray/gcs/gcs_server/test/export_api/gcs_node_manager_export_event_test.cc index 742716da4b45..61d2d0e8b932 100644 --- a/src/ray/gcs/gcs_server/test/export_api/gcs_node_manager_export_event_test.cc +++ b/src/ray/gcs/gcs_server/test/export_api/gcs_node_manager_export_event_test.cc @@ -41,7 +41,7 @@ class GcsNodeManagerExportAPITest : public ::testing::Test { public: GcsNodeManagerExportAPITest() { raylet_client_ = std::make_shared(); - client_pool_ = std::make_shared( + client_pool_ = std::make_unique( [this](const rpc::Address &) { return raylet_client_; }); gcs_publisher_ = std::make_shared( std::make_unique()); @@ -72,7 +72,7 @@ class GcsNodeManagerExportAPITest : public ::testing::Test { protected: std::shared_ptr gcs_table_storage_; std::shared_ptr raylet_client_; - std::shared_ptr client_pool_; + std::unique_ptr client_pool_; std::shared_ptr gcs_publisher_; instrumented_io_context io_service_; std::string log_dir_; @@ -81,7 +81,7 @@ class GcsNodeManagerExportAPITest : public ::testing::Test { TEST_F(GcsNodeManagerExportAPITest, TestExportEventRegisterNode) { // Test export event is written when a node is added with HandleRegisterNode gcs::GcsNodeManager node_manager( - gcs_publisher_, gcs_table_storage_, client_pool_, ClusterID::Nil()); + gcs_publisher_, gcs_table_storage_, client_pool_.get(), ClusterID::Nil()); auto node = Mocker::GenNodeInfo(); rpc::RegisterNodeRequest register_request; @@ -103,7 +103,7 @@ TEST_F(GcsNodeManagerExportAPITest, TestExportEventRegisterNode) { TEST_F(GcsNodeManagerExportAPITest, TestExportEventUnregisterNode) { // Test export event is written when a node is removed with HandleUnregisterNode gcs::GcsNodeManager node_manager( - gcs_publisher_, gcs_table_storage_, client_pool_, ClusterID::Nil()); + gcs_publisher_, gcs_table_storage_, client_pool_.get(), ClusterID::Nil()); auto node = Mocker::GenNodeInfo(); auto node_id = NodeID::FromBinary(node->node_id()); node_manager.AddNode(node); diff --git a/src/ray/gcs/gcs_server/test/gcs_actor_scheduler_mock_test.cc b/src/ray/gcs/gcs_server/test/gcs_actor_scheduler_mock_test.cc index 6beeb8b7504c..aca66ca39c09 100644 --- a/src/ray/gcs/gcs_server/test/gcs_actor_scheduler_mock_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_actor_scheduler_mock_test.cc @@ -42,7 +42,7 @@ class GcsActorSchedulerMockTest : public Test { std::make_unique(nullptr, nullptr, nullptr, ClusterID::Nil()); raylet_client = std::make_shared(); core_worker_client = std::make_shared(); - client_pool = std::make_shared( + client_pool = std::make_unique( [this](const rpc::Address &) { return raylet_client; }); local_node_id = NodeID::FromRandom(); auto cluster_resource_scheduler = std::make_shared( @@ -52,7 +52,7 @@ class GcsActorSchedulerMockTest : public Test { /*is_node_available_fn=*/ [](auto) { return true; }, /*is_local_node_with_raylet=*/false); - auto cluster_task_manager = std::make_shared( + cluster_task_manager = std::make_unique( local_node_id, cluster_resource_scheduler, /*get_node_info=*/ @@ -70,10 +70,10 @@ class GcsActorSchedulerMockTest : public Test { io_context, *actor_table, *gcs_node_manager, - cluster_task_manager, + *cluster_task_manager, [this](auto a, auto b, auto c) { schedule_failure_handler(a); }, [this](auto a, const rpc::PushTaskReply) { schedule_success_handler(a); }, - client_pool, 
+ *client_pool, [this](const rpc::Address &) { return core_worker_client; }); auto node_info = std::make_shared(); node_info->set_state(rpc::GcsNodeInfo::ALIVE); @@ -82,14 +82,16 @@ class GcsActorSchedulerMockTest : public Test { worker_id = WorkerID::FromRandom(); gcs_node_manager->AddNode(node_info); } + std::shared_ptr raylet_client; instrumented_io_context io_context; std::shared_ptr store_client; std::unique_ptr actor_table; - std::unique_ptr actor_scheduler; std::unique_ptr gcs_node_manager; + std::unique_ptr cluster_task_manager; + std::unique_ptr actor_scheduler; std::shared_ptr core_worker_client; - std::shared_ptr client_pool; + std::unique_ptr client_pool; std::shared_ptr>> counter; MockCallback schedule_failure_handler; diff --git a/src/ray/gcs/gcs_server/test/gcs_actor_scheduler_test.cc b/src/ray/gcs/gcs_server/test/gcs_actor_scheduler_test.cc index 48a0c0becbe9..6302ee02ed63 100644 --- a/src/ray/gcs/gcs_server/test/gcs_actor_scheduler_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_actor_scheduler_test.cc @@ -12,10 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. +#include + #include // clang-format off -#include "gtest/gtest.h" #include "ray/common/asio/instrumented_io_context.h" #include "ray/gcs/gcs_server/gcs_actor_scheduler.h" #include "ray/gcs/gcs_server/test/gcs_server_test_util.h" @@ -39,7 +40,7 @@ class GcsActorSchedulerTest : public ::testing::Test { store_client_ = std::make_shared(io_service_); gcs_table_storage_ = std::make_shared(io_service_); gcs_node_manager_ = std::make_shared( - gcs_publisher_, gcs_table_storage_, raylet_client_pool_, ClusterID::Nil()); + gcs_publisher_, gcs_table_storage_, raylet_client_pool_.get(), ClusterID::Nil()); gcs_actor_table_ = std::make_shared(store_client_); local_node_id_ = NodeID::FromRandom(); @@ -73,7 +74,7 @@ class GcsActorSchedulerTest : public ::testing::Test { io_service_, *gcs_actor_table_, *gcs_node_manager_, - cluster_task_manager_, + *cluster_task_manager_, /*schedule_failure_handler=*/ [this](std::shared_ptr actor, const rpc::RequestWorkerLeaseReply::SchedulingFailureType failure_type, @@ -84,7 +85,7 @@ class GcsActorSchedulerTest : public ::testing::Test { [this](std::shared_ptr actor, const rpc::PushTaskReply &reply) { success_actors_.emplace_back(std::move(actor)); }, - raylet_client_pool_, + *raylet_client_pool_, /*client_factory=*/ [this](const rpc::Address &address) { return worker_client_; }, /*normal_task_resources_changed_callback=*/ diff --git a/src/ray/gcs/gcs_server/test/gcs_autoscaler_state_manager_test.cc b/src/ray/gcs/gcs_server/test/gcs_autoscaler_state_manager_test.cc index 856261b032e4..2f281fa31844 100644 --- a/src/ray/gcs/gcs_server/test/gcs_autoscaler_state_manager_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_autoscaler_state_manager_test.cc @@ -61,7 +61,7 @@ class GcsAutoscalerStateManagerTest : public ::testing::Test { void SetUp() override { raylet_client_ = std::make_shared(); - client_pool_ = std::make_shared( + client_pool_ = std::make_unique( [this](const rpc::Address &) { return raylet_client_; }); cluster_resource_manager_ = std::make_unique(io_service_); gcs_node_manager_ = std::make_shared(); @@ -86,7 +86,7 @@ class GcsAutoscalerStateManagerTest : public ::testing::Test { *gcs_node_manager_, *gcs_actor_manager_, *gcs_placement_group_manager_, - client_pool_)); + *client_pool_)); } public: diff --git a/src/ray/gcs/gcs_server/test/gcs_node_manager_test.cc b/src/ray/gcs/gcs_server/test/gcs_node_manager_test.cc index 
fcc273aa4f9f..eb12d59dbdb3 100644 --- a/src/ray/gcs/gcs_server/test/gcs_node_manager_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_node_manager_test.cc @@ -28,7 +28,7 @@ class GcsNodeManagerTest : public ::testing::Test { public: GcsNodeManagerTest() { raylet_client_ = std::make_shared(); - client_pool_ = std::make_shared( + client_pool_ = std::make_unique( [this](const rpc::Address &) { return raylet_client_; }); gcs_publisher_ = std::make_shared( std::make_unique()); @@ -37,13 +37,13 @@ class GcsNodeManagerTest : public ::testing::Test { protected: std::shared_ptr gcs_table_storage_; std::shared_ptr raylet_client_; - std::shared_ptr client_pool_; + std::unique_ptr client_pool_; std::shared_ptr gcs_publisher_; }; TEST_F(GcsNodeManagerTest, TestManagement) { gcs::GcsNodeManager node_manager( - gcs_publisher_, gcs_table_storage_, client_pool_, ClusterID::Nil()); + gcs_publisher_, gcs_table_storage_, client_pool_.get(), ClusterID::Nil()); // Test Add/Get/Remove functionality. auto node = Mocker::GenNodeInfo(); auto node_id = NodeID::FromBinary(node->node_id()); @@ -58,7 +58,7 @@ TEST_F(GcsNodeManagerTest, TestManagement) { TEST_F(GcsNodeManagerTest, TestListener) { gcs::GcsNodeManager node_manager( - gcs_publisher_, gcs_table_storage_, client_pool_, ClusterID::Nil()); + gcs_publisher_, gcs_table_storage_, client_pool_.get(), ClusterID::Nil()); // Test AddNodeAddedListener. int node_count = 1000; std::vector> added_nodes; diff --git a/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_mock_test.cc b/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_mock_test.cc index 6cfd689ac168..1e3ef61060c8 100644 --- a/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_mock_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_mock_test.cc @@ -47,7 +47,7 @@ class GcsPlacementGroupManagerMockTest : public Test { gcs_placement_group_manager_ = std::make_unique(io_context_, - gcs_placement_group_scheduler_, + gcs_placement_group_scheduler_.get(), gcs_table_storage_, *resource_manager_, [](auto &) { return ""; }); diff --git a/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_test.cc b/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_test.cc index d407332d8021..ad808b644b67 100644 --- a/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_test.cc @@ -89,7 +89,7 @@ class GcsPlacementGroupManagerTest : public ::testing::Test { io_service_, cluster_resource_manager_, *gcs_node_manager_, NodeID::FromRandom()); gcs_placement_group_manager_.reset(new gcs::GcsPlacementGroupManager( io_service_, - mock_placement_group_scheduler_, + mock_placement_group_scheduler_.get(), gcs_table_storage_, *gcs_resource_manager_, [this](const JobID &job_id) { return job_namespace_table_[job_id]; })); diff --git a/src/ray/gcs/gcs_server/test/gcs_placement_group_scheduler_test.cc b/src/ray/gcs/gcs_server/test/gcs_placement_group_scheduler_test.cc index 8bbcaedefc63..093bdaf13fcc 100644 --- a/src/ray/gcs/gcs_server/test/gcs_placement_group_scheduler_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_placement_group_scheduler_test.cc @@ -55,21 +55,21 @@ class GcsPlacementGroupSchedulerTest : public ::testing::Test { [](auto) { return true; }, /*is_local_node_with_raylet=*/false); gcs_node_manager_ = std::make_shared( - gcs_publisher_, gcs_table_storage_, raylet_client_pool_, ClusterID::Nil()); + gcs_publisher_, gcs_table_storage_, raylet_client_pool_.get(), ClusterID::Nil()); gcs_resource_manager_ = std::make_shared( 
io_service_, cluster_resource_scheduler_->GetClusterResourceManager(), *gcs_node_manager_, local_node_id); store_client_ = std::make_shared(io_service_); - raylet_client_pool_ = std::make_shared( + raylet_client_pool_ = std::make_unique( [this](const rpc::Address &addr) { return raylet_clients_[addr.port()]; }); scheduler_ = std::make_shared( io_service_, gcs_table_storage_, *gcs_node_manager_, *cluster_resource_scheduler_, - raylet_client_pool_); + *raylet_client_pool_); counter_.reset(new CounterMap()); } @@ -296,7 +296,7 @@ class GcsPlacementGroupSchedulerTest : public ::testing::Test { ABSL_GUARDED_BY(placement_group_requests_mutex_); std::shared_ptr gcs_publisher_; std::shared_ptr gcs_table_storage_; - std::shared_ptr raylet_client_pool_; + std::unique_ptr raylet_client_pool_; std::shared_ptr> counter_; }; diff --git a/src/ray/raylet/scheduling/cluster_task_manager.h b/src/ray/raylet/scheduling/cluster_task_manager.h index 058c40f97fcf..752cbd6b3e98 100644 --- a/src/ray/raylet/scheduling/cluster_task_manager.h +++ b/src/ray/raylet/scheduling/cluster_task_manager.h @@ -161,6 +161,7 @@ class ClusterTaskManager : public ClusterTaskManagerInterface { const NodeID &self_node_id_; /// Responsible for resource tracking/view of the cluster. + /// TODO(hjiang): Use reference instead of shared pointer. std::shared_ptr cluster_resource_scheduler_; /// Function to get the node information of a given node id. From 3496c5b4b1176919722501ab1b39ce42dcc31638 Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Mon, 25 Nov 2024 19:21:37 -0800 Subject: [PATCH 089/107] [data] use dask 2022.10.2 (#48898) 2022.10.1 is yanked Signed-off-by: Lonnie Liu --- python/requirements/ml/data-requirements.txt | 4 ++-- python/requirements_compiled.txt | 4 ++-- release/ray_release/byod/requirements_byod_3.9.txt | 12 ++++++------ 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/python/requirements/ml/data-requirements.txt b/python/requirements/ml/data-requirements.txt index 6e2baa5592fe..de91b5010c7c 100644 --- a/python/requirements/ml/data-requirements.txt +++ b/python/requirements/ml/data-requirements.txt @@ -1,8 +1,8 @@ # Used by CI for datasets and docs. 
# https://github.com/ray-project/ray/pull/29448#discussion_r1006256498 -dask[complete]==2022.10.1; python_version < '3.12' -distributed==2022.10.1; python_version < '3.12' +dask[complete]==2022.10.2; python_version < '3.12' +distributed==2022.10.2; python_version < '3.12' dask[complete]==2024.6.0; python_version >= '3.12' distributed==2024.6.0; python_version >= '3.12' aioboto3==11.2.0 diff --git a/python/requirements_compiled.txt b/python/requirements_compiled.txt index 9dd3caa3ecab..f3b39647b0e8 100644 --- a/python/requirements_compiled.txt +++ b/python/requirements_compiled.txt @@ -360,7 +360,7 @@ cython==0.29.37 # via # -r /ray/ci/../python/requirements/test-requirements.txt # gpy -dask==2022.10.1 ; python_version < "3.12" +dask==2022.10.2 ; python_version < "3.12" # via # -r /ray/ci/../python/requirements/ml/data-requirements.txt # distributed @@ -398,7 +398,7 @@ dill==0.3.7 # multiprocess distlib==0.3.7 # via virtualenv -distributed==2022.10.1 ; python_version < "3.12" +distributed==2022.10.2 ; python_version < "3.12" # via # -r /ray/ci/../python/requirements/ml/data-requirements.txt # dask diff --git a/release/ray_release/byod/requirements_byod_3.9.txt b/release/ray_release/byod/requirements_byod_3.9.txt index c5b98cebe695..f1dcf9ee13a8 100644 --- a/release/ray_release/byod/requirements_byod_3.9.txt +++ b/release/ray_release/byod/requirements_byod_3.9.txt @@ -657,9 +657,9 @@ cython==0.29.37 \ # via # -c release/ray_release/byod/requirements_compiled.txt # -r release/ray_release/byod/requirements_byod_3.9.in -dask[complete]==2022.10.1 ; python_version < "3.12" \ - --hash=sha256:2e6765bb6011c97c59fd4792540df679c703100443fcd99c82b98d8697295822 \ - --hash=sha256:79d283326045700af0de7e2be57fd663499958c63638bf5076839cbcde64aa3f +dask[complete]==2022.10.2 ; python_version < "3.12" \ + --hash=sha256:42cb43f601709575fa46ce09e74bea83fdd464187024f56954e09d9b428ceaab \ + --hash=sha256:928003a97b890a14c8a09a01f15320d261053bda530a8bf191d84f33db4a63b8 # via # -c release/ray_release/byod/requirements_compiled.txt # -r release/ray_release/byod/requirements_byod_3.9.in @@ -680,9 +680,9 @@ diskcache==5.6.3 \ --hash=sha256:2c3a3fa2743d8535d832ec61c2054a1641f41775aa7c556758a109941e33e4fc \ --hash=sha256:5e31b2d5fbad117cc363ebaf6b689474db18a1f6438bc82358b024abd4c2ca19 # via petastorm -distributed==2022.10.1 ; python_version < "3.12" \ - --hash=sha256:31abab8ecc76951875828a3689d47dc4f20226b3ec99a0dc1af6183d02dbe5fe \ - --hash=sha256:42c6fe7d3bea491e23ce020879c411f2ecfecdb4914a6cb6b4a63530a7b0fa70 +distributed==2022.10.2 ; python_version < "3.12" \ + --hash=sha256:53f0a5bf6efab9a5ab3345cd913f6d3f3d4ea444ee2edbea331c7fef96fd67d0 \ + --hash=sha256:ae4fffdb55c6cb510ba1cbdf2856563af80ebf93e5ceacb91c1ce79e7da108d8 # via # -c release/ray_release/byod/requirements_compiled.txt # dask From 824c29fe0569750f4a73969d386df8943dcc3f3e Mon Sep 17 00:00:00 2001 From: Kai-Hsun Chen Date: Mon, 25 Nov 2024 20:47:23 -0800 Subject: [PATCH 090/107] [core][autoscaler] Remove unused deprecated flags for AutoscalerMonitor (#48919) Signed-off-by: kaihsun --- python/ray/autoscaler/_private/monitor.py | 10 ---------- python/ray/autoscaler/v2/monitor.py | 10 ---------- 2 files changed, 20 deletions(-) diff --git a/python/ray/autoscaler/_private/monitor.py b/python/ray/autoscaler/_private/monitor.py index a641652615df..90b5610f59ae 100644 --- a/python/ray/autoscaler/_private/monitor.py +++ b/python/ray/autoscaler/_private/monitor.py @@ -604,16 +604,6 @@ def log_resource_batch_data_if_desired( parser.add_argument( "--gcs-address", 
required=False, type=str, help="The address (ip:port) of GCS." ) - parser.add_argument( - "--redis-address", required=False, type=str, help="This is deprecated" - ) - parser.add_argument( - "--redis-password", - required=False, - type=str, - default=None, - help="This is deprecated", - ) parser.add_argument( "--autoscaling-config", required=False, diff --git a/python/ray/autoscaler/v2/monitor.py b/python/ray/autoscaler/v2/monitor.py index c9334244aa8c..558725f3e78f 100644 --- a/python/ray/autoscaler/v2/monitor.py +++ b/python/ray/autoscaler/v2/monitor.py @@ -198,16 +198,6 @@ def record_autoscaler_v2_usage(gcs_client: GcsClient) -> None: parser.add_argument( "--gcs-address", required=False, type=str, help="The address (ip:port) of GCS." ) - parser.add_argument( - "--redis-address", required=False, type=str, help="This is deprecated" - ) - parser.add_argument( - "--redis-password", - required=False, - type=str, - default=None, - help="This is deprecated", - ) parser.add_argument( "--autoscaling-config", required=False, From 104b2bce51933e1c9360c70461f5fea40663190a Mon Sep 17 00:00:00 2001 From: Balaji Veeramani Date: Mon, 25 Nov 2024 22:58:46 -0600 Subject: [PATCH 091/107] [CI] Add "defaults" feature for release test configs (#48880) The Ray Data release test config contains lots of duplicate configuration (e.g., the working directory and team name). To reduce the maintenance cost and improve readability, this PR adds a feature where you can specify defaults. --------- Signed-off-by: Balaji Veeramani --- release/ray_release/config.py | 11 ++++++++ release/ray_release/tests/test_config.py | 36 ++++++++++++++++++++++++ release/release_data_tests.yaml | 33 ++++------------------ 3 files changed, 53 insertions(+), 27 deletions(-) diff --git a/release/ray_release/config.py b/release/ray_release/config.py index 884292545f1d..c38fa4b2f7e4 100644 --- a/release/ray_release/config.py +++ b/release/ray_release/config.py @@ -81,11 +81,22 @@ def _test_definition_invariant( def parse_test_definition(test_definitions: List[TestDefinition]) -> List[Test]: + default_definition = {} tests = [] for test_definition in test_definitions: + if test_definition["name"] == "DEFAULTS": + default_definition = copy.deepcopy(test_definition) + continue + + # Add default values to the test definition. 
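+        # Merge-semantics sketch (assumed here, not asserted by this diff):
+        # deep_update recursively merges nested dicts, with the per-test
+        # values taking precedence over the DEFAULTS entry. For hypothetical
+        # values:
+        #   deep_update({"cluster": {"byod": {"type": "gpu"}}},
+        #               {"cluster": {"cluster_compute": "c.yaml"}})
+        #   -> {"cluster": {"byod": {"type": "gpu"},
+        #                   "cluster_compute": "c.yaml"}}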
+ test_definition = deep_update( + copy.deepcopy(default_definition), test_definition + ) + if "variations" not in test_definition: tests.append(Test(test_definition)) continue + variations = test_definition.pop("variations") _test_definition_invariant( test_definition, diff --git a/release/ray_release/tests/test_config.py b/release/ray_release/tests/test_config.py index 9b1f5adbb929..c7884b116880 100644 --- a/release/ray_release/tests/test_config.py +++ b/release/ray_release/tests/test_config.py @@ -93,6 +93,42 @@ def test_parse_test_definition(): parse_test_definition([invalid_test_definition]) +def test_parse_test_definition_with_defaults(): + test_definitions = yaml.safe_load( + """ + - name: DEFAULTS + working_dir: default_working_dir + - name: sample_test_with_default_working_dir + frequency: nightly + team: sample + cluster: + byod: + type: gpu + cluster_compute: compute.yaml + run: + timeout: 100 + script: python script.py + - name: sample_test_with_overridden_working_dir + working_dir: overridden_working_dir + frequency: nightly + team: sample + cluster: + byod: + type: gpu + cluster_compute: compute.yaml + run: + timeout: 100 + script: python script.py + """ + ) + test_with_default, test_with_override = parse_test_definition(test_definitions) + schema = load_schema_file() + assert not validate_test(test_with_default, schema) + assert not validate_test(test_with_override, schema) + assert test_with_default["working_dir"] == "default_working_dir" + assert test_with_override["working_dir"] == "overridden_working_dir" + + def test_schema_validation(): test = VALID_TEST.copy() diff --git a/release/release_data_tests.yaml b/release/release_data_tests.yaml index 752aaacb4fb0..1a7adae918be 100644 --- a/release/release_data_tests.yaml +++ b/release/release_data_tests.yaml @@ -1,8 +1,4 @@ -############### -# Reading tests -############### - -- name: read_parquet +- name: DEFAULTS group: data-tests working_dir: nightly_tests/dataset @@ -13,23 +9,17 @@ byod: type: gpu cluster_compute: multi_node_autoscaling_compute.yaml + +############### +# Reading tests +############### +- name: read_parquet run: timeout: 3600 script: python read_and_consume_benchmark.py s3://ray-benchmark-data/parquet/10TiB --format parquet --iterate - name: read_images - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - - cluster: - byod: - type: gpu - cluster_compute: multi_node_autoscaling_compute.yaml - run: timeout: 3600 script: python read_and_consume_benchmark.py s3://air-example-data-2/300G-image-data-synthetic-raw --format image --iterate @@ -39,17 +29,6 @@ ############### - name: count_parquet - group: data-tests - working_dir: nightly_tests/dataset - - frequency: nightly - team: data - - cluster: - byod: - type: gpu - cluster_compute: multi_node_autoscaling_compute.yaml - run: timeout: 600 script: python read_and_consume_benchmark.py s3://ray-benchmark-data/parquet/10TiB --format parquet --count From 5e661d2adec29bf9b638ad2d795e41bb03b62977 Mon Sep 17 00:00:00 2001 From: Kai-Hsun Chen Date: Mon, 25 Nov 2024 21:25:45 -0800 Subject: [PATCH 092/107] [core] Make `include_log_monitor` configurable in the `ray start` command (#48891) Signed-off-by: kaihsun --- python/ray/scripts/scripts.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/python/ray/scripts/scripts.py b/python/ray/scripts/scripts.py index 1f26a483a7aa..eed702bb7438 100644 --- a/python/ray/scripts/scripts.py +++ b/python/ray/scripts/scripts.py @@ -622,6 +622,15 @@ def debug(address: str, verbose: 
bool): type=str, help="a JSON serialized dictionary mapping label name to label value.", ) +@click.option( + "--include-log-monitor", + default=None, + type=bool, + help="If set to True or left unset, a log monitor will start monitoring " + "the log files of all processes on this node and push their contents to GCS. " + "Only one log monitor should be started per physical host to avoid log " + "duplication on the driver process.", +) @add_click_logging_options @PublicAPI def start( @@ -668,6 +677,7 @@ def start( ray_debugger_external, disable_usage_stats, labels, + include_log_monitor, ): """Start Ray processes manually on the local machine.""" @@ -757,6 +767,7 @@ def start( no_monitor=no_monitor, tracing_startup_hook=tracing_startup_hook, ray_debugger_external=ray_debugger_external, + include_log_monitor=include_log_monitor, ) if ray_constants.RAY_START_HOOK in os.environ: From 58425b9ff03989d5a50cdfb59ac66742b512dab6 Mon Sep 17 00:00:00 2001 From: Mengjin Yan Date: Mon, 25 Nov 2024 21:36:51 -0800 Subject: [PATCH 093/107] [Core] Add task_name, task_function_name and actor_name in Structured Logging (#48703) Signed-off-by: Mengjin Yan --- python/ray/_private/ray_logging/constants.py | 3 + python/ray/_private/ray_logging/filters.py | 9 ++ python/ray/_private/worker.py | 8 ++ python/ray/_raylet.pyx | 55 +++++++- python/ray/includes/libcoreworker.pxd | 2 + python/ray/runtime_context.py | 124 ++++++++++++++++-- python/ray/serve/tests/test_logging.py | 12 ++ python/ray/tests/test_logging_2.py | 6 + python/ray/tests/test_runtime_context.py | 130 +++++++++++++++++++ src/ray/core_worker/core_worker.h | 13 ++ 10 files changed, 350 insertions(+), 12 deletions(-) diff --git a/python/ray/_private/ray_logging/constants.py b/python/ray/_private/ray_logging/constants.py index de84d510c16c..54552bdfe1d7 100644 --- a/python/ray/_private/ray_logging/constants.py +++ b/python/ray/_private/ray_logging/constants.py @@ -41,6 +41,9 @@ class LogKey(str, Enum): NODE_ID = "node_id" ACTOR_ID = "actor_id" TASK_ID = "task_id" + ACTOR_NAME = "actor_name" + TASK_NAME = "task_name" + TASK_FUNCTION_NAME = "task_func_name" # Logger built-in context ASCTIME = "asctime" diff --git a/python/ray/_private/ray_logging/filters.py b/python/ray/_private/ray_logging/filters.py index e7003022040a..91233a2b11c6 100644 --- a/python/ray/_private/ray_logging/filters.py +++ b/python/ray/_private/ray_logging/filters.py @@ -20,4 +20,13 @@ def filter(self, record): task_id = runtime_context.get_task_id() if task_id is not None: setattr(record, LogKey.TASK_ID.value, task_id) + task_name = runtime_context.get_task_name() + if task_name is not None: + setattr(record, LogKey.TASK_NAME.value, task_name) + task_function_name = runtime_context.get_task_function_name() + if task_function_name is not None: + setattr(record, LogKey.TASK_FUNCTION_NAME.value, task_function_name) + actor_name = runtime_context.get_actor_name() + if actor_name is not None: + setattr(record, LogKey.ACTOR_NAME.value, actor_name) return True diff --git a/python/ray/_private/worker.py b/python/ray/_private/worker.py index e62375c5e523..d2b0cf3b013d 100644 --- a/python/ray/_private/worker.py +++ b/python/ray/_private/worker.py @@ -531,6 +531,14 @@ def actor_name(self): def current_task_id(self): return self.core_worker.get_current_task_id() + @property + def current_task_name(self): + return self.core_worker.get_current_task_name() + + @property + def current_task_function_name(self): + return self.core_worker.get_current_task_function_name() + @property def 
current_node_id(self):
         return self.core_worker.get_current_node_id()

diff --git a/python/ray/_raylet.pyx b/python/ray/_raylet.pyx
index 944adba8fc33..f3d93dce33ba 100644
--- a/python/ray/_raylet.pyx
+++ b/python/ray/_raylet.pyx
@@ -261,6 +261,9 @@ cdef optional[ObjectIDIndexType] NULL_PUT_INDEX = nullopt
 # https://docs.python.org/3/library/contextvars.html#contextvars.ContextVar
 # It is thread-safe.
 async_task_id = contextvars.ContextVar('async_task_id', default=None)
+async_task_name = contextvars.ContextVar('async_task_name', default=None)
+async_task_function_name = contextvars.ContextVar('async_task_function_name',
+                                                  default=None)


 class DynamicObjectRefGenerator:
@@ -1815,7 +1818,8 @@ cdef void execute_task(
             return core_worker.run_async_func_or_coro_in_event_loop(
                 async_function, function_descriptor,
                 name_of_concurrency_group_to_execute, task_id=task_id,
-                func_args=(actor, *arguments), func_kwargs=kwarguments)
+                task_name=task_name, func_args=(actor, *arguments),
+                func_kwargs=kwarguments)

         return function(actor, *arguments, **kwarguments)

@@ -1927,7 +1931,8 @@ cdef void execute_task(
                     execute_streaming_generator_async(context),
                     function_descriptor,
                     name_of_concurrency_group_to_execute,
-                    task_id=task_id)
+                    task_id=task_id,
+                    task_name=task_name)
             else:
                 execute_streaming_generator_sync(context)

@@ -3415,6 +3420,48 @@ cdef class CoreWorker:
         with nogil:
             CCoreWorkerProcess.GetCoreWorker().Exit(c_exit_type, detail, null_ptr)

+    def get_current_task_name(self) -> str:
+        """Return the current task name.
+
+        If it is a normal task, it returns the task name from the main thread.
+        If it is a threaded actor, it returns the task name for the current thread.
+        If it is an async actor, it returns the task name stored in a contextvar for
+        the current asyncio task.
+        """
+        # We can only obtain the correct task name within an asyncio task
+        # via the async_task_name contextvar. We try this first.
+        # It is needed because the core worker's GetCurrentTask API
+        # doesn't have asyncio context, thus it cannot return the
+        # correct task name.
+        task_name = async_task_name.get()
+        if task_name is None:
+            # If it is not within asyncio context, fall back to the task name
+            # obtainable from the core worker.
+            task_name = CCoreWorkerProcess.GetCoreWorker().GetCurrentTaskName() \
+                .decode("utf-8")
+        return task_name
+
+    def get_current_task_function_name(self) -> str:
+        """Return the current task function.
+
+        If it is a normal task, it returns the task function from the main thread.
+        If it is a threaded actor, it returns the task function for the current thread.
+        If it is an async actor, it returns the task function stored in a contextvar for
+        the current asyncio task.
+        """
+        # We can only obtain the correct task function within an asyncio task
+        # via the async_task_function_name contextvar. We try this first.
+        # It is needed because the core worker's GetCurrentTask API
+        # doesn't have asyncio context, thus it cannot return the
+        # correct task function.
+        task_function_name = async_task_function_name.get()
+        if task_function_name is None:
+            # If it is not within asyncio context, fall back to the task function
+            # name obtainable from the core worker.
+            task_function_name = CCoreWorkerProcess.GetCoreWorker() \
+                .GetCurrentTaskFunctionName().decode("utf-8")
+        return task_function_name
+
    def get_current_task_id(self) -> TaskID:
        """Return the current task ID. 
@@ -4822,6 +4869,7 @@ cdef class CoreWorker:
             specified_cgname: str,
             *,
             task_id: Optional[TaskID] = None,
+            task_name: Optional[str] = None,
             func_args: Optional[Tuple] = None,
             func_kwargs: Optional[Dict] = None,
     ):
@@ -4868,6 +4916,9 @@ cdef class CoreWorker:
         try:
             if task_id:
                 async_task_id.set(task_id)
+            if task_name is not None:
+                async_task_name.set(task_name)
+            async_task_function_name.set(function_descriptor.repr)

             if inspect.isawaitable(func_or_coro):
                 coroutine = func_or_coro
diff --git a/python/ray/includes/libcoreworker.pxd b/python/ray/includes/libcoreworker.pxd
index 242c5f10dd49..87f5d59a8583 100644
--- a/python/ray/includes/libcoreworker.pxd
+++ b/python/ray/includes/libcoreworker.pxd
@@ -191,6 +191,8 @@ cdef extern from "ray/core_worker/core_worker.h" nogil:
         CJobID GetCurrentJobId()
         CTaskID GetCurrentTaskId()
+        const c_string GetCurrentTaskName()
+        const c_string GetCurrentTaskFunctionName()
         void UpdateTaskIsDebuggerPaused(
             const CTaskID &task_id,
             const c_bool is_debugger_paused)
diff --git a/python/ray/runtime_context.py b/python/ray/runtime_context.py
index 78f7cee9502d..5cacae69371b 100644
--- a/python/ray/runtime_context.py
+++ b/python/ray/runtime_context.py
@@ -124,7 +124,7 @@ def get_worker_id(self) -> str:
     @property
     @Deprecated(message="Use get_task_id() instead", warning=True)
     def task_id(self):
-        """Get current task ID for this worker.
+        """Get current task ID for this worker.

         Task ID is the id of a Ray task. This shouldn't be used in a driver process.
@@ -155,7 +155,7 @@ def f():
         Returns:
             The current worker's task id. None if there's no task id.
         """
-        # only worker mode has actor_id
+        # only worker mode has task_id
         assert (
             self.worker.mode == ray._private.worker.WORKER_MODE
         ), f"This method is only available when the process is a\
 worker. Current mode: {self.worker.mode}"
         task_id = self.worker.current_task_id
         return task_id if not task_id.is_nil() else None

     def get_task_id(self) -> Optional[str]:
-        """Get current task ID for this worker or driver.
+        """Get current task ID for this worker.

         Task ID is the id of a Ray task. The ID will be in hex format.
         This shouldn't be used in a driver process.
@@ -201,7 +201,7 @@ def get_task_id():
         Returns:
             The current worker's task id in hex. None if there's no task id.
         """
-        # only worker mode has actor_id
+        # only worker mode has task_id
         if self.worker.mode != ray._private.worker.WORKER_MODE:
             logger.warning(
                 "This method is only available when the process is a "
@@ -212,12 +212,116 @@ def get_task_id():
         return task_id.hex() if not task_id.is_nil() else None

     def _get_current_task_id(self) -> TaskID:
-        async_task_id = ray._raylet.async_task_id.get()
-        if async_task_id is None:
-            task_id = self.worker.current_task_id
-        else:
-            task_id = async_task_id
-        return task_id
+        return self.worker.current_task_id
+
+    def get_task_name(self) -> Optional[str]:
+        """Get current task name for this worker.
+
+        Task name by default is the task's function call string. It can also be
+        specified in options when triggering a task.
+
+        Example:
+
+            .. testcode::
+
+                import ray
+
+                @ray.remote
+                class Actor:
+                    def get_task_name(self):
+                        return ray.get_runtime_context().get_task_name()
+
+                @ray.remote
+                class AsyncActor:
+                    async def get_task_name(self):
+                        return ray.get_runtime_context().get_task_name()
+
+                @ray.remote
+                def get_task_name():
+                    return ray.get_runtime_context().get_task_name()
+
+                a = Actor.remote()
+                b = AsyncActor.remote()
+                # Task names are available for actor tasks.
+                print(ray.get(a.get_task_name.remote()))
+                # Task names are available for async actor tasks. 
+                print(ray.get(b.get_task_name.remote()))
+                # Task names are available for normal tasks.
+                # Get default task name
+                print(ray.get(get_task_name.remote()))
+                # Get specified task name
+                print(ray.get(get_task_name.options(name="task_name").remote()))
+
+            .. testoutput::
+                :options: +MOCK
+
+                Actor.get_task_name
+                AsyncActor.get_task_name
+                get_task_name
+                task_name
+
+        Returns:
+            The current worker's task name
+        """
+        # only worker mode has task_name
+        if self.worker.mode != ray._private.worker.WORKER_MODE:
+            logger.warning(
+                "This method is only available when the process is a "
+                f"worker. Current mode: {self.worker.mode}"
+            )
+            return None
+        return self.worker.current_task_name
+
+    def get_task_function_name(self) -> Optional[str]:
+        """Get current task function name string for this worker.
+
+        Example:
+
+            .. testcode::
+
+                import ray
+
+                @ray.remote
+                class Actor:
+                    def get_task_function_name(self):
+                        return ray.get_runtime_context().get_task_function_name()
+
+                @ray.remote
+                class AsyncActor:
+                    async def get_task_function_name(self):
+                        return ray.get_runtime_context().get_task_function_name()
+
+                @ray.remote
+                def get_task_function_name():
+                    return ray.get_runtime_context().get_task_function_name()
+
+                a = Actor.remote()
+                b = AsyncActor.remote()
+                # Task functions are available for actor tasks.
+                print(ray.get(a.get_task_function_name.remote()))
+                # Task functions are available for async actor tasks.
+                print(ray.get(b.get_task_function_name.remote()))
+                # Task functions are available for normal tasks.
+                print(ray.get(get_task_function_name.remote()))
+
+            .. testoutput::
+                :options: +MOCK
+
+                [python module name].Actor.get_task_function_name
+                [python module name].AsyncActor.get_task_function_name
+                [python module name].get_task_function_name
+
+        Returns:
+            The current worker's task function call string
+        """
+        # only worker mode has task_function_name
+        if self.worker.mode != ray._private.worker.WORKER_MODE:
+            logger.warning(
+                "This method is only available when the process is a "
+                f"worker. 
Current mode: {self.worker.mode}" + ) + return None + return self.worker.current_task_function_name @property @Deprecated(message="Use get_actor_id() instead", warning=True) diff --git a/python/ray/serve/tests/test_logging.py b/python/ray/serve/tests/test_logging.py index 061dafd75fe0..24f9a47a01a1 100644 --- a/python/ray/serve/tests/test_logging.py +++ b/python/ray/serve/tests/test_logging.py @@ -351,6 +351,9 @@ def fn(*args): "actor_id": ray.get_runtime_context().get_actor_id(), "worker_id": ray.get_runtime_context().get_worker_id(), "node_id": ray.get_runtime_context().get_node_id(), + "task_name": ray.get_runtime_context().get_task_name(), + "task_func_name": ray.get_runtime_context().get_task_function_name(), + "actor_name": ray.get_runtime_context().get_actor_name(), } @serve.deployment( @@ -369,6 +372,9 @@ def __call__(self, req: starlette.requests.Request): "actor_id": ray.get_runtime_context().get_actor_id(), "worker_id": ray.get_runtime_context().get_worker_id(), "node_id": ray.get_runtime_context().get_node_id(), + "task_name": ray.get_runtime_context().get_task_name(), + "task_func_name": ray.get_runtime_context().get_task_function_name(), + "actor_name": ray.get_runtime_context().get_actor_name(), } serve.run(fn.bind(), name="app1", route_prefix="/fn") @@ -418,6 +424,9 @@ def check_log(): f'"worker_id": "{resp["worker_id"]}", ' f'"node_id": "{resp["node_id"]}", ' f'"actor_id": "{resp["actor_id"]}", ' + f'"task_name": "{resp["task_name"]}", ' + f'"task_func_name": "{resp["task_func_name"]}", ' + f'"actor_name": "{resp["actor_name"]}", ' f'"deployment": "{resp["app_name"]}_fn", ' f'"replica": "{method_replica_id}", ' f'"component_name": "replica".*' @@ -430,6 +439,9 @@ def check_log(): f'"worker_id": "{resp2["worker_id"]}", ' f'"node_id": "{resp2["node_id"]}", ' f'"actor_id": "{resp2["actor_id"]}", ' + f'"task_name": "{resp2["task_name"]}", ' + f'"task_func_name": "{resp2["task_func_name"]}", ' + f'"actor_name": "{resp2["actor_name"]}", ' f'"deployment": "{resp2["app_name"]}_Model", ' f'"replica": "{class_method_replica_id}", ' f'"component_name": "replica".*' diff --git a/python/ray/tests/test_logging_2.py b/python/ray/tests/test_logging_2.py index b48b04e44a59..9d5be165f9ac 100644 --- a/python/ray/tests/test_logging_2.py +++ b/python/ray/tests/test_logging_2.py @@ -54,11 +54,14 @@ def f(): "worker_id": runtime_context.get_worker_id(), "node_id": runtime_context.get_node_id(), "task_id": runtime_context.get_task_id(), + "task_name": runtime_context.get_task_name(), + "task_func_name": runtime_context.get_task_function_name(), } for attr in should_exist: assert hasattr(record, attr) assert getattr(record, attr) == expected_values[attr] assert not hasattr(record, "actor_id") + assert not hasattr(record, "actor_name") obj_ref = f.remote() ray.get(obj_ref) @@ -77,7 +80,10 @@ def f(self): "worker_id": runtime_context.get_worker_id(), "node_id": runtime_context.get_node_id(), "actor_id": runtime_context.get_actor_id(), + "actor_name": runtime_context.get_actor_name(), "task_id": runtime_context.get_task_id(), + "task_name": runtime_context.get_task_name(), + "task_func_name": runtime_context.get_task_function_name(), } for attr in should_exist: assert hasattr(record, attr) diff --git a/python/ray/tests/test_runtime_context.py b/python/ray/tests/test_runtime_context.py index 3835a7e7eb3a..23bb39c6ab4d 100644 --- a/python/ray/tests/test_runtime_context.py +++ b/python/ray/tests/test_runtime_context.py @@ -264,6 +264,136 @@ def test_auto_init(shutdown_only): assert ray.is_initialized() 
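(Editor's aside, not part of the patch: the diff below adds tests that call the new `get_task_name()` / `get_task_function_name()` APIs directly. For a sense of how the same context surfaces in structured logs, here is a minimal sketch. It assumes the filter class in `python/ray/_private/ray_logging/filters.py` is named `CoreContextFilter`; the logger name and format string are illustrative, and it must run inside a Ray worker so that the task context is populated.)

```python
# Illustrative sketch only -- not part of this patch.
import logging

import ray
from ray._private.ray_logging.filters import CoreContextFilter


@ray.remote
def f():
    handler = logging.StreamHandler()
    # task_name and task_func_name are the record attributes this change adds;
    # the filter only sets them when the process is a Ray worker.
    handler.setFormatter(
        logging.Formatter("%(task_name)s %(task_func_name)s :: %(message)s")
    )
    handler.addFilter(CoreContextFilter())
    logger = logging.getLogger("demo")
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    logger.info("hello")  # e.g. prints the task name and function name with the message


ray.init()
ray.get(f.remote())
```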
+def test_get_task_name(shutdown_only):
+    ray.init()
+
+    # for a normal task
+    @ray.remote
+    def get_task_name_for_normal_task():
+        return ray.get_runtime_context().get_task_name()
+
+    expected_task_name = "normal_task_name"
+    task_name = ray.get(
+        get_task_name_for_normal_task.options(name=expected_task_name).remote()
+    )
+    assert (
+        task_name == expected_task_name
+    ), f"Check normal task name failed. expected={expected_task_name}, \
+actual={task_name}"
+
+    # for an actor task
+    @ray.remote
+    class Actor:
+        def get_task_name_for_actor_task(self):
+            return ray.get_runtime_context().get_task_name()
+
+    expected_task_name = "Actor.get_task_name_for_actor_task"
+    actor = Actor.remote()
+    task_name = ray.get(actor.get_task_name_for_actor_task.remote())
+    assert (
+        task_name == expected_task_name
+    ), f"Check actor task name failed. expected={expected_task_name}, \
+actual={task_name}"
+
+    # for a threaded actor task
+    @ray.remote
+    class ThreadedActor:
+        def get_task_name_for_threaded_actor_task(self):
+            return ray.get_runtime_context().get_task_name()
+
+    expected_task_name = "ThreadedActor.get_task_name_for_threaded_actor_task"
+    threaded_actor = ThreadedActor.options(max_concurrency=2).remote()
+    task_name = ray.get(threaded_actor.get_task_name_for_threaded_actor_task.remote())
+    assert (
+        task_name == expected_task_name
+    ), f"Check actor task name failed. expected={expected_task_name}, \
+actual={task_name}"
+
+    # for an async actor task
+    @ray.remote
+    class AsyncActor:
+        async def get_task_name_for_async_actor_task(self):
+            return ray.get_runtime_context().get_task_name()
+
+    expected_task_name = "AsyncActor.get_task_name_for_async_actor_task"
+    async_actor = AsyncActor.remote()
+    task_name = ray.get(async_actor.get_task_name_for_async_actor_task.remote())
+    assert (
+        task_name == expected_task_name
+    ), f"Check actor task name failed. expected={expected_task_name}, \
+actual={task_name}"
+
+
+def test_get_task_function_name(shutdown_only):
+    ray.init()
+
+    # for a normal task
+    @ray.remote
+    def get_task_function_name_for_normal_task():
+        return ray.get_runtime_context().get_task_function_name()
+
+    expected_task_function_name = __name__ + ".get_task_function_name_for_normal_task"
+    task_function_name = ray.get(get_task_function_name_for_normal_task.remote())
+    assert (
+        task_function_name == expected_task_function_name
+    ), f"Check normal task function failed. expected={expected_task_function_name}, \
+actual={task_function_name}"
+
+    # for an actor task
+    @ray.remote
+    class Actor:
+        def get_task_function_name_for_actor_task(self):
+            return ray.get_runtime_context().get_task_function_name()
+
+    expected_task_function_name = (
+        __name__ + ".Actor.get_task_function_name_for_actor_task"
+    )
+    actor = Actor.remote()
+    task_function_name = ray.get(actor.get_task_function_name_for_actor_task.remote())
+    assert (
+        task_function_name == expected_task_function_name
+    ), f"Check actor task function failed. 
expected={expected_task_function_name}, \
+actual={task_function_name}"
+
+    # for a threaded actor task
+    @ray.remote
+    class ThreadedActor:
+        def get_task_function_name_for_threaded_actor_task(self):
+            return ray.get_runtime_context().get_task_function_name()
+
+    expected_task_function_name = (
+        __name__ + ".ThreadedActor.get_task_function_name_for_threaded_actor_task"
+    )
+    threaded_actor = ThreadedActor.options(max_concurrency=2).remote()
+    task_function_name = ray.get(
+        threaded_actor.get_task_function_name_for_threaded_actor_task.remote()
+    )
+    assert (
+        task_function_name == expected_task_function_name
+    ), f"Check actor task function failed. expected={expected_task_function_name}, \
+actual={task_function_name}"
+
+    # for an async actor task
+    @ray.remote
+    class AsyncActor:
+        async def get_task_function_name_for_async_actor_task(self):
+            return ray.get_runtime_context().get_task_function_name()
+
+    expected_task_function_name = (
+        __name__
+        + ".test_get_task_function_name.<locals>.AsyncActor.\
+get_task_function_name_for_async_actor_task"
+    )
+    async_actor = AsyncActor.remote()
+    task_function_name = ray.get(
+        async_actor.get_task_function_name_for_async_actor_task.remote()
+    )
+    assert (
+        task_function_name == expected_task_function_name
+    ), f"Check actor task function failed. expected={expected_task_function_name}, \
+actual={task_function_name}"
+
+
 def test_async_actor_task_id(shutdown_only):
     ray.init()

diff --git a/src/ray/core_worker/core_worker.h b/src/ray/core_worker/core_worker.h
index bdf7c13f15d5..4ab8a5cd0f3c 100644
--- a/src/ray/core_worker/core_worker.h
+++ b/src/ray/core_worker/core_worker.h
@@ -227,6 +227,19 @@ class CoreWorker : public rpc::CoreWorkerServiceHandler {
   const TaskID &GetCurrentTaskId() const { return worker_context_.GetCurrentTaskID(); }

+  const std::string GetCurrentTaskName() const {
+    return worker_context_.GetCurrentTask() != nullptr
+               ? worker_context_.GetCurrentTask()->GetName()
+               : "";
+  }
+
+  const std::string GetCurrentTaskFunctionName() const {
+    return (worker_context_.GetCurrentTask() != nullptr &&
+            worker_context_.GetCurrentTask()->FunctionDescriptor() != nullptr)
+               ? worker_context_.GetCurrentTask()->FunctionDescriptor()->CallSiteString()
+               : "";
+  }
+
   /// Controls the is debugger paused flag.
   ///
   /// \param task_id The task id of the task to update.

From 49e3061e356ed6e81e3a31547deb3e4028023b54 Mon Sep 17 00:00:00 2001
From: dentiny
Date: Tue, 26 Nov 2024 09:13:04 -0800
Subject: [PATCH 094/107] [core] Fix process start type annotation (#48930)

Signed-off-by: hjiang
---
 python/ray/_private/services.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/python/ray/_private/services.py b/python/ray/_private/services.py
index 0de0ba78405f..23461e950286 100644
--- a/python/ray/_private/services.py
+++ b/python/ray/_private/services.py
@@ -834,8 +834,8 @@ def start_ray_process(
     use_valgrind_profiler: bool = False,
     use_perftools_profiler: bool = False,
     use_tmux: bool = False,
-    stdout_file: Optional[str] = None,
-    stderr_file: Optional[str] = None,
+    stdout_file: Optional[IO[AnyStr]] = None,
+    stderr_file: Optional[IO[AnyStr]] = None,
     pipe_stdin: bool = False,
 ):
    """Start one of the Ray processes. 
@@ -1443,8 +1443,8 @@ def start_gcs_server( redis_address: str, log_dir: str, session_name: str, - stdout_file: Optional[str] = None, - stderr_file: Optional[str] = None, + stdout_file: Optional[IO[AnyStr]] = None, + stderr_file: Optional[IO[AnyStr]] = None, redis_password: Optional[str] = None, config: Optional[dict] = None, fate_share: Optional[bool] = None, From 0f2c62c09c6e57cfd5afbda9f8b72205fec2dea7 Mon Sep 17 00:00:00 2001 From: Edward Oakes Date: Tue, 26 Nov 2024 11:14:02 -0600 Subject: [PATCH 095/107] [serve] Add FF to run sync methods in a threadpool (#48897) ## Why are these changes needed? Adds a feature flag to run sync user-defined methods in a threadpool by default. This matches the existing behavior when using a FastAPI ingress. This should address a lot of user confusion and make it easier to write performant code by default. For example, just sticking a torch model call in a sync method will now provide reasonable performance out of the box. However, there may be some existing user code that is not thread safe, so we need to do a gentle migration. This PR introduces the behavior behind a feature flag and warns users about the upcoming change and how to opt into the new behavior or maintain existing behavior once it does (just adding `async def` will do it). I've opted to set the max thread pool size to `max_ongoing_requests`, which seems like a reasonable policy. If needed we can add a user-facing API for this in the future. TODO before merging: - [x] Get it working for sync generators. - [x] Add warning for default change (people can keep behavior by changing to async def). - [x] Add/update UserCallableWrapper tests. - [x] Add/update some integration tests (verify that request context is set correctly!). - [x] Set maximum thread pool size. ## Related issue number Closes https://github.com/ray-project/ray/issues/44354 Closes https://github.com/ray-project/ray/issues/44403 Closes https://github.com/ray-project/ray/issues/48903 --------- Signed-off-by: Edward Oakes --- python/ray/serve/_private/constants.py | 14 ++ .../ray/serve/_private/local_testing_mode.py | 6 +- python/ray/serve/_private/replica.py | 145 +++++++++++++++--- python/ray/serve/tests/BUILD | 22 +++ .../serve/tests/test_replica_sync_methods.py | 127 +++++++++++++++ .../tests/unit/test_user_callable_wrapper.py | 101 +++++++++--- 6 files changed, 374 insertions(+), 41 deletions(-) create mode 100644 python/ray/serve/tests/test_replica_sync_methods.py diff --git a/python/ray/serve/_private/constants.py b/python/ray/serve/_private/constants.py index f4c2adac37d1..6e45bae19ceb 100644 --- a/python/ray/serve/_private/constants.py +++ b/python/ray/serve/_private/constants.py @@ -355,3 +355,17 @@ RAY_SERVE_FORCE_LOCAL_TESTING_MODE = ( os.environ.get("RAY_SERVE_FORCE_LOCAL_TESTING_MODE", "0") == "1" ) + +# Run sync methods defined in the replica in a thread pool by default. +RAY_SERVE_RUN_SYNC_IN_THREADPOOL = ( + os.environ.get("RAY_SERVE_RUN_SYNC_IN_THREADPOOL", "0") == "1" +) + +RAY_SERVE_RUN_SYNC_IN_THREADPOOL_WARNING = ( + "Calling sync method '{method_name}' directly on the " + "asyncio loop. In a future version, sync methods will be run in a " + "threadpool by default. Ensure your sync methods are thread safe " + "or keep the existing behavior by making them `async def`. Opt " + "into the new behavior by setting " + "RAY_SERVE_RUN_SYNC_IN_THREADPOOL=1." 
+) diff --git a/python/ray/serve/_private/local_testing_mode.py b/python/ray/serve/_private/local_testing_mode.py index 6ccc16cd3628..84525625a938 100644 --- a/python/ray/serve/_private/local_testing_mode.py +++ b/python/ray/serve/_private/local_testing_mode.py @@ -10,7 +10,10 @@ import ray from ray import cloudpickle from ray.serve._private.common import DeploymentID, RequestMetadata -from ray.serve._private.constants import SERVE_LOGGER_NAME +from ray.serve._private.constants import ( + RAY_SERVE_RUN_SYNC_IN_THREADPOOL, + SERVE_LOGGER_NAME, +) from ray.serve._private.replica import UserCallableWrapper from ray.serve._private.replica_result import ReplicaResult from ray.serve._private.router import Router @@ -66,6 +69,7 @@ def make_local_deployment_handle( deployment.init_args, deployment.init_kwargs, deployment_id=deployment_id, + run_sync_methods_in_threadpool=RAY_SERVE_RUN_SYNC_IN_THREADPOOL, ) try: logger.info(f"Initializing local replica class for {deployment_id}.") diff --git a/python/ray/serve/_private/replica.py b/python/ray/serve/_private/replica.py index 079e3039d5d0..23fc7d237f94 100644 --- a/python/ray/serve/_private/replica.py +++ b/python/ray/serve/_private/replica.py @@ -1,5 +1,6 @@ import asyncio import concurrent.futures +import functools import inspect import logging import os @@ -7,9 +8,9 @@ import threading import time import traceback +import warnings from abc import ABC, abstractmethod from contextlib import contextmanager -from functools import wraps from importlib import import_module from typing import ( Any, @@ -23,6 +24,7 @@ ) import starlette.responses +from anyio import to_thread from starlette.types import ASGIApp, Message import ray @@ -47,6 +49,8 @@ HEALTH_CHECK_METHOD, RAY_SERVE_COLLECT_AUTOSCALING_METRICS_ON_HANDLE, RAY_SERVE_REPLICA_AUTOSCALING_METRIC_RECORD_PERIOD_S, + RAY_SERVE_RUN_SYNC_IN_THREADPOOL, + RAY_SERVE_RUN_SYNC_IN_THREADPOOL_WARNING, RECONFIGURE_METHOD, SERVE_CONTROLLER_NAME, SERVE_LOGGER_NAME, @@ -274,6 +278,7 @@ def __init__( init_args, init_kwargs, deployment_id=self._deployment_id, + run_sync_methods_in_threadpool=RAY_SERVE_RUN_SYNC_IN_THREADPOOL, ) # Guards against calling the user's callable constructor multiple times. 
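(Editor's aside, not part of the patch: a minimal sketch of the user-facing difference behind the new flag, under the assumption that `RAY_SERVE_RUN_SYNC_IN_THREADPOOL=1` is set in the replica's environment. The deployment names are made up for illustration.)

```python
# Illustrative sketch only -- not part of this patch.
import time

from ray import serve


@serve.deployment(max_ongoing_requests=8)
class SyncBlocker:
    def __call__(self) -> str:
        # With the flag on, this sync method runs in a threadpool capped at
        # max_ongoing_requests, so the sleep no longer blocks the replica's
        # asyncio event loop and requests can proceed concurrently.
        time.sleep(1)
        return "done"


@serve.deployment
class AsyncHandler:
    async def __call__(self) -> str:
        # Async methods keep the existing behavior: they always run directly
        # on the replica's event loop, flag or no flag.
        return "done"


app = SyncBlocker.bind()
# serve.run(app)  # then e.g.: requests.get("http://localhost:8000/")
```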
@@ -602,6 +607,11 @@ async def initialize(self, deployment_config: DeploymentConfig): self._user_callable_initialized = True if deployment_config: + await asyncio.wrap_future( + self._user_callable_wrapper.set_sync_method_threadpool_limit( + deployment_config.max_ongoing_requests + ) + ) await asyncio.wrap_future( self._user_callable_wrapper.call_reconfigure( deployment_config.user_config @@ -635,6 +645,11 @@ async def reconfigure(self, deployment_config: DeploymentConfig): if logging_config_changed: self._configure_logger_and_profilers(deployment_config.logging_config) + await asyncio.wrap_future( + self._user_callable_wrapper.set_sync_method_threadpool_limit( + deployment_config.max_ongoing_requests + ) + ) if user_config_changed: await asyncio.wrap_future( self._user_callable_wrapper.call_reconfigure( @@ -990,6 +1005,7 @@ def __init__( init_kwargs: Dict, *, deployment_id: DeploymentID, + run_sync_methods_in_threadpool: bool, ): if not (inspect.isfunction(deployment_def) or inspect.isclass(deployment_def)): raise TypeError( @@ -1003,6 +1019,8 @@ def __init__( self._is_function = inspect.isfunction(deployment_def) self._deployment_id = deployment_id self._destructor_called = False + self._run_sync_methods_in_threadpool = run_sync_methods_in_threadpool + self._warned_about_sync_method_change = False # Will be populated in `initialize_callable`. self._callable = None @@ -1033,7 +1051,7 @@ def _run_on_user_code_event_loop(f: Callable) -> Callable: f ), "_run_on_user_code_event_loop can only be used on coroutine functions." - @wraps(f) + @functools.wraps(f) def wrapper(self, *args, **kwargs) -> concurrent.futures.Future: return asyncio.run_coroutine_threadsafe( f(self, *args, **kwargs), @@ -1042,6 +1060,12 @@ def wrapper(self, *args, **kwargs) -> concurrent.futures.Future: return wrapper + @_run_on_user_code_event_loop + async def set_sync_method_threadpool_limit(self, limit: int): + # NOTE(edoakes): the limit is thread local, so this must + # be run on the user code event loop. + to_thread.current_default_thread_limiter().total_tokens = limit + def _get_user_callable_method(self, method_name: str) -> Callable: if self._is_function: return self._callable @@ -1082,17 +1106,89 @@ async def _send_user_result_over_asgi( else: await Response(result).send(scope, receive, send) - async def _call_func_or_gen(self, callable: Callable, *args, **kwargs) -> Any: + async def _call_func_or_gen( + self, + callable: Callable, + *, + args: Optional[Tuple[Any]] = None, + kwargs: Optional[Dict[str, Any]] = None, + request_metadata: Optional[RequestMetadata] = None, + generator_result_callback: Optional[Callable] = None, + run_sync_methods_in_threadpool_override: Optional[bool] = None, + ) -> Tuple[Any, bool]: """Call the callable with the provided arguments. This is a convenience wrapper that will work for `def`, `async def`, generator, and async generator functions. + + Returns the result and a boolean indicating if the result was a sync generator + that has already been consumed. 
""" - result = callable(*args, **kwargs) - if inspect.iscoroutine(result): - result = await result + sync_gen_consumed = False + args = args if args is not None else tuple() + kwargs = kwargs if kwargs is not None else dict() + run_sync_in_threadpool = ( + self._run_sync_methods_in_threadpool + if run_sync_methods_in_threadpool_override is None + else run_sync_methods_in_threadpool_override + ) + is_sync_method = ( + inspect.isfunction(callable) or inspect.ismethod(callable) + ) and not ( + inspect.iscoroutinefunction(callable) + or inspect.isasyncgenfunction(callable) + ) - return result + if is_sync_method and run_sync_in_threadpool: + is_generator = inspect.isgeneratorfunction(callable) + if is_generator: + sync_gen_consumed = True + if request_metadata and not request_metadata.is_streaming: + # TODO(edoakes): make this check less redundant with the one in + # _handle_user_method_result. + raise TypeError( + f"Method '{callable.__name__}' returned a generator. " + "You must use `handle.options(stream=True)` to call " + "generators on a deployment." + ) + + def run_callable(): + result = callable(*args, **kwargs) + if is_generator: + for r in result: + # TODO(edoakes): make this less redundant with the handling in + # _handle_user_method_result. + if request_metadata and request_metadata.is_grpc_request: + r = (request_metadata.grpc_context, r.SerializeToString()) + generator_result_callback(r) + + result = None + + return result + + # NOTE(edoakes): we use anyio.to_thread here because it's what Starlette + # uses (and therefore FastAPI too). The max size of the threadpool is + # set to max_ongoing_requests in the replica wrapper. + # anyio.to_thread propagates ContextVars to the worker thread automatically. + result = await to_thread.run_sync(run_callable) + else: + if ( + is_sync_method + and not self._warned_about_sync_method_change + and run_sync_methods_in_threadpool_override is None + ): + self._warned_about_sync_method_change = True + warnings.warn( + RAY_SERVE_RUN_SYNC_IN_THREADPOOL_WARNING.format( + method_name=callable.__name__, + ) + ) + + result = callable(*args, **kwargs) + if inspect.iscoroutine(result): + result = await result + + return result, sync_gen_consumed @property def user_callable(self) -> Optional[Callable]: @@ -1129,8 +1225,10 @@ async def initialize_callable(self) -> Optional[ASGIApp]: self._callable = self._deployment_def.__new__(self._deployment_def) await self._call_func_or_gen( self._callable.__init__, - *self._init_args, - **self._init_kwargs, + args=self._init_args, + kwargs=self._init_kwargs, + # Always run the constructor on the main user code thread. + run_sync_methods_in_threadpool_override=False, ) if isinstance(self._callable, ASGIAppReplicaWrapper): @@ -1192,7 +1290,7 @@ async def call_reconfigure(self, user_config: Any): ) await self._call_func_or_gen( getattr(self._callable, RECONFIGURE_METHOD), - user_config, + args=(user_config,), ) def _prepare_args_for_http_request( @@ -1264,6 +1362,7 @@ async def _handle_user_method_result( user_method_name: str, request_metadata: RequestMetadata, *, + sync_gen_consumed: bool, generator_result_callback: Optional[Callable], is_asgi_app: bool, asgi_args: Optional[ASGIArgs], @@ -1297,7 +1396,7 @@ async def _handle_user_method_result( # For the FastAPI codepath, the response has already been sent over # ASGI, but for the vanilla deployment codepath we need to send it. 
await self._send_user_result_over_asgi(result, asgi_args) - elif not request_metadata.is_http_request: + elif not request_metadata.is_http_request and not sync_gen_consumed: # If a unary method is called with stream=True for anything EXCEPT # an HTTP request, raise an error. # HTTP requests are always streaming regardless of if the method @@ -1382,12 +1481,20 @@ async def call_user_method( request_args[0], request_metadata, user_method_params ) - result = await self._handle_user_method_result( - await self._call_func_or_gen( - user_method, *request_args, **request_kwargs - ), + result, sync_gen_consumed = await self._call_func_or_gen( + user_method, + args=request_args, + kwargs=request_kwargs, + request_metadata=request_metadata, + generator_result_callback=generator_result_callback + if request_metadata.is_streaming + else None, + ) + return await self._handle_user_method_result( + result, user_method_name, request_metadata, + sync_gen_consumed=sync_gen_consumed, generator_result_callback=generator_result_callback, is_asgi_app=is_asgi_app, asgi_args=asgi_args, @@ -1412,8 +1519,6 @@ async def call_user_method( if receive_task is not None and not receive_task.done(): receive_task.cancel() - return result - @_run_on_user_code_event_loop async def call_destructor(self): """Explicitly call the `__del__` method of the user callable. @@ -1437,7 +1542,11 @@ async def call_destructor(self): try: if hasattr(self._callable, "__del__"): # Make sure to accept `async def __del__(self)` as well. - await self._call_func_or_gen(self._callable.__del__) + await self._call_func_or_gen( + self._callable.__del__, + # Always run the destructor on the main user callable thread. + run_sync_methods_in_threadpool_override=False, + ) if hasattr(self._callable, "__serve_multiplex_wrapper"): await getattr(self._callable, "__serve_multiplex_wrapper").shutdown() diff --git a/python/ray/serve/tests/BUILD b/python/ray/serve/tests/BUILD index 369b9a339c6f..9f3208084538 100644 --- a/python/ray/serve/tests/BUILD +++ b/python/ray/serve/tests/BUILD @@ -467,3 +467,25 @@ py_test_module_list( "//python/ray/serve:serve_lib", ], ) + + +# Test currently off-by-default behavior to run replica sync methods in a threadpool. +# TODO(edoakes): remove this once the FF is flipped on by default. 
+py_test_module_list( + size = "small", + env = {"RAY_SERVE_RUN_SYNC_IN_THREADPOOL": "1"}, + files = [ + "test_replica_sync_methods.py", + ], + name_suffix = "_with_run_sync_in_threadpool", + tags = [ + "exclusive", + "no_windows", + "team:serve", + ], + deps = [ + ":common", + ":conftest", + "//python/ray/serve:serve_lib", + ], +) diff --git a/python/ray/serve/tests/test_replica_sync_methods.py b/python/ray/serve/tests/test_replica_sync_methods.py new file mode 100644 index 000000000000..d6485704138f --- /dev/null +++ b/python/ray/serve/tests/test_replica_sync_methods.py @@ -0,0 +1,127 @@ +import asyncio +import sys + +import pytest +import requests +from anyio import to_thread +from fastapi import FastAPI +from starlette.responses import PlainTextResponse + +import ray +from ray import serve +from ray._private.test_utils import SignalActor, wait_for_condition +from ray.serve._private.constants import RAY_SERVE_RUN_SYNC_IN_THREADPOOL + + +@pytest.mark.skipif( + not RAY_SERVE_RUN_SYNC_IN_THREADPOOL, + reason="Run sync method in threadpool FF disabled.", +) +@pytest.mark.parametrize("use_fastapi", [False, True]) +def test_not_running_in_asyncio_loop(serve_instance, use_fastapi: bool): + if use_fastapi: + fastapi_app = FastAPI() + + @serve.deployment + @serve.ingress(fastapi_app) + class D: + @fastapi_app.get("/") + def root(self): + with pytest.raises(RuntimeError, match="no running event loop"): + asyncio.get_running_loop() + + else: + + @serve.deployment + class D: + def __call__(self) -> str: + with pytest.raises(RuntimeError, match="no running event loop"): + asyncio.get_running_loop() + + serve.run(D.bind()) + # Would error if the check fails. + requests.get("http://localhost:8000/").raise_for_status() + + +@pytest.mark.skipif( + not RAY_SERVE_RUN_SYNC_IN_THREADPOOL, + reason="Run sync method in threadpool FF disabled.", +) +def test_concurrent_execution(serve_instance): + signal_actor = SignalActor.remote() + + @serve.deployment + class D: + def do_sync(self): + ray.get(signal_actor.wait.remote()) + + async def do_async(self): + await signal_actor.wait.remote() + + h = serve.run(D.bind()) + + sync_results = [h.do_sync.remote(), h.do_sync.remote()] + async_results = [h.do_async.remote(), h.do_async.remote()] + + wait_for_condition(lambda: ray.get(signal_actor.cur_num_waiters.remote()) == 4) + ray.get(signal_actor.send.remote()) + [r.result() for r in sync_results + async_results] + + +@pytest.mark.skipif( + not RAY_SERVE_RUN_SYNC_IN_THREADPOOL, + reason="Run sync method in threadpool FF disabled.", +) +@pytest.mark.parametrize("use_fastapi", [False, True]) +def test_context_vars_propagated(serve_instance, use_fastapi: bool): + if use_fastapi: + fastapi_app = FastAPI() + + @serve.deployment + @serve.ingress(fastapi_app) + class D: + @fastapi_app.get("/") + def root(self): + return PlainTextResponse( + serve.context._serve_request_context.get().request_id + ) + + else: + + @serve.deployment + class D: + def __call__(self) -> str: + return PlainTextResponse( + serve.context._serve_request_context.get().request_id + ) + + serve.run(D.bind()) + + r = requests.get("http://localhost:8000/", headers={"X-Request-Id": "TEST-ID"}) + r.raise_for_status() + # If context vars weren't propagated, the request ID would be empty. 
+ assert r.text == "TEST-ID" + + +@pytest.mark.skipif( + not RAY_SERVE_RUN_SYNC_IN_THREADPOOL, + reason="Run sync method in threadpool FF disabled.", +) +def test_thread_limit_set_to_max_ongoing_requests(serve_instance): + @serve.deployment + class D: + async def __call__(self): + return to_thread.current_default_thread_limiter().total_tokens + + h = serve.run(D.bind()) + + # Check that it's set if max_ongoing_requests is defaulted. + assert h.remote().result() == 5 + + # Update to a custom value, check again. + h = serve.run(D.options(max_ongoing_requests=10).bind()) + assert h.remote().result() == 10 + + +if __name__ == "__main__": + sys.exit(pytest.main(["-v", "-s", __file__])) diff --git a/python/ray/serve/tests/unit/test_user_callable_wrapper.py b/python/ray/serve/tests/unit/test_user_callable_wrapper.py index b03c9ca7e39e..39188f32c421 100644 --- a/python/ray/serve/tests/unit/test_user_callable_wrapper.py +++ b/python/ray/serve/tests/unit/test_user_callable_wrapper.py @@ -4,7 +4,7 @@ import sys import threading from dataclasses import dataclass -from typing import AsyncGenerator, Callable, Generator, Optional +from typing import Any, AsyncGenerator, Callable, Dict, Generator, Optional, Tuple import pytest from fastapi import FastAPI @@ -90,13 +90,18 @@ async def basic_async_generator(n: int, raise_exception: bool = False): def _make_user_callable_wrapper( - callable: Optional[Callable] = None, *init_args, **init_kwargs + callable: Optional[Callable] = None, + *, + init_args: Optional[Tuple[Any]] = None, + init_kwargs: Optional[Dict[str, Any]] = None, + run_sync_methods_in_threadpool: bool = False, ) -> UserCallableWrapper: return UserCallableWrapper( callable if callable is not None else BasicClass, - init_args, - init_kwargs, + init_args or tuple(), + init_kwargs or dict(), deployment_id=DeploymentID(name="test_name"), + run_sync_methods_in_threadpool=run_sync_methods_in_threadpool, ) @@ -144,8 +149,11 @@ def test_calling_methods_before_initialize(): user_callable_wrapper.call_reconfigure(None).result() -def test_basic_class_callable(): - user_callable_wrapper = _make_user_callable_wrapper() +@pytest.mark.parametrize("run_sync_methods_in_threadpool", [False, True]) +def test_basic_class_callable(run_sync_methods_in_threadpool: bool): + user_callable_wrapper = _make_user_callable_wrapper( + run_sync_methods_in_threadpool=run_sync_methods_in_threadpool + ) user_callable_wrapper.initialize_callable().result() @@ -215,8 +223,11 @@ def test_basic_class_callable(): ).result() -def test_basic_class_callable_generators(): - user_callable_wrapper = _make_user_callable_wrapper() +@pytest.mark.parametrize("run_sync_methods_in_threadpool", [False, True]) +def test_basic_class_callable_generators(run_sync_methods_in_threadpool: bool): + user_callable_wrapper = _make_user_callable_wrapper( + run_sync_methods_in_threadpool=run_sync_methods_in_threadpool + ) user_callable_wrapper.initialize_callable().result() result_list = [] @@ -291,9 +302,12 @@ def test_basic_class_callable_generators(): assert result_list == [0] +@pytest.mark.parametrize("run_sync_methods_in_threadpool", [False, True]) @pytest.mark.parametrize("fn", [basic_sync_function, basic_async_function]) -def test_basic_function_callable(fn: Callable): - user_callable_wrapper = _make_user_callable_wrapper(fn) +def test_basic_function_callable(fn: Callable, run_sync_methods_in_threadpool: bool): + user_callable_wrapper = _make_user_callable_wrapper( + fn, run_sync_methods_in_threadpool=run_sync_methods_in_threadpool + ) 
user_callable_wrapper.initialize_callable().result() # Call non-generator function with is_streaming. @@ -325,9 +339,14 @@ def test_basic_function_callable(fn: Callable): ).result() +@pytest.mark.parametrize("run_sync_methods_in_threadpool", [False, True]) @pytest.mark.parametrize("fn", [basic_sync_generator, basic_async_generator]) -def test_basic_function_callable_generators(fn: Callable): - user_callable_wrapper = _make_user_callable_wrapper(fn) +def test_basic_function_callable_generators( + fn: Callable, run_sync_methods_in_threadpool: bool +): + user_callable_wrapper = _make_user_callable_wrapper( + fn, run_sync_methods_in_threadpool=run_sync_methods_in_threadpool + ) user_callable_wrapper.initialize_callable().result() result_list = [] @@ -366,36 +385,68 @@ def test_basic_function_callable_generators(fn: Callable): @pytest.mark.asyncio -async def test_user_code_runs_on_separate_loop(): +@pytest.mark.parametrize("run_sync_methods_in_threadpool", [False, True]) +async def test_user_code_runs_on_separate_loop(run_sync_methods_in_threadpool: bool): main_loop = asyncio.get_running_loop() class GetLoop: def __init__(self): self._constructor_loop = asyncio.get_running_loop() - def check_health(self): + async def check_health(self): check_health_loop = asyncio.get_running_loop() assert ( check_health_loop == self._constructor_loop ), "User constructor and health check should run on the same loop." return check_health_loop - def __call__(self) -> asyncio.AbstractEventLoop: + async def call_async(self) -> Optional[asyncio.AbstractEventLoop]: user_method_loop = asyncio.get_running_loop() assert ( user_method_loop == self._constructor_loop ), "User constructor and other methods should run on the same loop." + return user_method_loop - user_callable_wrapper = _make_user_callable_wrapper(GetLoop) + def call_sync(self): + if run_sync_methods_in_threadpool: + with pytest.raises(RuntimeError, match="no running event loop"): + asyncio.get_running_loop() + + user_method_loop = None + else: + user_method_loop = asyncio.get_running_loop() + assert ( + user_method_loop == self._constructor_loop + ), "User constructor and other methods should run on the same loop." + + return user_method_loop + + user_callable_wrapper = _make_user_callable_wrapper( + GetLoop, run_sync_methods_in_threadpool=run_sync_methods_in_threadpool + ) user_callable_wrapper.initialize_callable().result() - request_metadata = _make_request_metadata() + + # Async methods should all run on the same loop. + request_metadata = _make_request_metadata(call_method="call_async") user_code_loop = user_callable_wrapper.call_user_method( request_metadata, tuple(), dict() ).result() assert isinstance(user_code_loop, asyncio.AbstractEventLoop) assert user_code_loop != main_loop + # Sync methods should run on the same loop if run_sync_methods_in_threadpool is off, + # else run in no asyncio loop. + request_metadata = _make_request_metadata(call_method="call_sync") + user_code_loop = user_callable_wrapper.call_user_method( + request_metadata, tuple(), dict() + ).result() + if run_sync_methods_in_threadpool: + assert user_code_loop is None + else: + assert isinstance(user_code_loop, asyncio.AbstractEventLoop) + assert user_code_loop != main_loop + # `check_health` method asserts that it runs on the correct loop. 
user_callable_wrapper.call_user_health_check().result() @@ -412,7 +463,7 @@ def __call__(self) -> str: msg = "hello world" user_callable_wrapper = _make_user_callable_wrapper( AsyncInitializer, - msg, + init_args=(msg,), ) user_callable_wrapper.initialize_callable().result() request_metadata = _make_request_metadata() @@ -498,8 +549,11 @@ def stream(self, msg: serve_pb2.UserDefinedMessage): yield serve_pb2.UserDefinedResponse(greeting=f"Hello {msg.greeting} {i}!") -def test_grpc_unary_request(): - user_callable_wrapper = _make_user_callable_wrapper(gRPCClass) +@pytest.mark.parametrize("run_sync_methods_in_threadpool", [False, True]) +def test_grpc_unary_request(run_sync_methods_in_threadpool: bool): + user_callable_wrapper = _make_user_callable_wrapper( + gRPCClass, run_sync_methods_in_threadpool=run_sync_methods_in_threadpool + ) user_callable_wrapper.initialize_callable().result() grpc_request = gRPCRequest( @@ -518,8 +572,11 @@ def test_grpc_unary_request(): @pytest.mark.asyncio -def test_grpc_streaming_request(): - user_callable_wrapper = _make_user_callable_wrapper(gRPCClass) +@pytest.mark.parametrize("run_sync_methods_in_threadpool", [False, True]) +def test_grpc_streaming_request(run_sync_methods_in_threadpool: bool): + user_callable_wrapper = _make_user_callable_wrapper( + gRPCClass, run_sync_methods_in_threadpool=run_sync_methods_in_threadpool + ) user_callable_wrapper.initialize_callable() grpc_request = gRPCRequest( From ed3d48c552e0ac2f235773322686c6ad5c521cd5 Mon Sep 17 00:00:00 2001 From: Kai-Hsun Chen Date: Tue, 26 Nov 2024 10:09:05 -0800 Subject: [PATCH 096/107] [core][autoscaler] Autoscaler doesn't scale up correctly when the KubeRay RayCluster is not in the goal state (#48909) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Why are these changes needed? ### Issue * Create a Autoscaler V2 RayCluster CR. * head Pod: `num-cpus: 0` * worker Pod: Each worker Pod has 1 CPU, and the `maxReplicas` of the worker group is 10. * Run the following script in the head Pod: https://gist.github.com/kevin85421/6f09368ba48572e28f53654dca854b57 * There are 10 scale requests to add a new node. However, only some of them will be created (e.g., 5). ### Reason In the reproduction script above, the `cloud_instance_updater` will send a request to scale up one worker Pod 10 times because the `maxReplicas` of the worker group is set to 10. However, the construction of the scale_request depends on the Pods in the Kubernetes cluster. For example, * cluster state: RayCluster Replicas: 2, Ray Pods: 1 * 1st scale request: launch 1 node --> goal state: RayCluster Replicas: 2 (Ray Pods + 1) * 2nd scale request: launch 1 node --> goal state: RayCluster Replicas: 2 (Ray Pods + 1) --> **this should be 3!** The above example is expected to create 3 Pods. However, it will ultimately create only 2 Pods. ### Solution Use RayCluster CR instead of Ray Pods to build scale requests. ## Related issue number Closes #46473 ## Checks 10 worker Pods are created successfully. Screenshot 2024-11-24 at 2 11 39 AM - [ ] I've signed off every commit(by using the -s flag, i.e., `git commit -s`) in this PR. - [ ] I've run `scripts/format.sh` to lint the changes in this PR. - [ ] I've included any doc changes needed for https://docs.ray.io/en/master/. - [ ] I've added any new APIs to the API Reference. For example, if I added a method in Tune, I've added it in `doc/source/tune/api/` under the corresponding `.rst` file. - [ ] I've made sure the tests are passing. 
Note that there might be a few flaky tests, see the recent failures at https://flakey-tests.ray.io/ - Testing Strategy - [ ] Unit tests - [ ] Release tests - [ ] This PR is not tested :( --------- Signed-off-by: kaihsun --- .../kuberay/ray-cluster.complete.yaml | 2 +- .../cloud_providers/kuberay/cloud_provider.py | 42 +++++-- .../autoscaler/v2/tests/test_node_provider.py | 118 ++++++++++++++++++ .../tests/kuberay/test_autoscaling_config.py | 6 +- 4 files changed, 152 insertions(+), 16 deletions(-) diff --git a/python/ray/autoscaler/kuberay/ray-cluster.complete.yaml b/python/ray/autoscaler/kuberay/ray-cluster.complete.yaml index f51ac9d4c242..d57f5d6f23b9 100644 --- a/python/ray/autoscaler/kuberay/ray-cluster.complete.yaml +++ b/python/ray/autoscaler/kuberay/ray-cluster.complete.yaml @@ -108,7 +108,7 @@ spec: workerGroupSpecs: # the pod replicas in this group typed worker - replicas: 1 - minReplicas: 1 + minReplicas: 0 maxReplicas: 300 # logical group name, for this called small-group, also can be functional groupName: small-group diff --git a/python/ray/autoscaler/v2/instance_manager/cloud_providers/kuberay/cloud_provider.py b/python/ray/autoscaler/v2/instance_manager/cloud_providers/kuberay/cloud_provider.py index 19e236cb4d19..c1b8ddc2a31b 100644 --- a/python/ray/autoscaler/v2/instance_manager/cloud_providers/kuberay/cloud_provider.py +++ b/python/ray/autoscaler/v2/instance_manager/cloud_providers/kuberay/cloud_provider.py @@ -209,21 +209,25 @@ def _initialize_scale_request( cur_instances = self.instances # Get the worker groups that have pending deletes and the worker groups that - # have finished deletes. + # have finished deletes, and the set of workers included in the workersToDelete + # field of any worker group. ( worker_groups_with_pending_deletes, worker_groups_without_pending_deletes, - ) = self._get_workers_groups_with_deletes( - ray_cluster, set(cur_instances.keys()) - ) + worker_to_delete_set, + ) = self._get_workers_delete_info(ray_cluster, set(cur_instances.keys())) # Calculate the desired number of workers by type. num_workers_dict = defaultdict(int) - for _, cur_instance in cur_instances.items(): - if cur_instance.node_kind == NodeKind.HEAD: - # Only track workers. - continue - num_workers_dict[cur_instance.node_type] += 1 + worker_groups = ray_cluster["spec"].get("workerGroupSpecs", []) + for worker_group in worker_groups: + node_type = worker_group["groupName"] + # Handle the case where users manually increase `minReplicas` + # to scale up the number of worker Pods. In this scenario, + # `replicas` will be smaller than `minReplicas`. + num_workers_dict[node_type] = max( + worker_group["replicas"], worker_group["minReplicas"] + ) # Add to launch nodes. for node_type, count in to_launch.items(): @@ -242,6 +246,11 @@ def _initialize_scale_request( # Not possible to delete head node. continue + if to_delete_instance.cloud_instance_id in worker_to_delete_set: + # If the instance is already in the workersToDelete field of + # any worker group, skip it. + continue + num_workers_dict[to_delete_instance.node_type] -= 1 assert num_workers_dict[to_delete_instance.node_type] >= 0 to_delete_instances_by_type[to_delete_instance.node_type].append( @@ -321,6 +330,7 @@ def _submit_scale_request( # No patch required. 
return + logger.info(f"Submitting a scale request: {scale_request}") self._patch(f"rayclusters/{self._cluster_name}", patch_payload) def _add_launch_errors( @@ -392,9 +402,9 @@ def instances(self) -> Dict[CloudInstanceId, CloudInstance]: return copy.deepcopy(self._cached_instances) @staticmethod - def _get_workers_groups_with_deletes( + def _get_workers_delete_info( ray_cluster_spec: Dict[str, Any], node_set: Set[CloudInstanceId] - ) -> Tuple[Set[NodeType], Set[NodeType]]: + ) -> Tuple[Set[NodeType], Set[NodeType], Set[CloudInstanceId]]: """ Gets the worker groups that have pending deletes and the worker groups that have finished deletes. @@ -404,10 +414,13 @@ def _get_workers_groups_with_deletes( deletes. worker_groups_with_finished_deletes: The worker groups that have finished deletes. + worker_to_delete_set: A set of Pods that are included in the workersToDelete + field of any worker group. """ worker_groups_with_pending_deletes = set() worker_groups_with_deletes = set() + worker_to_delete_set = set() worker_groups = ray_cluster_spec["spec"].get("workerGroupSpecs", []) for worker_group in worker_groups: @@ -422,6 +435,7 @@ def _get_workers_groups_with_deletes( worker_groups_with_deletes.add(node_type) for worker in workersToDelete: + worker_to_delete_set.add(worker) if worker in node_set: worker_groups_with_pending_deletes.add(node_type) break @@ -429,7 +443,11 @@ def _get_workers_groups_with_deletes( worker_groups_with_finished_deletes = ( worker_groups_with_deletes - worker_groups_with_pending_deletes ) - return worker_groups_with_pending_deletes, worker_groups_with_finished_deletes + return ( + worker_groups_with_pending_deletes, + worker_groups_with_finished_deletes, + worker_to_delete_set, + ) def _fetch_instances(self) -> Dict[CloudInstanceId, CloudInstance]: """ diff --git a/python/ray/autoscaler/v2/tests/test_node_provider.py b/python/ray/autoscaler/v2/tests/test_node_provider.py index 02d84e376b8d..47483d3f61fa 100644 --- a/python/ray/autoscaler/v2/tests/test_node_provider.py +++ b/python/ray/autoscaler/v2/tests/test_node_provider.py @@ -492,6 +492,124 @@ def test_pending_deletes(self): }, ] + def test_increase_min_replicas_to_scale_up(self): + # Simulate the case where users manually increase the `minReplicas` field + # from 0 to $num_pods. KubeRay will create $num_pods worker Pods to meet the new + # `minReplicas`, even though the `replicas` field is still 0. + small_group = "small-group" + num_pods = 0 + assert ( + self.mock_client._ray_cluster["spec"]["workerGroupSpecs"][0]["groupName"] + == small_group + ) + for pod in self.mock_client._pod_list["items"]: + if pod["metadata"]["labels"]["ray.io/group"] == small_group: + num_pods += 1 + assert num_pods > 0 + self.mock_client._ray_cluster["spec"]["workerGroupSpecs"][0]["replicas"] = 0 + self.mock_client._ray_cluster["spec"]["workerGroupSpecs"][0][ + "minReplicas" + ] = num_pods + + # Launching a new node and `replicas` should be + # `max(replicas, minReplicas) + 1`. + self.provider.launch(shape={small_group: 1}, request_id="launch-1") + patches = self.mock_client.get_patches( + f"rayclusters/{self.provider._cluster_name}" + ) + assert len(patches) == 1 + assert patches[0] == { + "op": "replace", + "path": "/spec/workerGroupSpecs/0/replicas", + "value": num_pods + 1, + } + + def test_inconsistent_pods_raycr_scale_up(self): + """ + Test the case where the cluster state has not yet reached the desired state. + Specifically, the replicas field in the RayCluster CR does not match the actual + number of Pods. 
+ """ + # Check the assumptions of the test + small_group = "small-group" + num_pods = 0 + for pod in self.mock_client._pod_list["items"]: + if pod["metadata"]["labels"]["ray.io/group"] == small_group: + num_pods += 1 + + assert ( + self.mock_client._ray_cluster["spec"]["workerGroupSpecs"][0]["groupName"] + == small_group + ) + desired_replicas = num_pods + 1 + self.mock_client._ray_cluster["spec"]["workerGroupSpecs"][0][ + "replicas" + ] = desired_replicas + + # Launch a new node. The replicas field should be incremented by 1, even though + # the cluster state has not yet reached the goal state. + launch_request = {"small-group": 1} + self.provider.launch(shape=launch_request, request_id="launch-1") + + patches = self.mock_client.get_patches( + f"rayclusters/{self.provider._cluster_name}" + ) + assert len(patches) == 1 + assert patches[0] == { + "op": "replace", + "path": "/spec/workerGroupSpecs/0/replicas", + "value": desired_replicas + 1, + } + + def test_inconsistent_pods_raycr_scale_down(self): + """ + Test the case where the cluster state has not yet reached the desired state. + Specifically, the replicas field in the RayCluster CR does not match the actual + number of Pods. + """ + # Check the assumptions of the test + small_group = "small-group" + num_pods = 0 + pod_to_delete = None + for pod in self.mock_client._pod_list["items"]: + if pod["metadata"]["labels"]["ray.io/group"] == small_group: + num_pods += 1 + pod_to_delete = pod["metadata"]["name"] + assert pod_to_delete is not None + + assert ( + self.mock_client._ray_cluster["spec"]["workerGroupSpecs"][0]["groupName"] + == small_group + ) + desired_replicas = num_pods + 1 + self.mock_client._ray_cluster["spec"]["workerGroupSpecs"][0][ + "replicas" + ] = desired_replicas + + # Terminate a node. The replicas field should be decremented by 1, even though + # the cluster state has not yet reached the goal state. + self.provider.terminate(ids=[pod_to_delete], request_id="term-1") + patches = self.mock_client.get_patches( + f"rayclusters/{self.provider._cluster_name}" + ) + assert len(patches) == 2 + assert patches == [ + { + "op": "replace", + "path": "/spec/workerGroupSpecs/0/replicas", + "value": desired_replicas - 1, + }, + { + "op": "replace", + "path": "/spec/workerGroupSpecs/0/scaleStrategy", + "value": { + "workersToDelete": [ + pod_to_delete, + ] + }, + }, + ] + if __name__ == "__main__": if os.environ.get("PARALLEL_CI"): diff --git a/python/ray/tests/kuberay/test_autoscaling_config.py b/python/ray/tests/kuberay/test_autoscaling_config.py index 6ae9ab7fc4fe..12b5c239f4a6 100644 --- a/python/ray/tests/kuberay/test_autoscaling_config.py +++ b/python/ray/tests/kuberay/test_autoscaling_config.py @@ -82,7 +82,7 @@ def _get_basic_autoscaling_config() -> dict: }, "small-group": { "max_workers": 300, - "min_workers": 1, + "min_workers": 0, "node_config": {}, "resources": { "CPU": 1, @@ -95,7 +95,7 @@ def _get_basic_autoscaling_config() -> dict: # and modified max_workers. "gpu-group": { "max_workers": 200, - "min_workers": 1, + "min_workers": 0, "node_config": {}, "resources": { "CPU": 1, @@ -109,7 +109,7 @@ def _get_basic_autoscaling_config() -> dict: # and modified max_workers and node_config. 
"tpu-group": { "max_workers": 4, - "min_workers": 1, + "min_workers": 0, "node_config": {}, "resources": { "CPU": 1, From 5896b3fa4c7e07a7d7cfc8f248bd8d50802cbb4c Mon Sep 17 00:00:00 2001 From: dentiny Date: Tue, 26 Nov 2024 12:20:19 -0800 Subject: [PATCH 097/107] [core] Fix error info timestamp unit (#48763) Addresses https://github.com/ray-project/ray/issues/48760 Reading through the initial PR (https://github.com/ray-project/ray/pull/2256/files), the timestamp unit for error information is actually seconds. After refactor PR (https://github.com/ray-project/ray/pull/5024), it's refactored from python class into protobuf. In this PR, I tried to use `absl::Time` instead of integer as argument, to avoid confusion on units. --------- Signed-off-by: dentiny Signed-off-by: hjiang Co-authored-by: Ruiyang Wang <56065503+rynewang@users.noreply.github.com> --- src/ray/gcs/gcs_server/gcs_actor_manager.cc | 7 +-- src/ray/gcs/gcs_server/gcs_node_manager.cc | 4 +- src/ray/gcs/pb_util.h | 23 ++-------- src/ray/gcs/pb_utils.cc | 47 +++++++++++++++++++++ src/ray/protobuf/gcs.proto | 2 + src/ray/raylet/node_manager.cc | 14 +++--- src/ray/raylet/worker_pool.cc | 38 ++++++++--------- src/ray/raylet/worker_pool.h | 14 +++--- src/ray/raylet/worker_pool_test.cc | 17 ++++---- 9 files changed, 100 insertions(+), 66 deletions(-) create mode 100644 src/ray/gcs/pb_utils.cc diff --git a/src/ray/gcs/gcs_server/gcs_actor_manager.cc b/src/ray/gcs/gcs_server/gcs_actor_manager.cc index aa609bacb445..8bf3f3d484f1 100644 --- a/src/ray/gcs/gcs_server/gcs_actor_manager.cc +++ b/src/ray/gcs/gcs_server/gcs_actor_manager.cc @@ -749,11 +749,8 @@ Status GcsActorManager::RegisterActor(const ray::rpc::RegisterActorRequest &requ "explicitly connect to this namespace with ray.init(namespace=\"" << actor->GetRayNamespace() << "\", ...)"; - auto error_data_ptr = - gcs::CreateErrorTableData("detached_actor_anonymous_namespace", - stream.str(), - absl::GetCurrentTimeNanos(), - job_id); + auto error_data_ptr = gcs::CreateErrorTableData( + "detached_actor_anonymous_namespace", stream.str(), absl::Now(), job_id); RAY_LOG(WARNING) << error_data_ptr->SerializeAsString(); RAY_CHECK_OK( diff --git a/src/ray/gcs/gcs_server/gcs_node_manager.cc b/src/ray/gcs/gcs_server/gcs_node_manager.cc index c57e8ea2fd08..15aa488cb65f 100644 --- a/src/ray/gcs/gcs_server/gcs_node_manager.cc +++ b/src/ray/gcs/gcs_server/gcs_node_manager.cc @@ -392,8 +392,8 @@ std::shared_ptr GcsNodeManager::RemoveNode( .WithField("ip", removed_node->node_manager_address()) << error_message.str(); RAY_LOG(WARNING) << error_message.str(); - auto error_data_ptr = - gcs::CreateErrorTableData(type, error_message.str(), current_time_ms()); + auto error_data_ptr = gcs::CreateErrorTableData( + type, error_message.str(), absl::FromUnixMillis(current_time_ms())); RAY_CHECK_OK(gcs_publisher_->PublishError(node_id.Hex(), *error_data_ptr, nullptr)); } diff --git a/src/ray/gcs/pb_util.h b/src/ray/gcs/pb_util.h index cb3c518072b2..0bdd056b0b8d 100644 --- a/src/ray/gcs/pb_util.h +++ b/src/ray/gcs/pb_util.h @@ -16,6 +16,7 @@ #include +#include "absl/time/time.h" #include "ray/common/constants.h" #include "ray/common/id.h" #include "ray/common/ray_config.h" @@ -61,27 +62,11 @@ inline std::shared_ptr CreateJobTableData( } /// Helper function to produce error table data. 
-inline std::shared_ptr CreateErrorTableData( +std::shared_ptr CreateErrorTableData( const std::string &error_type, const std::string &error_msg, - double timestamp, - const JobID &job_id = JobID::Nil()) { - uint32_t max_error_msg_size_bytes = RayConfig::instance().max_error_msg_size_bytes(); - auto error_info_ptr = std::make_shared(); - error_info_ptr->set_type(error_type); - if (error_msg.length() > max_error_msg_size_bytes) { - std::ostringstream stream; - stream << "The message size exceeds " << std::to_string(max_error_msg_size_bytes) - << " bytes. Find the full log from the log files. Here is abstract: " - << error_msg.substr(0, max_error_msg_size_bytes); - error_info_ptr->set_error_message(stream.str()); - } else { - error_info_ptr->set_error_message(error_msg); - } - error_info_ptr->set_timestamp(timestamp); - error_info_ptr->set_job_id(job_id.Binary()); - return error_info_ptr; -} + absl::Time timestamp, + const JobID &job_id = JobID::Nil()); /// Helper function to produce worker failure data. inline std::shared_ptr CreateWorkerFailureData( diff --git a/src/ray/gcs/pb_utils.cc b/src/ray/gcs/pb_utils.cc new file mode 100644 index 000000000000..9330f80c27ce --- /dev/null +++ b/src/ray/gcs/pb_utils.cc @@ -0,0 +1,47 @@ +// Copyright 2024 The Ray Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// TODO(hjiang): Move all functions from `pb_utils.h` to this implementation file. + +#include + +#include "absl/strings/str_format.h" +#include "ray/gcs/pb_util.h" + +namespace ray::gcs { + +std::shared_ptr CreateErrorTableData( + const std::string &error_type, + const std::string &error_msg, + absl::Time timestamp, + const JobID &job_id) { + uint32_t max_error_msg_size_bytes = RayConfig::instance().max_error_msg_size_bytes(); + auto error_info_ptr = std::make_shared(); + error_info_ptr->set_type(error_type); + if (error_msg.length() > max_error_msg_size_bytes) { + std::string formatted_error_message = absl::StrFormat( + "The message size exceeds %d bytes. Find the full log from the log files. Here " + "is abstract: %s", + max_error_msg_size_bytes, + std::string_view{error_msg}.substr(0, max_error_msg_size_bytes)); + error_info_ptr->set_error_message(std::move(formatted_error_message)); + } else { + error_info_ptr->set_error_message(error_msg); + } + error_info_ptr->set_timestamp(absl::ToUnixMillis(timestamp)); + error_info_ptr->set_job_id(job_id.Binary()); + return error_info_ptr; +} + +} // namespace ray::gcs diff --git a/src/ray/protobuf/gcs.proto b/src/ray/protobuf/gcs.proto index b34f5bd9f39b..2d0ff6dfac4a 100644 --- a/src/ray/protobuf/gcs.proto +++ b/src/ray/protobuf/gcs.proto @@ -165,6 +165,8 @@ message ErrorTableData { // The error message. string error_message = 3; // The timestamp of the error message. + // Unit: millisecond. + // TODO(hjiang): Update field naming from `timestamp` to `timestamp_millisec`. 
double timestamp = 4; } diff --git a/src/ray/raylet/node_manager.cc b/src/ray/raylet/node_manager.cc index 879edff0bb6c..18384711de58 100644 --- a/src/ray/raylet/node_manager.cc +++ b/src/ray/raylet/node_manager.cc @@ -143,7 +143,7 @@ NodeManager::NodeManager( /*starting_worker_timeout_callback=*/ [this] { cluster_task_manager_->ScheduleAndDispatchTasks(); }, config.ray_debugger_external, - /*get_time=*/[]() { return absl::GetCurrentTimeNanos() / 1e6; }), + /*get_time=*/[]() { return absl::Now(); }), client_call_manager_(io_service), worker_rpc_pool_(client_call_manager_), core_worker_subscriber_(std::make_unique( @@ -1562,8 +1562,8 @@ void NodeManager::DisconnectClient(const std::shared_ptr &clie .WithField("node_id", self_node_id_.Hex()) .WithField("job_id", worker->GetAssignedJobId().Hex()) << error_message_str; - auto error_data_ptr = - gcs::CreateErrorTableData(type, error_message_str, current_time_ms(), job_id); + auto error_data_ptr = gcs::CreateErrorTableData( + type, error_message_str, absl::FromUnixMillis(current_time_ms()), job_id); RAY_CHECK_OK(gcs_client_->Errors().AsyncReportJobError(error_data_ptr, nullptr)); } } @@ -1762,9 +1762,11 @@ void NodeManager::ProcessPushErrorRequestMessage(const uint8_t *message_data) { auto const &type = string_from_flatbuf(*message->type()); auto const &error_message = string_from_flatbuf(*message->error_message()); + // TODO(hjiang): Figure out what's the unit for `PushErrorRequest`. double timestamp = message->timestamp(); JobID job_id = from_flatbuf(*message->job_id()); - auto error_data_ptr = gcs::CreateErrorTableData(type, error_message, timestamp, job_id); + auto error_data_ptr = gcs::CreateErrorTableData( + type, error_message, absl::FromUnixMillis(timestamp), job_id); RAY_CHECK_OK(gcs_client_->Errors().AsyncReportJobError(error_data_ptr, nullptr)); } @@ -2126,8 +2128,8 @@ void NodeManager::MarkObjectsAsFailed( << " object may hang forever."; std::string error_message = stream.str(); RAY_LOG(ERROR) << error_message; - auto error_data_ptr = - gcs::CreateErrorTableData("task", error_message, current_time_ms(), job_id); + auto error_data_ptr = gcs::CreateErrorTableData( + "task", error_message, absl::FromUnixMillis(current_time_ms()), job_id); RAY_CHECK_OK(gcs_client_->Errors().AsyncReportJobError(error_data_ptr, nullptr)); } } diff --git a/src/ray/raylet/worker_pool.cc b/src/ray/raylet/worker_pool.cc index eda893e8efbe..5964063a3205 100644 --- a/src/ray/raylet/worker_pool.cc +++ b/src/ray/raylet/worker_pool.cc @@ -89,10 +89,10 @@ WorkerPool::WorkerPool(instrumented_io_context &io_service, const std::vector &worker_ports, std::shared_ptr gcs_client, const WorkerCommandMap &worker_commands, - const std::string &native_library_path, + std::string native_library_path, std::function starting_worker_timeout_callback, int ray_debugger_external, - const std::function get_time) + std::function get_time) : worker_startup_token_counter_(0), io_service_(&io_service), node_id_(node_id), @@ -105,15 +105,15 @@ WorkerPool::WorkerPool(instrumented_io_context &io_service, RayConfig::instance().worker_maximum_startup_concurrency() : maximum_startup_concurrency), gcs_client_(std::move(gcs_client)), - native_library_path_(native_library_path), - starting_worker_timeout_callback_(starting_worker_timeout_callback), + native_library_path_(std::move(native_library_path)), + starting_worker_timeout_callback_(std::move(starting_worker_timeout_callback)), ray_debugger_external(ray_debugger_external), first_job_registered_python_worker_count_(0), 
first_job_driver_wait_num_python_workers_( std::min(num_prestarted_python_workers, maximum_startup_concurrency_)), num_prestart_python_workers(num_prestarted_python_workers), periodical_runner_(io_service), - get_time_(get_time) { + get_time_(std::move(get_time)) { RAY_CHECK_GT(maximum_startup_concurrency_, 0); // We need to record so that the metric exists. This way, we report that 0 // processes have started before a task runs on the node (as opposed to the @@ -1043,7 +1043,7 @@ void WorkerPool::PushWorker(const std::shared_ptr &worker) { } void WorkerPool::TryKillingIdleWorkers() { - int64_t now = get_time_(); + const auto now = get_time_(); // Filter out all idle workers that are already dead and/or associated with // jobs that have already finished. @@ -1056,14 +1056,14 @@ void WorkerPool::TryKillingIdleWorkers() { } const auto &job_id = idle_worker->GetAssignedJobId(); - if (finished_jobs_.count(job_id) > 0) { + if (finished_jobs_.contains(job_id)) { // The job has finished, so we should kill the worker immediately. KillIdleWorker(idle_worker, it->second); it = idle_of_all_languages_.erase(it); } else { - if (it->second == -1 || - now - it->second > - RayConfig::instance().idle_worker_killing_time_threshold_ms()) { + if (now - it->second > + absl::Milliseconds( + RayConfig::instance().idle_worker_killing_time_threshold_ms())) { // The job has not yet finished and the worker has been idle for longer // than the timeout. num_killable_idle_workers++; @@ -1085,9 +1085,9 @@ void WorkerPool::TryKillingIdleWorkers() { auto it = idle_of_all_languages_.begin(); while (num_killable_idle_workers > num_desired_idle_workers && it != idle_of_all_languages_.end()) { - if (it->second == -1 || - now - it->second > - RayConfig::instance().idle_worker_killing_time_threshold_ms()) { + if (now - it->second > + absl::Milliseconds( + RayConfig::instance().idle_worker_killing_time_threshold_ms())) { RAY_LOG(DEBUG) << "Number of idle workers " << num_killable_idle_workers << " is larger than the number of desired workers " << num_desired_idle_workers << " killing idle worker with PID " @@ -1102,7 +1102,7 @@ void WorkerPool::TryKillingIdleWorkers() { } void WorkerPool::KillIdleWorker(std::shared_ptr idle_worker, - int64_t last_time_used_ms) { + absl::Time last_time_used) { // To avoid object lost issue caused by forcibly killing, send an RPC request to the // worker to allow it to do cleanup before exiting. We kill it anyway if the driver // is already exited. @@ -1122,8 +1122,8 @@ void WorkerPool::KillIdleWorker(std::shared_ptr idle_worker, } rpc_client->Exit( request, - [this, idle_worker, last_time_used_ms](const ray::Status &status, - const rpc::ExitReply &r) { + [this, idle_worker, last_time_used](const ray::Status &status, + const rpc::ExitReply &r) { RAY_CHECK(pending_exit_idle_workers_.erase(idle_worker->WorkerId())); if (!status.ok()) { RAY_LOG(ERROR) << "Failed to send exit request: " << status.ToString(); @@ -1149,8 +1149,7 @@ void WorkerPool::KillIdleWorker(std::shared_ptr idle_worker, // kill the worker (e.g., when the worker owns the object). Without this, // if the first N workers own objects, it can't kill idle workers that are // >= N+1. 
- idle_of_all_languages_.push_back( - std::make_pair(idle_worker, last_time_used_ms)); + idle_of_all_languages_.emplace_back(idle_worker, last_time_used); } }); } @@ -1311,7 +1310,7 @@ void WorkerPool::PopWorker(const TaskSpecification &task_spec, auto worker_fits_for_task_fn = [this, &pop_worker_request, &skip_reason_count]( - const std::pair, int64_t> &pair) -> bool { + const std::pair, absl::Time> &pair) -> bool { const auto &worker = pair.first; WorkerUnfitForTaskReason reason = WorkerFitsForTask(*worker, *pop_worker_request); if (reason == WorkerUnfitForTaskReason::NONE) { @@ -1544,6 +1543,7 @@ void WorkerPool::WarnAboutSize() { << "some discussion of workarounds)."; std::string warning_message_str = warning_message.str(); RAY_LOG(WARNING) << warning_message_str; + auto error_data_ptr = gcs::CreateErrorTableData( "worker_pool_large", warning_message_str, get_time_()); RAY_CHECK_OK(gcs_client_->Errors().AsyncReportJobError(error_data_ptr, nullptr)); diff --git a/src/ray/raylet/worker_pool.h b/src/ray/raylet/worker_pool.h index ef2e1e048635..3d7f456f82cb 100644 --- a/src/ray/raylet/worker_pool.h +++ b/src/ray/raylet/worker_pool.h @@ -25,6 +25,7 @@ #include #include +#include "absl/time/time.h" #include "ray/common/asio/instrumented_io_context.h" #include "ray/common/asio/periodical_runner.h" #include "ray/common/client_connection.h" @@ -200,7 +201,7 @@ class WorkerPool : public WorkerPoolInterface, public IOWorkerPoolInterface { /// it times out to start a worker. /// \param ray_debugger_external Ray debugger in workers will be started in a way /// that they are accessible from outside the node. - /// \param get_time A callback to get the current time. + /// \param get_time A callback to get the current time in milliseconds. WorkerPool(instrumented_io_context &io_service, const NodeID node_id, const std::string node_address, @@ -212,10 +213,10 @@ class WorkerPool : public WorkerPoolInterface, public IOWorkerPoolInterface { const std::vector &worker_ports, std::shared_ptr gcs_client, const WorkerCommandMap &worker_commands, - const std::string &native_library_path, + std::string native_library_path, std::function starting_worker_timeout_callback, int ray_debugger_external, - const std::function get_time); + std::function get_time); /// Destructor responsible for freeing a set of workers owned by this class. virtual ~WorkerPool() override; @@ -472,7 +473,7 @@ class WorkerPool : public WorkerPoolInterface, public IOWorkerPoolInterface { /// TODO(scv119): replace dynamic options by runtime_env. const std::vector &LookupWorkerDynamicOptions(StartupToken token) const; - void KillIdleWorker(std::shared_ptr worker, int64_t last_time_used_ms); + void KillIdleWorker(std::shared_ptr worker, absl::Time last_time_used); /// Gloabl startup token variable. Incremented once assigned /// to a worker process and is added to @@ -587,7 +588,8 @@ class WorkerPool : public WorkerPoolInterface, public IOWorkerPoolInterface { /// The pool of idle non-actor workers of all languages. This is used to kill idle /// workers in FIFO order. The second element of std::pair is the time a worker becomes /// idle. - std::list, int64_t>> idle_of_all_languages_; + std::list, absl::Time>> + idle_of_all_languages_; private: /// A helper function that returns the reference of the pool state @@ -792,7 +794,7 @@ class WorkerPool : public WorkerPoolInterface, public IOWorkerPoolInterface { PeriodicalRunner periodical_runner_; /// A callback to get the current time. 
- const std::function get_time_; + const std::function get_time_; /// Runtime env manager client. std::shared_ptr runtime_env_agent_client_; /// Stats diff --git a/src/ray/raylet/worker_pool_test.cc b/src/ray/raylet/worker_pool_test.cc index 0da18903fdfe..022c5055522a 100644 --- a/src/ray/raylet/worker_pool_test.cc +++ b/src/ray/raylet/worker_pool_test.cc @@ -14,8 +14,10 @@ #include "ray/raylet/worker_pool.h" -#include "gmock/gmock.h" -#include "gtest/gtest.h" +#include +#include + +#include "absl/time/time.h" #include "nlohmann/json.hpp" #include "ray/common/asio/asio_util.h" #include "ray/common/asio/instrumented_io_context.h" @@ -26,9 +28,8 @@ #include "src/ray/protobuf/runtime_env_agent.pb.h" using json = nlohmann::json; -namespace ray { -namespace raylet { +namespace ray::raylet { int MAXIMUM_STARTUP_CONCURRENCY = 15; int PYTHON_PRESTART_WORKERS = 15; @@ -140,7 +141,7 @@ class WorkerPoolMock : public WorkerPool { "", []() {}, 0, - [this]() { return current_time_ms_; }), + [this]() { return absl::FromUnixMillis(current_time_ms_); }), last_worker_process_(), instrumented_io_service_(io_service), error_message_type_(1), @@ -241,7 +242,7 @@ class WorkerPoolMock : public WorkerPool { size_t GetIdleWorkerSize() { return idle_of_all_languages_.size(); } - std::list, int64_t>> &GetIdleWorkers() { + std::list, absl::Time>> &GetIdleWorkers() { return idle_of_all_languages_; } @@ -2151,9 +2152,7 @@ TEST_F(WorkerPoolTest, RegisterFirstJavaDriverCallbackImmediately) { ASSERT_TRUE(callback_called); } -} // namespace raylet - -} // namespace ray +} // namespace ray::raylet int main(int argc, char **argv) { InitShutdownRAII ray_log_shutdown_raii( From 62d59ba60823ceb7b2fb9a121c40ff58042be166 Mon Sep 17 00:00:00 2001 From: Alexey Kudinkin Date: Tue, 26 Nov 2024 21:53:53 -0800 Subject: [PATCH 098/107] [Data] Rebased PA overflow tests onto standalone Ray cluster (#48887) Tidying up Signed-off-by: Alexey Kudinkin --- python/ray/data/tests/test_arrow_block.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/python/ray/data/tests/test_arrow_block.py b/python/ray/data/tests/test_arrow_block.py index 22fe9f7ef7a0..00ed13363f20 100644 --- a/python/ray/data/tests/test_arrow_block.py +++ b/python/ray/data/tests/test_arrow_block.py @@ -91,7 +91,7 @@ def binary_dataset_single_file_gt_2gb(): ], ) def test_single_row_gt_2gb( - ray_start_regular_shared, + ray_start_regular, restore_data_context, binary_dataset_single_file_gt_2gb, col_name, @@ -124,7 +124,7 @@ def _id(row): ], ) def test_arrow_batch_gt_2gb( - ray_start_regular_shared, + ray_start_regular, parquet_dataset_single_column_gt_2gb, restore_data_context, op, @@ -158,10 +158,10 @@ def _id(x): print( f">>> Batch:\n" f"------\n" - "Column: 'id'" + "Column: 'id'\n" f"Values: {batch['id']}\n" f"------\n" - "Column: 'bin'" + "Column: 'bin'\n" f"Total: {total_binary_column_size / GiB} GiB\n" f"Values: {[str(v)[:3] + ' x ' + str(len(v)) for v in batch['bin']]}\n" ) From 3d7fc8f60aea371cba4af7a5ac021222a88763d1 Mon Sep 17 00:00:00 2001 From: Blocka Date: Wed, 27 Nov 2024 14:28:57 +0800 Subject: [PATCH 099/107] [Doc] Logging: Specify the Helm chart version in the Persist KubeRay Operator Logs doc (#48937) Signed-off-by: win5923 --- .../kubernetes/user-guides/persist-kuberay-operator-logs.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/cluster/kubernetes/user-guides/persist-kuberay-operator-logs.md b/doc/source/cluster/kubernetes/user-guides/persist-kuberay-operator-logs.md index 496afd34e4a5..3cadfbb81938 
100644 --- a/doc/source/cluster/kubernetes/user-guides/persist-kuberay-operator-logs.md +++ b/doc/source/cluster/kubernetes/user-guides/persist-kuberay-operator-logs.md @@ -19,7 +19,7 @@ helm repo add grafana https://grafana.github.io/helm-charts helm repo update # Install Loki with single replica mode -helm install loki grafana/loki -f https://raw.githubusercontent.com/grafana/loki/refs/heads/main/production/helm/loki/single-binary-values.yaml +helm install loki grafana/loki --version 6.21.0 -f https://raw.githubusercontent.com/grafana/loki/refs/heads/main/production/helm/loki/single-binary-values.yaml ``` ### Configure log processing @@ -48,7 +48,7 @@ Deploy the Fluent Bit deployment with the [Helm chart repository](https://github helm repo add fluent https://fluent.github.io/helm-charts helm repo update -helm install fluent-bit fluent/fluent-bit -f fluent-bit-config.yaml +helm install fluent-bit fluent/fluent-bit --version 0.48.2 -f fluent-bit-config.yaml ``` ### Install the KubeRay Operator @@ -75,7 +75,7 @@ Deploy the Grafana deployment with the [Helm chart repository](https://github.co helm repo add grafana https://grafana.github.io/helm-charts helm repo update -helm install grafana grafana/grafana -f datasource-config.yaml +helm install grafana grafana/grafana --version 8.6.2 -f datasource-config.yaml ``` ### Check the Grafana Dashboard From 3bd3a02449f9a90443a7a35e63ab962da2f69e81 Mon Sep 17 00:00:00 2001 From: Balaji Veeramani Date: Wed, 27 Nov 2024 11:45:17 -0600 Subject: [PATCH 100/107] [Data] Clarify schema validation error (#48882) ```python ray.data.range(1).groupby("does_not_exist").count().materialize() ``` **Before** ``` ValueError: The column 'does_not_exist' does not exist in the schema 'Column Type ------ ---- id int64'. ``` **After** ``` ValueError: You specified the column 'does_not_exist', but there's no such column in the dataset. The dataset has columns: {'id'} ``` --------- Signed-off-by: Balaji Veeramani --- .../_internal/planner/exchange/sort_task_spec.py | 5 +++-- python/ray/data/tests/test_execution_optimizer.py | 15 ++++----------- 2 files changed, 7 insertions(+), 13 deletions(-) diff --git a/python/ray/data/_internal/planner/exchange/sort_task_spec.py b/python/ray/data/_internal/planner/exchange/sort_task_spec.py index 827c4a2c7a51..7c67b3dbdefe 100644 --- a/python/ray/data/_internal/planner/exchange/sort_task_spec.py +++ b/python/ray/data/_internal/planner/exchange/sort_task_spec.py @@ -81,8 +81,9 @@ def validate_schema(self, schema: Optional[Union[type, "pyarrow.lib.Schema"]]): for column in self._columns: if column not in schema_names_set: raise ValueError( - "The column '{}' does not exist in the " - "schema '{}'.".format(column, schema) + f"You specified the column '{column}', but there's no such " + "column in the dataset. 
The dataset has columns: " + f"{schema_names_set}" ) @property
diff --git a/python/ray/data/tests/test_execution_optimizer.py b/python/ray/data/tests/test_execution_optimizer.py index d657ce1c9d98..af7af855b187 100644 --- a/python/ray/data/tests/test_execution_optimizer.py +++ b/python/ray/data/tests/test_execution_optimizer.py
@@ -1145,9 +1145,7 @@ def test_sort_validate_keys(ray_start_regular_shared): assert extract_values("id", ds.sort("id").take_all()) == list(range(10)) invalid_col_name = "invalid_column" - with pytest.raises( - ValueError, match=f"The column '{invalid_col_name}' does not exist" - ): + with pytest.raises(ValueError, match="there's no such column in the dataset"): ds.sort(invalid_col_name).take_all() ds_named = ray.data.from_items(
@@ -1165,10 +1163,7 @@ assert [d["col1"] for d in r1] == [7, 5, 3, 1] assert [d["col2"] for d in r2] == [8, 6, 4, 2] - with pytest.raises( - ValueError, - match=f"The column '{invalid_col_name}' does not exist in the schema", - ): + with pytest.raises(ValueError, match="there's no such column in the dataset"): ds_named.sort(invalid_col_name).take_all()
@@ -1279,9 +1274,7 @@ def test_aggregate_e2e(ray_start_regular_shared, use_push_based_shuffle): def test_aggregate_validate_keys(ray_start_regular_shared): ds = ray.data.range(10) invalid_col_name = "invalid_column" - with pytest.raises( - ValueError, match=f"The column '{invalid_col_name}' does not exist" - ): + with pytest.raises(ValueError): ds.groupby(invalid_col_name).count() ds_named = ray.data.from_items(
@@ -1308,7 +1301,7 @@ with pytest.raises( ValueError, - match=f"The column '{invalid_col_name}' does not exist in the schema", + match="there's no such column in the dataset", ): ds_named.groupby(invalid_col_name).count()
From 9c4f2f59633ee9816f50d7350c0880aaf9a4e4b0 Mon Sep 17 00:00:00 2001 From: Kit Lee <7000003+wingkitlee0@users.noreply.github.com> Date: Wed, 27 Nov 2024 20:37:55 -0500 Subject: [PATCH 101/107] [data] fix partial function name parsing in map_groups (#48907)
There was an error in parsing the function name when a `partial` function was used with `groupby.map_groups`.
Signed-off-by: Kit Lee <7000003+wingkitlee0@users.noreply.github.com> --- python/ray/data/grouped_data.py | 6 ++++- python/ray/data/tests/test_all_to_all.py | 28 +++++++++++++++++++++++- 2 files changed, 32 insertions(+), 2 deletions(-)
diff --git a/python/ray/data/grouped_data.py b/python/ray/data/grouped_data.py index 8f7b7dde118d..427ea18b7bbf 100644 --- a/python/ray/data/grouped_data.py +++ b/python/ray/data/grouped_data.py
@@ -1,3 +1,4 @@ +from functools import partial from typing import Any, Dict, Iterable, List, Optional, Tuple, Union from ray.data._internal.aggregate import Count, Max, Mean, Min, Std, Sum
@@ -261,7 +262,10 @@ def wrapped_fn(batch, *args, **kwargs): # Change the name of the wrapped function so that users see the name of their # function rather than `wrapped_fn` in the progress bar. - wrapped_fn.__name__ = fn.__name__ + if isinstance(fn, partial): + wrapped_fn.__name__ = fn.func.__name__ + else: + wrapped_fn.__name__ = fn.__name__ # Note we set batch_size=None here, so it will use the entire block as a batch, # which ensures that each group will be contained within a batch in entirety.
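For context, a minimal sketch of why the `partial` branch above is needed (plain Python, not part of the diff): `functools.partial` objects do not define `__name__`, so the wrapped callable's name has to be read from their `.func` attribute.

```python
from functools import partial

def func(x, y):
    return x + y

wrapped = partial(func, y=5)

assert func.__name__ == "func"
# Accessing `wrapped.__name__` raises AttributeError, because partial
# objects do not carry a `__name__` of their own; the original callable
# is reachable via `.func`.
assert wrapped.func.__name__ == "func"
```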
diff --git a/python/ray/data/tests/test_all_to_all.py b/python/ray/data/tests/test_all_to_all.py index cf0cb8b2b2e7..a6b173383145 100644 --- a/python/ray/data/tests/test_all_to_all.py +++ b/python/ray/data/tests/test_all_to_all.py @@ -1167,7 +1167,6 @@ def test_groupby_map_groups_multicolumn( ray_start_regular_shared, ds_format, num_parts, use_push_based_shuffle ): # Test built-in count aggregation - print(f"Seeding RNG for test_groupby_arrow_count with: {RANDOM_SEED}") random.seed(RANDOM_SEED) xs = list(range(100)) random.shuffle(xs) @@ -1190,6 +1189,33 @@ def test_groupby_map_groups_multicolumn( ] +def test_groupby_map_groups_with_partial(): + """ + The partial function name should show up as + +- Sort + +- MapBatches(func) + """ + from functools import partial + + def func(x, y): + return {f"x_add_{y}": [len(x["id"]) + y]} + + df = pd.DataFrame({"id": list(range(100))}) + df["key"] = df["id"] % 5 + + ds = ray.data.from_pandas(df).groupby("key").map_groups(partial(func, y=5)) + result = ds.take_all() + + assert result == [ + {"x_add_5": 25}, + {"x_add_5": 25}, + {"x_add_5": 25}, + {"x_add_5": 25}, + {"x_add_5": 25}, + ] + assert "MapBatches(func)" in ds.__repr__() + + def test_random_block_order_schema(ray_start_regular_shared): df = pd.DataFrame({"a": np.random.rand(10), "b": np.random.rand(10)}) ds = ray.data.from_pandas(df).randomize_block_order() From c6493b686d9aef157634524c05410ec865f1eb29 Mon Sep 17 00:00:00 2001 From: dentiny Date: Wed, 27 Nov 2024 18:19:29 -0800 Subject: [PATCH 102/107] [core] Add shared lru cache (#48948) A shared lru cache is needed for my PR: https://github.com/ray-project/ray/pull/48928 Signed-off-by: hjiang --- src/ray/util/BUILD | 10 ++ src/ray/util/shared_lru.h | 207 ++++++++++++++++++++++++++ src/ray/util/tests/BUILD | 12 ++ src/ray/util/tests/shared_lru_test.cc | 76 ++++++++++ 4 files changed, 305 insertions(+) create mode 100644 src/ray/util/shared_lru.h create mode 100644 src/ray/util/tests/shared_lru_test.cc diff --git a/src/ray/util/BUILD b/src/ray/util/BUILD index 23d9f1e90150..87f8a57e8dea 100644 --- a/src/ray/util/BUILD +++ b/src/ray/util/BUILD @@ -55,3 +55,13 @@ cc_library( srcs = ["thread_checker.cc"], visibility = ["//visibility:public"], ) + +cc_library( + name = "shared_lru", + hdrs = ["shared_lru.h"], + visibility = ["//visibility:public"], + deps = [ + ":util", + "@com_google_absl//absl/container:flat_hash_map", + ], +) diff --git a/src/ray/util/shared_lru.h b/src/ray/util/shared_lru.h new file mode 100644 index 000000000000..8132e38b6f12 --- /dev/null +++ b/src/ray/util/shared_lru.h @@ -0,0 +1,207 @@ +// Copyright 2024 The Ray Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SharedLruCache is a LRU cache, with all entries shared, which means a single entry +// could be accessed by multiple getters. All values are wrapped with shared pointer to +// avoid copy at get operation, meanwhile also useful to maintain memory validity at any +// time. 
+// +// Example usage: +// SharedLruCache cache{cap}; +// // Put a key-value pair into cache. +// cache.Put("key", std::make_shared("val")); +// +// // Get a key-value pair from cache. +// auto val = cache.Get("key"); +// // Check and consume `val`. +// +// TODO(hjiang): +// 1. Add template arguments for key hash and key equal, to pass into absl::flat_hash_map. +// 2. Provide a key hash wrapper to save a copy. +// 3. flat hash map supports heterogeneous lookup, expose `KeyLike` templated interface. +// 4. Add a `GetOrCreate` interface, which takes factory function to creation value. +// 5. For thread-safe cache, add a sharded container wrapper to reduce lock contention. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/container/flat_hash_map.h" +#include "src/ray/util/logging.h" + +namespace ray::utils::container { + +template +class SharedLruCache final { + public: + using key_type = Key; + using mapped_type = Val; + + // A `max_entries` of 0 means that there is no limit on the number of entries + // in the cache. + explicit SharedLruCache(size_t max_entries) : max_entries_(max_entries) {} + + SharedLruCache(const SharedLruCache &) = delete; + SharedLruCache &operator=(const SharedLruCache &) = delete; + + ~SharedLruCache() = default; + + // Insert `value` with key `key`. This will replace any previous entry with + // the same key. + void Put(Key key, std::shared_ptr value) { + RAY_CHECK(value != nullptr); + auto iter = cache_.find(key); + if (iter != cache_.end()) { + lru_list_.splice(lru_list_.begin(), lru_list_, iter->second.lru_iterator); + iter->second.value = std::move(value); + return; + } + + lru_list_.emplace_front(key); + Entry new_entry{std::move(value), lru_list_.begin()}; + cache_[std::move(key)] = std::move(new_entry); + + if (max_entries_ > 0 && lru_list_.size() > max_entries_) { + const auto &stale_key = lru_list_.back(); + cache_.erase(stale_key); + lru_list_.pop_back(); + } + + RAY_CHECK_EQ(lru_list_.size(), cache_.size()); + } + + // Delete the entry with key `key`. Return true if the entry was found for + // `key`, false if the entry was not found. In both cases, there is no entry + // with key `key` existed after the call. + bool Delete(const Key &key) { + auto it = cache_.find(key); + if (it == cache_.end()) { + return false; + } + lru_list_.erase(it->second.lru_iterator); + cache_.erase(it); + return true; + } + + // Look up the entry with key `key`. Return nullptr if key doesn't exist. + std::shared_ptr Get(const Key &key) { + const auto cache_iter = cache_.find(key); + if (cache_iter == cache_.end()) { + return nullptr; + } + lru_list_.splice(lru_list_.begin(), lru_list_, cache_iter->second.lru_iterator); + return cache_iter->second.value; + } + + // Clear the cache. + void Clear() { + cache_.clear(); + lru_list_.clear(); + } + + // Accessors for cache parameters. + size_t max_entries() const { return max_entries_; } + + private: + struct Entry { + // The entry's value. + std::shared_ptr value; + + // A list iterator pointing to the entry's position in the LRU list. + typename std::list::iterator lru_iterator; + }; + + using EntryMap = absl::flat_hash_map; + + // The maximum number of entries in the cache. A value of 0 means there is no + // limit on entry count. + const size_t max_entries_; + + // Stores key-value pairs. + EntryMap cache_; + + // The LRU list of entries. The front of the list identifies the most + // recently accessed entry. 
+ std::list lru_list_; +}; + +// Same interfaces as `SharedLruCache`, but all cached values are +// `const`-specified to avoid concurrent updates. +template +using SharedLruConstCache = SharedLruCache; + +// Same interface and functionality as `SharedLruCache`, but thread-safe version. +template +class ThreadSafeSharedLruCache final { + public: + using key_type = Key; + using mapped_type = Val; + + // A `max_entries` of 0 means that there is no limit on the number of entries + // in the cache. + explicit ThreadSafeSharedLruCache(size_t max_entries) : cache_(max_entries) {} + + ThreadSafeSharedLruCache(const ThreadSafeSharedLruCache &) = delete; + ThreadSafeSharedLruCache &operator=(const ThreadSafeSharedLruCache &) = delete; + + ~ThreadSafeSharedLruCache() = default; + + // Insert `value` with key `key`. This will replace any previous entry with + // the same key. + void Put(Key key, std::shared_ptr value) { + std::lock_guard lck(mu_); + cache_.Put(std::move(key), std::move(value)); + } + + // Delete the entry with key `key`. Return true if the entry was found for + // `key`, false if the entry was not found. In both cases, there is no entry + // with key `key` existed after the call. + bool Delete(const Key &key) { + std::lock_guard lck(mu_); + return cache_.Delete(key); + } + + // Look up the entry with key `key`. Return std::nullopt if key doesn't exist. + // If found, return a copy for the value. + std::shared_ptr Get(const Key &key) { + std::lock_guard lck(mu_); + return cache_.Get(key); + } + + // Clear the cache. + void Clear() { + std::lock_guard lck(mu_); + cache_.Clear(); + } + + // Accessors for cache parameters. + size_t max_entries() const { return cache_.max_entries(); } + + private: + std::mutex mu_; + SharedLruCache cache_; +}; + +// Same interfaces as `SharedLruCache`, but all cached values are +// `const`-specified to avoid concurrent updates. +template +using ThreadSafeSharedLruConstCache = ThreadSafeSharedLruCache; + +} // namespace ray::utils::container diff --git a/src/ray/util/tests/BUILD b/src/ray/util/tests/BUILD index 2941d105cf91..b85c01f28ebf 100644 --- a/src/ray/util/tests/BUILD +++ b/src/ray/util/tests/BUILD @@ -194,3 +194,15 @@ cc_test( "@com_google_googletest//:gtest_main", ], ) + +cc_test( + name = "shared_lru_test", + srcs = ["shared_lru_test.cc"], + deps = [ + "//src/ray/util:shared_lru", + "@com_google_googletest//:gtest_main", + ], + size = "small", + copts = COPTS, + tags = ["team:core"], +) diff --git a/src/ray/util/tests/shared_lru_test.cc b/src/ray/util/tests/shared_lru_test.cc new file mode 100644 index 000000000000..7c47f4d1daf0 --- /dev/null +++ b/src/ray/util/tests/shared_lru_test.cc @@ -0,0 +1,76 @@ +// Copyright 2024 The Ray Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "src/ray/util/shared_lru.h" + +#include + +#include +#include + +namespace ray::utils::container { + +namespace { +constexpr size_t kTestCacheSz = 1; +} // namespace + +TEST(SharedLruCache, PutAndGet) { + ThreadSafeSharedLruCache cache{kTestCacheSz}; + + // No value initially. + auto val = cache.Get("1"); + EXPECT_EQ(val, nullptr); + + // Check put and get. + cache.Put("1", std::make_shared("1")); + val = cache.Get("1"); + EXPECT_NE(val, nullptr); + EXPECT_EQ(*val, "1"); + + // Check key eviction. + cache.Put("2", std::make_shared("2")); + val = cache.Get("1"); + EXPECT_EQ(val, nullptr); + val = cache.Get("2"); + EXPECT_NE(val, nullptr); + EXPECT_EQ(*val, "2"); + + // Check deletion. + EXPECT_FALSE(cache.Delete("1")); + EXPECT_TRUE(cache.Delete("2")); + val = cache.Get("2"); + EXPECT_EQ(val, nullptr); +} + +// Testing senario: push multiple same keys into the cache. +TEST(SharedLruCache, SameKeyTest) { + ThreadSafeSharedLruCache cache{2}; + + cache.Put(1, std::make_shared(1)); + auto val = cache.Get(1); + EXPECT_NE(val, nullptr); + EXPECT_EQ(1, *val); + + cache.Put(1, std::make_shared(2)); + val = cache.Get(1); + EXPECT_NE(val, nullptr); + EXPECT_EQ(2, *val); +} + +TEST(SharedLruConstCache, TypeAliasAssertion) { + static_assert( + std::is_same_v, SharedLruCache>); +} + +} // namespace ray::utils::container From c3325783eeb5cc43210318b6541aefd87e7c7b69 Mon Sep 17 00:00:00 2001 From: richo-anyscale Date: Wed, 27 Nov 2024 20:12:27 -0800 Subject: [PATCH 103/107] Document the publicity of the algolia key (#48971) Someone kindly reported this key as an exposure, but it is meant to be public. --- doc/source/conf.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/conf.py b/doc/source/conf.py index 98bed502ee8c..d8ae19629647 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -74,6 +74,8 @@ ] # Configuration for algolia +# Note: This API key grants read access to our indexes and is intended to be public. +# See https://www.algolia.com/doc/guides/security/api-keys/ for more information. docsearch_app_id = "LBHF0PABBL" docsearch_api_key = "6c42f30d9669d8e42f6fc92f44028596" docsearch_index_name = "docs-ray" From 19818cfc267365f84637e2b654f459e2ef41509e Mon Sep 17 00:00:00 2001 From: dentiny Date: Thu, 28 Nov 2024 06:53:47 -0800 Subject: [PATCH 104/107] Fix release yaml lint (#48982) It breaks my pre-push linter. Signed-off-by: hjiang --- release/release_data_tests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release/release_data_tests.yaml b/release/release_data_tests.yaml index 1a7adae918be..9ea55e27af8e 100644 --- a/release/release_data_tests.yaml +++ b/release/release_data_tests.yaml @@ -9,7 +9,7 @@ byod: type: gpu cluster_compute: multi_node_autoscaling_compute.yaml - + ############### # Reading tests ############### From 80329856295f5bf52929f2fa5c8ed0ad249d9369 Mon Sep 17 00:00:00 2001 From: Cindy Zhang Date: Sat, 30 Nov 2024 08:53:14 -0800 Subject: [PATCH 105/107] [serve] async shutdown for handles (#48972) ## Why are these changes needed? Add async shutdown for handles. 
--------- Signed-off-by: Cindy Zhang --- .../ray/serve/_private/local_testing_mode.py | 4 ++- python/ray/serve/_private/router.py | 8 +++--- python/ray/serve/handle.py | 27 ++++++++++--------- python/ray/serve/tests/test_handle_2.py | 25 +++++++++++++++++ python/ray/serve/tests/test_multiplex.py | 2 +- 5 files changed, 47 insertions(+), 19 deletions(-) diff --git a/python/ray/serve/_private/local_testing_mode.py b/python/ray/serve/_private/local_testing_mode.py index 84525625a938..af38c04c5e65 100644 --- a/python/ray/serve/_private/local_testing_mode.py +++ b/python/ray/serve/_private/local_testing_mode.py @@ -314,4 +314,6 @@ def generator_result_callback(item: Any): return noop_future def shutdown(self): - pass + noop_future = concurrent.futures.Future() + noop_future.set_result(None) + return noop_future diff --git a/python/ray/serve/_private/router.py b/python/ray/serve/_private/router.py index 9cd8c10f5f82..85d391c95d52 100644 --- a/python/ray/serve/_private/router.py +++ b/python/ray/serve/_private/router.py @@ -327,7 +327,7 @@ def assign_request( pass @abstractmethod - def shutdown(self): + def shutdown(self) -> concurrent.futures.Future: pass @@ -680,7 +680,7 @@ def assign_request( loop=self._asyncio_loop, ) - def shutdown(self): - asyncio.run_coroutine_threadsafe( + def shutdown(self) -> concurrent.futures.Future: + return asyncio.run_coroutine_threadsafe( self._asyncio_router.shutdown(), loop=self._asyncio_loop - ).result() + ) diff --git a/python/ray/serve/handle.py b/python/ray/serve/handle.py index 0eba1c5dc5ee..ead2e174948b 100644 --- a/python/ray/serve/handle.py +++ b/python/ray/serve/handle.py @@ -76,16 +76,6 @@ def __init__( extra={"log_to_stderr": False}, ) - def _get_or_create_router(self) -> Router: - if self._router is None: - self._router = self._create_router( - handle_id=self.handle_id, - deployment_id=self.deployment_id, - handle_options=self.init_options, - ) - - return self._router - @staticmethod def _gen_handle_tag(app_name: str, deployment_name: str, handle_id: str): if app_name: @@ -150,8 +140,13 @@ def _init(self, **kwargs): f"was initialized with {self.init_options}." 
) - self.init_options = create_init_handle_options(**kwargs) - self._get_or_create_router() + init_options = create_init_handle_options(**kwargs) + self._router = self._create_router( + handle_id=self.handle_id, + deployment_id=self.deployment_id, + handle_options=init_options, + ) + self.init_options = init_options # Record handle api telemetry when not in the proxy if ( @@ -209,7 +204,13 @@ def __getattr__(self, name): def shutdown(self): if self._router: - self._router.shutdown() + shutdown_future = self._router.shutdown() + shutdown_future.result() + + async def shutdown_async(self): + if self._router: + shutdown_future = self._router.shutdown() + await asyncio.wrap_future(shutdown_future) def __repr__(self): return f"{self.__class__.__name__}" f"(deployment='{self.deployment_name}')" diff --git a/python/ray/serve/tests/test_handle_2.py b/python/ray/serve/tests/test_handle_2.py index cc58f970f5b7..6b238d8211d9 100644 --- a/python/ray/serve/tests/test_handle_2.py +++ b/python/ray/serve/tests/test_handle_2.py @@ -472,5 +472,30 @@ async def _assert_one_waiter(): tasks = pending +def test_shutdown(serve_instance): + @serve.deployment + class Hi: + def __call__(self): + return "hi" + + h = serve.run(Hi.bind()) + assert h.remote().result() == "hi" + + h.shutdown() + + +@pytest.mark.asyncio +async def test_shutdown_async(serve_instance): + @serve.deployment + class Hi: + def __call__(self): + return "hi" + + h = serve.run(Hi.bind()) + assert await h.remote() == "hi" + + await h.shutdown_async() + + if __name__ == "__main__": sys.exit(pytest.main(["-v", "-s", __file__])) diff --git a/python/ray/serve/tests/test_multiplex.py b/python/ray/serve/tests/test_multiplex.py index 994605565fd5..1da243af2212 100644 --- a/python/ray/serve/tests/test_multiplex.py +++ b/python/ray/serve/tests/test_multiplex.py @@ -21,7 +21,7 @@ def _get_replica_scheduler(handle: DeploymentHandle) -> ReplicaScheduler: # TODO(edoakes): we shouldn't be reaching into private fields, but better # to isolate it to one place (this function). - return handle._get_or_create_router()._asyncio_router._replica_scheduler + return handle._router._asyncio_router._replica_scheduler @pytest.fixture() From f89aaf9ab7083dd4384804cda0f5e00b670fe2bb Mon Sep 17 00:00:00 2001 From: Artur Niederfahrenhorst Date: Mon, 2 Dec 2024 12:38:59 +0100 Subject: [PATCH 106/107] [RLlib] Add comments to example files for templates. (#48988) --- rllib/examples/envs/custom_gym_env.py | 8 ++++++++ rllib/tuned_examples/ppo/atari_ppo.py | 20 +++++++++++++------- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/rllib/examples/envs/custom_gym_env.py b/rllib/examples/envs/custom_gym_env.py index 01fa5ecc452f..2612575adb63 100644 --- a/rllib/examples/envs/custom_gym_env.py +++ b/rllib/examples/envs/custom_gym_env.py @@ -45,6 +45,8 @@ | 18.3034 | 28000 | 0.908918 | 12.9676 | +------------------+-------+----------+--------------------+ """ +# These tags allow extracting portions of this script on Anyscale. +# ws-template-imports-start import gymnasium as gym from gymnasium.spaces import Discrete, Box import numpy as np @@ -52,6 +54,8 @@ from typing import Optional +# ws-template-imports-end + from ray.rllib.utils.test_utils import ( add_rllib_example_script_args, run_rllib_example_script_experiment, @@ -71,6 +75,8 @@ ) +# These tags allow extracting portions of this script on Anyscale. +# ws-template-code-start class SimpleCorridor(gym.Env): """Example of a custom env in which the agent has to walk down a corridor. 
@@ -126,6 +132,8 @@ def step(self, action): ) +# ws-template-code-end + if __name__ == "__main__": args = parser.parse_args() diff --git a/rllib/tuned_examples/ppo/atari_ppo.py b/rllib/tuned_examples/ppo/atari_ppo.py index b4d881574f4e..c58c47898a1a 100644 --- a/rllib/tuned_examples/ppo/atari_ppo.py +++ b/rllib/tuned_examples/ppo/atari_ppo.py @@ -1,3 +1,5 @@ +# These tags allow extracting portions of this script on Anyscale. +# ws-template-imports-start import gymnasium as gym from ray import tune @@ -8,6 +10,7 @@ from ray.rllib.env.wrappers.atari_wrappers import wrap_atari_for_new_api_stack from ray.rllib.utils.test_utils import add_rllib_example_script_args +# ws-template-imports-end parser = add_rllib_example_script_args( default_reward=float("inf"), @@ -22,7 +25,12 @@ # and (if needed) use their values to set up `config` below. args = parser.parse_args() +NUM_LEARNERS = args.num_learners or 1 +ENV = args.env + +# These tags allow extracting portions of this script on Anyscale. +# ws-template-code-start def _make_env_to_module_connector(env): return FrameStackingEnvToModule(num_frames=4) @@ -35,7 +43,7 @@ def _make_learner_connector(input_observation_space, input_action_space): # We would like our frame stacking connector to do this job. def _env_creator(cfg): return wrap_atari_for_new_api_stack( - gym.make(args.env, **cfg, render_mode="rgb_array"), + gym.make(ENV, **cfg, render_mode="rgb_array"), # Perform frame-stacking through ConnectorV2 API. framestack=None, ) @@ -43,7 +51,6 @@ def _env_creator(cfg): tune.register_env("env", _env_creator) - config = ( PPOConfig() .environment( @@ -57,20 +64,19 @@ def _env_creator(cfg): clip_rewards=True, ) .env_runners( - # num_envs_per_env_runner=5, # 5 on old yaml example env_to_module_connector=_make_env_to_module_connector, ) .training( learner_connector=_make_learner_connector, - train_batch_size_per_learner=4000, # 5000 on old yaml example - minibatch_size=128, # 500 on old yaml example + train_batch_size_per_learner=4000, + minibatch_size=128, lambda_=0.95, kl_coeff=0.5, clip_param=0.1, vf_clip_param=10.0, entropy_coeff=0.01, num_epochs=10, - lr=0.00015 * (args.num_learners or 1), + lr=0.00015 * NUM_LEARNERS, grad_clip=100.0, grad_clip_by="global_norm", ) @@ -83,7 +89,7 @@ def _env_creator(cfg): ), ) ) - +# ws-template-code-end if __name__ == "__main__": from ray.rllib.utils.test_utils import run_rllib_example_script_experiment From 0e9f3d13bf59cc1d3af2b9c2d5f85ff104c2ddbd Mon Sep 17 00:00:00 2001 From: simonsays1980 Date: Mon, 2 Dec 2024 15:31:39 +0100 Subject: [PATCH 107/107] [RLlib; Offline RL] Fix small memory leak in `OfflineSingleAgentEnvRunner`. (#48309) --- rllib/BUILD | 11 ++ rllib/algorithms/algorithm_config.py | 13 ++ .../examples/offline_rl/cartpole_recording.py | 163 ++++++++++++++++++ rllib/offline/offline_env_runner.py | 41 +++-- rllib/tuned_examples/bc/cartpole_recording.py | 61 ------- 5 files changed, 217 insertions(+), 72 deletions(-) create mode 100644 rllib/examples/offline_rl/cartpole_recording.py delete mode 100644 rllib/tuned_examples/bc/cartpole_recording.py diff --git a/rllib/BUILD b/rllib/BUILD index 5886d865a052..dc3fd6830245 100644 --- a/rllib/BUILD +++ b/rllib/BUILD @@ -2929,6 +2929,17 @@ py_test( # subdirectory: offline_rl/ # .................................... +# Does run into scheduling problems in CI tests. Works on local +# and GCP cloud. 
+# py_test( +# name = "examples/offline_rl/cartpole_recording", +# main = "examples/offline_rl/cartpole_recording.py", +# tags = ["team:rllib", "examples", "exclusive"], +# size = "large", +# srcs = ["examples/offline_rl/cartpole_recording.py"], +# args = ["--enable-new-api-stack", "--as-test", "--framework=torch", "--num-cpus=12"], +# ) + py_test( name = "examples/offline_rl/train_w_bc_finetune_w_ppo", main = "examples/offline_rl/train_w_bc_finetune_w_ppo.py", diff --git a/rllib/algorithms/algorithm_config.py b/rllib/algorithms/algorithm_config.py index 051a0307367a..a9c3b5598ef5 100644 --- a/rllib/algorithms/algorithm_config.py +++ b/rllib/algorithms/algorithm_config.py @@ -468,6 +468,7 @@ def __init__(self, algo_class: Optional[type] = None): self.output_compress_columns = [Columns.OBS, Columns.NEXT_OBS] self.output_max_file_size = 64 * 1024 * 1024 self.output_max_rows_per_file = None + self.output_write_remaining_data = False self.output_write_method = "write_parquet" self.output_write_method_kwargs = {} self.output_filesystem = None @@ -2579,6 +2580,7 @@ def offline_data( output_compress_columns: Optional[List[str]] = NotProvided, output_max_file_size: Optional[float] = NotProvided, output_max_rows_per_file: Optional[int] = NotProvided, + output_write_remaining_data: Optional[bool] = NotProvided, output_write_method: Optional[str] = NotProvided, output_write_method_kwargs: Optional[Dict] = NotProvided, output_filesystem: Optional[str] = NotProvided, @@ -2748,6 +2750,15 @@ def offline_data( to a new file. output_max_rows_per_file: Max output row numbers before rolling over to a new file. + output_write_remaining_data: Determines whether any remaining data in the + recording buffers should be stored to disk. It is only applicable if + `output_max_rows_per_file` is defined. When sampling data, it is + buffered until the threshold specified by `output_max_rows_per_file` + is reached. Only complete multiples of `output_max_rows_per_file` are + written to disk, while any leftover data remains in the buffers. If a + recording session is stopped, residual data may still reside in these + buffers. Setting `output_write_remaining_data` to `True` ensures this + data is flushed to disk. By default, this attribute is set to `False`. output_write_method: Write method for the `ray.data.Dataset` to write the offline data to `output`. The default is `read_parquet` for Parquet files. See https://docs.ray.io/en/latest/data/api/input_output.html for @@ -2855,6 +2866,8 @@ def offline_data( self.output_max_file_size = output_max_file_size if output_max_rows_per_file is not NotProvided: self.output_max_rows_per_file = output_max_rows_per_file + if output_write_remaining_data is not NotProvided: + self.output_write_remaining_data = output_write_remaining_data if output_write_method is not NotProvided: self.output_write_method = output_write_method if output_write_method_kwargs is not NotProvided: diff --git a/rllib/examples/offline_rl/cartpole_recording.py b/rllib/examples/offline_rl/cartpole_recording.py new file mode 100644 index 000000000000..42258ac46fe0 --- /dev/null +++ b/rllib/examples/offline_rl/cartpole_recording.py @@ -0,0 +1,163 @@ +"""Example showing how to record expert data from a trained policy. + +This example: + - demonstrates how you can train a single-agent expert PPO Policy (RLModule) + and checkpoint it. + - shows how you can then record expert data from the trained PPO Policy to + disk during evaluation. 
+
+How to run this script
+----------------------
+`python [script file name].py --checkpoint-at-end`
+
+For debugging, use the following additional command line options
+`--no-tune --num-env-runners=0`
+which should allow you to set breakpoints anywhere in the RLlib code and
+have the execution stop there for inspection and debugging.
+
+For logging to your WandB account, use:
+`--wandb-key=[your WandB API key] --wandb-project=[some project name]
+--wandb-run-name=[optional: WandB run name (within the defined project)]`
+
+Results to expect
+-----------------
+In the console output you can see that the episode return of 350.0 is reached
+before the timestep stop criterion is hit. Afterwards, evaluation starts and
+runs 10 iterations while recording the data. The number of recorded experiences
+may differ from evaluation run to evaluation run because the evaluation
+`EnvRunner`s sample whole episodes while recording timesteps, and episodes
+usually contain different numbers of timesteps. Note that this is different
+when recording episodes - in that case, each row holds one episode.
+
++-----------------------------+------------+----------------------+
+| Trial name                  | status     | loc                  |
+|                             |            |                      |
+|-----------------------------+------------+----------------------+
+| PPO_CartPole-v1_df83f_00000 | TERMINATED | 192.168.0.119:233661 |
++-----------------------------+------------+----------------------+
++--------+------------------+------------------------+------------------------+
+|   iter |   total time (s) |   num_training_step_ca |   num_env_steps_sample |
+|        |                  |   lls_per_iteration    |   d_lifetime           |
+|--------+------------------+------------------------+------------------------|
+|     21 |          25.9162 |                      1 |                  84000 |
++--------+------------------+------------------------+------------------------+
+
+...
+
+Number of experiences recorded: 26644
+"""
+
+import ray
+
+from ray.rllib.algorithms.ppo import PPOConfig
+from ray.rllib.core import COMPONENT_RL_MODULE
+from ray.rllib.core.columns import Columns
+from ray.rllib.core.rl_module.default_model_config import DefaultModelConfig
+from ray.rllib.utils.metrics import (
+    ENV_RUNNER_RESULTS,
+    EPISODE_RETURN_MEAN,
+    EVALUATION_RESULTS,
+    NUM_ENV_STEPS_SAMPLED_LIFETIME,
+)
+from ray.rllib.utils.test_utils import add_rllib_example_script_args
+
+parser = add_rllib_example_script_args(
+    default_timesteps=200000,
+    default_reward=350.0,
+)
+parser.set_defaults(checkpoint_at_end=True, max_concurrent_trials=1)
+# Use `parser` to add your own custom command line options to this script
+# and (if needed) use their values to set up `config` below.
+args = parser.parse_args()
+
+config = (
+    PPOConfig()
+    .env_runners(
+        num_env_runners=5,
+    )
+    .environment("CartPole-v1")
+    .rl_module(
+        model_config=DefaultModelConfig(
+            fcnet_hiddens=[32],
+            fcnet_activation="linear",
+            vf_share_layers=True,
+        ),
+    )
+    .training(
+        lr=0.0003,
+        num_epochs=6,
+        vf_loss_coeff=0.01,
+    )
+    .evaluation(
+        evaluation_num_env_runners=1,
+        evaluation_interval=1,
+        evaluation_parallel_to_training=True,
+        evaluation_config=PPOConfig.overrides(explore=False),
+    )
+)
+
+stop = {
+    f"{NUM_ENV_STEPS_SAMPLED_LIFETIME}": args.stop_timesteps,
+    f"{EVALUATION_RESULTS}/{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}": (
+        args.stop_reward
+    ),
+}
+
+
+if __name__ == "__main__":
+    from ray.rllib.utils.test_utils import run_rllib_example_script_experiment
+
+    results = run_rllib_example_script_experiment(config, args, stop=stop)
+
+    # Store the best checkpoint for recording.
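+    # Pick the trial with the highest evaluation episode return from the
+    # returned results and keep the path to its checkpoint for the recording
+    # phase below.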
+    best_checkpoint = results.get_best_result(
+        metric=f"{EVALUATION_RESULTS}/{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}",
+        mode="max",
+    ).checkpoint.path
+
+    # Configure the algorithm for offline recording.
+    config.offline_data(
+        output="local:///tmp/cartpole/",
+        # Store columnar (tabular) data.
+        output_write_episodes=False,
+        # Each file should hold 1,000 rows.
+        output_max_rows_per_file=1000,
+        output_write_remaining_data=True,
+        # LZ4-compress columns 'obs' and 'actions' to save disk
+        # space and increase performance. Note that this means you
+        # have to use `input_compress_columns` in the same way when
+        # using the data for training in `RLlib`.
+        output_compress_columns=[Columns.OBS, Columns.ACTIONS],
+    )
+    # Change the evaluation settings to sample exactly 50 episodes
+    # per evaluation iteration and increase the number of evaluation
+    # env-runners to 5.
+    config.evaluation(
+        evaluation_num_env_runners=5,
+        evaluation_duration=50,
+        evaluation_duration_unit="episodes",
+        evaluation_interval=1,
+        evaluation_parallel_to_training=False,
+        evaluation_config=PPOConfig.overrides(explore=False),
+    )
+
+    # Build the algorithm for evaluation.
+    algo = config.build()
+    # Load the checkpoint stored above.
+    algo.restore_from_path(
+        best_checkpoint,
+        component=COMPONENT_RL_MODULE,
+    )
+
+    # Evaluate over 10 iterations and record the data.
+    for i in range(10):
+        print(f"Iteration: {i + 1}:\n")
+        res = algo.evaluate()
+        print(res)
+
+    # Stop the algorithm.
+    algo.stop()
+
+    # Check the number of rows in the dataset.
+    ds = ray.data.read_parquet("local:///tmp/cartpole")
+    print(f"Number of experiences recorded: {ds.count()}")
diff --git a/rllib/offline/offline_env_runner.py b/rllib/offline/offline_env_runner.py
index 9da38b60bd6a..a0fee273c007 100644
--- a/rllib/offline/offline_env_runner.py
+++ b/rllib/offline/offline_env_runner.py
@@ -29,6 +29,13 @@ def __init__(self, config: AlgorithmConfig, **kwargs):
         # Initialize the parent.
         super().__init__(config, **kwargs)
 
+        # Get the data context for this `EnvRunner`.
+        data_context = ray.data.DataContext.get_current()
+        # Limit the resources for Ray Data to the CPUs given to this `EnvRunner`.
+        data_context.execution_options.resource_limits.cpu = (
+            config.num_cpus_per_env_runner
+        )
+
         # Set the output write method.
         self.output_write_method = self.config.output_write_method
         self.output_write_method_kwargs = self.config.output_write_method_kwargs
@@ -92,6 +99,10 @@ def __init__(self, config: AlgorithmConfig, **kwargs):
         else:
             self.write_data_this_iter = True
 
+        # Whether the remaining data should be stored. Note that this is only
+        # relevant if `output_max_rows_per_file` is defined.
+        self.write_remaining_data = self.config.output_write_remaining_data
+
         # Counts how often `sample` is called to define the output path for
         # each file.
         self._sample_counter = 0
@@ -155,15 +166,18 @@ def sample(
             if self.output_max_rows_per_file:
                 # Reset the event.
                 self.write_data_this_iter = False
-
-            # Extract the number of samples to be written to disk this iteration.
-            samples_to_write = self._samples[: self.output_max_rows_per_file]
-            # Reset the buffer to the remaining data. This only makes sense, if
-            # `rollout_fragment_length` is smaller `output_max_rows_per_file` or
-            # a 2 x `output_max_rows_per_file`.
-            # TODO (simon): Find a better way to write these data.
-            self._samples = self._samples[self.output_max_rows_per_file :]
-            samples_ds = ray.data.from_items(samples_to_write)
+            # Ensure that all data ready to be written is released from
+            # the buffer. Note that this is important in case we have many
+            # episodes sampled and a relatively small `output_max_rows_per_file`.
+            while len(self._samples) >= self.output_max_rows_per_file:
+                # Extract the number of samples to be written to disk this
+                # iteration.
+                samples_to_write = self._samples[: self.output_max_rows_per_file]
+                # Reset the buffer to the remaining data. This only makes sense
+                # if `rollout_fragment_length` is smaller than
+                # `output_max_rows_per_file` or around 2 x `output_max_rows_per_file`.
+                self._samples = self._samples[self.output_max_rows_per_file :]
+                samples_ds = ray.data.from_items(samples_to_write)
             # Otherwise, write the complete data.
             else:
                 samples_ds = ray.data.from_items(self._samples)
@@ -183,6 +197,11 @@ def sample(
         except Exception as e:
             logger.error(e)
 
+        self.metrics.log_value(
+            key="recording_buffer_size",
+            value=len(self._samples),
+        )
+
         # Finally return the samples as usual.
         return samples
 
@@ -196,11 +215,11 @@ def stop(self) -> None:
         """
         # If there are samples left over, we have to write them to disk as
        # a dataset.
-        if self._samples:
+        if self._samples and self.write_remaining_data:
             # Convert them to a `ray.data.Dataset`.
             samples_ds = ray.data.from_items(self._samples)
             # Increase the sample counter for the folder/file name.
-            self._sample_counter += 1.0
+            self._sample_counter += 1
             # Try to write the dataset to disk/cloud storage.
             try:
                 # Setup the path for writing data. Each run will be written to
diff --git a/rllib/tuned_examples/bc/cartpole_recording.py b/rllib/tuned_examples/bc/cartpole_recording.py
deleted file mode 100644
index a75cb31a9228..000000000000
--- a/rllib/tuned_examples/bc/cartpole_recording.py
+++ /dev/null
@@ -1,61 +0,0 @@
-from ray.rllib.algorithms.ppo import PPOConfig
-from ray.rllib.core.rl_module.default_model_config import DefaultModelConfig
-from ray.rllib.utils.metrics import (
-    ENV_RUNNER_RESULTS,
-    EPISODE_RETURN_MEAN,
-    EVALUATION_RESULTS,
-    NUM_ENV_STEPS_SAMPLED_LIFETIME,
-)
-from ray.rllib.utils.test_utils import add_rllib_example_script_args
-
-parser = add_rllib_example_script_args()
-# Use `parser` to add your own custom command line options to this script
-# and (if needed) use their values to set up `config` below.
-args = parser.parse_args()
-
-config = (
-    PPOConfig()
-    .env_runners(
-        rollout_fragment_length=1000, num_env_runners=0, batch_mode="truncate_episodes"
-    )
-    .environment("CartPole-v1")
-    .rl_module(
-        model_config=DefaultModelConfig(
-            fcnet_hiddens=[32],
-            fcnet_activation="linear",
-            vf_share_layers=True,
-        ),
-    )
-    .training(
-        lr=0.0003,
-        num_epochs=6,
-        vf_loss_coeff=0.01,
-    )
-    .evaluation(
-        evaluation_num_env_runners=1,
-        evaluation_interval=1,
-        evaluation_parallel_to_training=True,
-        evaluation_config=PPOConfig.overrides(exploration=False),
-    )
-    .offline_data(
-        output="local:///tmp/cartpole/",
-        output_write_episodes=False,
-        output_max_rows_per_file=1000,
-        # LZ4-compress columns 'obs', 'new_obs', and 'actions' to
-        # save disk space and increase performance. Note, this means
-        # that you have to use `input_compress_columns` in the same
-        # way when using the data for training in `RLlib`.
- output_compress_columns=["obs", "new_obs", "actions"], - ) -) - -stop = { - f"{NUM_ENV_STEPS_SAMPLED_LIFETIME}": 200000, - f"{EVALUATION_RESULTS}/{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}": 350.0, -} - - -if __name__ == "__main__": - from ray.rllib.utils.test_utils import run_rllib_example_script_experiment - - run_rllib_example_script_experiment(config, args, stop=stop)
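
Note on consuming the recordings (illustrative only, not part of this patch): the new example writes LZ4-compressed, columnar Parquet data to `/tmp/cartpole`. The following is a minimal sketch of a follow-up behavior-cloning script, assuming the recording example above has been run first; exact `BCConfig` settings may vary by RLlib version, and the path and column names mirror the `output` and `output_compress_columns` used above.

```python
import ray

from ray.rllib.algorithms.bc import BCConfig
from ray.rllib.core.columns import Columns

# Read back the recorded dataset. Each row holds one timestep, because the
# recording used `output_write_episodes=False` (columnar data).
ds = ray.data.read_parquet("local:///tmp/cartpole")
print(ds.schema())
print(f"Rows available for training: {ds.count()}")

# Configure BC on the recordings. `input_compress_columns` has to mirror the
# `output_compress_columns` used while recording so that the LZ4-packed
# 'obs' and 'actions' columns can be decoded again.
config = (
    BCConfig()
    .environment("CartPole-v1")
    .offline_data(
        input_="local:///tmp/cartpole",
        input_compress_columns=[Columns.OBS, Columns.ACTIONS],
    )
)

algo = config.build()
for _ in range(5):
    print(algo.train())
algo.stop()
```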