From 5b9a16a3870fd6efeb48d1383396d7167f15f99b Mon Sep 17 00:00:00 2001 From: Klaus Satzke Date: Fri, 10 Jul 2020 14:38:04 +0000 Subject: [PATCH 01/64] adding GPU support, adding test cases, adding NVIDIA runtime support --- ManagementService/python/deployWorkflow.py | 3 +- Sandbox/Dockerfile | 20 ++++- Sandbox/Dockerfile.cpu | 57 +++++++++++++ Sandbox/Dockerfile.gpu | 75 ++++++++++++++++++ deploy/ansible/Makefile | 2 +- deploy/ansible/installnvidiadocker.yaml | 10 +++ .../ansible/scripts/install-nvidia-docker.sh | 22 +++++ tests/asl_DLIB/python/dlibtest.py | 24 ++++++ tests/asl_DLIB/python/requirements.txt | 1 + tests/asl_DLIB/settings.json | 4 + tests/asl_DLIB/test.py | 39 +++++++++ tests/asl_DLIB/workflow_dlib_test.json | 11 +++ tests/asl_Face_Recognition/settings.json | 4 + tests/asl_Face_Recognition/test.py | 40 ++++++++++ .../workflow_face_recognition_test.json | 11 +++ tests/asl_Face_Recognition/zips/facer.zip | Bin 0 -> 97352 bytes .../python/requirements.txt | 1 + .../python/tensorf.py | 27 +++++++ tests/asl_Tensorflow_HelloWorld/settings.json | 4 + tests/asl_Tensorflow_HelloWorld/test.py | 41 ++++++++++ .../workflow_tensorflow_test.json | 11 +++ tests/settings.json | 10 +++ 22 files changed, 414 insertions(+), 3 deletions(-) create mode 100644 Sandbox/Dockerfile.cpu create mode 100644 Sandbox/Dockerfile.gpu create mode 100644 deploy/ansible/installnvidiadocker.yaml create mode 100755 deploy/ansible/scripts/install-nvidia-docker.sh create mode 100644 tests/asl_DLIB/python/dlibtest.py create mode 100644 tests/asl_DLIB/python/requirements.txt create mode 100644 tests/asl_DLIB/settings.json create mode 100644 tests/asl_DLIB/test.py create mode 100644 tests/asl_DLIB/workflow_dlib_test.json create mode 100644 tests/asl_Face_Recognition/settings.json create mode 100644 tests/asl_Face_Recognition/test.py create mode 100644 tests/asl_Face_Recognition/workflow_face_recognition_test.json create mode 100644 tests/asl_Face_Recognition/zips/facer.zip create mode 100644 
tests/asl_Tensorflow_HelloWorld/python/requirements.txt create mode 100644 tests/asl_Tensorflow_HelloWorld/python/tensorf.py create mode 100644 tests/asl_Tensorflow_HelloWorld/settings.json create mode 100644 tests/asl_Tensorflow_HelloWorld/test.py create mode 100644 tests/asl_Tensorflow_HelloWorld/workflow_tensorflow_test.json create mode 100644 tests/settings.json diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index f61c704b..7e2484cb 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -202,7 +202,8 @@ def start_docker_sandbox(host_to_deploy, uid, sid, wid, wname, sandbox_image_nam try: print("Starting sandbox docker container for: " + uid + " " + sid + " " + wid + " " + sandbox_image_name) print("Docker daemon: " + "tcp://" + host_to_deploy[1] + ":2375" + ", environment variables: " + str(env_vars)) - client.containers.run(sandbox_image_name, init=True, detach=True, ports={"8080/tcp": None}, ulimits=ulimit_list, auto_remove=True, name=sid, environment=env_vars, extra_hosts={host_to_deploy[0]:host_to_deploy[1]}, log_config=lc) + client.containers.run(sandbox_image_name, init=True, detach=True, ports={"8080/tcp": None}, ulimits=ulimit_list, auto_remove=True, name=sid, environment=env_vars, extra_hosts={host_to_deploy[0]:host_to_deploy[1]}, log_config=lc, runtime="nvidia") + #client.containers.run(sandbox_image_name, init=True, detach=True, ports={"8080/tcp": None}, ulimits=ulimit_list, auto_remove=True, name=sid, environment=env_vars, extra_hosts={host_to_deploy[0]:host_to_deploy[1]}, log_config=lc) # TEST/DEVELOPMENT: no auto_remove to access sandbox logs #client.containers.run(sandbox_image_name, init=True, detach=True, ports={"8080/tcp": None}, ulimits=ulimit_list, name=sid, environment=env_vars, extra_hosts={host_to_deploy[0]:host_to_deploy[1]}, log_config=lc) except Exception as exc: diff --git a/Sandbox/Dockerfile b/Sandbox/Dockerfile index 
54c74044..52ca2ef1 100644 --- a/Sandbox/Dockerfile +++ b/Sandbox/Dockerfile @@ -12,7 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM ubuntu:18.04 +#FROM ubuntu:18.04 +FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04 # Install (as root) # Base @@ -20,6 +21,10 @@ RUN apt-get update --fix-missing RUN apt-get -y --no-install-recommends install build-essential RUN apt-get -y --no-install-recommends install netbase unzip file libmagic1 +# CUDA 10.1 dependencies and tools to build dlib +RUN apt-get -y --no-install-recommends install libsm6 libxrender1 libxrender-dev libxext6 libglib2.0-0 git cmake +RUN apt-get install -y --no-install-recommends libnvinfer6=6.0.1-1+cuda10.1 libnvinfer-dev=6.0.1-1+cuda10.1 libnvinfer-plugin6=6.0.1-1+cuda10.1 + # Python RUN apt-get -y --no-install-recommends install python3 python3-dev RUN apt-get -y --no-install-recommends install python3-pip @@ -38,6 +43,19 @@ RUN /usr/bin/python3 -m pip install fastcache # Needed for multi-language support (currently just Java) RUN /usr/bin/python3 -m pip install thriftpy2 +# Install dlib for CUDA +RUN git clone https://github.com/davisking/dlib.git +RUN mkdir -p /dlib/build + +RUN cmake -H/dlib -B/dlib/build -DDLIB_USE_CUDA=1 -DUSE_AVX_INSTRUCTIONS=1 +RUN cmake --build /dlib/build + +RUN cd /dlib; python3 /dlib/setup.py install + +# Install the face recognition package and tensorflow +RUN pip3 install face_recognition +RUN pip3 install tensorflow==2.1.0 + # Java (for queue service) RUN apt-get -y --no-install-recommends install openjdk-8-jdk-headless diff --git a/Sandbox/Dockerfile.cpu b/Sandbox/Dockerfile.cpu new file mode 100644 index 00000000..54c74044 --- /dev/null +++ b/Sandbox/Dockerfile.cpu @@ -0,0 +1,57 @@ +# Copyright 2020 The KNIX Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM ubuntu:18.04 + +# Install (as root) +# Base +RUN apt-get update --fix-missing +RUN apt-get -y --no-install-recommends install build-essential +RUN apt-get -y --no-install-recommends install netbase unzip file libmagic1 + +# Python +RUN apt-get -y --no-install-recommends install python3 python3-dev +RUN apt-get -y --no-install-recommends install python3-pip +RUN apt-get -y --no-install-recommends install zlib1g libssl1.0 libsasl2-2 ca-certificates + +RUN /usr/bin/python3 -m pip install --upgrade pip + +RUN /usr/bin/python3 -m pip install setuptools +RUN /usr/bin/python3 -m pip install thrift>=0.12.0 +RUN /usr/bin/python3 -m pip install anytree +RUN /usr/bin/python3 -m pip install ujsonpath +RUN /usr/bin/python3 -m pip install requests +RUN /usr/bin/python3 -m pip install retry +# remove warnings from anytree package +RUN /usr/bin/python3 -m pip install fastcache +# Needed for multi-language support (currently just Java) +RUN /usr/bin/python3 -m pip install thriftpy2 + +# Java (for queue service) +RUN apt-get -y --no-install-recommends install openjdk-8-jdk-headless + +# Add components (as mfn) +RUN groupadd -o -g 1000 -r mfn && useradd -d /opt/mfn -u 1000 -m -r -g mfn mfn +RUN mkdir /opt/mfn/logs + +COPY build/queueservice.jar /opt/mfn/ +ADD frontend/frontend /opt/mfn/frontend +ADD build/SandboxAgent.tar.gz /opt/mfn/ +ADD build/FunctionWorker.tar.gz /opt/mfn/ +ADD build/LoggingService.tar.gz /opt/mfn/ + +RUN chown mfn:mfn -R /opt/mfn +USER mfn +WORKDIR /opt/mfn +CMD ["python3", "/opt/mfn/SandboxAgent/sandboxagent.py"] diff --git 
a/Sandbox/Dockerfile.gpu b/Sandbox/Dockerfile.gpu new file mode 100644 index 00000000..52ca2ef1 --- /dev/null +++ b/Sandbox/Dockerfile.gpu @@ -0,0 +1,75 @@ +# Copyright 2020 The KNIX Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#FROM ubuntu:18.04 +FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04 + +# Install (as root) +# Base +RUN apt-get update --fix-missing +RUN apt-get -y --no-install-recommends install build-essential +RUN apt-get -y --no-install-recommends install netbase unzip file libmagic1 + +# CUDA 10.1 dependencies and tools to build dlib +RUN apt-get -y --no-install-recommends install libsm6 libxrender1 libxrender-dev libxext6 libglib2.0-0 git cmake +RUN apt-get install -y --no-install-recommends libnvinfer6=6.0.1-1+cuda10.1 libnvinfer-dev=6.0.1-1+cuda10.1 libnvinfer-plugin6=6.0.1-1+cuda10.1 + +# Python +RUN apt-get -y --no-install-recommends install python3 python3-dev +RUN apt-get -y --no-install-recommends install python3-pip +RUN apt-get -y --no-install-recommends install zlib1g libssl1.0 libsasl2-2 ca-certificates + +RUN /usr/bin/python3 -m pip install --upgrade pip + +RUN /usr/bin/python3 -m pip install setuptools +RUN /usr/bin/python3 -m pip install thrift>=0.12.0 +RUN /usr/bin/python3 -m pip install anytree +RUN /usr/bin/python3 -m pip install ujsonpath +RUN /usr/bin/python3 -m pip install requests +RUN /usr/bin/python3 -m pip install retry +# remove warnings from anytree package +RUN /usr/bin/python3 -m pip install fastcache +# Needed 
for multi-language support (currently just Java) +RUN /usr/bin/python3 -m pip install thriftpy2 + +# Install dlib for CUDA +RUN git clone https://github.com/davisking/dlib.git +RUN mkdir -p /dlib/build + +RUN cmake -H/dlib -B/dlib/build -DDLIB_USE_CUDA=1 -DUSE_AVX_INSTRUCTIONS=1 +RUN cmake --build /dlib/build + +RUN cd /dlib; python3 /dlib/setup.py install + +# Install the face recognition package and tensorflow +RUN pip3 install face_recognition +RUN pip3 install tensorflow==2.1.0 + +# Java (for queue service) +RUN apt-get -y --no-install-recommends install openjdk-8-jdk-headless + +# Add components (as mfn) +RUN groupadd -o -g 1000 -r mfn && useradd -d /opt/mfn -u 1000 -m -r -g mfn mfn +RUN mkdir /opt/mfn/logs + +COPY build/queueservice.jar /opt/mfn/ +ADD frontend/frontend /opt/mfn/frontend +ADD build/SandboxAgent.tar.gz /opt/mfn/ +ADD build/FunctionWorker.tar.gz /opt/mfn/ +ADD build/LoggingService.tar.gz /opt/mfn/ + +RUN chown mfn:mfn -R /opt/mfn +USER mfn +WORKDIR /opt/mfn +CMD ["python3", "/opt/mfn/SandboxAgent/sandboxagent.py"] diff --git a/deploy/ansible/Makefile b/deploy/ansible/Makefile index afe8d1e2..f857a362 100644 --- a/deploy/ansible/Makefile +++ b/deploy/ansible/Makefile @@ -21,7 +21,7 @@ NAMES := $(YAML:%.yaml=%) .PHONY: $(NAMES) default: prepare_packages install -install: init_once riak elasticsearch fluentbit datalayer frontend sandbox management nginx +install: init_once installnvidiadocker riak elasticsearch fluentbit datalayer frontend sandbox management nginx echo "Installed KNIX MicroFunctions" clean: diff --git a/deploy/ansible/installnvidiadocker.yaml b/deploy/ansible/installnvidiadocker.yaml new file mode 100644 index 00000000..865da857 --- /dev/null +++ b/deploy/ansible/installnvidiadocker.yaml @@ -0,0 +1,10 @@ +--- +- hosts: all + vars: + proxy_env: + http_proxy: "{{ lookup('env','http_proxy') }}" + https_proxy: "{{ lookup('env','https_proxy') }}" + become: true + roles: + - ../.. 
+ diff --git a/deploy/ansible/scripts/install-nvidia-docker.sh b/deploy/ansible/scripts/install-nvidia-docker.sh new file mode 100755 index 00000000..7149ecf8 --- /dev/null +++ b/deploy/ansible/scripts/install-nvidia-docker.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Copyright 2020 The KNIX Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +git clone https://github.com/NVIDIA/ansible-role-nvidia-docker.git ansible-role-nvidia-docker +cd ansible-role-nvidia-docker +cp ../installnvidiadocker.yaml tests/installnvidiadocker.yaml +ansible-playbook --inventory ../inventory.cfg tests/installnvidiadocker.yaml +cd .. +rm -r -f ansible-role-nvidia-docker + diff --git a/tests/asl_DLIB/python/dlibtest.py b/tests/asl_DLIB/python/dlibtest.py new file mode 100644 index 00000000..97115360 --- /dev/null +++ b/tests/asl_DLIB/python/dlibtest.py @@ -0,0 +1,24 @@ +# Copyright 2020 The KNIX Authors + +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#import json + +def handle(event, context): + import dlib + # Simple hello world using TensorFlow + + #return "Hello from Tensorflow " + str(tf.__version__) + #return "GPU available: " + str(tf.test.is_gpu_available(cuda_only=False, min_cuda_compute_capability=None)) + return "GPU available: " + str(dlib.DLIB_USE_CUDA) + diff --git a/tests/asl_DLIB/python/requirements.txt b/tests/asl_DLIB/python/requirements.txt new file mode 100644 index 00000000..0f571440 --- /dev/null +++ b/tests/asl_DLIB/python/requirements.txt @@ -0,0 +1 @@ +tensorflow diff --git a/tests/asl_DLIB/settings.json b/tests/asl_DLIB/settings.json new file mode 100644 index 00000000..02108534 --- /dev/null +++ b/tests/asl_DLIB/settings.json @@ -0,0 +1,4 @@ +{ + "workflow_name": "__dlib_", + "workflow_description_file": "workflow_dlib_test.json" +} diff --git a/tests/asl_DLIB/test.py b/tests/asl_DLIB/test.py new file mode 100644 index 00000000..281da20d --- /dev/null +++ b/tests/asl_DLIB/test.py @@ -0,0 +1,39 @@ +# Copyright 2020 The KNIX Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest +import os, sys +import json + +sys.path.append("../") +from mfn_test_utils import MFNTest + +class DlibTest(unittest.TestCase): + + """ Example ASL state test with Dlib + + """ + def test_dlib(self): + """ testing dlib """ + + inp1 = '"abc"' + #res1 = '"Hello from Tensorflow 2.1.0"' + + res1 = '"GPU available: True"' + + testtuplelist =[(inp1, res1)] + + test = MFNTest(test_name = "Dlib__Test") + test.exec_tests(testtuplelist) + diff --git a/tests/asl_DLIB/workflow_dlib_test.json b/tests/asl_DLIB/workflow_dlib_test.json new file mode 100644 index 00000000..6dadd070 --- /dev/null +++ b/tests/asl_DLIB/workflow_dlib_test.json @@ -0,0 +1,11 @@ +{ + "Comment": "Dlib State Machine Example", + "StartAt": "dlibtest", + "States": { + "dlibtest":{ + "Resource":"dlibtest", + "Type":"Task", + "End":true + } + } +} diff --git a/tests/asl_Face_Recognition/settings.json b/tests/asl_Face_Recognition/settings.json new file mode 100644 index 00000000..5934407d --- /dev/null +++ b/tests/asl_Face_Recognition/settings.json @@ -0,0 +1,4 @@ +{ + "workflow_name": "__face_recognition_", + "workflow_description_file": "workflow_face_recognition_test.json" +} diff --git a/tests/asl_Face_Recognition/test.py b/tests/asl_Face_Recognition/test.py new file mode 100644 index 00000000..4caf93c8 --- /dev/null +++ b/tests/asl_Face_Recognition/test.py @@ -0,0 +1,40 @@ +# Copyright 2020 The KNIX Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest +import os, sys +import json +#import time + +sys.path.append("../") +from mfn_test_utils import MFNTest + +class FaceRecognitionTest(unittest.TestCase): + + """ Example ASL state test with face_recognition + + """ + def test_tensorflow(self): + """ testing face_recognition """ + + inp1 = '"abc"' + + #res1 = '"1.2.3"' # version number + res1 = '"[(68, 497, 175, 390)]"' + testtuplelist =[(inp1, res1)] + + test = MFNTest(test_name = "FaceRecognition__Test") + #time.sleep(10) # wait for deployment + test.exec_tests(testtuplelist) + diff --git a/tests/asl_Face_Recognition/workflow_face_recognition_test.json b/tests/asl_Face_Recognition/workflow_face_recognition_test.json new file mode 100644 index 00000000..88d3318b --- /dev/null +++ b/tests/asl_Face_Recognition/workflow_face_recognition_test.json @@ -0,0 +1,11 @@ +{ + "Comment": "Tensorflow State Machine Example", + "StartAt": "tensorf", + "States": { + "tensorf":{ + "Resource":"facer", + "Type":"Task", + "End":true + } + } +} diff --git a/tests/asl_Face_Recognition/zips/facer.zip b/tests/asl_Face_Recognition/zips/facer.zip new file mode 100644 index 0000000000000000000000000000000000000000..aca9d5b9a8e3b514369f478cfb926512e5694822 GIT binary patch literal 97352 zcmV)EK)}CHO9KQH00ICA0Q*tNQ2m}s`)UFJ0D=Sn00;ma0A^uhWpXZXc~w*i00UWJ z-(Fc^-(Gci3jhHG>H`1(1nL6-0F6{lbDJ;_z4I%!_)wU-kjXKf9B4i!Ogi?Aahm4D z$YKGdAW=z#_}_OW)6|~yP+u5n-@f9@3es+Ea+ZN$|mH&Wjw5`%6 z2o}MDVBZ8sX|RG(R;a-eEE^c7&sb>=!Ge&$RX=osNscfH+j^myCh;UsGl|++MtDXY1D)z)BrrSMcE8rz;UZ0^ zX-b&4oG9I--2rOSZ~4{Ds<`<~w&$h?*`YC@@z*FcjlA0f8^{%BJM!AG36uubXr!7| z&h?~)6|z}^QMH|zM$c0TW3||g_9V7H9%=St00qY+gjoT35yIE3$cv?CdCa$W>xV5o zW}8j6+U7R}tT%AAUR~$ge7&O6Eo7@FxX)MDOF#;Zgz$CHZU!8Ow{UN z$_&UwEjUdln~^mLje0>XX($*__rkc<8nReJZ=Gnix#29u2V26-O-wj zOYD|$EH}tiBZc#LtWvw0ephpmU%o3Qonlq#t6n?`Q#IJMuVqSlKkH6Ssi8g4Gp10D z(sk-6bgFi&XUX%>gs~U!#GT^v%Ko8riio^s>{@-lMmuT=^jxD;V7z#{J7z1(m&3n- zi_51qGlX|OEtk*-+W&7*O9KQH00ICA0D(=(Py`7NKKpk80E>|U00{sb0B>SpZDB5I 
zaA#Fi2>=7)S>IksTi;%Fcnbgl1nL6-00im-008X$WmFtN`z?-6gb*M=aEIXT?!jfy z!9B>}?oNV)AcGDvXmEG;;O>K4aCi5c_x#VbFZaH`weGij_F6Nmx~o@J*R1ZnpWRbW z_3QlWI+BW|tE-~`E33T=i?Nx537Eyy!H(6_*pZc;g^d*;BzlUc`K=#dfS-tn^B00d=c^#@U(NZ1G^f3@wBtG zcMV^o4_sorjf;pOu}9nT=h5jbDI`{mcIb#lPO1&CCTrlG6XXuYX&@6#sir9v&Vn z9-J%=&K9if{QUf^Y#gi{9L)cYV0Q7ccQy88ws)cYzqcR>b}@Cfa&)zFu>bO(ZDSJ$ zH&fmDO;P{1;g$=+bE~ogO8t@-{hLc1tL<6ed?|(;1KDrb4e_a9u1Kz&*5B`0> zefRd?=iS?Xc=x}7@b3LTyhA`h`0(Ka0smim01Utzj5lvF-n{kzrT}jM z0K|Xi3;2KF-Ft)&h#!&OBL8Ra8-)M2y7&JE^uKC91Kzy-zee)@0|F8PBEr84z*FiW`Tqe%d=O1z7xc{}BP2Dy1x(N96?%!}R-eLem0r#(f@1NfSzJ7cI`11b$ zQ;{esA>z{p5(3&@BH2rv>CYzLXPwg&m%D&{BS{BES_}g9zxy2RTHH4i#1wnTntxcI zXmgd0;3DVok9s6R}CZIr7%l(j@6UdKHb zznfqZp-0d5E9C7Q>w@oM9>?Zj7C}>%F>i3&bg70K zoL-)XV)%T#i?5lb+|%wp)lbnmX8*IhxWkbgiky?bbWWzgr>dQyU5U*T=h9`9NPS#jJf^$H1hxS}X<2s0yhv5RtUC`fj`s1ytp`P8Un z!?e3tzCobe;aj#Ldnb3?;n^WplaANRl1rj=gOk;jM9E{`#=P(Td7JC)oyr6x&~E!) zM#FG7gpi0)3gqb<=F6*8rnz@4SJaa~T!k7PYSYJ{HReNUp(hPtxny^DI2)yvFB#o% zUs5SnC_ig)i;`j!==a_af|JJ6HXhvw|lU$8;zGU2Lbz1AKr8bg8>67c8v8!CrjW{xb zvmBCZ-1>=TDM>cjeh60$(nrii~aK0BISTg(KU#~0#xRFX#vPu{20k)05*<0uKPbX~!?R>;dLGT*OU+j?&n z94lA9uuE#_a2eK`E6Hd4;1*@A|TXhYH%MuYSU$ch_Ilwr$ ziSa0vJg>Tdf9?`B<&<1NoAxoaSGa=FXcw?@_;ZkV(F(?PCZxobYquY;v?15RlmZq@F;vOTxJ|plzeurjL4m>6a4?3UH=sWUMK#0 zGKM4}N|~oAt$8pzyaMPkx|iGiOK)4y9A5!>ei#$Px!plnRUt-JAD!)rf&tT+DD`A- z?xSKeYi2wdx$IhiVoK8@A9kEPj_QYf_Xe%qS$e|s>WYxe*OJ_NafA{?ZEA%PZlblP z>#UZJNfquZ3{sm58=Hv_|J-pumG|BSG2`Yn(_Nw@pnY~H0bxte)#Vi21Vyyb;0V-R z2^g7aw2dC^-zVn?G|x;8BLmth^VRQi+>`E3gshbODf>APl68=sUCHy?lz zSb`&YS4&^7j#oeXB(+P01lp$QhoG;$IJwUDAO~Q?y9^qhLpj5Uo~PQ4J~7>-P+R*< zn4|r*dgf%|@u)c@Gf+)Ax3_lnNX~I~YVK|%&zT+MxrXQ(Y!=0xWQR#>dwkM+iMW^6 zaiLP%GG3gL?Tvajr!Cq?MGXZ>tL9KWr;LWW2I20S@Svq^E7^9)4}>u&KsUbHE9R%X z+d!T`DBC9)73^19FuoE|IA)H1yt>(?(K}WtV06weVK16tN-*el&hz>~4Bwkc^IT$+ z5*LO8iP0`2keM)}W9xEoM5wVi^CK!)tBa#`b+EeSi0Dvk-`g=UC%9e=G;kZRNG&mk z6wR|wGIW7zty}E%T=q4`1&WjM6Wz4_jC~4-&1X%jY3V&xa8x|puN<5VDo7gH&%@m 
z|7pO}B0LJ8cEQ{*GjdBiKH0OfGkK?eV?`~6Yd#k;wqoa(El>oBpU+WR^BE2))f;$cx#N1E?BzPx(%jz#uKhM-7B9v69iTKz8apALkE*ao zBTEQmgxO)rHKPX?u5NjK94Wy_f7f*}l*+Ly@=fRKiWcvK)R-4T*`WLlx&df8~KjD6!ZH;ux2cUIvV|Dx6~gBMdw1vH$uMr_hG{+;@%Jj0E9=G30|YWr$t}#PVj4r3x(fh>2L3|OgZ%1{T{ukS zWvf5k{ZZCIA!BkVTHn9o>m|c>nO>J)!WGqw4MJ`Hp8a8s?(FX0#blEQ+Zrau7%^zj zJ9QY__%bWKrsNav`?WBy`M1_1x4%|oEr0f?*gB^HhvN9vQrPjBX!#^XQ`WRsjfjZ_ z*Z6cu{Z#zbFI5>u=kqT8x?XD6z}(}%YZu|PO+I%(M5KgIy3v;r|2R6M2j5p(VGNHO z%(dZjk@O01?PYG~Qh;kMbG|tf=JQ|hEAIk-TiV5Rz$M6?H0;~kq{jK{9>+7EJhSOi z7Jww%z2!f;FYG0A6+R5hNS*J|=q$kH(^wH-OOuNjViO)kv9k-^R-+q2mDX5KB)iJ#?#=|ofI6#>lKdGBe3N&I1+LwFn-Dst*sd?&-=UkA-^mufX*!3ue zMig8mi-0ZE3U11%Ls!>z4Ut@T2S78~^++u*=)*4NKi)gzi%Yd~Q_!tX;lN&S#wu+7 z4W;ibp0zCuJ|NQn;ki$*R~##^4>8pBA*@JWof;8KR+Lu-Lu<>yxBJdlFlpRg3>VBp zM75e>c))@57{cLqoL#HnB@-e`awh5R;Emm&sj9)U+VBplSR8ZDj zjU=zd{pBGhbU%u#TArUyhGS5^^fzApc_Q<0c0cQG5IZnLh(L@#!k{}I8@4=PE$^Oq zbkw4(APb~#fCzvqn*4HuzYoak(QLQLgY0?mcP^tBld-ZWR(v~7mcK6hkDMWv+lB<iQTQ)Fq}4 zm3HPSj>18U+q)JtQ-2E?z%2+L7A>6zfZ?cxH;t>e7`_KZQl&8k`T376L?xXkLf!b$ z?>JRrT=CKbR-vAGO)x3`Mtg9{O5O2U`UXq7To)>rWQ#B7^00=qeug|WBF6WNx8$H$ z?1h5LKF0_Z{c)nuMy!|&mDM0lvr=%NFsqH!Ajb)2%ARoniZbf)#Hn3&oax8yxw>`5 zh7{~c)^G7FFN2G#?8Gvb9%W<_djV#dXLZ7=zncp0H88%plCx*YUYp;r&+1SiUyQ>< z(>D_MW9Xm2=wZ$|D$wZ(0iI(_x-y7)@g&IynO~g1k(SzxU9+W5xnY{LcD^?LY z>m+{(M|t1-Z8EL08^u_JKzX@jU2CTz z45BuHVJ~Wa{Ge7?9A}`?;uusF68ah6caS47Vjo5=6tUu)I78_IOGk@OK zYNhoTFZ)(4nEyy7L4-Ei-9ETdkoj1wKP<6mn&R2=c4s*ven9AOq_mcs})q<7hx*;x<$!){+T>18@=4&)>F zu2{#A&0Rc#;)WExuNHHc|J-qKb{(A4baO z@{UG3xBAph4|9MVaLuyQ;7asab+kDo_z=0X&+pS0^9vC*Xjrwh0((l=@jQdl%CZf; zI6n<>u!5;89@$0-V+w(H^g_K|n;`*5<% z5b88TFlhXpLE>IbVa0)Yn6w1*$AV1{ z@9@-YCg=(qar9SUKV>bXlOfBma^UsG5pLPt3>{i*_ zzs((>Z_Kh4P{Xz6)BO$o7J-b`x~Ys@f87SXG&M8Z0f7a$V((LF?Ka|yv+p#`>xw4QV1*lU5S#|;sfW)G&`q^CITRNVw&E2%eRW(_D~KDOaq7IUYJ+eS#&AOZ z8#y1dyh`{~xq%PLrcsv8u?|M&#QU%qVFj4CqO|F=Qg__Ch(u2DV>8kT(A3mqNsXKX zIct4XO4LtP!*uv9r`+DP>=m5Fpcvc^P53OG+hjbk8e0LW)?;*Um$oyh`f(}xcKVX_ 
zy@Gs*>YCe(T+2vO1|vKSH!e3&oxqb>o`I@DV+w>G&YCvWt>0m!e4$+=JZ&^Ty3Bu$ z0>kIU9#LB?tsj}ewKvApkXq^c{5bH|P`bEg!!Nh=A}C8Ys;6X}Ti{r@>DkX$=-14# zDP7}myZ|sZO;5#z6PaobtR*o*kB3LCpEY);557@X(KjpEG#<9L`<5GZ}~hz+gji2)QTdC z@U$_MHsG|Qw=7c%yAQaG(C!Hrf@l_;v#MF}iF^9@%0*M5NFqOtg(}>s`Z`Ij6X|EX8ftg$YTDOCF? z!L@!V3*;1j-xp0 z#Ey{7Ms3QowH`!Zf1b`^f?dupTTkg0;IT#x>pNWzi3M8mzWGhk>FR$9e0+r*JVVle z$%Mf(#l0_F%4%Zz+wXS*pQAD~2|lwAy5-)~6xw(NNT}WY9#RO1=c{&kyJHp>+lyHw zu+*MgMlK~VQNB+l6MU$iAPCDFY|S}fW_iI&JS)PRC^|D@(=91uxce35-$9u_{X)^u zvvI{ORS&(>TjB-hNiVi-6)Xjx1?!cEyRjP< z&6Z|v(I@80KaoyGT7Z;RuR3OF42e%7U-EA>xfK4oX5`3;c5$u2-6PiNJYB7RW4jS* zOtJi5dew{TfRno9hWb*)f^JI5Ar#?}Q!R8Ae{mVXA(g~w)#yK>FDTVHqINJyJn@~i z{uPksx@fQ%BE-3yv9>sTtXP5h^is_x9#C63FmHG(OT$xqZ%TK$uj*j#|LO40?n$N@ zkq3h^)UfXTq;fUim9WC@i;+l$lUHr?Z`R*syp>1x3bK%0GSH0WoP?oE4J48q*0{ID z0&wL_7?U62Hu7soHRgVcn&uRim1oCIZ$hlXqsr4r)brqCW$Y3#{*>%L7C$WAxYO=5 zvcoNJiu3Jzy%~l;xCoXMapRDM8Qm5FPW@0=Yj<#-(C`6KU-iMU*wO6TIIOv*YICP1 zjs;;rADpZaN?15#-962qzr58Q_0#;*QK$T}F&Y&P6C0E~wb1iB)#?UUlHczFX+9vw z?vwe$cugH6kYkbNYv&WH7Tx8g2JO32`Nys#N@kA3l16zX_`TxK;LjN#WcRus5Mlf2 zCXvSTXUHh&$XPjzN$BLpd$*n~pV{>x=nXT+PZGc2=4qvU&8OJ`ZEZD8#oln)rB;QU z#UY?Qu4K`IeCtKEE}u`J{xcUZjYSMRiV7%|tDxjUq!!7Qhai;qgfo z){;dA=JGsv3$LDk2p)=KZweQ~vlhz%msO~RId#6uipF-#QydLG$g4D%T23>z{kB<- zXNF+5NdoB~$%y3Y>}gEfAYs9eQcE>xi}1}8#QhVrV#q9b>0lm`;g|&S=^)~-@_|U? 
zW`fSx6)TGz%9`16P;foJS0ZSVaTkqIgc>cUyWV5&oLfl>MsRy}so2~kuinzR19Qv$ z02&!0s#aeo3L$CTGBqr}aBzU__Z!48u+aH3farP-f$_LW=y2vfRYO&TA*fB4L>e-VC)SC2AE2tP4~ zpOyLH5c-?QaSrA2f4N-lsZqA82j^w%9?F5jYwN%`R+_uRrt%%Q`>iPv5$Y|~I)>(E z!w(-=#|(W(AmDvlA9-?a22db2J}AD3Iu8P0sc-&H71SULlV8oVzp^HNKDKs`MoV$> zlOL5}x{FOo(jeFH${Q-&lYgu7hSVqjJo9Ld*)-Zzvkpc5^5mAjdYQ?BDhS01N3JiX zCE*Kpj2zLtF1w6EY2viGN7+>BY6>@R_$QKImFDcu>AzOhriaC$xF-jdJW+_+4e1QW za@(02KEka9PT$gSF$1c~o6<_uW`+K&O>xUOP>c>;S`U3+UXJyw_8n*gTKp&<6&1#R+_J`BNeJM=`h*R8!$G~Wo8S77{uK>A#+;`F} zf9yY;CumS*MwFT*QE(^qjmSm$>Pu4hT%DtCh%ds5nqC1B(!JZ8y?jnziR_-T`6m~j zXugc;P%CKZ*;48zo+-1u0zT6^5fr5WPs5U<7sWTIImBUf=|gc7R_l|qaarz(g3KR; zDaEyA&&KA*nO^|{S6oCK>Nr1hfBlDjp9}Z6Wo}ikpQD9>ooIA0vN-=6C-w#X75+a^ zJqcAT^_5nAn0>)Z2u9DG_IQc#>NFT#>PqHYTU0#UO20ihtr!|*_Q#T1y4PE^$o<|8 zk`Zdh#N)iu*f^@)v+P^C5yapep76_NrZ`S`P(IB~iC&A&Tp!C>;MkO8rVb*1p&EW6 z)S{NzlIoDn9AFxX4b$r3q3NH${YZ^@bpX-XB|}#FemHa%`tcI#!dSm=_wtVcA0J3D)eX*=P$LWzTBb-hj-h%fO71!zGfCi3mxO&77%#mEc<2Z zJL!#)qKjq~q>=I=_|^-R`t(y8*m{=*o#G*42PxF9W;NLbt?h=y*8C^~n6t<7T_5wZq+bE26kJzx zPaD-+U!!{rVby0!hNJIZ#$Ew8cGy|xImX^E8-_ny9i6xfNXIGjb`thiX$&c0liTC) z)$kd~;lIJT9z_1Ch3m`5Y9|!w!7B{Em!GH^1bZ*F&8?N`dgmLIk2Q`1J}(^-WDN|B zgef>aM-EaN+ei^o48yqfd$<>Ev$23Mw~tEOsO7U5l?@wC^A}3mRvc7nmyeSrew}_qxOl5intsX+zH)VVpyHz_$1|vdxM$Z}f>bw4M#J<3T)-P_2 zfenghg{5%}vjzC)4OQI9I6^-0T&{EMKGR}NVf87A8t}h{siDi;wO-*!GCx;XvIzE< z=tM5MB;}Kw(tMJc7?uMwIK~wyG@NUuSy;T)ox$eR#IR)f@+nzW@x;Y$dBs4xjV}?> zKE?6by#a8rosCwZpL! z>aW;8;18V=Jp)TJ!R~@vaSqYMD~Y^)F3B~3bi8(LryW|7k%7G)&yu1 z3rT*-3Gu9uBwYC<`M_>ly3}!U&|s=aUI%2GV@e#_)fkfLZAN1d=Eopejj1aECVk(C z`Qw}#Pu_j;QL;DLzKQVuc9x7&hi_Xg2+%QW!rpDNI6WzfrC`ML( zGJlmEt8mUSiBr`u5UShh@GrRJ=T6YB`EZUR?reiLnarVXD?JHC%l8M_*<}IsJon1%{g&bJ3zuj7dWY18X+?D%%9Qzc;1OQcIP+SXX!BPe=l`I9A! 
z>kbl}w~azi1&ms~3T!YR#jl>ni`$|{+XSp$0VOwF6&!0H(l!#>p+Ur5g9p{%f<$jr zMeD^IcDYzh4&5|yrpHaNf%uOwY}~#aRi9Ma&Yr$f0=jrOrRisTT%AcPhsMYsy&Tck zs$As{EdK5hbiT6l)~qKw`K`hu@6AzwuYiIVrF}xT+rnk!h(V`MC^!3ugt;En zE8pFJEB!kS^NuOHPijBX5s7BrcK8I0aSTM5U-FpPI_E}xi8u9wOSMRb+?nU^Zyf(K zKC{m-Mvb}vMoEjR$t7PJo{@c5(O-En*`T;P&Dv|l2E@xSY)!M+9Ju>1_vxZB$V;0P zlzb9%@xD19pX@^(!@gHVGSQ;2pmY?b5PDsL3+;hr-?wyabA?yHbfM8Jpc~RxaQ9CQ zqDYB{Ik=>a|2qdX^WSTMhxC-izd^SQCjR3x70U3&!-H{;+L!1Dtw|!EuKEB|VyrML zKIFK4Yv_5hQ*X#;Py&y9V(fD6=|!~K(M|$cr9eeKy5O;$@4@HLFv*P|ey2_R4G$#H z0mE}zN`aN`BIvs=ZXSZZ(Z<0iZ&4FFau8-(;(mtR5<@x8lzt`fQS|P**fAlfiiIdE z1gWVhVT?zY3bWiW7$5_+fM2oJ>z@;%0B4lq4P?Ed)0-kP=KV(Wk`rW#-8>Q-!t-(b zsJj$HhjYCwYM({8i1l$XE!urJ8`bJAEmPNVHu~tN%3s>6HgPgkkLrnJn;R>2)hM~t zDw)e)0fJJqE1z_mPihP}Ff=o%4c_(dx5_nhrD&a4eCqZR+J9$*i7`tqmQKB~Wq?_3 zlQZxCSG0@S#zn};wQgVb-RGQ!s8@hq?|3Yd3(Lrw&hz(_WA}J zT`qI3i+^r9Ejiu0GAV0#Z`|X+!Ob9tJy1p2*kPT}pLuMZWt}o;#v3=)Ay1qUqz9wB zU@zxD!cP~`B9iuGj{Fz;ei64EiE}@DiJ{>Bso(gHnU<3le-M;U3{;m<7yf4n2~$a2 zGm@^N3TkdvAM`GSSQq_n7m_e{)asL+4F)vr2#8kgR(2xNoeiJxvZkP^CaBhYbK*8!q)hV+L32cAQO5fv7#QfHjHP7TJ_ zk>?T8w5s2VjUTEDqjQPqTjHQh>0htu*L?b>X4n7B8K~U;>L^McO6SUSbMww%PXD@$ zi8E1^f+j?<#>K*ip)39<4IhPLvZY1T)c0o z@9owRq|aw`mgV{bE}Z7zlmy4{`I9$|5rrGAKjOMILJc%D<2H&Sm7qW8)tiy@EUg$H z{}9OsGVfPW2%?~b&{6;O>ab*LWH^_J=Q(AqvO&rz6S-PQ{>bYZKed|AqezIrL_B|! 
z495{n@7*O+_;ah4+i*tPKOvLkp7#o9t&liWHmh8pCS99(B(#B*M>{&A_|i8O>HFLB*S`T@+o_W43kGU^KT9Cg2F~x)Dk@?^A&IbN$)yI<4JzGTIMBX`Jim` zjxeVsf>r;+J~qMRQewx{2kmQi8YI^O68P=e!G1#;IEm88qIMyYy;HpAithUT)govA zU+$!UbJ=;@4|xS>Sc^+9s^9naxVv<{=)!Yd<1}xugev468=@7T4oW4qk^d=L&iPMSqH;0gCOwr&_CD}@-uc(z8?6K zIFnPOTu*wZ0_#jV!`R65g>qD1;+G|X3jC~pZau;@1w)?KEHQY7t|oc z996t%B5pn6+i3Jkj^J~~!#bPMF9f5pI`V79e|2P-Ww7(f)4Yz-T719hXLcxaA2i#R zG<&P&nkkNs98HbiNL$o#8J$slVKKoIfZ^%nSearvM1qVh8Vs&mZqV`J)W1Mlf`3G` zcUn5cw7*XgW;NwA*F8SLy^s|9F=^PAt$)=Zzh~WHfGY~sy@&0$?>ne5*$imiy#i#L zC8-{_N7K--k}fq*=9F{wWHx#5d?u2z%JeA}pysXzY7W?)kdRwt3FzdS*F);HimQJQ z#WQoqHlfAHiSbz+9~;gj(ka-#vSegbwl8_yRta&V@!3>ZL^3jIE5_xBrlEasl}Xu| zsR`CVPCuqFHrPP}Q1hpv^FLqQ5gZ#nsrH>aw$G=H=KOL_-z_=I^{236qiLI|x=a`O z=g%1Fp@vfTYs<&pRU!G+>**vnLh4}0>AFu&f)bKl!d-^O3YYpk@-gvM>Ml6+JNk^V zOVaCo$e>{;z{H)pijg&)XK7GIU_2+8kE8dD>pX!-yQlrjUz}>h> zKN{+llv!hIK}}xD`5ZeWy`cAFBO}ejaZ`g+kHyC1vOXXl%!i7+w>{svWw^EIw}q3` z3bL?gpLJ54d-$0te#WdO075lGzEA_wni%7T?jFu^>M7fTEpOxpWonwHHUD;BrVglFf7Bz2rxH-AG0lk9?nvLdP zJ+SU=L+;lyajFAun7jn0odGgkhTY6r4lrw>vk=F=R7W0lg+eoIF_z?5xTyUg%Esq^0EEJ zP&0CfUU)L>g35;5N`0W*OI}r6US7in)7wC#$~6UCsZC9<-MwZc_mM6+5tJ&gwl2q0 zT`Jv!G4;GrS@u&$_eW%7R9b3U4m&@PTFu#7;)t4FK1~IfWp-B{mjx#SO14Zff)PwK^hWL_yIYPbJD_R~x z;=APciO|;YB;wLTV-A;k{!d|ZI zlAFn~G8|%=VQ>S$SB4P?ag1yr^psm-r(aVA;3qf?z^&0+ZM8m}BpAy{u4PMpZS+b1 zka&)5ro=!jJLJ`JJvqIPtM_ENV4WE_NPqy z>Q+hf3ZR?$cCwFmf~ca1=4j-+`0p03f>j$o;a0o6EpTZ&3E&%Kk!~OLJ7o9gVA}q1Q`^+zx%Y9*}%KRLEYnW@R^~tczQW4Ngm8Um* zHHc@ML8ao*D|g&V?89T%a9bS9%oo}O=s@N|N7#udf5gJp1fLt)=&)(M@%80T6d_I zT-6#_jpWzW>Xh@Nwsz7Tj)|7aHSbu~*6{3zhZDuBg`VYq>5iRTZHslCsACEL`us;` z8;R-fIB=Z~T9x1UJ-`mJj?zHnD`^V*LdXr4Sy~!dOA?@-Db{ne6I`g49^zRet zg(=u)SVV?0yRPq9@i~P-6A_p9?3#>js-S75|1VV~$3V_!-vs24E1jsSl+a@~%Iao6 zQzm}Qjo}lG(<>s)rS)1h!C0yW&T<9y!o~oU;Tc+-e8)!Yg!uJ&!#-h@>L-Q_BhDRB z8^?5D6wW{cU*<-$1D5e%DZ$SjWbBhfVr`ngRHqG@mJ0_08O!Vz7+E&~0bZ)!gu6l3 znbp&3b#hP_V5Cr72m)jB!Gt(#7K13p5v7iF%Z7}Ek9i(R5+&`~b^@FjCyIV*KFY77 zHV;g7{(fj__WZW)z{(h{6u<(?{+xhi>g--C-nMW$5o=b19a|Gg5g!Jown@O%Db%vy 
z=e&?LrfMueE0>2HgQ4+dRJxNRD{!wduXMX~>L_AomIp&j%nOAvcyF-_a_a<~o8;*W zw&zzO1ie~o>&rgHQR*rmQi%{HzW@#6N&<+sTxWVC-(eMYhZR=RaY-6XvlqiLW?UWd#oF|dB$Pk$PUcru382N zD_f2d@Q!rSq|VppgBKFgRAd4@8AkW({}LSuX7bfUI9D)ZD;Rt&9vKP?w~pHyE{o3^ z+j$ILGi~qV>^8UAX^fQ|nmdR?*QBcKM&(sCF<@?Dx}GAD7=xD8l{{<9k8^D4Yu~I2 zpo+9Cb;piayE-M!-H-HhEc2QWua5a@C7zCrP48dYc-tpXH2vHKFSsS~ZkM7;fIgm~ zBVz)1HP4r<;4Ah!UX){QOVQQSn{vGIoZAgmI^m<(%Cj~u`o{v{Sb2icdTT{q}NLY_{MvTez9u+QtAIugGYiSY=H&IkJS#&yrBMZEzPE^k^n;YFm>F% zj7N7{BSf?;K6A~b1wl!oC<}@I_y8d-x&r#0XnUiJNcZt}+d7%sPg6pf$sqP_lEZHSiBFA*#1f8(}>7omiScdBwhuOR#ZI zic%`Fam_7*N0&xVBl^)V;FeP4_oci@>8|leU5-5Xzv6bX|4HUVY*H93F*BxY7`+`T znLF(7onnWK*H9Ri{%iXAx!WO8XU)C&a(R(@6X%M@h~+N2zxAty2w`_J1U$q|Z|%yp z5eC1QP^%PO|FBxMFPu{ul3udDGtPEY=V5p(sO&kDW?F6Y3ROkM4^xq-IEKF3J5hJY?_ zw^e$J7W%TMPjgIxMmK>qFYvX?Fb};NnLOTTSgMO~Pa>7NKrjut7zyerl>xr(gFzyv+m<_ zHu1}RY^^LK)Tj#?7y7wURXzKAzOEmbs_)%%@Ocnz-7b_=r4DJ0pT?yVlaq}9{1^Ga-qzo<|T zZVBOO9{Q+)xQA|#qj7S}D`c02G%IMj~(<8K0ZVUVrDTD&-9VSpcIv%jFfPtECs4?@wf)Mb(KE8u#H zUOp(EUUyx}ZXmXN-Fp@**;Ye$cX+gvZ>p6XNh4fqdZWGH0cxa)#y50D2`bFL0jC_5 z2{Kn*I7J2(%N3J(WPoP?UEPIWn%#mH2VVj7Vcu^31WXrS4iZmPJ!vkeTbV;#7A-J9 zW^Ie^Fa~(`GUG?_#6yIHBI2p+Y5@7@?Tp`djC}#(skp1oEx(fNU8%c#SZkABIw+nC zyN2QqYu0#Sljg_dfkN5_OP zu9d5;MNSnQ`n;Y|S^ImXLTGMzKJXtoW~8V%QVf~T3dWcc-yldwtFQz~-tbX@$veP~H z=@_$y&9~tkuMzKbppYk`o;MZIFrsHgC?7RP!ApqX0e0wX8m#eKLOyT_7Tq?Iw7qs% zz%0jc>DjT~O<&&lX1Biq-342=$-G`(`fi` zHu{X@DN@A&0e*OlzwXWz1ZaaiAnkZYsN-_j1Q>TL#6zG{~;0b&1a^z3Y527zeW?*u~rd zc}|x}i+-9#PO-WS8meO$^6_5jgjq@MuIA8Apt=Zu(Af$vlmMCbW1OgqW&Q95hmAv{ z%}=p~v5sS9<+T6h4w4#1Ms?CEmvh5T^Mr-6i8)+(qb-wYa5bB?LXC6D2E<^6!oN6h zxq}_5v?Hho{-ka&E;K?Tk$99JDI2Dka?UCI`qzOI@N)co%vF0OK)^2U#VJw5DsW=s zD(N;Fz6W&BW1Z?ai=N{0Tt5B#7+n6|37F0D{2>dw(EPXQk=ZUQCN-st-}n1IZQ(G4 zqojmKfj|K(vRvI?)jMu_ArCz7O3s=Tq>NGNkqdZ7;8@@cmG__hy!(sxDwP&VL;F@* z3TLylL=P+kJgd6ztz`RWkPhe)XA2B~kXjYY;Os%bQq!-wT1o;g@Wctj7z#hz!&cP? 
z)OhE3$k{ar7V0GPAOaCqiAy$yC6TE}SfTO70H}gYO(d<4%PI0g=8C3Pb($n1igh}% zezbBcb_>aAo-yW$moXHT?&91q%zZ3MyDZ9QSRf;BQZ?7|@*Zkz6>adLhrE`t z%huOG=@z4+xhjXcw&9uwHzmZ4HF)tTYQwq=Hj|%WhoW#WYy~1VEVrufxvl~{3Fm~h zspj5z36>B}l@QB)8@UfMO!3dI9nNg0qnAgxI-qnEwx`tThozHnhgU~qpZqly6@Y?s4WF$B~cHCQ5V?-*rCv=$Vee* zYnN2Djef^ioS$rfYIq;sBL#|V^VVG1Em1GJ3dLBq*($tH+6 zp_dT4wLd;1idEcDp_Hxh<^J;gKF%xiXPom!fyjm3iJ~A^Y68i2`yI`*t=)wCBDr|# zm{wS2nJV8?BzIDBcpaxqepR002%LtnP<0oZ$m{efvSk`qh3we=>-(oKXjS{VzM0sj zq!v)?N+nEalP7@ZIu8nWM_r+Y?c-W7{jn;zAwHGO2m+MtX~J_Poo6bF>F`wfk-Fu}y^Weqb)r z{&-$T%kjN6XMDB(kO-RWF3T$bnO!)CAJ z=nYDjN|&rz?ZY!jdu*YFkGOthQ3y6smg1X^m6Cv4QxsZGL}S`NBi_I2dkvo2D1{PL zOKJnWhf#?50xAGs&k|hOGgupKPBI^dcsN;?fXRNyB<_N~*;HfUtJ-0|JOhi7N|kw) zXF*H8xp}g8aXVYq4eMGGB;yuV8;Q!4$Gv?vXDRW0T*KqrPt9wK(?hcI0=fHN>UW0= zqpBy@J0ym$4fgxzF1dp+;v6F@P9BGjukAUF{6F~xm`ag?8wg##{8xhZKgdoYlQx=s z*g$7=Ji&Ef%1Dx|r%i}YNKXdEV3jd~DGl2n`(~^X=qDl{IC+|!@LZxAjU?`0Gts|I+XmyRjg)SA{3xYilgATVt8*;?)HzPCg4iDd)g|aBWB!XRX zJ~M?<4*WX~DE8j%EPcfGQKm>R^y4Nn;|FGe(D!MrifEfbNNQ)awGAZG^pQTbEtjCn zLtB(D&5shvx!#zfeCLYpnW#OLexmc?V|fssRi}i9}(^NK~!>3@fdt`UrYo%S;15cpIw3dfhkkr(M@DNC)4t;S=M; z`c=Vg+_eDa^zHE9j0aO0$hcVG$>%eaKa<#qci+R+S$_|s69%#qB0c}-L2tWISY#W% z6GJvf#u9o_xRkv(tbv$o=h*`TEMKYLGGv?Um(hFj7Gomzp+DE%z9oU!lZ^IJqZYz= z^a?0(`DFeg`_gxgB;5>v7cDz;Q=YcJ1Wz-f zgFR;&itijikV<#td?V^ZlK%0o3{v|uY^Et}Sy?5S^KE_}72l$v>U4!-Ol|g!QkLs+ ztD~;nv23phZ5r(h`;JeACD);wp*j>4q1}HP{~7%9uD6M-Zxy$j|JOXMCqZ5fns`u} zrAPzj2-jaO*;jo}Z+ODhnEE7lJjJ`1FurIifunwLL;vN zl&4ME8MY&*Sw;Orn(G>6o)%SHKQe1b%hAG7vx~zubttl)1QIg?#Ux=Gz*NqQF4^Ys z-nr)S0-~xqYMLR%8$jl? 
zk;aJL+@6*n)3Vs_nW}WNu?vpFzIwnN=IB2$(O`#0Vj`BVpmyKzG={^rfPE3F(ka>L{3SLgKj z0epFFiwThhXF&BxUL*M(%SdH%4s)w~QZ;0K=w`9IrZII-3;-l|@a6+|(jwJ(EN0z-=>WnUjM*NWyS9T?&2Jk1akrvu1%La*wVu0e zs}eHQr!5fG6I^p#$jri9)bO!N_T9SNQb(!HVLzs#ClQfXeQql%*(Tf68Cbx%gZ+zz z`J$@dY#dfmE!BJ~t6luO!!`leo`SegS+=-rmL3RxjMDk&Kxwon1T5PiBSc zpn|+|=ZD+nnx7(Y7^?57o?ViWv?!2jKnuBE-ad6BbHhC!z8|Q;I;JoBE9Ai*Y{N(S zp+l5e;ZfIXKVCh?E3A@RR9a}qerv<~jU5`JS?Xs~-$zb_p_(zy@vG}|{<^Z6_|>TK z;{uYMc^uZJSw9Zl6Q zc-N&yc{_9Qh8{5{yB*LFj4;V?a`oaWsd8w0OZDSMxtx?FZ*8RFR)anSqy52JmTK;+ z>r8Ri{`l$|FOuSn zgqm5pd@wQCkr*p1(E%piIS?rtra(MG(^Ap8wbJ!IHZd8eyaJ-s;}(DMEU=e2xKtc~ z*aG@w55ya;BWPPF8xnPN#^l%JeGWg+=#@dF4wlECzU*Uv0PH(Jzc%hTT#bj|-QuF{ z59zc7)U>bBRWEHy4{JPqu*@)MCF-@MOTVvw1=JBOg{%l2j6QGhGQJgc2VY%RF6|R) zdfc44f?HWOLNF~!LYe>R_dA|Xop1>{ zc6Z?4HGFqo6^8U5!qbJRkh0pTyvC4TPEi;0F7`&rIUuIzo5y;}oQ7U*p3LGkvEGhu) zH;TB|^=C!pRAW;T67+K_F75}kZVvRp*86t672D{=ay5;hIUG3SNhl0<8`2Tsb@GZzK#HW}=#X5r85%0nzLMvj)eTwJLBwBNChjBweW|FhV% z0WZz`@`mC-SJP(Z$vJtSQ~>AId$r7TcSA|YP~MVbaABrl)%r?ZrTZM8KjQSu_}U|s zO|@}Efa9v9KK=M2N_x;Es#`z6EneT#*R?P>^QPcBEXj8)K-PyKl{+b0N%pqdVcRkY zVPwzYo}w;z`9m^mL$EHLdG-xMEqv9jF5s78w`UzRe7V^C9Qn^QRX{cGQ}&Q3CiUzS z?Vi;fIu-(dFW--E?|a90iBi{ocoT~xzT6lVh=WOyhbxs&-poyL*=z`ZKtJizUr-0f z?s$Z2?ih$AYVs?kJV-sHckm4=9@(NatNpuH>Pjkt?HWq4$VXukEa!O+ZziV0k`r|u zmjI95VaUi5p?2acVkbT_J`4Tf7Q-(yQwfVYr)G85%c|T_F8Q!vDj{zoR>!G_EL* zbR$uP45b}fpd8U*6Cphxl`QIpmkKD zk`l4DR_$rsHI#eG>6QGps{uP|DFO=tznl zF7lY2s2xheH#}5|OUT*?VJwnU6Z!IWPts~1E}O22rp=yjVjMX>LSV7yhJ8`FS;n9` z%92&!C9haVZ!NA5lJ_oX6ztnc4b<^T8SK;PVm~oD!;5K@A@QHzJkXo*mC2&fE#l-M zdlce*9x{{AuLV+9vt;OEzS(oYBKM#decxp2kkN-@^|9>Mp=7y+Gbl6)&w9Sy276Rq zUh_QYk|EV?*OGM{o(Wq@ZVEMMud{u_g5}>L-u5gXO>DGw$*D43*y&5}<$6ysS7^pU zKe8bCx5yWVVdt|`E!z~sMJI8-eBFob?TKvF!qoSH>4Kdrre1Q}Uez7uW9k%Ddyt6r zmQZ9(&5a;Y8DnSaCITylw31@I636OvW;E-;75d8e%lo(tw4Rd(TJlcodJRv{iuLK1 z`?U*|jbnD)pM;hx8G#UOZqteVhG6uOG5_7R=PN+u=g+b-c?Y*DcKVPr$=ZX=ks~A!XF0uq`S$)4df8V1 z=4soW?k_n!qUc=xk}(bk{ui%A`9)|)p2bA*hHcKEkPp)~kKWrh9iryV1dS@XMD;S- 
zd-3TB|1PoBWL|KMe_{|^uFX{vfn3yP{n(* z(V{*iEnv}COqQ|{J>AXK(z@027dNIHhm%Y+_cPudC_A$QJ;qP-ZJMof*HN?_Sto9U z+Q-3U>|N?5&`MGZIWMIdk+!$Ar)D2V#&AV!MUj*URf?A%&yG!o#_ZXTF3`MnDqkaAl%V*pYJ1ONU#k7_NZ(c@C5mzspR@=u#YDVwVsTsLu=yMQw+`5YtO$1*G~ zra0b;8u!9^CKbJl_7n=G}8EP2lDUSjY3Z%oGO zY2^w{su^xdB5gxZIT``NY3EeiupDW&hH&7=e*EpOM0j*>CG8j+5vBwp9b;ZIem^H7 zXmHH57|cs_fWZvre6mf(&Nqi%%uM(yp>E{f{_oeIbtaXgcPZhl~zgem^X}FuL zp6OU$$eKr;iUcy%tZaxLOW!nfd5xx?R;W2wZ+GPaGcvGWVhdu|mW2L}c-nbea%vq8 zYrg`jO}Ls;c?UWhB{L6^vk~3gMEPRw2~&-F!4XkNO76L*j&@g(lLlP~<4kb_DTg7TXtL5SKqJ1-{^6E8c2!~;E?)tMdpdU!ij&Jnx9$8zQh0pcj=x`jsjb~j?C60}2$`C$6W|pP z3krP&TxfN?0zxraUjc7LD)ubN&pX5yA@s@rEIwzPg08QU+c&jy{ z^B}vH;P*xyUDflv?o&eX5NQ&`y{)h%nSNlug%LsC8F;~y8isD(rw9ItT;lWwk_Mbn zV~`v^Ezq{g{{ebuI_^UzoNXUgs^J_$ca+MVGC~Q;Ee0l{neF0zmSu-q%S3a+K+*%mn6X+THYxd8bZXa%r2yyUmRCD&3#Rj@Xa|N5ji%K-8fFhU|t4b z2VkvIb-J)D;26u+tr+{jr^vj;7i1wh=+tE5D1zptMFT z*O$h4G!Hr`SLDZ)8_-2Qtur`jjc%slnA)qJ<;-ksLNOb0yxQ9>L*^es>!vRi$B5S? znrbZ9sU}AhrBnB!3T6$%Viaxh|CwgSF*WNvhV=>)4ASy}=?hLXAP2RSR{XyU;yzHSNrBPTGpjtz9VDZyt4t z^iLZR?%W~Ku;66R=d(ASrn-s&dymFkEvE0%#jc4L(Uv1(ZD z&p1yFrG8rL()cDTys?+RFs7Jb9YyHuoU(k>+s9R$KXF!)j|Ye7V$&PJbXA zdxR~9+mt?ro3$plmqp_y@->kHX614%v~&r;39Dl9w69h(d2bw0A%i@XB!dx+n>ZP^ zb*g)3;ZxM+_@q~V#WTGJbttxVrROd$AU+`HVd{STE(&#BAcR>`$G8O`9fPa1McOw> z-8frhSvk$3Q6&H6c6EpQn*Q%FQg(P)qK1rj@}-rE4eWLt@OkoiD}H+1 zGF7Px6*FK19`$dB(VrvXu=uH6ZQDEn}?iQ1z|nPJ5kwQLj_d zmyK5Jf^WlhC2Qz*^ZQeB=Y!M7Zki6KP4Nvh7c#IxdP!Z*FLBrOEd&w7J8KKckpPCP&N|s+#3!;q}LE4XO!@E>M3j3wTx=tN7%{p8#N#L z0w~vgQ?#!>~k!&LLvA8X*c3KPNpL}sBik!TY#WOmm8;y(4wAjwwS}haM(R>BS z)~^a_n6!@8>89ac_)Gu(Mi_s(J_7y~-j%^wvwCz;FHdY$8MhRAt3_Iz^-Ii)a+fH) zRf7?jZ*()?->T9a>KTwjSFK0yY@i?e3gFaD<^3u=RUXJWaW`u43i#Wba%^PW{XNed zQ_M^eN*6VxCh%QUjNo)_xql{w@5y1JZd1YhznsYO-zPMY>8GKb-^R~31V-{hKvjx) zgA-KLMz?Lug$$WW_6EH~M;~&rh?XEcc>-yy?&l9VswfF`4HG+>WaDY5sqOHv<+;vF z-vi&rsS)8kF~tnffW$8?W4m7$>;{^_Sz@#kt2ZuQ%H@hvo0$-WALU@GJ_Rk>6!$i6 zrVUhKAd*K?j+{1P{W)XJgPRtJGkk&YSXVW28T{AfFnox3#wVCE^lZjSWJfPf&n^K) 
zlxeBxF~@X<@+xYqO$jA~o-$KqREWl%Xg(}9SpwM+C#2G6*gkg&RHB>dMyI0Vn^36d z_k+FDx`D+5Gl_XLFOMoKC_Q7gwK!)Rk#y=xfm2x_iYO(Yn{OCGMc$CciHxSFw2B&% zBe%F97(qAMT05?HW1#gjPnPhuPrR$T45+O5?rK$Ntk+}Qa8C67oDzw;IA*qx(6s4w z7_aiD2`e(BWS8`>vT~p8U4MltrsKQCKCPokcj8gCwf9-%SA3!va3c`p z|6ykB;yjE>&@>p`0tyTVdf>hJ;$H=~oKV#{@RX_5olcjm35I!3SHc4;2Y)lyaq2-C z=&g-$(X0-0uc&_QCGK5>$9IoU4D!*U44PMknfPUCz-Sfc0K{AK5Zb5weR=Ib>T zGTFMeQOP+@=JiA{gLFhAyd5pU=?M9o29TI75sU@0e;w!MPlw~cxgG=bLc zWC|_3Uh6Ix>82$tD?BD6Ba`6$tLslws^&JMf*_}LHvBs{4|#!z;UE9$b!lWcN*8a+ z*v7fU`R^R$;uKlqsjl^CqW>y%Z6)DRjI6HKyX!`Iu95dEvH+#rBlNLwW!WI@Dc&@` zuoVGYc4S<-mtup?^Ytv;Kjf&pcp!=#-H|7#?_o9L(NHhGr>jT%n^E9jS(!#WV3wQv((3UtD8WUdxY#X+G$d1oFoTajkm6aA@{gPHr@>!QxpjV)agx zoTLN&{Ox5~N&EYq=FL*6T%KJMW|??faFtM;4e9o89{54hPznoCMR%6&XZLq1^+AW0 zapxq>e`mFF7l-z~=FMysCV0>By;sAx+h54D%VME17<40%DeJ@_4ZhQFBb$geB(`Sk z&3j16Qs`1U+95F|Goiv>4>c!)6)0KDqLr~Y-;;kBLZfv+2Qf))8jxh@ZZ|E9Z!4u4 zliHYmJdgin?XqyqN_7wpE^=L<=Mk4vy$yN$xU+d~qfxO>G7FJc-(zk_+=bo*o}~K> zQr-Ndywo42sA0SM!>Wiw5b`}QAvMl!xktVOOPE&5=IFdLx@x1W!2x|2e>B5+z>{ks z^|ryVDoTIEwwlxTAk$%7<=Mo5R664lmVT#z{74<|7wte7>NK)u)sk!EtJ9o$5N9Wh z$g~^{HLdr%G%oRf$3=-Km|w?|)3Ekg^icYt_$0%@`tcCH{He~@GG60mD#l^ga%%9~ z=`k54?}UNqH-(5JJ+0?yj9hyGe3=h)|AJGjZ9M~{k?SxLRH=rI>d_HetY;Tq`;=gd zz?0AAH%-#Y{Wv{S&I0~scgpk<4w5|(j!Y8I9G(ibl|{LG!uap`9WM(`s5)2H>QmOr z%s)pXyOAAp>B!cUFQS#>My9Sr+&~*S{GJWh%2*T_{sWLwZjw5L4B|!iU)rnxL1NbVE1>qo;;()n0YldcGtXg z;^MRt`Bz8!=5ogZd}~uHwd;ZMS9-Tx_vo`S>8Qp?4ll72^6Ze29XYSS^ew}lpNz69 zv9h)4z;O!+)8L7LzS+2rdl3uK6ob&GNy^MjhaMR@-kFm%@vADj6h+8 zp&T}%A*XOyXl`tV6O1)vo2jSECwMY0zp;N-N;BD%<(VbddSX-W?KJx9Zgo_^o~x(Y zMDbp#k|>2Lqo^?Js3pZeyP ztvP#AQu*8opG)OK%#-`Ho0uDp8>fk=T!Ryj4zuEF{4v+ET0Z%47 z=k0V23&hTb>b7Ug7ZLXfg*>7qe!m=rC6+Ty8(cI!n(6O6w(CORu@-irJ(Ir|atwQfOq`JD6W|SrJNh6}$yLe#Rn&AuJq|w))w@JN z6r)m!f=^s)Jm99r%@Y~3rqp)M*`%%;9Z*sHysmprFg5ie6g(s>?2>U3T;Ol2kowzb zieu_TET#Z*mb%#bzaFx2k)v+Wf3wL!(Vk5rIs zi98tGaqs>#EC3gmSbhW!M%@-f(YBguOzpHA&#>8fw z_dohJ_LCC|f?KNm({~yLYHA?c&Nr#dq&1IER1}Mh5xxxbf7}%*NmIfYVXGko%4kN*$qA 
zG#xLpX%=o%V((Lb(5ew7nxx}b)#=?qDT;oPBFPMB4 zU6bqC#<;*!lX&@%EJ&tWz&4}0s6T{^%$mP>w#rY`xRE0>Ns^k9UaAvW)%`<@O8d@t zgS}nZWfk@1Z;VIP;XhNQLuCqvq>4U?k&OM8@jqvXrqpn0jlAtL{XPYIJNrwEsKzUQ zIM(yrvIMMf8e@BW73n_Qsyvgq%qi}e-C4p`-g)^7@D@N$gYC>#w6-a>4ef&Rx6w_4 z`{HE$Ebj^W!t0Pf4aMOIPu8A+p*d2Z(j>5!zicvFBd5K?~~HVWLnK0X*jGax^YoPcc)~5~pf~9RIn9gP98%C)*%?UNRrSt9S^Ig|4mA`?gxPQ7g_|W)nfq zP>CsHH0VBR@)8m?E%IZo1288-B9%K)M+8`(_ua5NT;8qqmo^-e;fD@V@DQq9ERs)K zi21ivQG6?kJ?SL*g@<7ZwiKeIH0}~Pfw`RVzu1LI}1r7vz z8ImS;^k6@8pDVv-uD&>iQkp81b8IM*r@B>3V&GPAif)q;&nr`(jkMu5C`;7P$%^a4 zF_kjTXZNwJVV9>4Wx7q{5gZzni&jC5m;%8upXOuc*f8uLrFU?Mgl_UW&^a6= zE>5P4v7ZTB2^XEnn%q`Ts$Np%iclS_uCY~F^PFte9V)Z04bn5BBntHg8s*{@o z16R>wyJ*-hb9s>`?dC|c1Gdit4-kKaK3^PV&bHV0EttK)&+w2zle6iJt_q< z9Jej};-`2DK4`GQA5Y`CcctDfcB$RXFi|LMT?#Qn@A+<*dt51c1_2|H@HpB;WAcp? zY>db;CwkH&LCZ(+pj*R5#7)u!)oW?Ga-skgd9gdFq+%W^C+g^Q0kNC8$=~tNPdEwM zb}12M<>tn{MNO6amU80Y=tu&+JaCLWSuM}gqZ~1-s6SdH{XH(D> zJ%CTfswzu58jVz$?`9RmWs|8;;}g}jKz%IyRn8%mTt<;vF+E)A-6E@)87 zWU(mrEvkidm0K!h=g}GU_zu>;II$#H4p9+8X_07#5_q(Z2;7u`xkXHc2QG4injfh! zrYoQljSYu!1}OK!l{fUsaUJyh^c?UDi8B^pNN^eX47{nTLbZI9QDu*sGx+-vNGtwLAmE#v%+|yBi)V45YB`vRXIlY z-{KOMdJnn+A6}@k|LHz+j$59kV{PF9J-?8#SAdJuBHE-DyoiA#Fw!Lnaxr(Lz}_F| z^mg`V(dhS0|KFBA{07KW=piyLciD;vF@VeLc@CXLc<7nkHS8hDl zOQ2tFn_|;YgMAN7cq&tNvfU!UVqy$x(Xn+jmgCT(xmiDnt3SHoH~sNjC@8HngKYD~ zi9tU$zwSv|mmrMYcgIRnM>j`^=LhoT%a?}6gWdoOvx&ScU45s@81$5!!lOJU_~5{! 
zc=O-d^>b$z=G62+E7 zG~xIsUs^3TceA(bOXk*^&Kf{mYvZ00=+9pVA$F#RxMxMezdT&el~{YWRz-Sh?waMa z!wZZ>g$1srvxMI!ruLN7g|i!)eJp#CDk7};FQ=FO_t|x1O8yfk{MAu1flSQb4{ClX zY#X6W77ZpazEOH3Zp`w2{)T;Wzg}%~NFO3K0;G@jj;jbfClR(A-xXnh(ORQO4iN26lCL#>B;V9S?4-hr3U zyeLRiGzQ`KO3}ifiCf?ELph62(#CWl-_ErAnT{sMlWslXmPZG`G#x6QO)b3j9i5>o zrHpOrO~5a%>C?1nQOQfxF3^xo#N2y?4qjm9YsLv;IGfrn!us-I*cm<@MRSjQjkj9P z<6+@^ZMp$PTBx_EH^-J|5je0+Nt=r2X}Sq2s|QNC2~mVV(JUmE?fJl%L& zyViGGpTzTB2}7y;zj-LPRo6W;-RE1#Hghz8b6iCCo-K;V zxQp8G_+r@L+y_d(Oz&n}LgcD1Z|s6KtL+FMg~g=v1@KK7F2Y?fH+c84064(rA`jM` zIPvJI$ubD9>fZzV3QBQg*?E6_w(fg_WSlhQ-Ei?D8V1at2Ms)`rrHhnIG(#cYF6Fy zSO%nIBdhMp>bKXlcr#$%$jOMxL!e*LU@CoBZr((WYm~L!`q?$^Wt2c#z2#}sda@1C zUs}iA&sbO-MPl(i^U}G!-1fP9x6Qtn+=$$0Ep^&nNXsp^`q{c z(|VtheP#qn3ak&51?2@=CA`HbaPLyJqnDfaubKc&A1(teY7SiNZ$hel4L*&>>g@Ku zc&{UvDxf56r?DIhd;Ul~ut9C#<|k*+i_GiAtkaP&yYF)oQ#%=U1~uh?7@TZSH#4=S z2MSG;pX<(jmm$NQf@v!K0iFrxsM`CLGpPX)Afy zX?U(SnnZU&f^E0OSR2uwRw9Gs_>c{1B2zK^Bl^} z!BXV(D6-V=HdHWV=f!IqLH?40qnfxw&hgPsXB|ce`*65W)J^)v(@l>BV2aITxhI<5 z$~ucF8;m)HV^Ud*{$}xSpI$nFo=0OxQ+ZaYUj_I>`_IAcW3;LiuQqY(0+wkqu&km+ zK};$gqB%2Fx7lLP0t*vqY<7FmiofleRc*`C6(=cS4jVf!g@&k;qH>t*8+xbSI865o zlTh?!~OcXz>z6g__vfn~w(5m0!Ta>cqym0Rt=^oJ$ z=wAA=o`UjoF5fO^DZ?iqqSQ4(1Z$K|rVj$!R(m^yb^N67-PC4;6jVvHtrH^b^mkU^ewDgiI%Vd0lUbop$ANzdiO8C)kF)zPxuNK7 zATG^tu?Gf!FK{p=G*XePfY8!aRqDt4DUdoi0F`*Pp zFDHS(rBV6%`3x~N7v@xIS^7ru9y=4t0Eh6nZayN|&Z^)hzI4j@Xt;IqFiVkVc4M0c z{b1s(>sw@;!R8c~Z8({1ed3f^#C5kcmkutZIIvckU}dBCJeT zgX*c;Jled?x#DZO16xC~r0j60 zpob*Pb~%20^!b&}F7c63$wNM->^ll(Gl&Ral*kS>)GA7jp3&KOhM!lBC6F#iy@*r) z{EZ2YwTMuTi|%b8V@O;Yu2Y%O=yG?Tb-%NmhTnA^8+@QTaLJ27%<7NNTb}#AhPo8p zEE$y8DR+VOvBM8En;-*@Ca^jbX(G|U_dQw|;pq413}#J6(~*a0&A5bYf9aaD`4ZWB zgw7qZS?X2hQQU}De{+MeI%cjS5vQ|##9HyZ76DX=^)S}JbRA=&yV8tImPykcJ(<`f zwz8>duqad}emPYU968UZiIbxE3YaPG-^cmM7sefcX81D>-1%|LDuJ~?WKFN{Wbpg@Vus4f~;Dxno-MdGeI59V}$^T{iKG9XuShIyrzK*~+$zFgXm zwA7A4>i*d))sB;+*9f^hgQ@A~Tp1%n73gc_l=M=*(25AD4eWs3E?-AqjJ@E!G1DNh zzR);jxtWG3nlN|gM?$h?MhfsW3jA&^gZvFXCvg90u!v|e-ZU7zf9!ld3k_;wnm9}fLSa% 
znoc`g|MPN@c`#xvUdXZ-XNWeeI6vYS*epK}OR*f-#L#RL!C*%`R|@{)NZXm!`>yEA zQf-T}hg#^5{HgvF?&kbhFZ*4F7*FRh&HY!v-(ph{dQZL1`zYFy`SLLOUzu?loGDww zHjee?-J|tDa!G5$E}Zp~Rb8F_q%gGA)i%pBX$~9yKC&H?w64t{N;ZG`er_*V$cWE! z&ZiH#d4`T-Ma2Qk-GF}Ac#3_=E7eaQ`>2c%cv_PtST%K9Mb65S?omUC?4v0XHef%eXe>SlXVJkS3=>dKaS$fp z?M;;Z&W=Wg-u%IyPFI>wVGD9rA?R^b)m-3 zyHCc$-wDAZFhSm7h5_AEQoO8G_Xlk+^yoT(d0(DQ;am?Q2T=0A(Zuk>=DIvqya!jd zG3ufkqr%1&ekt1`t@oH;kwlz0a^@q%Q^#~fzsAN&R;Q+ZmD64Tt`$I~H+VSmt1G`2 zGbg6f=&zKx2|8RvnVg;z9JOUGuHZ_udgz8JDW@~f9;x(O4FoC1!6qDyV>QVY9-Dc5-aAV8u zEx%r_ypMxRc|vj{7FwHhOB_~&S@TrD8#x3P9>i0$s)ovfuzJh#>~*PszNYuF;#1yl zs!~g**>HX3r*(P@lM3pBRvqAmS)0@|++Vs~^9R1J+AE;A;xU%u`WgOzB_5)2OaGTi z3;%uMAxYg$*`o-Da;QR1-fjy5lj7VSzPA;;5n+b!7*l%>Oq#p=a7ywLTdYyOi56j+ zS|QGLmwKR`G58c_LG*XfyG<%WKF?l(Z}c|BHV)ZgL5_q{Z!xu;aDH&c9qc$cU+0$t z_oXSPs;o!p8X;&-;%<`SX9*fr15bSK3iJSVsy5`h8Km2(IQtNiPQ7qx3ScW7{Cz1C zO~L+z8k(!0)Wd0b)}ZHvz_^u_Woo>opjbDf4yNaYTAh?(-+`DuiFK2@f?v4Px@)ei z3Uu>6EPFfh{O+{tIgo<7)2syB?^#35dxY_XUmKD2h z_01AcN*~GjJ6^_3Pp?DMifXl97BHW3YJr}aX5=?AZJCLb%sHr)J?I5}=XChl%D5&A z?nNQ-uKcLMrS9jXoD027948AlUXq=G^jdnhP%K9@d&;4FW$PNyd3+xyzj9C`$?1Vm z^gC4AZ6sppG}JKN1G4Se;TkPk_vGcB{(d+l=sTj`MPL6m@l!kHgk8s{^gk>MNoqb2 znTjzSO-Hkwk>BQYGE1nbOMV>}A-uu1^p}ir4~aAmqCxGBM1 zVg}ux#${9G`Gvm76@9EM)PPbFH=|boe)#?{&FbPs=G!C+1PA7G7E;$i&20@PTioE( z&)ehcD)WoX>U!o@tDR*q$H@aGP+^(uc#h=a4+mAU&y=QDr$hosEaq|RP1`+-pPG7E z;cQ^!@^ePD>M&%=Z@&k(n1$`tKela-aO-JE@8q%%85xYLPnNIPSi6z&lDz2QG5ZIv zvi+$``*6=flSe;pYovM`<}zxF@^a~cd+3Dm`wo;8#8U*`%u#4%qt;A~s7x0HF)i33 ziEB-=@B}VS8{fw+^2Emt*}%J%NJf04JyWU9j^(}hHZ&5qcbCZ3=9GVaLr3;1ygVin zw4!d&QHG(;LOF?_hHW_LVSY;8r9&)+n&Q6`=zE(n0igH^YASmNpH&__%p5**X4;3i ztCSg2-kwWb)4?MRsU+R>0)(=>7-03(fgFNd72%%`-S#iBC2IO<9cl&Xu89NDntfBA zfytv3G4tC$mn3Sa%Q&UJ_+xrdmAs6|fpYNgQixi9=8Yq?g*qaz$a{`e2Av}48n}(l z?3xv7+e)A3H<#|RQsM~7S!5w{IT68$`FWZUacD$zYOWhNkoOC3Vy?8R_${d z<96A?+#PW^79kD10}a-gp!sjIf4`p;E8!(#hJRhXw2^KKbr;2OvB3{f z8UtTSc29_KuXQCL_S+BtT(m&SP3jp21{*4U7jggZ2Vg@-0qoRJwMorG;_WRb)r{_i 
zl+}wB{6lQIxUX$DWPM-KF@56i#5ChII_p{j@cUW7r8S)7QW}yG*D6bG6t;HLb=(u6!m15vt(c+vGTkpTZR5qBB z^oh?2R23=iT9BSFh#@KZFwlqv-jEBxg|~ryN_(|I#O(I zK(Y5*oOuQC(plngW$nX#bDS)QgJyY_#G1OHTP?5S7GAD@3-j_^Ks>BUQ_B|x?8k*B6kS+a@5We2 zs%@f15Yp!ZHJbK$mr)g)Xks4lm@c|h?xb&Y)jcZPay-CRx^+3h0pNF# z^|W6bg5hacsh;4SAgCrSHPI(()CLY%F0BQ^4{DyJA+DJOptTZf$rmB@!rXes)hQuP z**{NwTIW3IzZ=xbh6V)=nYn@!aHwf-J$NF$rDO!oL7qCC`sE*a5_hm)qN6`@+x8AG zi=}#newo^t`~A6rfGD&Y3u}&T z2T#sALd9+B&=|D19kHJGDrY#aKq!*79?K|FqjQO#Pqndl=vK z4E0}%<(2fCX0r%G1yR9eoBnUC{RLB7@Av%=Q=u(T+={lvgSNQS(jo~S+>2{)*A{mP zf#4Jg5ZobXixzixhv4pTzyJ3m_{Nwv%fmGIYJ zchxDCCE3hG6vz%BZ~dGKh}99g zev!P(P1$)_M9SGH)X{DCn|@~-VSm@4lYoM(cXOZ;6d$l6-&6-%c`GQCw9wEr|MJBY z4@hgnq%!}if6HiYiQCxf^2T~1FV1}@?zLryP_?ThRGU9Ea4zzCo35y)=LzGw1^ais z8LYZdH`R(0gQGPgy39 z`s&vjU{S);H#m6T>pU`J>e|fAD0=1CC|xG$x^{LmySJX4zVIUeOPPG!muJ`;25Es`@rT z!CB2UjsuzlTslNLdEnl-`0G~})|!K@3NC&$?}yJnH8Fv;rLKOyobTkHV{WG~WsB%# zr_(-R6VyusQrF9PR4xwcR(xLka3!}Y*?hvaZWeQQU}x6;A1QlV2$JPsPHzS->gBzu3DN$qvGPTLC4 zH%lDF?cSp&44BKfH?4)#gF;@POzpU7y+jDFx@UD$h4=SiZcdWbp7TRF&~m?JFsH?r zESC-;ms7nS)%EfvU3pR^QoVB0 zCyW4_SQ(U-As%nNWcnC-ti}hQABK@wd~YS@=LqduXP?;vauN*=PbaFb`aw_Qp3kbK zMVL~B04e$Eyb+p9A4-dCO;Jlr>^&ZRDTfP8tLR@;ZTcz8o6MB+5h=|!?Ko%?*k=p! 
zb%FEGq85DrqZ<7m(yFjxDy0l5HD=4F$n*{y4OTU37j z&NyWaS0X=pqE1cNL$;yj+ks+qC`E_c&3#m(+0=o{CyTnHuzTW8eQ=Xd5c^IZ#|L7;z1UghK?|o+=e8#9j~sx=$l6L%S{n$PGqN5^ge9M;OpJ&%eu>KbWpzw^ zO=(Kf9Opjq<^G7Y&^+e~KoN1UJ%X=nJk!IH@k(v;z08b4)@hoQ$v}r1wCijfIxMHC zx=*qgJG49`(5UB)64k7`c7>W^`Iw*Is-?hSEFPo|yg$oYyroZx)BuccKbiFfL)Dk{}{{f@6f->*8*vD=g-Z_#2GD&A^x9ib} z`pBxpV}~{J_Wktz`k63ZpEa&tU&3R<_7g_zH8&rG=bc^A`9wt%Q28pzZG3|LL3n;h zu*iGg3&&%_;Koq3V0xERbwp{dDw54!g=vsOU!j^D_4+=e&EMa#1P=YpHOo zAVM(1?j;h$I^`y?lNqu-+-$sRIoR2!Hi-SDIj?S%M-c<9JgThN-uG&2#TRlGc})rE z*^*NQ?6Nu)UQfrbeEUjFEEA zV(AQjU(}!xUB^>sYf*{W+zz$E-wZT&)`>bZSpp+mz^aq{2V`nVZz}AX*azmji@Q~` zEE_Wf8fz|>A-O3{ndaUC=7o2~KJTW9I7C}Q;hTg|VQbsM)#viR=#f#*ETiUISnqA< zhc)ffK?VP&G_Rv(JJf@6xJF{P3HCS!r?N&$Ps|Ta+pSpl18@Jobqsj%;|pH#pPO;T z`!6~V&SFwdMf*~!Z-mj&+zR@y7T3RNu899P~7X7xQAlX?mbM zAI)mL&rx!PRKL%Dz203C!!Bp?NaWb48`7>=aJ%bp@p_^3oA(l)R2Q=X{V5dogn^yI z7<)k#ZM7ck%zodmi+>!esKP1-{9ev_#Rl=|C!2mGp8KeD@;1@ERz7>nm*wBFR3H;c z@tI9E)3Oka%20P3Z7x`xz)+Q=Q_vRhgfaCPf*t)g=l?k!Msb-fpB|TCo+SzmpBm?! 
z8{LqztUvyIFkuZCk80aA{1#rUL~JvFzQWSuU2P?3^=7okWe)SobnviUd>`M%C^^}FZ5CvPXVBCHv=cI%1iGN*)4$|(HE{S zwkZ$rYm_)iX$MpL_ej%oQ(Q};2C3@{sx^8hHpj<#p9d;M3PVf(Xo=w`$1Dv!CVDn> zIBSCE96<8Lo8m?$Z|n3XhKSPja;pKGE&UAEvLXgcDphVLS)GvpU{Ab$S8|MbX@WYj z=&qaN@$Hh%(4J1X3iBQ<+ktR@NEOuJb+fm4v-5bmVM=6!g{w3$jw3f2vb$E}iTQ-V zGr}vgc$4BSPL@?^dT#3y7A%$a0MegO{Y0E5P~0nW^QOvpag|~mQ+HvG-g#;%p4N6| zWb4~tQ0(k%G0lzj10C!pN5TKsEhS$_dYx12*Cz~pyUUh!e?u?Xvp)R)2ADP>=~i>Y zeY-NllN#r7{q?qL-*)@wGB}L5gIO7MI!h8!q+|bQ?(bB9Qs{(X4d}}I9dh}CBXJ-{(rotmFBe?O zH!ljLp3~9Tt+v*Jaui_bk~IO>57zX+d3&uM&}xp99Ha6syi$pYfoJ$vb5DDJs(iGy z1!@1_*mj?1cv7LPDupVWjsy`2mSm4h0%YB;l-?5}CCtth$te!iBp-yEBYA^;iE^Ogzv znUb+hDBs0;|0Rk|1Y9(%zI=86lAtV{+2&1L#Sd5}56XDUpWs?2=*-Ef8eo^j`OQwA z5vVa;2m95SIM?Ak-mHfN?1v>3?bat*DB_BxUo@|FIW5RIS}D58;F1b_4y~X*9OvQw z_pw`D9r$>VmjYiXo%u4Dll+=Qaz*D7>hWSc}5d|mUZxRoinSOtL6!Vd~JY+ zo7rjj8UK{_s#Qo5wYgcNT(R2yJV>#NOG$9Ebb{es_w@7Cz3Is(j8BT$)MZ@nv+;2> z3LIY%8wVk8X$W8FnNlTq6+;%hVTLa12$_?>vo@eJLj~6ne#mO$q zK7NjHm}wlMCizf+pq!9KfRQ{byCEm_fp4nOP*d42l>3As?JpwDrWBR< z|E&v#j?^1jG5rx$T;7Vw)QkO`cX^Y>mw>mFz`R*wxmP;7FeARkv(WV02p-NantT8K zyU$Rx_}-^WokFqb?cNUot8Lpv=1qlTv*LH=l$)fc4vX3gym5$_V+g4Q+ z2fL|l0^X9;LcvA*UCnLRKF%aDEuCI|k%g=ig2H*uS5rQug3Ut-W?P<{ZqNTw z;!?IMR~*A_Kj4~{*5Jy?((0T1O3Yid;wh`U7lbzRI~FHs3v?aQw7W(aPkd(Eg+a3t zA(#P%UVlpUr4k)XkRcfwEieRmjc2YxnYvnW7vCU_{J>k_&&rX6D>$W-Ji-iP?$0ks zt7ETGX9v6NntlDR+d&-OLfG8PNQl`)N2HUn<4=K9*<=1H;TNdbZ~H+>y7tXhuKf4> zyr~f#u{8w$8mDcn_NK(%hjZLg_Io*{wQJihGWn(o{yxT69V(h@8)J=6DLREpG=X&4 zB>jHNgkNr}M(@vsGFI-0*k|b>f+Nqe7%mfT}nT8|Qac?k-p(+`qOj1`?h?Qewf9-Q$p33+w`ymri4sF`sR`Qs!~skerM!gNCk(AR5qc9WJ0(!lW1xM%$-Qw1>q6fn zMW;(2^`4#5{whDH)cZz8HCKFf#bDG*XxboOAy<7iE!o^?O)H-e$-`guRgBNQc0D_C z=2WL<$&Djw&o7as=ks*Oct1n^Cn$Caaf9-1o8`ZyE(qNF?VoB_<^pEDw6SkLc0nz# zJ*KB3ZTC3}-gPm6^Y;|wK2C3IT4@jz=xd1)#204u(RW7`Fp>@?tUDmfm@f5(s{Z?s zDQh0tR&r@gn4=RUlKG_H$?k-hBEj+q`b}qlt%Gda!uC)4;!U*vzOJkndYjYd`nbJo z)uC9_haGJ;$~RyXoNm-k+V%LGD!U;#FF(S4v+ywZ0%Gv7_U|M*Ro))C4udn_?&f%|h^%z92;( 
zTx=ykmI{6J8Qpi&B(Vn|yO2`K6MS&^3?6K&y(Y5u#Z|+Z}Jo>lcFs!-TN{PUlB}HGs{I5F#qz}Hj&3A z zJ!h}=*iIqGH)dZN_PQ^7+26=M2r4=4Gut!^NRtUSToSIO0f1!GiH%6Jb0&&aB&mAO zb=xw9xcK!QOC+NOz`3gd=u3mht{aY_&|`}Ip7s`ZW0lH$I4eTD0?6sq@q zo#h^2K~z6Z0#C!vJoF5xTl{+@?o*Jeu8i#{*Yo2Gi(y*5(g>cnb#q~xu%OcKiF&T! z@P$NoF?)hE0R^5<{ab;s6D~f^MDDGuW-XPOm`kepk!~;W-w@^AxSe-ZO8g9el2x4= zDluNu=+xA6&-weL&1x*LTu{9W_0NAOXnPB|ID5ii=%F(erIsW4wlhq&<}%gzm_cWf8W8IOB?#q;vH^r~H^(#$?$(s$2_E;ZVv#AxyUjL*qG0YfhWw<1CI%VoPKDg-?3LU6%ChYPpnTxrb zV;J)t*EU?^k-#l_4iw|K|NgBcdN5#?G=^PY^8&(~+4cH7lEcs^q4*JpS2`?*(%S({ zBZ8M%qm*#{2QrhF46EFjYYq&v1!J>ZpPFzmvcYY4J|*&H!eiZ#c(?kdc+w zx_`fykmAjGNk@cQvW#anZqrE@FCllLG?Rgjdi4EOXGTi=;RepK^dOLam4WSzXJ`K7 zg#?kl!T}lZ?XG0nipBG~Y?I54zYBy@S$otiub(h(gxk)!kW5#P&*2Y-mF0vZ)I0y% z()|B{0+aKxa`^RqO)g~rlPA`lxhC$&d&tPEyiyXNE8~|~dMaP;n~}mS`N&j|qtFwE zB)rSRFA>;2lW%5h2j^!TM|H8HQ076PDl`7}CEqai1l zxV3zS`HaK^eSB+UXliWGh>Y=TyEPHTesHv$RkvQ6kl$4A-8Elpx6A;(&5)&0Nz)w> z^bUGG8u*)>iX@M6a#5=}id1RNd&A(;n}!SpxMp(?R& zXG5#U*Ae7vru&yCM!Qmh*Iy-ArboRqm~xLhbIs-=V57`7_+tQdlKPFPCyZaOkYs=M zYHx48Yo`fQ|I@C@OMN*qw^=>CiS0AWRNwx9`!26AYSp`f^X9huw&*T*81VmTs1LUT zQV{c7Z_A@wjF&&*lD7CNF;{KK zOX45)6{yc@QR*qAoqbHT3V!yUrVY?tnuL+80xdMJ))oHi%aG+3AjLAd=BLgWJ;6z= zZrM#MP#9(3`TpyS`&?I*eNV`nR=xz%AI+7cYdfls(+7uv_BSTCt? 
zz^JGl9(Kle`$|vEeCN#drGi3#NlnrwwuZXN1nl}dgvCnThJ8Imoa<7mruI#jebe!$ zOiS~ZJ}@rEm%SzLaf|68f3dSYqfHH(m2ChLC> z^-5+*l><4?&GIwIzWq}Q`*ibsQ{Y-1^>w$lQ+kru%PU$`EFUE?dqX8gtcT3kSkoQ% zC-HaPvf2`IQ^n-;Ql;t>;ZjX%QY~$kMoV95tlS3UmNt9hZU)Qg#H3F#v}MH~Tv7&$ zG+HBs%(==B&_AO7bfy%i@(O7EZSd34#HlPYTKEmTD({T$k5s7J;hN(>u#9_p zCL9~0Ty@0HdOfP}zId~qnTRs$bX{x;Iz(=WU3LKjnHef%x&Tb<>)neJE?&upE=xm8 zhO1NXM#M@hQ#1T<2w(j`JtlL(lFL`K=T@_LZzuY8^)8UTdUY!Z@D8j@(Zg|+FcbvO zCle|8RdFgiBa^o~=Zk~wcNpc+*b2F=?!TasURN$YVw2;_*=ZqHGBDSVLkO_{`mKe7 z;EJ$i-tP4`av1n03SNY?!``MF*V#0ONcat{>cB7?D#XantvWr?HSIWz2 zi%%G$QO+T?9~f7BtjIx^{9++b7Th{49repN-XemmsHj~dGSSTAu8 z#yHgVOpo;s6Qp%In!lG*)uoD?sOl8)R}e)6+HffGb4+smD!fm0Fy=UfLwsvxuNr~2 z(^ofXVr=@&V~AAZ`4WQ@f6gqrT)jUBlbbl#OgUY^yl6zGj@i~2$N1M{_Wyd=1ATb& zzWyZBB2$sfDSYDz!`!Pi;efNd3bZQB{e-dke$YNvy^=#FPs38)yVg!+Rw!7Hk@yYE zB&3eH-7iE)u1TlvbhViL)h%COQ&nrWJ1r|u)EGKbPT-TPkhM#-U1#GV-1X z&itN@w4_|Qa?Oz4CEmnGCbv}gKT+rXxtERyGIlBpX>MWMx)}_9uXl{u>l`^uC?kv1 z1F%%Ynl8el%@`~X7(1BmagEj~TiJxdKlPEW+WM)WZ~3^~?P75bGKO1Q{G|H-NK0#0 zW*YUGRLzp0y)+MpX~W&7Ktqf8vcbh@hrEX+cgry=8$Okq1+dXRx(8*URRA_0rw`ps zm2b+TFi{y$J_25Md)czm+2k#JH8+gA+FrBE*N@$2|Z zdlRVV6!Qj;mv4O>MIqKk9zWW4qC-l$_gsZW?7J(k8T^!SzCiTTpP9F>DF*=#z&F}Wl7S_{Qb7}Z+MqQHNnHplCYF+Oh$mSfHR zC|4)?94P|yuYf|;9L(T?prE??iQhq`ZD8S;6SnHP{k=AKu|d)UINr`Ud(ldou;bPX ze{ROzesd@cFl)X9`O-j?ws2!uqp5!&%JF+bz>nEvM!bK#tvR6Vgt|1GotXs%l;TG# zwJ=KIOrAVXu+goJ@|m2&b*sGqn$=RDN~ql7XOE0SOcY^YAXWW-+mQLO*=x$p^Seef zCMx5ic2EiDGo$$zkNdi_{aNn(u@X-h6R$1A=gmNkx?O^8MZ6KUAK7Hr-ctg5;}0WB z2V1-96hq;I@2_$pMJ7u;o2>(zD!}uZ@@=Pn4|650eSV2_>%t6;yt}X*v-MwcRsgCv z_K(VSb8>HH&UwAk*HqK-iQAU(TIl`N zeMh%l+GuXmCKK@X{&X=L_18TF=VqoZ9NFruFde@LM)u~pO*)=6>#OW*1Rim}GA_J3 z<#c2EZ*oQOSm2pn@Rq8Pcs;WlwF;>OgynBBOtW9N#(75uKxmRe5<<2E@4jWSm8%Y1 znvH#W_zWr^I8BkANP_05`=4e!VF>Q-?y9qvwC?*D?#J@Ds{{@UY?#=3clq%aDO3MV zb~BrEd%JwcW({yXIC7tgB!K$kUL8@TiE$^k?wT8L-4T{?n(1$JnU{VRmOG0$#vKWe zZHNv+3%#R%>%`vNS1fp6UHG!d%D7jyi4fH8r;y%u3PYAAfdC@vb7M{=`Rbh*+e7kr^5 z^vyLgtZF;ZGR~u%w}`pwb?zw>jhZvGWV1>XEbO;E%xZta7!H&+S&?Aya2jOK%(jrv 
z{E%y2`?c0-sZeFX@fMdG$ywac!Og?($;GRr-Ffz=66^Ws@a&~dW{OI8p5KMXSr1&^ z%g=HSy@+g>SdTg$KQ<7zN7jt%Xw1~zVTfT+J=>&epig%M+Jf`V#_V!1?;u2LTB9Gi zg{*{SA}eSe^-N7yr_ZAw3QJ5%zJC5gD?5zr!{pcyNuCrE5I?vHnzck^%ZlvID!J`s z2_hEl-Dxog&rW$}l%3(tK5(u>r*`N>BS9i}BuzM#R!qQDwB*8KGu>uhGli{y{c(Z~ z&SkTiZ0)|{pi&hTXhJ%Tgc#EJSQ%ISa**4z-WD_b!!=~;2}6RVjgj*6CA}xu`mx`?N2dE2!l-+5#3Ns3s@TtPmaQKg1xF#B2IiAe!Qhe4`YUJuZ)F zYyCHBi9xy`oL24Fbb&E8^BWd4#TC`(6a!~uX3w9>TC8dgi%K-YK5;;jvb089JYhUv z+A1xX*y(MTD=~O~a}$3;_T5PIpNE=5;#W0L%SDlnX-Y36)q<{6rhPxeSV-isb3F6% zbanW=WxDrF-W66Jz263ijar)QQjLgtD9ZxA2@O#G}XcmbQL&Y1(RfM!ED|{{y!V*Tw983ZN?&Y0qgc7b?q<6=?(6 zqhvFO9bZ~#7Hy97EsU0kqiKu|wXu~xoeLZ_y9}HOy7IFz>a~#R(Ox!Nm7%I@#tj`Q zRugwRVhZY6o-m#fFw_f6G5&dY%OPEqlP0ES8(@;@EwD3uaJq0a16 zDBy=(Cw&8=?6FbSCFkGiNsD-#pQC|B=X7$W(rxi#`ZA6FtVWV9cks#M^Rl$><3gtHBPoI=b2J#=X z_4koH(kWOhnnzWr@wjf{hc$Cd4+&#u1;vVluJ=zY`9NL$D|Ivay_ZL1B=}HETK6vx zvD_aRxqsTPNt7^Ygm!!MR3$i9_kJ(7)`h>^-N86x`XlYl2CByoNvF<&@)%h2PpPbx zgL%iEFjVEvw{MBcw_d&LsLAV{FvLmwB0TaJR1-f!>e2?uEv?#@vbrknRW-oz^f&cP zLLC^7C9Kp0mU3N)@Bb(M`6f|)YdXeOu(9JagTtj>h)MUweNTMMTnDheDh80h9Qog< zVAA7S>bI_5gx0X)dQ-x9m%L_ItdRxUMjs|B7A9Ic>{Z1&*`uOU2QsyDt$q|B$~{G= zNi+~t=r)6^&zec?Goh-UC}~-z$76Sph~7=YyG?cqqM53bw|}n$%*{#NHNAgdNWhKW zt@T$1?mS`K^1Rdbu$je7Qsu1Cb*dEzFTPU7DCN-#Fsk92+O?Es-Lrkd5Kg^ZZcb7R z+HJtH)$bx85;K0~BY*v%sO4?gQvB}rmu}%k{o5{jR!g4hcmD5;Z`?_8wL8HIjJri| zQxOx?Z+jWL2j<34N={ozO4!Ek=x0;a0id;YPsYXA0RKrYfl-$&=hRjNi-KxFvsWAW zN;4eoB%T6lzgIL7-ug6LYf+fX~EQW9y!F<4-#iL&f*b*K5JAg z4VAsr0-Yh>)DyJ(M0J@W5}BNOY>3C;(Mw9YBhJ0ho2?<;O_fUi3B?8vurJy~Vp=Mdc>Hr~Abz7VG>@`u>*;c~~zORAD{OOFWowyGx17RrZrlEhM5 zafb%;K6hV$Ktk=e$)8&%Q!6aK2FtcgD3tnhE^DDk%-|`{rX2fuj0_);Kbw9ESCx|S zzh9%Rr1f0mjHN61=D0F$>DbSp>o9MAt7%u$Rmjrpn1`Cy}1JYpq-&%!Sab9sXj%U|plN%cQxRG9R1z zfrmE_gFU`dxebh6zB$Izj+R44sL}Q1Wm->Jm%mVP5k$j08{0Lo{W}w%XXUB%ac?Eu zaP6C_rjwCuz+qYA46J3iy(Jy+4dV#*Mhh58yn60S)oooB0qmx4Nn3ZjI&Z?rbr?(g zmPEO_MI`zY_>*X|o2z|iQ8Mki<%i)VBG9u2e$BLCW(9A&>UCB*ALJHTKy-^pR{4GN zEd7W~rj0KQRW~T>yU91{Y*6_yrDVKcZ?}K+pY2^#-)uVUA2Y1}>;IWM(Wpn$JFmti 
zemwuAi?$|-?(~NmsH`^3ykg#Fv&?gCL{qWH=6>C~KyI;B;DPzY98S5jJ-;p4^Hr{w zM${+>U-uX)WGp;LaEBI?tJBU`-nlW$60N6aCQ9FKM(c@I`;_@n{(;F4oK?Kf>h-;` zkL$`>@xK4YEnHaYbr+x&lBU3D&lHWlmP-X%r`Bo~kx8U|WP+hW34F-$XnKak@D=Ma zDDj+qZr+(hBtmr$A+FYj1gkM*EUw>??2YsCq;bzx8+Pk;d&G)*V%}cJ)|8sWzQFgE ztftG3Zs7W>=4SRM2Ye#>$H^84Q@%xZ^0$8@7l8=dk5vuVM!wi-W|}#XE@216r08RJ zOpk*T-T$@+`hSS%GZlR97(P(`mkqAWka6>}Ro!c-(y;se z&lRQlJRl4O98-wR4if-d{n&NEa_Hr?&U%lEm;1x(!yH}VJKI^)GwY`QBQ7t45eFkn zA-8IuMsPAMaAUOcGHmmb58$zHE@9g3wAOsmsa~QRMZ-B%mj|kV+9{7H78`Dv$B!)|>iB=+YDh#vFwZdR{6G{Ez=95+&yuA)yEVBdd9 zTL$c<>}hG*630Q8pXcRFOt|l}oNTY=&%>Mq$nN5t&D(=NDIIc^`{vZ#Aali>(Q&ejY(mLs<&RcSG8L7DXzO6u7E%>vEz-z;uNvhnM_ef% zK4Bma-VdJLP|Xib%Z8#=k~5-XCi2HS{G6d?_2ghOfk?_&Oup@q}%M$^K2LcV2m(rJ+)N-)oV zL1HdjAB=mH8wovG5>%rHFp*=qpMF0FUV9Pz_G^=k<>A)i(EF37&iV8H7W2HW9s`XR zv=EsE)2A?TA1TrnFZYet21P+HZ8tP@8GU*-DirV>uP{CU(8cd<1*M)adbo{6r^wH8 zPZ$~|!}hOh)@){b|K9EPP0wqR3I!g$^MBTl2I`AZHz{h6Mc;G((+qgf z1A9ew4$}23D-6kf3XNsvj2w0L({&{$^>CzK?LSl5JM)1Lo(o$1#V5Rp{aMAQUebX; zg;~`Iu)?gB=7OFbxgskiBtFeYf^K4k4V(J37y{A=!1S}cmc_*sQg!KODSXv_6c!%~ z2ge%0a-VImJiG`HPUN4jW1QMj1r@hj{)z+6YsDbkGm&H6xHp=sx%zWk4?JvnfFBEr9I)tE9=o zxj(Q-sPP3JQ>wRpMjOYim+PmYI^tNoEtSXE*ji}mA9oVfbhhpE)0x%xpILhoGOf;$ z!1fu$T8fZYA?jHjjj|l^&qo?g|6+kNT%7<{Lww#RjMG+M(f7q7*xus`*Y@|P51Oem zqZdZNe8i*@3uFFQ-563^A~7%B262_Lyz7&gJgvevgeZ%J0hpOciul z?H($yq60ys>ztbl&I~-0(*6!fC^x%W$hCWW!K4N5^jH@1#g$Z^nhJMW(Am`0yZz2` zI2a-e9c)Yiq_qdKtKMNL&kxW@Ojq{1k&jGWR^&SQG+)ONwc@1}=4g%#U>abELCE z)@?4x$?3;cuHD|}pm0rN77dh?oHj2AWXVt!3t22>lCojx8uK2t%s9B1j* zE%Y$Y;q|Jv4C-J>mP_1N?5*4nOYg_l=-k?I9Ht?^!K1QwZo6C~H%F$@UBkDJ4TSJH z5>s}8dE^L5CGI|c{`Z`r%}%Lek4Q9g>t!|ED|J_Paz z@JACX^$T#;B&>~8uBgx&i?$!ms-mo%dN&rdrTM+;F5mS<^vi$X;a^W+tVpv9Nv(qJ z9u(KOH)2+$kNZS-r;#ChQ?Y_a*}^{0rW7tT)&eXc`kd;H{_u2n%DXF#zR#a5Pr#)~ zc3Y6K6RcHe%8;x+2f(78tLn=|jPP#nsF<+YiCrpRPgIohybqPK10~f&*4&l#PKxZY z6|bTD=JJ5fN!FM0R3Y${n$=0;-G<#kuq#L1}G>`RpJ~??ILeY@dDQ^1Ru;8CxL1W;ruRcxZ z7k$lF7`?!nS(3~%ptQ;o)qa56Z8EtV+oDF42U&CO*q^ftVHUMefI`

&u|N7SI9xtkq6lz7j zUnPRvRCir4Us={UotPvI9@#LTwjLKnxgi~LO}S47UAAg6{>_czDP=cguXzsiT39g9 zz*r%$Qby6)V9PHJwY_M_E)@+AsG&JkZRI%gU_ZWex=G3+A8c1Mr8CFoDe=9{`8k-o z2jD>V(VKYKx_s?QiH}PWBOG9ce+k|dP@%OWK8=;H9I+gUP(7+$??2r;6Ze}G(RXwf z@=hO*JiczVFZh?*DfCN1)u-_+Qh;ot9IvC|`5*kRR6X=C$9~9}uUArw45|1D-IX#~ z%N2duL;2f{oN%d3q!2eB?pNR51ss2tN_gRq0>ze3#f`st*_>of_IAm(8CAZg8s~LM zMMTroa+ZwMerzJe%;^L$kAAc4oF7v0+R!gbXR{PP#l|knU^~&;o%mW0#vl7{KwACF z%#F^V9;r|+MvE*^xwOgo=>NE0#I1qzZVwkZf&004BLxa0D^`ZG=)sDSj%rJBw zEOv^tm3(I)ZY#D*uK{T!pSL>oE9^7d!MKAWb$bmGc@wP70#As~<<{^ESNIvlyW3Z- z9CJ_y-?Cu^@lF$OD54PEg8ONW#))aDGjrM$bTFISA^yDSs}Wtdez|8okY`Au)MI z{Q&{)NP@LgJMbD-AiNN+L)2Ma_#9F(3JQz_Je$l0sTys(gbII8X?-Jl&Z)hAST)#j zzONd@%HuyF=GtS$1GY4_t7RIDdyfQt>iKb;M5Sy|2ialErI*`0u7`<}5M%-|n ztti!JcZs&jUGVeFuJ0RV4Th_k^fmP+!aWKThY(8iP(Pl66M*KSNn8}Yg1O#c^yEbT zg0YL{9DTP2-Adc6f<`-U(foz{MEv(8z16uzDd6{w;W>rUBt2nw@Mf~)-`XKqUhxq9 zf)v=PB#Ecj^9e(Mz1pc{lIbkQFYM;<8;)5`-MY^t)dM;P&Ai4c)+=(q{$<`44b`Fp zp1oLrL#bYp+PdHR3)3=wpdflwG>v-lm4o)1r)1SM4h?_BRef%mSsI2hs z$Tf>lJfNK(pjO&%YVH^%I_qwP&5-&2O9GVY1#+5vvdf*}rn@_l4>L#_mX3n!0zf3{ zg(NO}bKG@W5inRE6(}7HrB;Xl+u5Iq{K-pF;vqJJ%%r)9YaFvOVl z3DB)_OWB9dU^Nj>ssFX&jLZ zNx!Xmi+|e^l!ZvyKoXzy7xcymuM($9}KoRu9U&A-$fRZ&o z?Hilx8~>STX!q%)s7z%7o5a{60UEpH?Wjr`)K&$~bqD4u49K z2-MDw8VI}%YZQ(4wdi0^*SVZC?H97|vTbEekPess#)=R#GCf0R7MuZ;V}%bG%Q0Q1 zRnJA9Fcfd^+D0<~V!LuUqp2b~An6ZZ#a#LA-?2Ypp3&Xeg?ho7dj}3x1Uc0rf^FE4Mqt)QrJAJt2*>m_0m7JPDS$Bk1I+EV@;*xz_Mk=8OP2 z*VkYQaDcwIB3c$}M_zWED(@P4B#PLmZhC+Zj4Zcuq|E^YwvH>>oaMOb1EZM39S@X9 zJ}a8X2Pp4VJLBzsvy}sJ%asWt%Kic%6+q_SgcFLJsz6s>AMQe@>c}BF$8P@4cjJ_U z&HgE)t8CrFZp|>VpuelJdwq@j|lyZ9RSVS#Rp@$k)nmNAwv;7_4PC^!7PSmH% zPs_-rP+nuj$SGvn!b!R{#C_+{kZNG#sKVS4Ok|^h)t4_@j6~=oqck7JxAvtLK4-qJQVuGgyXGl6Y+5@#8K@;#7-2x=eMU4(Lto?J2zqw#QhrUjO#p zQd+G+Y&~o&4PE#6HC;t*@v8^SzkH3waiLzKWDsztbK?Bcd}_Yvn5q~>{^I%xL$Dki zU z()axSTi;3e0E&K6+%Bi0fO6J&-e0iDldY1QCX?&sQOQ0ZP555JUX4EL8;?VIvMT_g zv?->(>6;)NtOP{J@ymD69pWU6S~k#+3AWB_NtZU;IWFg&WR`Ii^d6e$R`-Ngvwv6} 
zBHG)HPr}QV^|f|=wW|3NvLUX57`Hh3NE53+#*S>F0Ora+q>k-tc+|%E6dhEY#mxT( zs{W$47u<2(sKi`}`6h|{tG+(4yY*vQBckkMqRv0Kol56VypF>Xc^fOvlX2_Vh|xCs zFJd{wGfpP6q42RWr(Fwz+GNtN++R)rZI!lAC1f~13}(6{V1eZ>y8dT7&-Ts#IN<&7 zq$AC{6&^kY?tUJEl)-2XNw9i=m$Ndg6&BN!1AR9^G+-g(C}J^k7)uBuSNM^O_pPzXfNZ zu7{8NP04-2;4V2kOtTkS~`%G!g?A(g%x>cleQ z%Q0DTI)Y-)nH~w#vlkO$buypx?|Y-xSs%jTZ1le+J`-~+dpmEJerltpUVhm3io1yC@_-E5 zck!|jl^EE%V>@%}8gr`z6^+OOXS1HUv#B(esHNT9uT2>M)e1TafK1Cx(;h$Q^gqxr z%GkkQ{Ijo7;fN3jpxla>cplroy-mu=R5;{+iQs656UA4&29*H;43If&ZErF;C(DnD z(A;Aysx;dK3LFD`Ho%v;Uh<0Zs+i>t%L2MQ8lb~9Z&2nh3!C|Wm$!cdyAi?}AocV7 zOO;9y*XJC)>eU|sE8ya^qNx{-(+vUITmWrP$%ooC9GnqJp+cU_<8?PO79-4#`Y}KZ zb0CejLsvEW>W$g=2b>U}rrupRwK6n&T;Xe%HK*uZRhZ5%R)!3WAe>9GsLP9bSgS!Y zue#-a?k0RYA;Vy{mF;1vSaF4%c`3DEV~=?3G^I&D2MKV?<&J|-elzP#LlW&@ZxN3w z&Q>4Cc|8v8ayKCoqi)1Q zFW#>Lcqv;Y_yV&Oq+VLc{JYuqPD;Z%wFW#;bz=*Cyk@kVSOthA)pc{ z;v_Jzth_uVY@f7_eqN-`g6a+`=`b6T)#)RllFVSJ|NYZWYN*FGd(?4kH}7~CC3Cjy zx|$$|ROGycoI;h(|F+Wh5yHoX7ByiV&fU2o73OvCjz3U`B@Ij{=|_g^_HVT0t0)-Fb#5suZfWA2SoWB;8;y&z#U{EIopu_L>O8@ zBpMK_x~3jp^GQ9oBc@Q0VB~K4jjSvcQI2dqTsGGOFmPJWC39 z7@#%3Ht<$#1;_}7u;ZUiNRm7QOZ^gwyhJ^!D1Wz->0CSCm7c0C@%=IKF;o%+9-LSU z+l&)sKGC~*MxvW^C@-&6D;Lkr&IS&j*lP>#azUM+5;%2tP_R@dG7HEV(7Ejy?K!?P z6HNg#>blyC3}`dgWu~1sYeiITcEsjr4-#+v`w$IM7?Zjs+_M)NZbUsNJhVaUP) zeU&{fVN;wpJkAWQq4}==uF9XiQmt};k_UG6+?+b~+ZU>6z4bl-e{1(i>BNKuL|wcB zKF(zQ$-jOp(J#pEXQCLc2tt*3)IZsHwJ%@0>sB(V8++3$sy5$T7O>c1at zty9`nJWee4&1pwh*v^)uo|d(Kgh^P;fEvVpc0eTIP48cwK%uh9^O{IG&6Cj@?!kbN zT%uDovWNXAJEsCy_%U-T$9z0lM_1_MzgontFG#@>_@^PV&j2{PE&_qjRclAjAG zIBiewypes)yRAU#*)ZvU|IgFwXmD1zH~|zHnYRI z?ob+_*W>nFOZj9*5}eu@_Mz>OaPN~YcJ$+T|HWHxe=UadcOA5Ms<}#QBltz-0SyPwR&nss zr{$XQ4~%DbA6c&<3g{H>Lo&7aM*S~S!M-FfZEAJxeHF@IjEN< zv@X^64=sWr8f41JsJW{7**l?OAwWE{;&s4U8I{Vb`7|8&`Rl(h2{jNY!o=AL>&5vl z8LtW~|G+>$hl>34D{c&={Z_p3P3 zSPznT7YB;q=_rq}j)|=iwc6}UZG|$mjgC?f#_Np5h|%k6v`mZdzq)`i)lb;|`3)A6s+TBja= z|1|Q`4BM{%L+uY{I;YA0E;0s#%`U|C3n#3VC}w8^a?}z&-Sst2fmD$iErYppys2?D 
zAv&D}{ll-B3&#`tSCuSNtC>ST#qhN&G>I{O=1Z}gq|vfgVAGA2E7X?4{DLWH`(L7x{cle$Y) zRQv52{c8{;T~QVx^Tk9riYj3#jnVG+&rytkc;CPKbYaB2)*L2e#(=lXp^TG!5v!r(F8@6{q6Zcke>0!x{qhd|? zyyaa4fFIG7-n`$`tS{hAIewshX<-&x?G_xYNgrdA9Qc0iHfSL)n6`~s){0}?s;bw` z*lISyqzqtZ?V2B-r~N%cpI$0&kF?CX0mO!7A+p`y?+hv5Nt)taWmScxXEARM=lG}O z1Ke~ZRc>1pbO8H!#xQ&5mf(cD?#mZL!r~0|7F>fX7ojXXiL3#Y26T$42}q7UIf#`x z$FWu#DT`h1_80(!FACm1f5M=1+Clk((6Vrw3Qb9c5^-M>aj$p7_#Dp|@iDqb$54Se3)TQePfFW85Hw>Bc8u3STvK(tcI*)=By>fGw6&r zst}mtfK{l|cjjcsqcYUT>U}g+7MLrZFuEz)SuPouT#!aBABw$84xH!xxu)2A)z|#w z`f_C*J5XrV+MK|!hvZI@A}|OVZBzFT0EfeU9WyP1fa;AE8IAFN8Jeg1)x_=od9?qb*(YUXk%axTn)iV~v`=-Bl++&j^phX(+(+t1vhX zJ@(RJ>XVvPyLf25&ftP_I(_TFTGGqz>-y0BTc-l)6gAkqeNyjE2Af9ygbXCg6<)Il z64KrGgw)zs>(yK?zq&a-ViZbKRcBx5ti?D_o!zJjC2>jQ#ZKDZGJ#f?s{laxGaIh3 zouI+L-l{hB58iCs4^DcsCMr0%6}PanVp01?NS8i|DDRGfIP&eeyxmAoEl$LOA_KA zMu<33&k}MM4ZP3(J-UoL@jmr7OLPHyOVTMa=1X{}5bsT2)WPTZG+bjdsip_1 zAkPPczq>2r+Hk|2ZvI1?w1>81fPeP+#{~5$)^j19z?Wnh5Y~nrwp2t zgrhHot0sWm7@&z9m)sYHM$)E5N^4YA$YiUJL7`hQZ|}^ntxf0}avt(?`TBkve+R6= zqJ#R~ru>%(yR7+_wu;Zh>&KfkE@Fndi9IvOW8vS_&z_}dR!pQaT_s9F*Tc_1uX0`w z=jkFwuBHhCHF8D@ z3a)0nf<(C~Kif$|eh;gc@OZ#}!T?=xP72zQoAf`Qhd6Fhn4qj)He%`jtx|OYVjR@G z>%GP1O(ZtO4|1F5^LRr#lA8WpkO@ZuRCRbd$ zN`a0vdKnQb+s4>npTc?Ks`(+B8M%Y8kMI1d3e1sJ^hWYa14o)3%>=s_m3_0Cg~ds; zG2=asTeY(N5|wFV9O7j)6V>DoNjbO}Zj-@3tJ7$5P23aS3e>8ouhSrHu09AN@Pv_& zE||t$jTaW`#KTt1S@3$woczNC3YRxD;ZvKd)rf(tuHJ;dC*nJmeM789YfkRF61({B zxhYuQ)od02o4=%rZ+NVVV$4Q23tl!y4)!YaBAlyKi6h6>;%W-DWng)EXiBw2EkXha zpnEG4X13VFcPBw4WqyEWm~vkK*XUrUV6O3gb*Q{M{k*e<(+dSw+f(D$1^DLECdZFSe3=qArrcVdr+ZKp#P6hc$J z!9nRbxF5fcnh7E}bcjY(Cd!{nLD?~4WNJuBYhS-Zkize8(iT77-OhTilD%T?N-Xh@ zEE0TdbX|++-|{k}y9G;vpHoea0={597j1M}^Ay2$P>C?WWGP5qc}u(o)U`OCc_gAI z$MT>op?-1rPs|Pv$}Xp%i)K6fCi2pV zd3U!xB-)|A(p%Q{cUB&mPN(%+ZYvQk$ei$K7pMzH_qdjL>*VwAM>DyTDi-~|5H~Pe zxf|@~_yhg9HikvR{~}M{-UO6jN7&)>ulBG&#XZws@^8VX&tfV(fcdxMN#9!ztWxrb z5{133zBVY8ov`9W-*6!*4(c`ZVg-|wzY4rWqtN{GkA#d>?UZ`pKehcmKfqr_x<37S zl~<;Ujr)^IoQilD;^{{q!(@@u!Y5x+{? 
zJRB>|fwg-O>a&SdoR-?(mX6_Pg?y(<>V7KW=1!;5(v|PIn?(5MH57k;K~Y6M^X|6d zAzCQl%nM>u7Fn+UB6>h(`Y)ceaUZD|+Dg-0e=8*zu~Im;mkbPkA-+^YlL*1cXfZBQ zLFWdg84Ykf#B=*puWna-|JM9DR5a6rVd+t2a~fpDw)WKl|AI%9M+`i`VuR-Zk?%|ax0xjeR8FL<}F?oiK8OXK%|K}-tan zK5Jxc$2@!@vN`MhaTU&g`%1%{Kyuho0FFYLXfY{?VP_=`wxO4vl1nf zOuhOT=O$Y~Xu42m3Cz7yC*9ft{aH?`eaY@8&6_$|-yBN*YnLW;Dua>Dy-#+7Otk7a z*UWqnfz7mptn%lnv95Vj@BY?h@s@@Qf_S&CfH9$p7ztTH-OUuJ-m@6`#?@~(IN;WQB((k@f<*@Zuo{A%2 zmz}h0x;onKovjjmu7(3&@Yg7%cjNs zjqnasg!*ML_G#+XZ*@m8Nwd2kIY!hsmieL&T2?&R8jl9?WOX|2TbK9!)<(R`@@>7Q z;oW!^KlEb5!m_yQNRR7X_i|Dk%4H*y|Mc%YMQGUD$ODtn_ShRm{?#Gr2OU)aP>#W&t9>_Iv z+T-LfYv0-Ym=7=hlc(Pk95C)cr(lCr4k9thWNv#D74^D#ht$2>1rF(<$PTdh8e3k) z|N3msmR4Yv9{m+}e3vx^j>{X}9(m9JB0>e_<=HUsBQBSir}DFby_{#?K~C$bk-iJ5 z{aud+EBEx7Y`H%G+e{Vx9L@%bzMI1N`FtMAXc!T2e61>BRs2U!+-22fD`um;kj!sO z7(E`l(@aI{0Y_c{INtQo9jM%>zBe`6DM9a-gwOdA009{WuSou6n$z?qbezBWR)}?6 zqf+>@^y>6wyyLCZd+s~|s9vQq85M&Ya{~e7liU!?uk6=4{1~{gi9uR~N+ocZc1O+j zbJptBuiGl#m29Iu&-Rttvv^LUoE8hq+9WqlnszTZDF77K-vhzmU%35suxyU=3fT%8 z5gi624mjOFogAlWsZ>+K4|QX2_sU3J6DM92(IDXaJwRBhe$1_{MDG_J`akE9_*{L0 zuwD_~tThYvGB)G3wNkJ7W#sp>YuJ>CJ)(>~1F1PJn;qk-_b|{X^S$(2=US`+fGRmu zIK@M6U?hahkgiPuRXL4IGZ2dnaGS`rs+#)DgZW%%lK#WCuqs|XbjWXOKr!2URafyg z54m#w&NaxMEJMaeh1Lw2*1TI9^L5&IwkM40`6J;tPoiv@>;+LZdB@by_E`1(pfg8? zWr=;~NNzMya*Sg(J3=4AKRI3-eSRP1NEzj9IQ{b0K6qG+N}!$vAmrVyi4|?k1pO7Z z{f-i1XA{$y7OM+2sf!5Czzjw`AItEy+f~(Gi5(d;w9gk!>@paR%1_Mnx-Qk~%N61N zE6%{*NC)OW7cme>^}TAzL$=!Z4{VC)&Td;c>`A#(tr}41rU!)=F#W$r43!qFWYr57 zD3woMQL0#@0Kt|CZ-t}(=~#mIUXA8YFih}^kyof$Ef`9Zk`3lLntbN*=cxu8@w}V! 
z86?1vRSJ6Mdx_dx5|XvS!x3D#%^9!aEiG8kB~F%h%KqX8LdtrAFMu*CJnVPUeUR=6 zl0^K%Xd0a=SVLoGQ-RL6eRO(~@uf}YLH#p>5o}H`he#2bu5v__iNcR@Yj)%pf@Ayk zHT!D~f%kCBLzid;wY+ss%Nv|I|7uXGB9aoOVWn`wH4Oi^W@lB3Gjy+27d^)p5nTF& zamO0-P1WCiKGyVc`A6CBrin|x47|BI2 z&&t>LfE_Vo#o&B49X;^%R{g0y`C%_j)xnDiJFx}BMO6<2`{IZAST#iHmxH3MsTsYi z`aWS{*|p7pc}O)!>R-tF$61CoJ*hIjPkDv!OV_EVSh;xhvPi@z927bJTw{yiSkd&- zh^r)5zYXdeLq@V0E#3L7s4|S>E$aMj5Fzx2%mf%C2JRi=wmNNE%2D;37_r>_qbia~ zZeul(FBbHqh!?7?eiZU1IL#;|;fLX^Z%%QOIS@W`#6W_c(L|T~Jz>Zea3g;_fS~Ne zCqj|Uk0Ys*5{))Cgld|&FH`$SbCybK0j>GYcE8wdwYB{Oy zR5OciynGNgl|ne)VFHTqXky>ASh_0dBWa`Pn!DU$UnUQLjPvc^WE+mNu6&;EEM=m{ z{k3Qb!b*3uMePYVHZ~15_8Tx*lI;0yWVgtzQ$wW7+_22JxF6#bG@62umvyWrN>05z zy+W0n;>Ud-6NPcEk}KFYJQ~mhQkebE-&->l$g*hH2Io3jEe1qo-ala+e0*T0s zlsopQuO8X}KDpuj+ZyKx(=y~$tY-)TPD9gZ-ZqYBIy%h6` zYC|p`eI@8^B;5H(73?QQG4{{z$7_W-*l4Sym5Vr3Xw5QdW>V(2fJuLDTv=#v~{ z=bwA{BXZ;aCU?7`7i;sCvRYtKI~u#LoTZjj7MAR$C>2_)E@c@HSK{khx+FXBD`Ga< zdv#pZt|Tb4MG%+a$z=>`beXdF=PJv5my}Q~A0YF+pd9%LLBX-iuiXi6y~2|m3_C`2z0dEg8qOmdR213cIdB z^k&Cr-2(Wl*5tcTQe6s4@1_|)6`%3|idefCw~}BVIWoSZmFI+LOsZ82u4ZSc!ihIPFy|fA5#q(BV zpbXqSH7)xNrYe6~lV_{9?%40hX~7y$RJx6Xf)$no4u?v>;p?K@-{Y2DYOrbb*!V5q z!il1xxr}C%w4xz@g%hO!^Z&}z{~xG5y%{(s!dW1+L?W1ph>W&<;$=|gRkqv<%K22k z6Ne14@In#Rlfp)x;Cs#EP5sq2A!|X}qEUs9J>-0HF43^|M5nA&tolt_|H64;blz^$l;EK?rdrA@Xs z40$tz4b7lB)D@Q?y1ET@t7wSQBS3VG#u2f;ZmT~7}`5TWkFLTdI?$frK zCU=@;V_8;}dHhFI{x80_;hZgWUE`Aaja(IUDP#hO-)7 z754w*y2VN zHE~V5hC@>lf~0*Cq(fKT>8*di3XnCt)LCj9!cPIR=lsSVK}23-{`+vWJwL_$E@=nQ zPC}G}*;T(go=so-yM4p2hKRbg8232HHhxK&!(6W%9Z1C?3#p3hj(_ONv%3pFN zg%7DUuF{*Y5v|`W#rnsD13TQDgxX^(@k5Jj^AdP2s5`nmj%burw}_JF9g(2ODt6Je znqrMi9d;L}Opvaz@KXD(^;)eaCrEI(xOc!U;y=}EK#X&p%l2F}Q>9Yy$#kS^t_zXH zP?b#J8bm9=z)7fZREA#F?W+R-fOYdFQmU2#prL7K7CgunEKKN)2)#dhQ0Y>H?KDjJ zr1jdHbUVUnwGQrO0t)+*VA-G~MoB|VXGYb$da92D=zlP{+4?kN`e>bjj-X zs25_3%EUtycbOJKY_wiX(6l<5c)m?AsHc@{XfS7Y2mT^S9RFx=+exYoE|9%*2vmcxZbLZ6Y79opBn2_~e!o9d!< z9I59T(OLa|PJUq7ZJ?x*JTjJ4OY@zIxvlI$KwQNKhk=<08UB0T>KOM@#I<4(ia`KF}_)sM1I#LULeVW2WwTM4#z 
zxmqlKf`=inInl1c##?-tTGPwCxh)c2x{af8@4#u3F?M^#&SEV^zmkJV7lbZ`@a;vU z*fKHKRX8~(O?$NJ{FlUMiY%vB0rNxypjvyS>a>|B|I}!XE$NTO=MaF!Kl_%U%2ZWpHx2PO%$){r!knwGo<84Yh32eW-JZg zne~CatkN52D@`BvqYTM?>$TFm203?Im@B(|CoKLoY&2xH$%*Wc- z0o2U!g^-;{dudE!<+8Jj2AA=tL4}6;ug^G%NH&OfB z=sz9FF1k+`*U#@lgfmlZHvX@8_c!}yqfA0Kq3OB`p!`i5kdMxoIFXDQZj`la zB%=m}Smf+$2xGp1bhOMu(!hGW)g_skIe~79l~g^!@2{YdViv-k@1FO_hGw2aFI8}a zjmxOV=Fscp0Usb+pcy>-reYNv?RS7_imz%p2m$e=f;SW=%M&wT=$7MFL~WXTl-X=v1{=}4u7J&IUQznY^?N*q^dvPFB<&}(cU)vg%h?ENG$Po znM3Ws$S4)gvzh;@r`tWt6vhzw?;D=A{sjhUx$oTp`AU^uAN%<}s`LEMU#8)p_$?AB zyK%3xln-c8ntxp=ds28RGef);c%or=Aq zpZ23V(fMLy+TGf;gzk}OPt2c4Cy7!RG4vW?&{ z+ys^Vm3rk*4wu>$j)0|u({wEiW7Avt3y;1B$yu)stg*zjgHRAM3XxN<<*}( z`X|t(7lKseGm`t*y*q|f!5-n=t`x3)O=2t?FxaWeXO+AGd00t!K!x%xxRz-pTLAQn zyz;Xd)wMd3l`H4r{aL|4-T@;ByCA%b+IRbsdHB8`F@1tFR!vmWkDR5K;|zNFGGD9i zlm;A;6pg`OzCczty4BJE@ChFRsK#DI`=mD5uCR$N%oQ1|CeZBW=7$#hph^2;}=5ANf>4==U0Qq+liJFU_vH^&exb^-A=UduJ=@^n0$@?zfnYCnae zOHbbmW;^mRvo|ev;{N=oDfS^3We?5;@D3D*>x`SmM)cTognlFceZ{I^Zcg-j7Vq%j zk%Yze^;yK-b!Yt|NS83rS&nqQpo=wNE-gVl4O`)#dMMcUrOsZT9*VpOTd+6bA)SxJ zoQu%@vk(ia*V=fX8b@m)-fX7q?ht|hJKA?43+po@nL8ETrlAyHQdA@3ND$(~2d$qk zoil)(+EOQJf`D`jcy*22IPyvdY!A62q*BQ~vJy-wvAHWMlK#0l&`q_PD}*K$9ZQrX z3qXJxFLyZ#egj3`thpFafvTbnEYIiWqIHtcB=f62gZP&-j!GO^uizm_4w$lOG5!Q& zA_PXt+{-T>KeMZ}G?^&X|L#H2u!ci2SlDxs{|Vz;R%M$I;s2Jy{l9}-@3qp&wjPN2 zt9r22ca<1>bh1Yg=_6ms&67F|i%is|8<95|eoY9)-opMOr88hkKgEdYi2}B9sAybW zmTj)1@4s+YZlJN(-uSneFY&4N-B+q$8t%t&3hOJ?PI2SS|vU)Bc z>YCzgxIU)Gw=SSLU`a&o^0Ul2%-NfI&EFE%s;Qn&_6d);9rhx0qkSkSqMx;Ju+1)Y z(d$oUoh%Lgl&x{gg3oXWXEkLsPe$y-me~TUlfL4oEVBGbC#ffl*gj?pCx$DFoid=B zwrDLWJMR3BR1<+Q3P5t3v@w^o1SlAIfh_m1Zz;k4N%h?;*6E?**r1leDc=)L=||{{ zsyq3Zi#JcQE97T7BAuP>#z6CDU)bfgE8hvpD`cB@;Kbji>ovz|{_2X+%+)y(UX|!8 z`Glc(VwA?99m$cHrd=U=o5j(o#>F!!=)6hcV6gb+oUW%Jp3tc`y7b?AQU>&kfN)Lz z{H^W~E{T%w@FXTazH-Ke&x5$5{55%25vD@$`*8PV-^97gcH> zKjo`I#T&(p|K!4pEW@C*iwWIR)|)=K_)aBVow-%u(KAV!uul&@I7H=WV7W!3VP{AK z&Pab_XKlra&D&v(ZM~<(DRP8|@jDb)MdG3tDb0FCte*R(r)dpE&ml~Z!OVZfai5(w 
z@U~~5`D2Hhxn@2CvN|T*OMY-ZwP2`kN2j0b<2)%+pHh7yZ~xW2H0bOK*ih2Hw|T~% z)3Qa+Tv_ZnFomBq^j2faKydx~l}@q9M;X^`B6K=D(W5VvkC6ok>cbv4jSf4aB`-B> z>PJp`>aH@6a#VO}q*<2kbies6x2Wh*FVKK$87_>L^Cs5$s_7^k+;4qcxZFR)(jSj0 zOU1(QY#~oCrhQo}_Y2&S(YiW9iGv~2hVj$=g#$f9T5k+XPK*tjpXQBl zblF=i{eWVHc+81L1jcXDHr=ZSy6X_d!Tyg*1e|4^1*RFzF;|htD$N9J#Bcj%gLJ=A zh8^7tM4GX`IW)QVR>rcepM16T=vNn(XbQ|Fn*KY!KaS>7YK0h8io1u^jUDlumQt@Y zY%mtR`)ggOC~wKe$GE921$*c~kXQ zkxUe_?v6Z{EPc1FiiGms1IZRT1%qJCazt6mGWRC5dXc9&b*wH6^{^CAd|@@j!3pbz zPpC6_5&cp%kJ&4d95C2?i-cG~_Lzkrp|Ak-^Po*(&6kDYd;0Rg;8HL45=me4QK?zv z>26A=t$ah?0LcOeJeDu?nMj;+w&mCoGfD60;>Lcy4oDn5`kB8>mlb1oN9mt*;LH4h z!j6x-rv1SD&2I{m<9ieuIKs9+@VFWb|9YkXIh3lhYBD}JKbQK;?CdeVv`eyg6g?Q7 zSqxq_b<~}=iJORuPi{Yj^*`hL%*}sD(C2`RZIH!X?k2d(LsMOO9qQtBN_22c?_3y$-;8+ZI;ZPUR98#a$yn` z;_Va0iHt^pUIi#&+H#M{=SnFhMsRr5=Isez{N!6M4LE2k!!n5d84~CDu8vZ&NLZx1 zb@906{vGaIUn9*qYuA2Cmpo0PR{h6@`rOSSZqsV@f(bB;<4aI$U+qibv`hNy9*l!U z$_4hj+jY&{q8(S8F{^In`>bDM_uKuw?4!K`EY;DWUlWxp$!BFh^q2_yxQw2-g%^s@ zsD_KW$B1b<3`}8nUC4hFst6S%Ef|>K zX%fAcVxR2N>>IMkp<$)Up~#(=ruWf+AxnyBrIX52a3WIg)MA2TXF+!=*uKLd0iL5! 
z*L=w_#CM(8?epV=QE%Ai&&VP;e<4FyQ9ErXu|U|h!rjt7mXK_c2EekH)O~E=XfB=`}}1+(lfe=lw#OWQ(~o=&Top6PmxQ z0;+0_B7s$K`&ZKyRQ_-1>Fr-mpw(bnI03K2^t=vE1AA@4U+E=HVAbUvK3kwVn%*Q^ z=_QeoD*}Mj;@4)FCp?!hX|?gBAjnYVY6vHxvHcHrFWkm=ZRWZRqtxtiU#p&d$Xrfr z&OsEj^AmAun&!G3Ec@lfJXXq7I`xUwoS9WUqno!C&RlzJS&6VH0B7m2ss)BsOg1(_ z-Dd1bvgpoJ5(S+H?`u1uyv=Q;a(7#TIjgsJRQU~K>Vrw&T*J{(qn3=qNxQ@t3@^;A zYr_e5#D|L!y1{(?{=+*?Ey$cjapuW``xE}7&_MW%9HAy#`j>ItYzhkQ2ub~X@_dGr zF`PoRMqQnx6Y0QgtCx9H1n#l&lj_#-k$F=*@Owj5*f`>ikat(?Ohi*!@oc<)~hQj*QjEyrU$KuD6pK+MwO=EV#)EW+#m3H zdT|`2aZ)I;w{S$WNCPgx$N6(iE}-w3kyIxsDGLEOw`g?GS|HK(fK9D%3f$@AKI`+J z|8tgH%J4d0#nW5T%`?q^tg5J|Xmdwen)u5~-%*NN=y6KlA5Q6HkF*KrI9K zcqmUrvRsuePPpswSLH+;b2_WwhBL5z?LK0^hbiqtiq#KT)l}RZp$!AG(X{t7B4Hyk zxo251jr?iwB2oO08jqB^n3WTAMkiA}$$gBP!W(iZ}E z9jEn{xn^n(>PVhQ%a5$;a3`tgpY`xQuE?1lI)r558nv#9I%g7sE|p_oRtj60AB!xi zvY+LyaAqNSfc5=bR7$Ax_&d3vKDABVJW+HVzzfKz@bUY3lj+=}N}yVO67h+lXauQ;IYTp91+2kb-6ct=@aRlj~bdogU(n1jHf;zQt^ znyrcITAh)LNXbK1Rt?W1*c67=ec0qN&~P`)<+_7Bf0k&|rc-{t<;5=Qk?8oh|L@VL zy33bSjGL8-pv>CY3nm3K=6`*^pG&Z;x_BDrK4Bmql#srqDo&NzjQxu}j}vYblQxzF zCY5mdEo&R6gB+NU!Z*90%5GrAl}KqVr%!{2mPa-H0c~^XkpHUoTJGgEiz?9f5bUEQ zm$3e|9X8*z+07PKVSh^87(T(bBiH7bH9JX6dfe zQ!X?mE0jqI=80@{R?2-aCR+2k`XbW#@06}SKtDs;Oy?Pfwq$$fuS%x&sR-_!jd1?o ztD7u~V*kBG*|C{Prp($KvX_e{zG5Zfj+SelblKQG_KiOOsJwh~kSlWLUnD(wl(-W; zH2A2V+kOG;zWF_m1BMbHTqXe)uFkuzy4Jd}fGh7>Tpp|>1pUrHmMn54Mi5=IQn zSI{;rXP^KB?MrW5K{i3@bVQ0r+qUX3T1c;VBFZ5g zaQMOD(&ZQ-)ypm*Zn~9l{ic*HY4I%=;DM8_j;&>i&zvs-NOmw=D*`e5{p)f%?msQ|&nQRKQ{9 ztSvc}QhLL^WB(~SaJO#JIGQx_^Ac-{?sy-luKw$MAK&y_IA$f)^S--5<0P@(o#zk1jC@cu%JCSx65q`L@Xh8;E|h`!*j-Z6ePV zQ}1Q;1r<&f9bFoUg!l-=MD?R0?mmp6(C{AyUGShCXb@b<2<)$C>o+l0I}5jcKeWZ+ zXo~`VPltBk194TyPfz<_Jz}gETwA?9k2v;(IS+>mMwsK8wKyjVO%H>^-Lx!mGP}HD zk0Y)$!PA|~4EkECg2zdiF!@qSSpC>s02CQ|#f-SedVZ2hZk@%T&F0Q&n>K8~wDI{8 zr0&3H3>$Y=f6bUkcjDVaBR4h_{81KESEcHJ@S5XMZ@D^I?H1_ur80EqWKRp-Pq@rB zgX7HdcjI0RUu>CVw$vwNg88wn=xc@4Q;4oR*z?Y?^S^=51{phT zW@dlTK4Az>HLNBo8Ebwnf+379)HQ0t8EJqjBHDB#N&l+T1l1qtsRhG@9|<~bneRj= 
zI_Iq%aHCT|moX+^CMLa8YtD|#7nadvWf^beI=4t%w_yLHu>2pWAkIr$i!tukVuiI{ zk?AF3s@94iNV_eJD=AA zuDb!GNidd<$BicX|F!nkPi_3)`!^L@q(JfFE(uoLp*RU1q&UUhUEAU=A-GF`V8tP5 zad-FP#oeX%`}-ffJ~Q{vJF~MpJNvw@>&$talMiv=ufNx~k& zQLEVM{ml&>*gpCC-Ms=r^LF=hS zZ~lWMz3zQ75}RGq*R<3M9!|0-s>fLVd_DUKt0W-Dx@RLxnx-sZ?*RFjhM^Iv9d zx{*)g%B3lZpT{x`0})!TI+8yy#PCZWz2nq~hqM8NL4;dNdMe7L0pMf6R}6>k!uxnr z9aoiz9sIv4b|hh_iX@qIN^WtjT+z7x|4?Rwk1y)%tXexsAENNg4ngk48hV=XYUU1k z2&NLgBfn-@$+-(J2)a{lOjr-cyrXvgB}c?|0}NUhL+vjAS4@(5ID8~H?$q)%(N^x{Y$1T|s42iPfS)4vtPA1c^;cATV8w*?FY! z(kjQ>VGm1$IUoQP5BD2tOX7l$j6y~yc0B@usactiQq$~qfF7w3AKl0(K>7PFgRkjV z@y-s`oKE_V;ta!D#vY(S-<*8axWP}ES$ki0o~hhTG^Sa@AroE)8RlJjey_ETnPIV0 z7S4LQA!$Tt=0Ia(=#Zn4?T=Aelcbae3R?yXjt;Y0dofT8z{)&k*jB=Cgqz*UlnKoz z?Vqj|E)Q*Hnfo*z%$|C6P%}s*GiiFrD}-d4|ALElt+`HGr)^R{R8XG$0f489b+Cg9 zDS+=D7i1DzW5&r%&SL}G-+^?8f(uyfY%TE@4>dQbcxGb{A&7VnWw2Ikgp@e|!xXYh zwArOrVe1cO&{e;Pqdex7f3BDgoh*U{_^ls|9m+@DVnFtoau%v|RGT5+TQzT)kSBCG z7Rw3yGNh5@4H?=vdxssFdOzZ@7Y&J~crjOuy2NqCB?zpLM2}O)TjY>Tu^PDXlH?>O zcWw;FSk}a%C~NSET#k149kgF&e5M7!di`Azq3gZP96D2&0o(^wku87Ew1BckLjW;+ z0}b<4^)n7%C>k$?dn3$RQh|>4<&)Lxn;m&Uu^8OLd9bHx-SN_8H9>qgYptU zrP=!4e<&f%UrJX(A0V&;z=V&Ea6+B#5R7zGn(?CCf2*bNZPba(t!TyJCpaAs5hg~{ zKa@5Nt-8CAW9O#=9i1I$!9bhKAsR0WV>9fb^5IBIcwd#`)pUpU!qUWsj+xM`%MH6= zq^NF57UfxP7dtGk&dLsRsXKRn50BKYr#R?5nwWVCl#wA$9M1Q@M0?^Rm^M)uixZmu>xyqpVnlHVumaw^tnOAeyaylSz* z_?j}rGp_95?H3L8)@DgtKHd;0pl0^Me1(dPBSLy)qc8&xv@z#=r4^!No0?g<{<3a( zL$gZztN)>lTq(~$ zqFJpr+@E8?a_!jx|FKc)zr!i?l^g*}%Xs#K>$BQqU?{-Kn9Fy}Mq(P&Xvo%PBzi{fc}`dxjEJ7?ASWbi+k zc&@(T)zf^`WC$Pb?ng}P3+y61ey`Y5*z;Fk!tRs$7w4HzOI7wsKIqg2g9^#EGviQB$0Sfp4iOrx4)GLVPAR-yW67g>H64R64}riaRjUZwXM-PoWzZfCcBA3KiS;t)g9BN2NsQZ%43i3Vs2|?w@&uSFQbV zySNDT!!%06u7X-eVQ7`8l{5xK& z=DVXnP@Dk#Z(nCsqP=w_8|zn+z4tEqa{Ml!Z|U!3$JC;fRPV}XMQ0qHUmZox`)Q4d znyCth&~!a`odUyV-~nuN)e_T5mHEB>nB2ym+g_hllvdw>ZT}?i>%H&|x5j)mCxpPH9!crq%_Q zD+OQV)m$nVh;DeXD^wp{5TZIe%j-oJOz(o+-ilphewl5%q1Jbgd(+gEWCJa^A8fpD 
z%}!E|EJo|^{1!TuV*XJ?%Cu~Q`i3(sEit*j1^)h|=A*cfZv4M$(Eh~3-yR5~6aP8o6*>p^EcMBPn54@$DlB6cy~vTb@S){A1A zfxb0TCdXB{WP{yhgFrQpLjd6)aEdUge<)~R8HhpxYp_=&_$P{8Mm(#Sv#I$AEVXQq z2kbq_zQRC5tV54=L(FLl1t#na)f<`-ZJT9c*|{rPTZ@X!x5W(sGNWnnMom+^(~_4| ztJ!Lx`|^k9%y988wqY5MGX^R!)-|9PVMvgo0akCA1M`FzCb;Mk983lA%r-z!M4_xX z8`Z+Hg7yRLxH@d)yZ{c>-VX{RE5j-55OJcc-&EWmOW?pL`p93n7COly$q6Tg4GC4~ z3nwB4ZnlHL4O`u5pEr}Ie1fXT^xt?@7wN&&sr(P}lbD#HX_9Vil08{3Zq#YsofVCDo=az;JPcYFAd)dBYqAB3t_UZeYHlTVZ)%pJW~;dd^(7DMvL z5)E`&s36+Z!qPUbudJ2aJ7SR}q>Y-j6xhLIO8(+rRugZHwk@<+bn%+!;uW|NqGEen zXBNeLHk7{;$mCA&ym)}VqQ7E4$G*Wb(JLbJxzIworL;ZFy2eKNx1NM9U&Cqv6^?(0 zl&I)tl&pSyE}Q99bnif_vzFFlLPxn;erx25qUv%_8d9>L<=b*CK#Z=s;qlp2Vu>uy z)mG4p42v&?!Ynq}*P#SG*H`euMhQ7H$`h?35W`lw4|>F}Rbxp5 z$N!7!Ui#(D)4pkc{x7L?q8FiW+bF&bF;EBSf85s z^Z-E46y`v70`AE)Ukb|`tPQy^eZ8K(Z|n;yZy`l2r@X#E^BV|03owpX{BzL~(eq)? z(&sH}^7`#NJ*?MAt4s`3wiA?63DpNEI6+YJQJ=0BOVLwEAf<5^dHnKf;Sg za6cY`dM_3dz(56t97qpDQySsI(E{KRpHZYxDw|+-5Ua-1Ox!*jR4eW>JIc-EZlYA0 zAKcATX{cda?2g7#&ub9ik7A7pYMS3G|>gY8+zqi)u9ETVe71qS^O{`l@>6bnin_ zCog+pa0XP}N!@v8XTDX7xxUp^WM(drdecF2U{#uIB~AbdQ%L}HZC)fBMfjLp=FM`4 zD5x;4y|H$qS%U&R7YYrTZd{5{w@Le4=dbURZJah6png^MZ~F*~H-*f_ zs)K>C9H?yFOsdwl?u(xEb8Re=i<*J@Y=x+-KdR?M*Xr~FZ4tW!-dlOj=jr9iodWE# zU)f4M`z)a51S_y93%m+PTEl^KbK9A!A157D5j9XqMgsXt#+z+@_$eK_Wup0tEd;AStg341X6bxuS%zXwm#zk#|7#aN@ZkO49)Zel z?%@go3YND~*L-i65r@-OJVOYg=}WxpBd^9HBC-Cwz-6UkNd34#MtN}5x}*o0d-y*T zg4-P`gzt!HY^#$e#8r2_wzfQd(IHIc`_Uc@*gZF(&Xk#{=zSIlm)K7 zWLdtzjj7pI;AH`Me-B6+A@ftIkp4akdd*y=Y3?eTZ_M?O2n??GuUvAAe?Neo+nw+g zxoKkGTgS^9&(D7%*1$ic-bwNrPZ;}|mZGE2tn92-Xva{`0^8kmNmk4&YGcg5SqUufodVeeDj^HsOZ3V%R+9Y31D$bSr=;(SG(Xl)1I5 z_vZrE*rXg!q*HDG@hpaxIjveto4?S_a)(uMn+`{eND=~Ex;K}CC&EHZO#M;CiN7ft zv5lvL2;Fs6E(>ufLmy13Mx>-=I-kY64ZB9jH?1tUnq*eBK@Mv7#QSd3TFKBcO&EBhrB>67sxM=yW$KyncTeR z2vV3Dz$p(pd2QnQo zTS;MNY&^@w?h;PO{*b3^r7VAwictn-M*(l6hfm{x9r?Z2PRw8+CpB38vR_ zf~~xvE0}ae>C&YgFxRD-fa3?s`yN-_F@w= z8wg?K*A97BLH0v14Tw6(t>2pjK=o;+XoFQDTiJTlVmQ@xNlkA!qTg<0q*h&tIy+AR 
zApXTv4%o_j=CdQpsX3XX(WpoC59KPlb~i7`i|04FpsK;NVCw9SrcDLyQi@`?J;P1M zs>aqpM|SG|)!os~#mio-QTq)xxAG{bO3F^P`Oo=LGcs+xv-QiEQ2|p|9RQ=+E~7=Z zOWz+niM4+yoa@H>B$NsS5QY?5*S*{S7DMJzZicyZ^x`{=%LmiC*RRd@NfCuR5Dx!hAue}uKkHx_w|+; zOs*Zd1Mg@N`d-&R344Cbp{_DTN{Di#*I6&pvo?(kv){?)!@^J)yMJX*1|%_7BI>8) zn-4@Z7;`kn;^!_KIe?j5kk7H0-HAA+2iz5mWo$r+6e$H&jZq0|f|TUCz<5}(D&~}9 z;ip?4wM@N|oz%L9=YJ?UbR${vO(hKNN@nAV!fL;HZ$FbLM5`J29w!$Tk)i3=#^y5U zqW-ii(I&{wpO2+$@FXX&o*wUId;w8l2PyC}FikStr{2$NjYrWFF&}7Ula-uor zthqs%+rJ!bFS4{+$vxo(z%!c8N>^w5RF$ck0~zKnC z-nD|jq)j_#qg(e^!=zCk*nmPAN)b}>=<>qliKHy6HgK!+LC4AO;H!Tq>Sw|+9FigE zzNPlYPbv@a!%%!Hd9=OpPZ;fS>8WvIoSY;+jF%l%eT>JkL0Jemrxht`PD9g?jL z{Y|V2E}eY>$FX^_r(K>48qOcO{bIr(AYN5l6epgiy0{~)7dTpDD&qy4LbCjQRF^?4 z?z0qEd1%cr6{zg_G=Fc06fD}yU!k%-Mf0hlj03~8)NGfkhpWVT-g-9eYoX=G=>cpS z^E6nV4^g69hIa3ajXHEh*S6Qeqzvrjm8t9KKl#vAh_{quc9LNOJdn$@*px~AtG>r% z2?TC~SGds8jZFNWMojFJKy*Fr1lc*~Ldq=;c{A%)P7UN$243oH+slS&+dnotCu*zS zEeEKj6GR!v4wV;CY6NO@p(3SMK^G0TQBV>g-Xq10psO4!PlpGc9KNqM zfNR+BL?C?HXm%7d8MGoIyhCG>m&1jix&CNT=6u;Jqbns{75pUS2v*)r58FkiW4wg% z=(jHaP$(7Qt^NvD+}4D7|Q%Z!Kksk;!pG3OaRsEu0l}#osnRv2v+kG6ed0BZ-X0J|R)f2*s6c(Q_XFwHalWD8N>(@_b9lF`jn%g- zMPo_d+U-`%N>N>P^cu#>-_{~!W)SM7pb?SAWd5<`;Oz;Hf=rqcfj%hNF6O<$P3Qb|n3T2GI#oosnZB>f6&CpT>v>J+Z)cMNs^^7sD2-hID=vvKAL@bVYG zhA!_nBsx#~H~auxi?=0D!A~3{gyUH3iTD$NvAFLNxc(~V^d%yh+)+5J8n~9;T#W|iNlLrRZE{`w+g>Jk8$Z=u?|6cRC+G5AS9MBL zb+nAEw)76Hub?mnQ&+P+SGkw^*tDUKPA#XUjKqugn(^$s^B0}V=P}ID%-}8o?(lL; z6%&)lr)BO7JcPfDE0)mu*YYej5Lq{GON{%4oNL1ID)8MIg68m#6; zVclmOZRjnh41x7x-)r8cjrsPBjHPFBfg1JB1XcpjigdS7d9j@;aKo*d4Z?h!r`28QSe?0U7lt>ER2%O;@?TtDJSC^Cw4=YaB@TFd;eff9d3PDo(SAjlGaNV!Wrm2^?|Y*CeP9*7Sm>fi4|W@{OcstaT)_ z2tRFp{+lAM^>ClA(6>jC33`+~h}&+&;t{HFWKDBYPzJ_!BNEeg$lj%Jx((NK*}Er| z6FxQH+z`2y7m(gULH|$`jZ{S31Ky*T+7LRr+M5z>J*yk|)lPcdCEsGT_k1`QosVVT zB>u?#C6^jT8xcB?Q=(X=)NNvJjdzUQ181Qtmzim?$v>CHR=W1(7|vjydc-Vhty>QrqtHAe2=%E0%Xx&67Gjqy8kptaUD$I)!ZM39L@|1ykKAdz3nijDQ@ipIr{gQpA(U2Ef> z9Rpj>j9T9YYUyk8_vQ7w;xw7>Dd*y}g307!LFGyNf_(9KlPbZl%$XVz*61a^^$gY3 
ztz=k(-nu#lBva6y(2n5S%@Z9=v)Eb_G06V*b_>-p#4}E7{K{+Ulw?o0$_x2tm%V2b8w--c;%jN>?h^2(6%Uh;G_x#iL zv}Fv880kY2hK3V8%8Srki(J%d0ikF)0zPB+0iR@#nGnwEkWxb83!(x3!ghD%pa z85SH;`j;*FHpeO?ie2BBE`=XF__;iufy8+?jb#ZvJT77=6e*E1y9~1lI*kz*3ZJ)Y zF)>7n4)AOM?saAI-XT$J_1^b!+RbwC)I*=@9fOCxW4eiQrQsmurMz zC+j8j6Euw-A&3s+IE~-R^K@c%1H4T*qzuVdjk$K`yCk*c7;RU$6;rw?_Wp-b=XS5V zTz~377WsWY@~*J;o?&-{8V=Y=848>$s{qrNK=U=qk^*QwncB7{5jk}7!DU>M$*;cZ z;m}=1FLKs_#kRYn&hih)-o~ahN)R<@8sqKe2wb=eo+aAT+2%aur`i>CDwmFcrz%4N z5l~-`D05a~wtE6nxbvPK+u)BSHY6FIm_OYcUlN)Ru53D>`YBGk5GB4ZyD&VCCc{ZW zPa8(dOt0J#_)R3n$1U)b^0pC@B5sFFcGPFlg$AhwqvqQT1ff_H)2P`dcw*3Why*WM z(J2iy^l0CAGb8gs?gxI+Yr)HEWQA0bd&OgHTRP+<>_CP6RAcs{9fo%U{G{c+-nv|J zuK0?@CM0f;HJPtZ4%89*NTrytf$%~kiAgw4J0m>ZPd_jo-0;T#H__D++t?y=G6}Du z8*_+<(oxxK$A2hjr6x+W4(80a=9rEqUnudXi-*|RwDm^}QWf7*N_-8re=?)if-ZDv zMV>zDM-sDM9_O}CR@X0ANdERtV=*1FF02LqspEAJFyCP5_exA$#myn<6}Ufw=kPB& zhSyU-CAgP%bDzhC4#@**#G@rRfNy_QNL(|29&L_U@0>Uv%Y?1K|iGLN#D=CGZu ztG@Cd^tHFs3e^9Lb#?uBHrVP^GgibXstb!E&bj04tH9|_)Xs?ukTZlU%v zZXakQ!q&gGJdHFYLBLr8CB1HX90Pq;<9_@p)2=)S>)h=UwdM$7Nc>uixj`swCAAwP zrj-l~gMd<{2z=!iMp(abS%%rT%TesQO>8qp1e4;ln3aJvhW8=5<`jeOJyDKiGPV%g zXU=n9%f|$Sk!l{t^?0Y_3-L&e!0C|7R<5~m`hZ=a{boha*sm2^uMWn+8^A!qaB_AhxF_nm zHJvbw@s2hT!G<-UqugW;Dor-E4L;M> zIqB@mA;l z6@ekxIq1vUma>`jv>3#7g`}4aMtwD zSFBZcTOANq@kn{>_{e@P>G&~ud@|F?c(yKy>j8sN6-=}g5~-1Bf+pG-CtQ(wbf4p~ zq@^DFm2M@lyk5`w6Lgu;*E4kZJ;g|Z8rMD*){4Pnl3j-XTc33k`?l2_vOPiZLA$u| ze<;r~2kX|yIE&LuAv`(H>Zjs^-|~6>N2Swa|E1DTF)qna?QshV94#Hk{Y4b50#{b%GY&NJgtfZ}90m9={-P zXoVkBUg_ul{{9}msd@yXOeU!%GBVf1U$N}5z$sxSXm9)5qD^d1mqmP0*AKb@Ok5|n zZz?%fl^)D$yUA0f5w{7pEBN+3+KWjJQ71pDPmEddAh4zWk{3BEXak)R-Kkcx<2AC|?4;k@`X-{h z$3+{yr|msdygRw`88S-vxwoSJpeW z^KWc8TPROZ9Ki@WG2Wiojx7z*ncF69xS4a!-sSpZa55;C@_Xktx8l!qBb>I!NkK-7lLGcKziuIk(T1QadM--7pT}&qvksH4_Fk_X;T3`a~R}3gC)ui3v z=8X#Bsn)#b{?d^XQ0=%xU`R_?Ogg-6oN3gDuF-k;$YieH6v#X@9Jvm($~ek@XHno@YG#Z?}>PDHpKp?lW(OuV4Tew2(~zQ zH|?0~ki<~6qzG>O)mhJco1^1+&u+wLTe&Y(#!G)kEPtHyb(~UuiJi)2`jt}xB(2CC 
z8>evE$z}1@O@03;U(!&mZ}h%%cS<2rM~t?mb)&kqw#O8Hq@~dtR^45y-3L0Y}fqT;UP>iHj8@i z&*z~ShT8*tW_3}l(}v%a=YD33Ai~_D_zRsoD9#!{RLFyhc@P@jvv@AT$hi!$o>FH6 zu>UMv@!u`RYHdCxI$5M8=`@Fb5kdK^KeUa2*sAjYd$KG*vo%j;qhdruGfPJyAs-1c zC;i)Qlj8d4o(xY-(ljT$P(z2KwsZ6GfUHCqoWdTH+e z(&Rvm^M62UsN}P|760~-Bqrdw9UKhDO{ZB%0}L><&6z<|CqK!H5)HpHk-~M11 zckha%j(s5E@O!V}2FxFpr&eg+Se&%4E6Z<S z`>nT4w_bjnRR0NLG)>UKaS@>A5vQA6U827>oMls!GuVFm4`mEjmc8PASK{#Hi|D3q zB98zgg8}0qNT^)WgGg9!ha^dIjAF2=ilmB8xxatiAwyO%nHQ;0$}`i2q2I`FOZKWt z{NcV&usls5&0aHkyXFlGzg3`LCC7v;y8I|A^d~RR6feuU?w)<8C>_3qcK&ovZRf2< z*3r8@n%QI*P_R=(bVu>#3P{rt=i7tZj&5ZK9zDWkT&X=5jTq~567z{)0fB=i!`8bu=P3owB^t)?XP|xKjW%|rScx#=W2}!D9bwI`se_BF^X`N}W zt>Fu$m!h#S3rC=1XY^fNlqa{B*k%Upw&ID!pS0b-EqSh2tGoa+mWrWmg%G|=+O=x- zX~Vi-br!XIjWc1Mxdsz!8fzk2|4^iGW^69vnsSFKHwjHNei8PYuDyivxu)|@1j+0N z+M;KfXNeJ`4z6IvzgKEcpv}AppNhqp*||4!gi9Ga$ti~}qYXmW*oLiy7;F)=4~#$N zSlPk+WDkMTrj9Oq&A@U1TR9CM@V(Nhv;E_3RAv;}pa`W3tHANKoot?Myw8GOota{F z+wdFi_@BUi;+n=4%G~QU8hsbD;Q$NG0dx@oFT?UVTfCF9kLbbjkX88_NF#+rb62E0 z-50~n8@fWzn-qPaH~T5whsfbowXu?%z(ISBqR3qha;C36c(48nVg5rA*vzbQ6Of)2 z?HMXq!>IcR&6mGe2hpy>w1ht`4KrVDzf4=uB_^Wy#t>G}&P^c@F|!@t-%W;!KBVna zA(DNiC>c6MF7%Z1t28`N)|P#+1L@mknHiw#r9xSN_s;(GK2#h#pFT`7Csqbl09FBi zzmTnQ@)@Q`CU6y_2|dK6n0_*|h@^fG<7oU5^ZTx0-PR*YErDRZh`o*8>sKTEQI>?8 z9W#!l>K*&2$nRqFWCQh;(ygKHn87K;)Xm)G7)2;t2Ov)jNzQ-$g}%3*od%4p?gcVy zPC}nc55~8zZ8jWxL1!k%+4dE9mx-{cblXI2RPVhG_L@S(5bf~T5uQvTgg&Czq`2Fc zb`M`0NPbOf>VUf#ArDEF)5tJHM7Hlc#Bma8Ekc6NW(nOf3eWM;Mdlg769zzeN)~2o zI{S*MebPPpM!TFfM_gI75%T38?>x2ig6-BvD_DZjqN7Q~w#&|^)SXqx5kuCRJM z+VowVRnJ|i!#$U~I8t9g=~`qv=k20Bx2h$v#VEKSh7%Yr(p&~_wiHxgf|!#o{4{;u zOqybcQ=C(@cceuglq1_l6HHvu{wzu;7#A%j8Z$J&cRq$LNq%>ysV?-VZ>oWyovsNRaMYv_LfnmBpB*^srq~^q z@FAEWh9W7u)}fXb9wwITjGxANfd_~g5=eADLAGM>4#y%3v&pM?+qx&4^kHw?*ckd_ersiSbTOW@hyMD6R~HF;xUbbM_hhbK zc+;xM8q1@#uT7CMi`dala6-rZ-pqVJI0yi{6DpWrK+#_J{PcCJGgFc_3qF=e(Z^Ab zx1e&CDEcJ3qrw`TN4q*>#}`HlOtkoV6Z*(=9tzqxU>7c+ADP!K8Z7?Ew*=Z%eD`_{ 
z;;)*o-l0kdofeiOoh>Fg%FGtU8nu0o#@Ncw^Aim47uL?SP>Or++AL)LwoZBc$$}C$i10Mh|F~+igY5o5n_IZAZ3-PCW zrJ^?fP1l)^(pZtkXiOl6=)2dm(aZzgGAiW`y4O_LbIAcgbf*FW!bg2fq2Zm4$xsd6 zq@s5^2{7-O!|Abu@9yfT`)Iv-ORzM@3Ef=P3?->Qn(NS$Z;A1-FG}ld7rBGiNp87Q}dO2o?l+%E956Op<47CbU zc7cB=>0W`~%Xq&i3O-NrFW^tCEQy+MF#X2F1+j zQNK*k&i2=~v;U#Uw~!H?Qx4h}wI6;HqjMaaOmJb08{j%zZ zcyT!l>$XLiYh*(-cyg`fo8CLm{4cr0KuXQRA$D~(NmqcHEt|Wa9j6X{FD5kLB9L0> zhx4PuKWZW-Tm81$C(c6w$`B$fxz!nbUXedF!L>lGf%o}IbIY&%`#CBObIkQWuG#w~ z%-;Cc{zGx_b6vu`v^3>FH)YV zd1}gKfn0E-D_{^WUZGSFQxN)oI^n4Z$m(}*mEYhs9G`7i$LjB?)$YS{7nDSLsy51_ ztAc+5*7Lxo@DTmX=};2cidlspKt)s~p4>gQw~0=UwoLxLn~MWThQTC8prRP8a>zbk zggWMi3O71Zu{S0%x*nw5hm^tp4jpTaQs6XDs8L2?&rftzo1@cbO9KXa zbBMH$)M;E`r2N#gOyO22QEGqOGT9_rQuqSg?F0h(L`Q$eRA7$mpB3aBCO#z-&jcBU zJuYX9!#QROk^JN7bACyZ&0PVO%*!*$;w966uhqVF_+d)YA%aud#h(lRFE=yqGM=1?2ti-Q9cEh^bXd~ z_#rnoe|kt_N%MhrxlALWLdY#--Sn!xr^lO{M@=?8lTQs&$>Cqo_!6wWn8r$QAC}N8 zQT0FPb{^=$)h9@wl`b^!7V~(%QMx20t??QakT<(66 zj?!M#{2UcpNMW*Ac>gWfYrdgi!Mg}iuF;Zwiu1u3f4U~l)7w4es5Pc3zb;eY zKrex%WW0iEX*bi3Nk{nUO&2VX6;IfwuM)qX&hqPbI=nsITB&e9`)Ab+l1BQ^tXX5g zN_hsXcRI?uk~(HyMdVbGH+K3N8E7qGF{jP!!AZ=w-Zcf{KdtWA6IkTB2NH`mBGr8m zQ}`zr@KkCnc{QOTrEH%O z5)A6H2*UxK3DWgx$@>oYUvz%8J7x4AisQ(d-^<3Y#l4hF5)*c^k|y>9@IarnCP~ClUXpR6|2|%6 z-4K((J8Q>0YlXZu^J~mE_669*2wn&~KCCFGH@h9PXm?t1k`4$_FaFfoTTJz~xpA|D zd-w8Pz{Dno33+)6zMRcd;~IHg>#Ic%;f$w(>0>xRMv$(ztfWsRBsm;b*DoygM(qMwyDg!cxBYgTtbF$-r4!8HzIkwSOg?KqoR#ozz}`ECGE>t0b6)=~ zZruzw#O?e!1@qMVfme0LGk}s#hD`&-nk`lH|5*S0@9-2wm_g4=j57~oUIj0QAlk`5Yh=6#e`E2-Ur8|Eo8f+Fme7NV*5VspCN z_M};A_6@tFl15omcsPQm_{o==qrdhk)etK}*E<~I6F2^(fenq(K3lHNR#%I_Ka@wY ziuv*r#h zrtj6tVi3q+HlS9oQj=NjKf^W z%q)i*%^2h*D_`%Dy^kbD@vhyJ$>)9kWB~5xz=mx)s*CHbx6%J}T_|O%Vj$&S0 zx+K50*J@e@hJf4-jL_+qfkQcpsUHNOYwfFd;twfEV8+97AM#b zkY+`FhkW&To}b!V<4hHGdG`+t$x!rn#uCKH4&{z6!0*MB2!iG!;vQ&oviZ{VOhnq34IzZb$wHiCNQ^c+jjTYn6^D_+qP}nwl!@{ z+qP}nwr!pHPu+)8_uf;fpz^S?cPgn$Qfp`V%sy@ik2IQ#Gnq~v!!N8=+5cvJMk=IJaR{X5_n}2NwS}OWIEh)k&* zkyE6N1Gv9NJceaZ9@Q4yV=Vltx92*wG*-8Au>#80GtB0d%O9f*9&-)`kC>=N#_x=Z 
zLx#)w2VM%$(cTaNLcFzyoLTxJKy%mk@+Ip1>%LkTo7c8xcy%5#gjb83c3RELUFF}a z*5T<6UtRAjzl~SED_Wc-fMicMhqwsyEd++`F9OR0EG5E6(34$NC~1i)eq%G6x%@i- z)N67&-q8MpdCRcRofRwYM2Qu0XmR71c^l^^m<~`&lPw>Wp1DvL!%Y+Q zbiq#4%w}6*WL9gDn(l2i;|>xcMyMhBUT0Qm$7VR^uIE3Ce*&mi0+BWQsaLW(GCbLj_OT_YMZ>M!2eYOOP`b4KS(xCpAAu(OqHscpv%~*F*#n429Zsr z;%S(GkC+{sOn>iDdr+E??u10wj+p3YAD5BP-eg_)A(wE(2LUqz>KPJ%Q|M83eBo6r znconpaLaGrjr1Mrh!rI)J6qUCyvEpaNgj4}Fd!?oy>KSH*ZNdDt4<=0?ea+jU}9@$ z=!I&%5gEu$;cV~NupOb1yOgdkyv`@*T3y2^DH} zgg;tiOio`%1l9weKgP&VSmV=ORL=Y-OAW-ZEtLL`JI!1+b=|VYC$vjWoAQk6eDL}o zFPR883Pe<$T+Vr9i%L#mHN6HdB(5n(7Sns}I97T!s#+LkM0kH@9$WsUu#ghfj!q$# zA;+|c4kLp^4-%(=?&N#14n04$_tRD=E2R&}R^e&#DVP zhMr-1>~pSU|gFjlZ zt9UA26F=CD*`v|Wj2a<~MI(J6;WJ^MBmYeKl)F+zN8iiao*1n*vqLdisg&UvAAvj% zdZO-u_9r3Z4xK?O{yR+d@d@pkeS51V^x~@owqoLuQVkHRmygK%Lhr|x^TrQlx0X!@ z*F~@12E15HF*(>)#TW`=%RIq}gJUYWr?gO(S}ud)K*Ru+IE#zgT}?Heb@QPrNj13F z1PQ)uS~&e7 z-C1S)QhW#*BIQW&ja}Z=`GW7~m1|?rSLLz8yN6%Kg((8`XQroNs5Ps62aNTfN(3Cm z=4fi*_HBdRSZ!Ea$F-|}q6UGlB@UZvk%Re2mlzoq6~wZs@sl_dQt#?$ubg{*o%!IM z;pD4BU1otkGI&ZY(9^xI?lt(iDF-#VS>O|))0_Qq+8waQsm0VLZ3}fJ#et&iuzMM) z;q1CP)q}GFV}Y*wN031jeJP>|k`1#anVC&$ZXR=0O6uE7PS60Beb4AvSrY?xY5Ky4rZWg+^k1>#}S!lo#T=Muy(W zvdNBCSFIq)xSRDZH-y!=hf`X{lP;P2qz!2{d-r~xJxna5>#XwqF$%0M&HGL8z%z#j z_#VN_B>M*6ERR}M?|#(}v|~szZHEi)kiRk=(~(63v_CB_#2->+jdOj&<$Pj5m*x~iY2eYIe24G0rzjF$V5Nk=W z%b&%qd0%=0rHK;csTtu|V#i!Hv9l)Jff(Rxooo>$FJ@$yMAQ@`y}ZvPl?ozuY^0Zx z*r21n?)B6uO{!=Bj$U1#SjsOJNzLdGN4+9}$G>Ix>FZtJ+*LbxWaQdf_%8OsG+urA`aY!i1IwO4w z(q2a%Ky|ae;3|#yR(fL%J^W&4T<=4rAE7cRBMQmAw@`PN1E)a^)du?WQpa%|)V)5Mq6kNR-26nU6)`&^DygmqDLv=dO) zXbN#b1%6=-QuDdqB)^3bO`%4&ogEE)i)b6s!jX;jvjmDUZ%ElScj8)z@MFDh6KZn5hQa<{cjC2p&kG|-Y`w~i?H>lZs zx_&+XhaF=Tar5FuYfKMsa(t(O+qe=_rpceI<+5+dl5$%)bs&`aPOG`2OiJ~ReFu7G zH{4Tk`A7Qz(EdtlMe8NJ3*btr(<$odVe@hDnDc6`S7(xOeObH@_)WYj8sNiSvfj?( z1dcE81~G$Hf^cA3)Q#U0HwSxm%%1FuBCVJ0=VauFfTy=S|j{MyTLTgK}sHWwQG z-CrJ*^`e58%#M6qa?w#z44NuG!-zw830t!gRS$jG%0SzL- 
z8mDM*sf%CM;$(9WSD8s;>)~`9oUPpUS#T)Xj)x3`SMc*gaf@TpLn3d(Tx@1JN=_KUqO_b33y*lk$tumJ9UhGm!neXcju zJNU#nQ+yG32LDE2PteB>{-O+`mnS&y>FY4#N#>m(IBa|Ppgw7&G^LTp4YK=YU6~me39XJi^An-01#{0{;V9{pK?#PrLQQ_io7PSrEF#ZO=4?ifjV0w(!LbAZ-UTYF^XLMua> z20v>T&ld5%jB1R*USwR~vlp9O&5!9#Jo~99>+7!}9dn6`yLQ2w^=Mj|;v@Gvzlt;=n0Y8c#dMZPfAITKR6iBzk#_m)gdeTNoyGc>5&R7;G;n{U{})AU63~GXdq=6p1Izz2^r%IrU>D z-9@_D8DfexMj0#2-O^dXJxPu!aBFEbvT`pA4*2-Y^=#ziR*jjeGc2TKRAOR}P~s>> zZ~C$5c1YbqBmB6ksF{9#DaUp=meu$|{E$-GZZ2&u;+-~g5im;d&U=^*_JxG)!V@R; z$JS0}q$et$r)*u2RmtlTw?fm;x+s*(bg;qG8xC$DKRl1!sSUe{fVlkr$rIkF=l^zp zSr}hj^#io(Moc!Duguz8IwAbyL7G$%yMrHoT*#FKR%aFNFZG_3K%mZ^shD;?P;zuk z6Omv}`e4_r5*tsBNb)l+dZ+fV_I9k?9E)ReOS5q&m|R$4m%ew$J#@wg+zw=&sYo>3 zOyIu9HKY~)ARZ0YWxCM-#<2JA1*O22)eX-4jlOQo-AwcpF>0!i|8Xa6g^+#)6oh?HdZA;R(<@o^}hWmR2h=Uo19`j z2dW*;$W~21B4VFZT7V*~HJim;<*r~EcyySeNi0&&W$n#k3 z#{b#yVaSM}Nc|+oaA=T``9nGoQG(Nb&LVc!NbT<>jPf$uBUv=nX7Q@md>P6>5En4%V` z#K%A;JZ?9bYTcICb0awYc1>EVM5jnGyQN%dT_1q?j%f&RH^a*&+;*OF$Yi;z9S4G? zn0qnolHbDOg4mPL+j!zNEtCzsn^O`SS~(K#9DaJ(noQeme%Px~FEFTii1l9-=P!v! 
zK`kVaFH3m823^DEY0L=jAu-AGBv7(%Z2F_|_XUXF>Att4S%e8z+I;2{6%HZm;Ed@2N_PkeKtgs>mBFqzO2ybodM7mwYd%hnLRcUG7VL?=zCzk=KsFh9!U-9|{; zFSc~y?}aQ_!{32PpCA1w`d-oPw{{44lsSYhC*Lsw*!c#gKqD?$thc&;@(Rb(meNGk zB)sp&jIiuiz;!Db19KW<>DiO3#@Qa|e^;7L9Z&AGYRbV%U;h^1$&@vBOMC*kszU}B zq5rdH!`t;CuuvDm=lFov1D)%es>Uxgw4>bK_2KX<#c8KP#;NRSX^YUn*5xy5%#fll zpoOFKYVMS3evh1!D-dcQwTARAzu4zZ^`23&xfZOY>LtZJ0(;6uEdAYO4H)@L(lrz= zl}rsm2K+5wN$`Up<*fNLoJ6bmJ!J-4Pzv-~8M&8|W20oMSm2F>K*8y$EJ&~ld)zRO zq`GGvk}$0`#tW=N(-T3Vp3_2zK-5!jLm(GMa9s6zzm9C&(S2Wr@3q-I;g=j5?bAt^kH+^2*&jU18tR>VL7*P=)2jDNp#u5=H0+iqQVbcL7-xB zzr~u~8&KiByGv)kW?08n>?t*U%&@+Ys(Ra`24-XY0B3msQ&CgO?0t)0nZOk{_vnjD zY`)lIjuOF$MbYU;&u|%U&nuBgDzQ9 zaV5K}ksW+dg?Q&_>0J@AenPP3rDunKmidq3DN6Cm@x#QU`>fzvlcEplLrwn-QaL;z z{60t1bUtSK<+10VN<2u8AKz)illb$)b}6#gAfSQ8)0MD74622 z_?&QW9DjQUhTMRjGGXTk)RnGtgpC))J#U5<*BOnAVmnP7|#ZtQ;FRS2gn@t6e4!QX6;>2juB5%!{zN$4b5aHiG`zU~<9jmg> zYTq(#5ROSxL2!zszCnFA@%JoyTwF8B7t&{iqQ17z^{L}9y;oYXA0=L;Mn#!!X1`1R z1;me^iHIgK$>S%aDbN_uv1}e@;*f&nBCSP3JQkSt()jf{vFlg4e3ODmk5vP8=bM@xKzX84n>4g+Ky+Eti?3#NRO zeogpFrhJ^o9Isv;px&P^hGHg9YeI^uWpDse$?Tsoo|RP%EoIx?lHG!HrUp@5$#YAI z6I0?&$1B*wwB%X>s;R{o#n{axX`WoSLe*9(vf>ZEa<6iCg77^!%LqL2$ynb<5Jl&& zEO&Tlk8LZmwQpPsIms+Y67cOk^dD-5c|)+RGM@ZQ%q zMyhkVFH)Y>mDqu?yogG?ml3<57-!PPoPok@$cb7KykChvS@w*fo#Ix|B9U+!Xg<=T z2p>bD|4Jwz@5zBC?D%S9u&QyH@Ui0%q)ieR5iXTJ9yCD_h4f6FL9~3x?1OxPJa-%m zY)xXuPCXeApfARCUD`ixI>Yvp!-lzm-V+@`+OtF_$ziA9%=zD~4qBhnJIz?SR+KBiEc;%b;foLD0ew zq#eh>3xpBgK~<9?v!-E97?R`rH@!dt-(zx9T8arBc@V*xZ6&HTkH#=eFgoQWrk@Z} za_iR2$O(N1EQ^3D`e7u;on(BB9%o|#w}kgbhE?-PM5!;%6U-^z$C?lg(s4s%rp5Mv zXFIcBc!Uw{<14BSB_*Zd1fR2el>7vM_;;u92weEKx0g}BmY8uVkQobE&5k0JmI7>> z{I$5*>3MhUQ(Z~PU*!1`U}c^C<-Rkv0_Z03LtF0bfXxLv98$@I0Vt0kwcGgt9bVNx zADLJ$iz`|tED?1VWo$cI0;M6*dL7CtCG4p*l zt!kpjG-fTnl&A(iBA%ME7|mxVl;v+Y@@RJc@r%7{ybI#ee#Jw`3gD*LHIb)!EwB?2 zOOpsPR)2fW(9LGO##Rzg^ukYv#XgI8sFHEx!SUvtL1YUstcQD`j>T`H$O@(@&7siH z*)2|rpLw3R4IB*YuZxpplK=f3er8p$#CA9%7bx?A&t*>mPu@f0n=Buc~UYLGMWU#hd9>$_tmyID)3! 
zdnivl4=ip%%;M+rIFk`LSNBH!^?mR9Nf^R$rClMTp22_l_N(J|BBtYa0(& zsJRB`@e2yd!nqYRhdbMQr1_SU9tu%hkv~_{noi<#wVU?)d(k(ALJlpJ`kND?qu61pIYRa|Gt=4kZew-A{v9KUKICmt z8lp{;0ra#Qx)-Q_H2X4Y^$VnA17mj6h77Tws!ece4(TYIM?vZ-T^|C%uPM*X5CTEq zX}Y`LQ%djmr=N?V6(dd;3{6SA@tiwFf(_R94qp~31?YZQif#5$E5uqHL){0J21`y6 zc=x2N_U6@r7D3_b*=P1)@&GK~G-7a$_(WgYdBi*RY+_(z-B~espc6B^+QCl|*%AgF z7>*Eqf>dIK2oW9}Tf_*Qg!fuvYz`GRcuzS$vMmBR&rqwSglSxb$eFi>_8ij)Qr8eG zpOfoAuro7h4VYKeN!f3!jlPu+m8CiKTslzV4!<(=JiiayISACA!7q)7nL!l}=7n|< ztNfn=9yw`S81*L&&li*(-rEpW?GBmX(d=Riau9C(Xs(-C-Q-=OjGw7zpAT*kFW06f zoy}jBwbBhr-;D}@JVt*4_#0?gze^4qX-aRK=Qex7*ONvn$1d#`K~y&{szT}Hxamsk zYXZqUjNL8vJpJ1a{y#oyE9p;n4)ihZXQ#{CYub_BJB&Sa`HKPLIF3@e-rRrWwNrbH zwYXEtj3&L$#7~k9^3C-T4-83C=?tlmjH{6|YfBP#*!-HDZOJ#S(ldm6YY!3m#|1~3 zoy`PO|D-}#l8_3o!5*)68I!3#%Q*;JXFqeU>^#&a>0YKVLW|1E%$VZ%u;euQwx)>U znYcJheCPSQ0v~gc;L%BK!}iGYTTL4x>8O!!<~=g)pyS{%Au0dN5|u}kD!dr4%)U}f zrmo?7H?PXiV$19?c@|;Pz$1p<2d5OUrDQH%{t}W4xJMOb4X#G( z36waY;OTFL_VjDf8PZ6ZA#_O0{-du$cd`3E1!}2^-~%EA|9ou5YXM)ab=H zBjb6ow`_|asd;KS>mCdA?mkHAZD`}~aV9P!y1_F~qW&2Q5X&lNDQGan_^6<@4*_JV z+y2O_E_-_y)$OY@Ci&!FN*6$w*`BHT?v$+4@-zQ8l%_)fy^k3r=2lfAU{sL+p<^1tWOvV+^)o72+@ z9mN*r$Ap>k7k~qh1g%(bnAJmfFfX+jSvKUTXW;uvt5mFI1LSo#!I`>5=>@?t4Lo^6 zgj6J2P+w{1hU)Hc;-PXmER5SflDwktGS*J*Y9VhkZgf+Ai?B#%=U`V745?*N#s`&1 z=a0^)>lr)~?zAOV>gja>ZR>!{i;XlYFMRE~*j5a|5-L&I#A)nR6zm zxs8u15a&68y|!&GBqSp*``}p+EskccCLj;vRF)XX%+t@u(wl0jK;VbNl*B3vw|qsia% zPH!>*FzD1Qw8+Q}^O7Y3I1_ZrT|XJ+ESv|D@!<@sJf>G(!xSNc!7a4dJ2v4hG_a1R zEh+nj*9J8Z;bJ}#6_nS&_F)~=;NDOItEegcin=V7x!jUmc9aoU+^&wCmfB$fB4^JC z5z*DDmECvqCRo^^6aj1Re@$u`z|(b$wZ+-f=?Hv9nO;Z+c&e9#;p|?AUZsE34{XLO zzfu=>hm8$LXaBCtQy|Pl!iK zQ6ZdE7Wn?TJ}m~7JFBi1wlS!WRq%&`Iu+>_uBnX@7^U~5O;DSEYL&jmcLvUU+amey zvwE?iuxmPq+m&*)nsbfR1d1-lynnG5j8Toq!a-?Y=pV%P1u<}giYe(G zH?_TgrtMOJgU6t~GxOZ3eCSoRuccYV1xK|J)&Pp-r=Ws`C@siL?;e^SC$t>2xJ zqK)mbj)5|Q*j)PCHeuL=Fl>X%LY)mgK2oJ1u&UAKLlbYa-yce`@$|p12AXB)S= zVVp-Egq9Z#o>WWvpbAlq2h;utK!{stqvetUo3U!!gg=e8zdPYu>KB6C 
zrI2=wap#87f`x>S`cP)2T(i^#V1?0bwgdc^is0>H@1#^&#!vH94dQhrVuR}zdOl7{ z19W{0=I+UfC_U3ig%vk1wV%@9qON2)0JznAi2*IjqTc&0VN?cH@m>d2HHcb zR5-?zXORODNSMiSiAY?|mW_XgOx?(uuuGpxJe{A2($wnfV~48Wt4A)l<1x%OxZ;FE ze*2|HEjZBel&Q0yAqj)uUwhAs;vQsZRz(X3&pp~g++6u_P5y9U3JtF%(*gl_*GG52 zt}e+r$ikGjm4WTu8p=!5;*5HyE7lG~-n+x;gq-~@eIFq1`m4+pFtQDSVGekCtcT6q zq{XOkGfg{SG6}h_Y$x=}(F@HaH|??@JA{w1BID22gWIN5y*tYW=k^de!6(_Q< z0e(!dHHf3`NAxHo`mcnV*C0bXz@soRF$g=lc~#vVK+HYQ?b03CB230bc%I81?P>w) zzXf@+ROo0HdI(-0#VFN&BG=|!@WYP!INhDoo3`j6=c*dh55B!`$M&{~5{M^41tQp4 zP3(Y25Mt2Kr7p9y9s@1z-YfMlDAV_5ZKPW`mss0c2%BG(ayx98nHRZfj_Q0fQjFod zeWwYs6p}gUH}T1+qU`ZeUjSLkHb9rQqqAb@JaGdgI>>rSkcqr5Md9gqJqApAz8Y{P zt4rNc9@;Z8)&KOfRxS3Fw+S|X6<7T95`bXh!Qv@0v({uSNWF0R#>4AhvwRvs125AH5WiS+r%RL$OvZ}LETAM6=n&t2lvBRNg({WJy>Gv`; zS8P*C`}nxJZys6)zBF`SZ0dFrQ778?@D2?PH^Bw))a({>_w(K*e2p1WHv?<=`1r0q#U$J%J3lRHsDp!vgF;avH$vS-t!P)^WkDpU zuVSb+ktsD1y94`Fj&9}WCZjJ=Dlnq14+6P z0XfEg(r}sSQXz?$!mr_lRc-~bz=wAHUKp-3R$LJ6TuDd}2)6LX0f#+-7}4VnjPtkA z4znZQdh}wnz@XZTD)<`-JM}|LP0;{g&|;-MRLB=OYyBF4n+oAv1`03H;k#17ML`+B z!`Vf>O7k;JE6dT!*0ey5U{|F0g&U`UPG(DgL~fnA|K&0 zSuKvwyL#zCz5($~VXY$z5$Iv4j#O?u7ofJMoK?243vob8nSM%Y33@#bj50|TJoau# zEnB@eQiOtt;QTUPXQcfMm4AuL zc{93|@bt~~_cZpR_Il`SVv(~EG!8-dR_^^Zth=pG>^}A-i@c8$S|$2zWrjX8l7mV5 zZQk*S?lIsFHSY42q-nxs9k-{%@tF}PPAj~N+o(Y8-dxJ`T!<=DMw7xP1OYyA{3(pR|>sI{0|fr~|S1 zeSUQ{si~bc`VM+S{4MN{v)4VcfM>h!l(XGnm@^j7BvS){+nGLTZt#UsQ_TZeVX{@8 zJvwzr#E0Uo;Co=$$3WC5+Fgf6U_)SCoSGOr)p{rSPf{e+$%G1rlceH=8U4t_wxD}( zRXa7DcvN8Hm62V3M_07Pu=<3v{n|f0nKonAbJAor)6MG4OYUXg(l80%bwN5^MeuD@ z9qA#^m?qKl>W(Udz09Hkey}9Zt~p2k^fM~%xksh?hijXEKPp6cmEiJ3#cfln<9-@X z2Qk7(8(ZSYG;UqT>Z8T=ad#ng;C`QtI71<2;%hmM)~YF{GLq79a49oR)sFFp1ja`j zeF8;gS0JTMq_=akPN)!fWN^~XDryMJ@(5R}?ABGMhi=T}${T#T{7L7ml^5l~PxoxP zLQvva2;*4$Pb!Nik0TBHy3HT+p}%g8w(4G(%e=2fWs3yQM7E8+-A07%bfL>JgD=s` zYE1y|UCiebeLohHwr-M)!J`SEaM6@%VWFXefjic#>yYUZJ_+nI?8qK}J*bVV#$UK0 z8hA7)C$_6JaDr46BKWZvCpBSVY3AG3*=U~;n6YEI z&OOZ(%<9Yp?RAzT@R)-9gZ0+h+46e04-*a!!??MLDqru*?hNJXiYuQ=4oXjC^o7n 
zdktFjRcw^73uGEbd&shJ06z5F%a9DPq%(OZBvSG}B!AqE496RDn<{;!uK@KooiRW- zu95GXrN-ykfdmimygIF5>_Pme@*~ytCNFHyJt=93Zi>NR#w1|tN)H1xDy*+0SDAiv zU8SVe=;zsmJi-?Yw@cL2b@%XjrfF>v5~+rN@S%47y)J%dMqO=6HKIxnN{>*~=%l|u z3ceO#q%Rc|ylxK-=(W8%Pa7G}Dr4YLCm62EsQi+@fe1y#pjoajemuOVV&|c0zHwYBwpC)OWB!GEgH*MF#ab52Q}DY6K^)6hqjfEw93Kg21jTcQj{Fm zCTQ5Y>Wm|(yBd6^$MaY`3D5|wu7r=qyx>0JiZrS$HmUth8xyNQH<5?|A0SGoblG=h zB>`(|XJmLU($G00>n_1)#5(^=VT6zv3cmmV2x*KsBz!9I&eU^`KR62d2`Ns+$)5H6 z(WyrxQSowxlzybNMkZ0- z@}j+J+7*8{8lby0=&2!>jW8_OVS8&e;4Ga;uE%{wkYfz;Y96yks=AyB{>{*?oi_m>4MA4u9eG^eB-;Sb zg8DE_T3P)8SvM*Hs>_gAOBcCZq-BTww}K)qHqmI>F=vE-%Ena_ILc`_i@Il@l-Pn` zgzb!xe1ux}eq#`}Zm?az0EebtCY51W*;lZI9kMr(FYLtS@*K0Sx7fTF=*|y@-=w3& zqzVtk@q|ZwzX1Q`kJw&2V4sa=SlHuam@KFl#H?m&zNr zayq-ldi|9^HQhypMcH_?(roscXW)&a13N{ez}fbR(43zzmmmG=c7lH^TqrBTk_ZUI zbkmKG_3`+juiUBhJPr&K+7C&b#=ax&DHtUzIaN);ZIQdM52=a19#b<(|C=?heOg=t z_^v5KtI7VMFRbqq2(e_3aPM*8miLtU;v5spNAa)l$8p=`d^JKF0Dfj^5eGq7>!z2t z%!e61YfMyavW%MX$Ao^I70ApB*rKUdV8lISI`L@0oyH z5bbq_!rNhJO5Um`9!5xFAC(4uFKHp}YvY6pv^y7iHb>=6%amsMi?1Ue@Eu0Ppgdmk zL+<@om~kv4cf0D$;A%4|dP(D(>`(TWb2Aml521v@ zyF@Nl{DmDH{BVbV^N>pL*os}M$gXH?^x^>j0=+U#m)x_k*#_yZOiO)r&tv-+7w{5q znQhQkc|_U$(r5nM`w6<*G~BERR38}dY5Gr|KCjIqxwAy8`b7G>>C-cT=MsOb@gxYqfWdpg@HO1i zT@1@37?p{ZbXK5SXm7QG&tncW?n_)9eDYSv#*kCkIHy+9!oPHYm0F#*_NFw~p>09M z$F5GDMysKl@MfbUr~fF5R3N4Pw@-{rk7*={dqwhl=(iB$_D-~I&Lp&(cc-ww#2HE) zI>8;f-=q^Ls)y+$Mzn(>ht=+AxTjDZA>|6>ivXweQB~_tbH7{6A~A+X5}&W1qsYt8 zb?1Sj&t^z}wOai>@lGXb+(CHwbbVn6Fe0L67-Q{){B$A=BF+h!g^)VrzV_4QsJ&<2 zk325jaWuPRoHupjAIQrNb?95}?d@5nQ@cNYm8~Ro#x%@zQD*@jnUsOl>>z7PH~p?z z(zkx9BNUDYR~qtv9XA0|3$7F%U+O6yX8kZn<(tLi5`%WKb^=sXi9K~E%D~JDrsK^6JnHMyLz;=zB6YHikB15MQhEdHM+xdy0l9bWftOt9?3ZSWXV` z2zRUD7|h>8-_#_W`KcsC^R7alE}>j$@I9u2#dlSk>zZ-^?W;6;WL!|DHuZsDgEqz1WH~OO9&=ayGtMF3wDPCh-I{Fz5l7Rp3z0Qd8m&Yb^c*&; z`^ASewm04)LO!A|X_A`U24u{bD7OUfr(&Ou9H+yA8Lbu2Ado-aL6mTUp9@?jdUWj< zJVbFTlbo%!343BT8ESBz9vMyWpiqb`2L(&kXD<$vlkvavY9HGn5STnmk~;_RQh3z+ zWHsW;9u0T&mk+ZkWm2m9rk$F2Jo#Qc(xNH|XoHDOD3nAdPma4fHon{O}Bd#4?P|W 
zN!@hPnSi#V?>gdL+7Z7WYw%(CZOThJ3;71Ec9r|KNKRx_UYQf1KfmekEg0v$oh+v- zZ21>pQ{DKe4W|e8$BC(=#g`I1AB*VpG?@%L{&s4`fRJN`&S+C|skV3~F0ky?@1Mz> zDfr^Rg+Emffj9XE#LPh#kBaXIg7U+&%cgbAb2masP0r$6c5=C4DY@NyNjP!DH38)jLDz#Q3OY zSO9+uQKk@e!qZd0gr^PU{zeLgq5XLFlo{{*sNI4CFTfj9{>3#89XeetGLOT_814@{ zLd}%SL=EygoH}r~zXZ6LM-Uxi+7&&0dlU55yr+Czqclmt+4Xjz5gT*@WX|dA_s3jEg`ZH*#tvd}LLSEu@SL}(ydd(Bw zmO@POSkeF{I`EedjUqF$*I#AohCl{;;?OctdQt50wSJ3IBNK_k$P|2p8}O!1OyDAX z66SUfpLHFc6*p>oPapoWoo&q1)ZT|5^-wjAbKi?4&Rr_aYcR6&%NbDl54N(78lgW> zdUW4@@=~B+FhC$cKtRl*WAd^>o?Nj1+W*N&K)67TCJrtZjwUuHw$4s;&hE}C%FsZd zL^3$kQtK)gGS{2a03R4 zN&p6e{y%8{!D$Sr{5OW3p@EG7ou$3mf5Yu-{4a3-o6rB#xN}e7|IZBkGx9$(@PBzP VF9i Date: Fri, 10 Jul 2020 14:45:32 +0000 Subject: [PATCH 02/64] removing local configuration --- tests/settings.json | 10 ---------- 1 file changed, 10 deletions(-) delete mode 100644 tests/settings.json diff --git a/tests/settings.json b/tests/settings.json deleted file mode 100644 index 87fe5319..00000000 --- a/tests/settings.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "mfn_url": "http://192.168.8.52:80", - "mfn_user": "mfn@mfn", - "mfn_password": "mfn", - "mfn_name": "KS", - "proxies": - { - }, - "timeout": 120 -} From 2d8a7ff8922d6ddb9cff5a1a7224f3e77a1a0bf5 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Thu, 23 Jul 2020 16:50:08 +0200 Subject: [PATCH 03/64] adding description on how to prepare and add a GPU node, and corresponding values.yml capability definition example --- .../microfunctions/README_join_GPU_node.md | 293 ++++++++++++++++++ deploy/helm/microfunctions/values.yaml | 2 + 2 files changed, 295 insertions(+) create mode 100644 deploy/helm/microfunctions/README_join_GPU_node.md diff --git a/deploy/helm/microfunctions/README_join_GPU_node.md b/deploy/helm/microfunctions/README_join_GPU_node.md new file mode 100644 index 00000000..6b247d1a --- /dev/null +++ b/deploy/helm/microfunctions/README_join_GPU_node.md @@ -0,0 +1,293 @@ +# Installing GPU node and adding it to a KNIX cluster + + + +This is a guide on 
how to install a GPU node and join it in a running Kubernetes cluster deployed with kubeadm. The guide was tested on a Kubernetes cluster v1.18.6 installed with kubespray, where cluster nodes can be depoyed as VMs using vagrant. VMs in this configuration are running Ubuntu 16.04.4 LTS. + +The node with GPU has a single NVIDIA GTX1050 GPU card. + + +## Step-by-step guide + +1. We start with a blank node with a GPU. This is the node, we would like to join in our Kubernetes cluster. First, update the node and install graphic drivers. The version of the drivers has to be at least 361.93\. We have installed version 387.26 and CUDA Version 8.0.61\. Drivers and CUDA installation is not a part of this guide. + + **NVIDIA drivers information** + +```bat +ksatzke@gpuhost:~$ nvidia-smi +Thu Jul 23 10:57:05 2020 ++-----------------------------------------------------------------------------+ +| NVIDIA-SMI 450.51.05 Driver Version: 450.51.05 CUDA Version: 11.0 | +|-------------------------------+----------------------+----------------------+ +| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|===============================+======================+======================| +| 0 GeForce GTX 1050 On | 00000000:01:00.0 On | N/A | +| 30% 46C P0 N/A / 65W | 604MiB / 1992MiB | 2% Default | +| | | N/A | ++-------------------------------+----------------------+----------------------+ ++-----------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=============================================================================| +| 0 N/A N/A 2163 G /usr/lib/xorg/Xorg 369MiB | +| 0 N/A N/A 2904 G /usr/bin/gnome-shell 182MiB | +| 0 N/A N/A 3000 G /usr/lib/firefox/firefox 1MiB | +| 0 N/A N/A 8757 G /usr/lib/firefox/firefox 1MiB | +| 0 N/A N/A 11670 C ...ffice/program/soffice.bin 41MiB | +| 0 N/A N/A 16245 G /usr/lib/firefox/firefox 1MiB | ++-----------------------------------------------------------------------------+ +``` +**CUDA information** + +```bat +ksatzke@gpuhost:~$ cat /usr/local/cuda-10.1/version.txt +CUDA Version 10.1.243 +``` + +2. The next step is to install Docker on the GPU node. Install Docker CE 19.03 from Docker’s repositories for Ubuntu. Proceed with the following commands as a root user. +```bat +sudo apt-get update +sudo apt-get install -y \ + apt-transport-https \ + ca-certificates \ + curl \ + software-properties-common +sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - +sudo add-apt-repository \ + "deb https://download.docker.com/linux/$(. /etc/os-release; echo "$ID") \ + $(lsb_release -cs) \ + stable" +sudo apt-get update && apt-get install -y docker-ce=$(apt-cache madison docker-ce | grep 19.03 | head -1 | awk '{print $3}') +``` + +**Docker installation test** +```bat +ksatzke@gpuhost:~$ docker –version + +Docker version 19.03.11, build 42e35e61f3 +``` + +3. On the GPU node, add nvidia-docker package repositories, install it and reload Docker daemon configuration, which might be altered by nvidia-docker installation. 
Note that with the release of Docker 19.03, usage of the nvidia-docker2 packages is deprecated
This also allows us to ommit the ```bat –runtime=nvidia``` parameter for Docker. +```bat +{ + "default-runtime": "nvidia", + "runtimes": { + "nvidia": { + "path": "/usr/bin/nvidia-container-runtime", + "runtimeArgs": [] + } + } + } +``` + +5. As a root user on the GPU node, add Kubernetes package repositories and install kubeadm, kubectl and kubelet. Then turn the swap off as it is not supported by Kubernetes. + +```bat +apt-get update && apt-get install -y apt-transport-https +curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - +cat </etc/apt/sources.list.d/kubernetes.list +deb http://apt.kubernetes.io/ kubernetes-xenial main +EOF +apt-get update +apt-get install -y kubelet kubeadm kubectl +# turn off swap or comment the swap line in /etc/fstab +sudo swapoff -a +``` +**Specific version installation; e.g., 1.****18****.****6****-00** + +```bat +# install aptitude, an interface to package manager +ksatzke@gpuhost:~$: apt install aptitude -y + +# show available kubeadm versions in the repositories +ksatzke@gpuhost:~$ aptitude versions kubeadm +Package kubeadm: +p 1.5.7-00 kubernetes-xenial 500 +p 1.6.1-00 kubernetes-xenial 500 +p 1.6.2-00 kubernetes-xenial 500 +... +p 1.18.5-00 kubernetes-xenial 500 +p 1.18.6-00 kubernetes-xenial 500 + +# install specific version of kubelet, kubeadm and kubectl +ksatzke@gpuhost:~$: apt-get install -y kubelet=1.18.6-00 kubeadm=1.18.6-00 kubectl=1.18.6-00 +``` + +6. On the GPU node, edit the ```bat /etc/systemd/system/kubelet.service.d/10-kubeadm.conf``` file and add the following environment argument to enable ```bat DevicePlugins``` feature gate. If there is already ```bat Accelerators``` feature gate set, remove it. 
+```bat +Environment="KUBELET_EXTRA_ARGS=--feature-gates=DevicePlugins=true" +``` + +**/etc/systemd/system/kubelet.service.d/10-kubeadm.conf** + +Note: This drop-in only works with kubeadm and kubelet v1.11+ + +```bat +[Service] +Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf –kubeconfig=/etc/kubernetes/kubelet.conf" + +Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml" +Environment="KUBELET_EXTRA_ARGS=--feature-gates=DevicePlugins=true" + +# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically + +EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env +# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use +# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file. + +EnvironmentFile=-/etc/default/kubelet +ExecStart= +ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGSOn the GPU node, reload and restart kubelet to apply previous changes to the configuration. + +sudo systemctl daemon-reload +sudo systemctl restart kubelet +``` + +7. If not already done, enable GPU support on the Kubernetes master by deploying following Daemonset. + +```bat +kubectl create -f https://raw.githubusercontent.com/NVIDIA/k8s-device-plugin/v0.6.0/nvidia-device-plugin.yml +``` + +8. For the simplicity, generate a new token on the Kubernetes master and print the join command. + +```bat +ksatzke@cpuhost:~$ sudo kubeadm token create --print-join-command + +kubeadm join 192.168.33.11:6443 --token gxzpmv.hzqw4q0xxrw8zai7 --discovery-token-ca-cert-hash sha256:696c21540f4de7bd600be843dddc1b362582f4a378547c2cb0d37f3be40d5699 +``` + +9. Go back to the GPU node and use the printed join command to add GPU node into the cluster. 
+ +```bat +ksatzke@cpuhost:~$ sudo kubeadm join 192.168.33.11:6443 --token gxzpmv.hzqw4q0xxrw8zai7 --discovery-token-ca-cert-hash + sha256:696c21540f4de7bd600be843dddc1b362582f4a378547c2cb0d37f3be40d5699 +[preflight] Running pre-flight checks + [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/ + [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 19.03.11. Latest validated version: 18.09 +[preflight] Reading configuration from the cluster... +[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml' +W0723 13:19:02.377909 27185 defaults.go:199] The recommended value for "clusterDNS" in "KubeletConfiguration" is: [10.233.0.10]; the provided value is: [169.254.25.10] +[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.16" ConfigMap in the kube-system namespace +[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" +[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" +[kubelet-start] Activating the kubelet service +[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap... + +This node has joined the cluster: +* Certificate signing request was sent to apiserver and a response was received. +* The Kubelet was informed of the new secure connection details. + +Run 'kubectl get nodes' on the control-plane to see this node join the cluster. +``` + +10. Run following command to see the GPU node (virtual-kubernetes-gpu-2) status on the cluster. + +```bat + ksatzke@gpuhost:~$ kubectl get nodes + NAME STATUS ROLES AGE VERSION + node1 Ready 1d v1.18.6 + node2 Ready 13s v1.18.6 + node3 Ready 5d v1.18.6 + node4 Ready 5d v1.18.6 + gpunode NotReady 1d v1.18.6 +``` + +11. After a while, the node is ready. 
+ +```bat +gpunode Ready 7m v1.18.6 +``` + +12. Now we have a GPU node ready in our Kubernetes cluster. We can label the recently added node (gpunode) with the accelerator type by running following command on the master. + +```bat +kubectl label nodes gpunode accelerator=nvidia-gtx-1050 +``` + +13. To check nodes for accelerator label, run ```bat kubectl get nodes -L accelerator``` on Kubernetes master. + +```bat +ksatzke@gpuhost:~/kubernetes$ kubectl get nodes -L accelerator + +NAME STATUS ROLES AGE VERSION ACCELERATOR + +gpuhost Ready master 5m22s v1.18.6 nvidia-gtx-1050 +node1 Ready 1d v1.18.6 +node2 Ready 13s v1.18.6 +node3 Ready 5d v1.18.6 +node4 Ready 5d v1.18.6 +``` + +14. To test the GPU nodes, go to the master and create a yml file with the following content and execute it. + +**gpu-test.yml** + +```yaml +apiVersion: v1 + kind: Pod + metadata: + name: cuda-vector-add + spec: + restartPolicy: OnFailure + containers: + - name: cuda-vector-add + # https://github.com/kubernetes/kubernetes/blob/v1.7.11/test/images/nvidia-cuda/Dockerfile + image: "k8s.gcr.io/cuda-vector-add:v0.1" + resources: + limits: + nvidia.com/gpu: 1 # requesting 1 GPU per container + nodeSelector: + accelerator: nvidia-gtx-1050 # or nvidia-tesla-k80 etc. 
+``` + +```bat +ksatzke@node1:~/kubernetes$ kubectl create -f gpu-test.yml +pod "cuda-vector-add" created + +ksatzke@node1:~/kubernetes$ kubectl get pods -a +NAME READY STATUS RESTARTS AGE +cuda-vector-add 0/1 Completed 0 19s +``` + diff --git a/deploy/helm/microfunctions/values.yaml b/deploy/helm/microfunctions/values.yaml index d2f4c2b7..310cbcb1 100644 --- a/deploy/helm/microfunctions/values.yaml +++ b/deploy/helm/microfunctions/values.yaml @@ -52,9 +52,11 @@ manager: limits: cpu: 1 memory: 2Gi + nvidia.com/gpu: 1 requests: cpu: 1 memory: 1Gi + nvidia.com/gpu: 1 createServiceAccounts: true #------------------------------------------------------------------------------ From 9fab0e3d1f1cff444350dc702768b5d846d6a0b5 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Thu, 23 Jul 2020 16:55:38 +0200 Subject: [PATCH 04/64] fixing typos --- deploy/helm/microfunctions/README_join_GPU_node.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/deploy/helm/microfunctions/README_join_GPU_node.md b/deploy/helm/microfunctions/README_join_GPU_node.md index 6b247d1a..551c6518 100644 --- a/deploy/helm/microfunctions/README_join_GPU_node.md +++ b/deploy/helm/microfunctions/README_join_GPU_node.md @@ -193,7 +193,7 @@ kubectl create -f https://raw.githubusercontent.com/NVIDIA/k8s-device-plugin/v0. 8. For the simplicity, generate a new token on the Kubernetes master and print the join command. ```bat -ksatzke@cpuhost:~$ sudo kubeadm token create --print-join-command +ksatzke@node1:~$ sudo kubeadm token create --print-join-command kubeadm join 192.168.33.11:6443 --token gxzpmv.hzqw4q0xxrw8zai7 --discovery-token-ca-cert-hash sha256:696c21540f4de7bd600be843dddc1b362582f4a378547c2cb0d37f3be40d5699 ``` @@ -201,7 +201,7 @@ kubeadm join 192.168.33.11:6443 --token gxzpmv.hzqw4q0xxrw8zai7 --discovery- 9. Go back to the GPU node and use the printed join command to add GPU node into the cluster. 
```bat -ksatzke@cpuhost:~$ sudo kubeadm join 192.168.33.11:6443 --token gxzpmv.hzqw4q0xxrw8zai7 --discovery-token-ca-cert-hash +ksatzke@node1:~$ sudo kubeadm join 192.168.33.11:6443 --token gxzpmv.hzqw4q0xxrw8zai7 --discovery-token-ca-cert-hash sha256:696c21540f4de7bd600be843dddc1b362582f4a378547c2cb0d37f3be40d5699 [preflight] Running pre-flight checks [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/ @@ -225,7 +225,7 @@ Run 'kubectl get nodes' on the control-plane to see this node join the cluster. 10. Run following command to see the GPU node (virtual-kubernetes-gpu-2) status on the cluster. ```bat - ksatzke@gpuhost:~$ kubectl get nodes + ksatzke@gpunode:~$ kubectl get nodes NAME STATUS ROLES AGE VERSION node1 Ready 1d v1.18.6 node2 Ready 13s v1.18.6 @@ -249,11 +249,11 @@ kubectl label nodes gpunode accelerator=nvidia-gtx-1050 13. To check nodes for accelerator label, run ```bat kubectl get nodes -L accelerator``` on Kubernetes master. 
```bat -ksatzke@gpuhost:~/kubernetes$ kubectl get nodes -L accelerator +ksatzke@gpunode:~/kubernetes$ kubectl get nodes -L accelerator NAME STATUS ROLES AGE VERSION ACCELERATOR -gpuhost Ready master 5m22s v1.18.6 nvidia-gtx-1050 +gpunode Ready master 5m22s v1.18.6 nvidia-gtx-1050 node1 Ready 1d v1.18.6 node2 Ready 13s v1.18.6 node3 Ready 5d v1.18.6 From 01c5c3d8b92cf5655d4b712146225ea98a5f4a9e Mon Sep 17 00:00:00 2001 From: ksatzke Date: Wed, 29 Jul 2020 15:35:52 +0200 Subject: [PATCH 05/64] corrections to description on adding GPU nodes --- ...GPU_node.md => README_GPU_Installation.md} | 600 +++++++++--------- 1 file changed, 307 insertions(+), 293 deletions(-) rename deploy/helm/microfunctions/{README_join_GPU_node.md => README_GPU_Installation.md} (74%) diff --git a/deploy/helm/microfunctions/README_join_GPU_node.md b/deploy/helm/microfunctions/README_GPU_Installation.md similarity index 74% rename from deploy/helm/microfunctions/README_join_GPU_node.md rename to deploy/helm/microfunctions/README_GPU_Installation.md index 551c6518..c29330d0 100644 --- a/deploy/helm/microfunctions/README_join_GPU_node.md +++ b/deploy/helm/microfunctions/README_GPU_Installation.md @@ -1,293 +1,307 @@ -# Installing GPU node and adding it to a KNIX cluster - - - -This is a guide on how to install a GPU node and join it in a running Kubernetes cluster deployed with kubeadm. The guide was tested on a Kubernetes cluster v1.18.6 installed with kubespray, where cluster nodes can be depoyed as VMs using vagrant. VMs in this configuration are running Ubuntu 16.04.4 LTS. - -The node with GPU has a single NVIDIA GTX1050 GPU card. - - -## Step-by-step guide - -1. We start with a blank node with a GPU. This is the node, we would like to join in our Kubernetes cluster. First, update the node and install graphic drivers. The version of the drivers has to be at least 361.93\. We have installed version 387.26 and CUDA Version 8.0.61\. Drivers and CUDA installation is not a part of this guide. 
- - **NVIDIA drivers information** - -```bat -ksatzke@gpuhost:~$ nvidia-smi -Thu Jul 23 10:57:05 2020 -+-----------------------------------------------------------------------------+ -| NVIDIA-SMI 450.51.05 Driver Version: 450.51.05 CUDA Version: 11.0 | -|-------------------------------+----------------------+----------------------+ -| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | -| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | -| | | MIG M. | -|===============================+======================+======================| -| 0 GeForce GTX 1050 On | 00000000:01:00.0 On | N/A | -| 30% 46C P0 N/A / 65W | 604MiB / 1992MiB | 2% Default | -| | | N/A | -+-------------------------------+----------------------+----------------------+ -+-----------------------------------------------------------------------------+ -| Processes: | -| GPU GI CI PID Type Process name GPU Memory | -| ID ID Usage | -|=============================================================================| -| 0 N/A N/A 2163 G /usr/lib/xorg/Xorg 369MiB | -| 0 N/A N/A 2904 G /usr/bin/gnome-shell 182MiB | -| 0 N/A N/A 3000 G /usr/lib/firefox/firefox 1MiB | -| 0 N/A N/A 8757 G /usr/lib/firefox/firefox 1MiB | -| 0 N/A N/A 11670 C ...ffice/program/soffice.bin 41MiB | -| 0 N/A N/A 16245 G /usr/lib/firefox/firefox 1MiB | -+-----------------------------------------------------------------------------+ -``` -**CUDA information** - -```bat -ksatzke@gpuhost:~$ cat /usr/local/cuda-10.1/version.txt -CUDA Version 10.1.243 -``` - -2. The next step is to install Docker on the GPU node. Install Docker CE 19.03 from Docker’s repositories for Ubuntu. Proceed with the following commands as a root user. -```bat -sudo apt-get update -sudo apt-get install -y \ - apt-transport-https \ - ca-certificates \ - curl \ - software-properties-common -sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - -sudo add-apt-repository \ - "deb https://download.docker.com/linux/$(. 
/etc/os-release; echo "$ID") \ - $(lsb_release -cs) \ - stable" -sudo apt-get update && apt-get install -y docker-ce=$(apt-cache madison docker-ce | grep 19.03 | head -1 | awk '{print $3}') -``` - -**Docker installation test** -```bat -ksatzke@gpuhost:~$ docker –version - -Docker version 19.03.11, build 42e35e61f3 -``` - -3. On the GPU node, add nvidia-docker package repositories, install it and reload Docker daemon configuration, which might be altered by nvidia-docker installation. - Note that with the release of Docker 19.03, usage of nvidia-docker2 packages are deprecated since NVIDIA GPUs are now natively supported as devices in the Docker runtime. - -```bat -# Add the package repositories -distribution=$(. /etc/os-release;echo $ID$VERSION_ID) -curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add - -curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list - -sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit - -sudo systemctl restart docker -``` - -**nvidia-docker GPU test** - -```bat -ksatzke@gpuhost:~$ docker run --runtime=nvidia --rm nvidia/cuda nvidia-smi -Thu Jul 23 09:17:18 2020 -+-----------------------------------------------------------------------------+ -| NVIDIA-SMI 450.51.05 Driver Version: 450.51.05 CUDA Version: 11.0 | -|-------------------------------+----------------------+----------------------+ -| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | -| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | -| | | MIG M. 
| -|===============================+======================+======================| -| 0 GeForce GTX 1050 On | 00000000:01:00.0 On | N/A | -| 30% 44C P0 N/A / 65W | 749MiB / 1992MiB | 1% Default | -| | | N/A | -+-------------------------------+----------------------+----------------------+ -+-----------------------------------------------------------------------------+ -| Processes: | -| GPU GI CI PID Type Process name GPU Memory | -| ID ID Usage | -|=============================================================================| -+-----------------------------------------------------------------------------+ -``` - -4. Set nvidia-runtime as the default runtime for Docker on the GPU node. Edit the ```bat /etc/docker/daemon.json``` configuration file and set the ```bat default-runtime``` parameter to nvidia. This also allows us to ommit the ```bat –runtime=nvidia``` parameter for Docker. -```bat -{ - "default-runtime": "nvidia", - "runtimes": { - "nvidia": { - "path": "/usr/bin/nvidia-container-runtime", - "runtimeArgs": [] - } - } - } -``` - -5. As a root user on the GPU node, add Kubernetes package repositories and install kubeadm, kubectl and kubelet. Then turn the swap off as it is not supported by Kubernetes. 
- -```bat -apt-get update && apt-get install -y apt-transport-https -curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - -cat </etc/apt/sources.list.d/kubernetes.list -deb http://apt.kubernetes.io/ kubernetes-xenial main -EOF -apt-get update -apt-get install -y kubelet kubeadm kubectl -# turn off swap or comment the swap line in /etc/fstab -sudo swapoff -a -``` -**Specific version installation; e.g., 1.****18****.****6****-00** - -```bat -# install aptitude, an interface to package manager -ksatzke@gpuhost:~$: apt install aptitude -y - -# show available kubeadm versions in the repositories -ksatzke@gpuhost:~$ aptitude versions kubeadm -Package kubeadm: -p 1.5.7-00 kubernetes-xenial 500 -p 1.6.1-00 kubernetes-xenial 500 -p 1.6.2-00 kubernetes-xenial 500 -... -p 1.18.5-00 kubernetes-xenial 500 -p 1.18.6-00 kubernetes-xenial 500 - -# install specific version of kubelet, kubeadm and kubectl -ksatzke@gpuhost:~$: apt-get install -y kubelet=1.18.6-00 kubeadm=1.18.6-00 kubectl=1.18.6-00 -``` - -6. On the GPU node, edit the ```bat /etc/systemd/system/kubelet.service.d/10-kubeadm.conf``` file and add the following environment argument to enable ```bat DevicePlugins``` feature gate. If there is already ```bat Accelerators``` feature gate set, remove it. 
-```bat -Environment="KUBELET_EXTRA_ARGS=--feature-gates=DevicePlugins=true" -``` - -**/etc/systemd/system/kubelet.service.d/10-kubeadm.conf** - -Note: This drop-in only works with kubeadm and kubelet v1.11+ - -```bat -[Service] -Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf –kubeconfig=/etc/kubernetes/kubelet.conf" - -Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml" -Environment="KUBELET_EXTRA_ARGS=--feature-gates=DevicePlugins=true" - -# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically - -EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env -# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use -# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file. - -EnvironmentFile=-/etc/default/kubelet -ExecStart= -ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGSOn the GPU node, reload and restart kubelet to apply previous changes to the configuration. - -sudo systemctl daemon-reload -sudo systemctl restart kubelet -``` - -7. If not already done, enable GPU support on the Kubernetes master by deploying following Daemonset. - -```bat -kubectl create -f https://raw.githubusercontent.com/NVIDIA/k8s-device-plugin/v0.6.0/nvidia-device-plugin.yml -``` - -8. For the simplicity, generate a new token on the Kubernetes master and print the join command. - -```bat -ksatzke@node1:~$ sudo kubeadm token create --print-join-command - -kubeadm join 192.168.33.11:6443 --token gxzpmv.hzqw4q0xxrw8zai7 --discovery-token-ca-cert-hash sha256:696c21540f4de7bd600be843dddc1b362582f4a378547c2cb0d37f3be40d5699 -``` - -9. Go back to the GPU node and use the printed join command to add GPU node into the cluster. 
- -```bat -ksatzke@node1:~$ sudo kubeadm join 192.168.33.11:6443 --token gxzpmv.hzqw4q0xxrw8zai7 --discovery-token-ca-cert-hash - sha256:696c21540f4de7bd600be843dddc1b362582f4a378547c2cb0d37f3be40d5699 -[preflight] Running pre-flight checks - [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/ - [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 19.03.11. Latest validated version: 18.09 -[preflight] Reading configuration from the cluster... -[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml' -W0723 13:19:02.377909 27185 defaults.go:199] The recommended value for "clusterDNS" in "KubeletConfiguration" is: [10.233.0.10]; the provided value is: [169.254.25.10] -[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.16" ConfigMap in the kube-system namespace -[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" -[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" -[kubelet-start] Activating the kubelet service -[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap... - -This node has joined the cluster: -* Certificate signing request was sent to apiserver and a response was received. -* The Kubelet was informed of the new secure connection details. - -Run 'kubectl get nodes' on the control-plane to see this node join the cluster. -``` - -10. Run following command to see the GPU node (virtual-kubernetes-gpu-2) status on the cluster. - -```bat - ksatzke@gpunode:~$ kubectl get nodes - NAME STATUS ROLES AGE VERSION - node1 Ready 1d v1.18.6 - node2 Ready 13s v1.18.6 - node3 Ready 5d v1.18.6 - node4 Ready 5d v1.18.6 - gpunode NotReady 1d v1.18.6 -``` - -11. After a while, the node is ready. 
- -```bat -gpunode Ready 7m v1.18.6 -``` - -12. Now we have a GPU node ready in our Kubernetes cluster. We can label the recently added node (gpunode) with the accelerator type by running following command on the master. - -```bat -kubectl label nodes gpunode accelerator=nvidia-gtx-1050 -``` - -13. To check nodes for accelerator label, run ```bat kubectl get nodes -L accelerator``` on Kubernetes master. - -```bat -ksatzke@gpunode:~/kubernetes$ kubectl get nodes -L accelerator - -NAME STATUS ROLES AGE VERSION ACCELERATOR - -gpunode Ready master 5m22s v1.18.6 nvidia-gtx-1050 -node1 Ready 1d v1.18.6 -node2 Ready 13s v1.18.6 -node3 Ready 5d v1.18.6 -node4 Ready 5d v1.18.6 -``` - -14. To test the GPU nodes, go to the master and create a yml file with the following content and execute it. - -**gpu-test.yml** - -```yaml -apiVersion: v1 - kind: Pod - metadata: - name: cuda-vector-add - spec: - restartPolicy: OnFailure - containers: - - name: cuda-vector-add - # https://github.com/kubernetes/kubernetes/blob/v1.7.11/test/images/nvidia-cuda/Dockerfile - image: "k8s.gcr.io/cuda-vector-add:v0.1" - resources: - limits: - nvidia.com/gpu: 1 # requesting 1 GPU per container - nodeSelector: - accelerator: nvidia-gtx-1050 # or nvidia-tesla-k80 etc. -``` - -```bat -ksatzke@node1:~/kubernetes$ kubectl create -f gpu-test.yml -pod "cuda-vector-add" created - -ksatzke@node1:~/kubernetes$ kubectl get pods -a -NAME READY STATUS RESTARTS AGE -cuda-vector-add 0/1 Completed 0 19s -``` - +# Installing GPU node and adding it to a KNIX cluster + + + +This is a guide on how to install a GPU node and join it in a running Kubernetes cluster deployed with kubeadm. The guide was tested on a Kubernetes cluster v1.16.6 installed with kubespray, where cluster nodes can be depoyed as VMs using vagrant. VMs in this configuration are running Ubuntu 16.04.4 LTS. + +The node with GPU has a single NVIDIA GTX1050 GPU card. + + +## Step-by-step guide + +1. We start with a blank node with a GPU. 
This is the node, we would like to join in our Kubernetes cluster. First, update the node and install graphic drivers. The version of the drivers has to be at least 361.93\. We have installed version 450.51.05 and CUDA Version 11.0\. Drivers and CUDA installation is not a part of this guide. + + **NVIDIA drivers information** + +```bat +ksatzke@gpuhost:~$ nvidia-smi +Thu Jul 23 10:57:05 2020 ++-----------------------------------------------------------------------------+ +| NVIDIA-SMI 450.51.05 Driver Version: 450.51.05 CUDA Version: 11.0 | +|-------------------------------+----------------------+----------------------+ +| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | +| | | MIG M. | +|===============================+======================+======================| +| 0 GeForce GTX 1050 On | 00000000:01:00.0 On | N/A | +| 30% 46C P0 N/A / 65W | 604MiB / 1992MiB | 2% Default | +| | | N/A | ++-------------------------------+----------------------+----------------------+ ++-----------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=============================================================================| +| 0 N/A N/A 2163 G /usr/lib/xorg/Xorg 369MiB | +| 0 N/A N/A 2904 G /usr/bin/gnome-shell 182MiB | +| 0 N/A N/A 3000 G /usr/lib/firefox/firefox 1MiB | +| 0 N/A N/A 8757 G /usr/lib/firefox/firefox 1MiB | +| 0 N/A N/A 11670 C ...ffice/program/soffice.bin 41MiB | +| 0 N/A N/A 16245 G /usr/lib/firefox/firefox 1MiB | ++-----------------------------------------------------------------------------+ +``` +**CUDA information** + +```bat +ksatzke@gpuhost:~$ cat /usr/local/cuda-10.1/version.txt +CUDA Version 10.1.243 +``` + +2. The next step is to install Docker on the GPU node. Install Docker CE 19.03 from Docker’s repositories for Ubuntu. Proceed with the following commands as a root user. 
+```bat +sudo apt-get update +sudo apt-get install -y \ + apt-transport-https \ + ca-certificates \ + curl \ + software-properties-common +sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - +sudo add-apt-repository \ + "deb https://download.docker.com/linux/$(. /etc/os-release; echo "$ID") \ + $(lsb_release -cs) \ + stable" +sudo apt-get update && apt-get install -y docker-ce=$(apt-cache madison docker-ce | grep 19.03 | head -1 | awk '{print $3}') +``` + +**Docker installation test** +```bat +ksatzke@gpuhost:~$ docker –version + +Docker version 19.03.11, build 42e35e61f3 +``` + +3. On the GPU node, add nvidia-docker package repositories, install it and reload Docker daemon configuration, which might be altered by nvidia-docker installation. + Note that with the release of Docker 19.03, usage of nvidia-docker2 packages are deprecated since NVIDIA GPUs are now natively supported as devices in the Docker runtime. + +```bat +# Add the package repositories +distribution=$(. /etc/os-release;echo $ID$VERSION_ID) +curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add - +curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list + +sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit + +sudo systemctl restart docker +``` + +**nvidia-docker GPU test** + +```bat +ksatzke@gpuhost:~$ docker run --runtime=nvidia --rm nvidia/cuda nvidia-smi +Thu Jul 23 09:17:18 2020 ++-----------------------------------------------------------------------------+ +| NVIDIA-SMI 450.51.05 Driver Version: 450.51.05 CUDA Version: 11.0 | +|-------------------------------+----------------------+----------------------+ +| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|===============================+======================+======================| +| 0 GeForce GTX 1050 On | 00000000:01:00.0 On | N/A | +| 30% 44C P0 N/A / 65W | 749MiB / 1992MiB | 1% Default | +| | | N/A | ++-------------------------------+----------------------+----------------------+ ++-----------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=============================================================================| ++-----------------------------------------------------------------------------+ +``` + +4. Set nvidia-runtime as the default runtime for Docker on the GPU node. Edit the ```bat /etc/docker/daemon.json``` configuration file and set the ```bat default-runtime``` parameter to nvidia. This also allows us to omit the ```bat --runtime=nvidia``` parameter for Docker. +```bat +{ + "default-runtime": "nvidia", + "runtimes": { + "nvidia": { + "path": "/usr/bin/nvidia-container-runtime", + "runtimeArgs": [] + } + } + } +``` + +5. As a root user on the GPU node, add Kubernetes package repositories and install kubeadm, kubectl and kubelet. Then turn the swap off as it is not supported by Kubernetes. 
+ +```bat +apt-get update && apt-get install -y apt-transport-https +curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - +cat <<EOF >/etc/apt/sources.list.d/kubernetes.list +deb http://apt.kubernetes.io/ kubernetes-xenial main +EOF +apt-get update +apt-get install -y kubelet kubeadm kubectl +# turn off swap or comment the swap line in /etc/fstab +sudo swapoff -a +``` +**Specific version installation; e.g., 1.16.6-00** + +```bat +# install aptitude, an interface to package manager +ksatzke@gpuhost:~$: apt install aptitude -y + +# show available kubeadm versions in the repositories +ksatzke@gpuhost:~$ aptitude versions kubeadm +Package kubeadm: +p 1.5.7-00 kubernetes-xenial 500 +p 1.6.1-00 kubernetes-xenial 500 +p 1.6.2-00 kubernetes-xenial 500 +... +p 1.16.5-00 kubernetes-xenial 500 +p 1.16.6-00 kubernetes-xenial 500 +... + +# install specific version of kubelet, kubeadm and kubectl +ksatzke@gpuhost:~$: apt-get install -y kubelet=1.16.6-00 kubeadm=1.16.6-00 kubectl=1.16.6-00 +``` + +6. On the GPU node, edit the /etc/systemd/system/kubelet.service.d/10-kubeadm.conf file and add the following environment argument to enable the DevicePlugins feature gate. If there is already Accelerators feature gate set, remove it. 
+```bat +Environment="KUBELET_EXTRA_ARGS=--feature-gates=DevicePlugins=true" +``` + +**/etc/systemd/system/kubelet.service.d/10-kubeadm.conf** + +Note: This drop-in only works with kubeadm and kubelet v1.11+ + +```bat +[Service] +Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf" + +Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml" +Environment="KUBELET_EXTRA_ARGS=--feature-gates=DevicePlugins=true" + +# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically + +EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env +# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use +# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file. + +EnvironmentFile=-/etc/default/kubelet +ExecStart= +ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGSOn the GPU node, reload and restart kubelet to apply previous changes to the configuration. + +sudo systemctl daemon-reload +sudo systemctl restart kubelet +``` + +7. If not already done, enable GPU support on the Kubernetes master by deploying following Daemonset. + +```bat +kubectl create -f https://raw.githubusercontent.com/NVIDIA/k8s-device-plugin/v0.6.0/nvidia-device-plugin.yml +``` + +8. For the simplicity, generate a new token on the Kubernetes master and print the join command. + +```bat +ksatzke@node1:~$ sudo kubeadm token create --print-join-command + +kubeadm join 192.168.1.161:6443 --token gxzpmv.hzqw4q0xxrw8zai7 --discovery-token-ca-cert-hash sha256:696c21540f4de7bd600be843dddc1b362582f4a378547c2cb0d37f3be40d5699 +``` + +9. Go back to the GPU node and use the printed join command to add GPU node into the cluster. 
+ +```bat +ksatzke@gpuhost:~$ sudo kubeadm join 192.168.1.159:6443 --token gxzpmv.hzqw4q0xxrw8zai7 --discovery-token-ca-cert-hash + sha256:696c21540f4de7bd600be843dddc1b362582f4a378547c2cb0d37f3be40d5699 +[preflight] Running pre-flight checks + [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/ + [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 19.03.11. Latest validated version: 18.09 +[preflight] Reading configuration from the cluster... +[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml' +W0723 13:19:02.377909 27185 defaults.go:199] The recommended value for "clusterDNS" in "KubeletConfiguration" is: [10.233.0.10]; the provided value is: [169.254.25.10] +[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.16" ConfigMap in the kube-system namespace +[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" +[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" +[kubelet-start] Activating the kubelet service +[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap... + +This node has joined the cluster: +* Certificate signing request was sent to apiserver and a response was received. +* The Kubelet was informed of the new secure connection details. + +Run 'kubectl get nodes' on the control-plane to see this node join the cluster. +``` + +10. Run following command on master to see the GPU node (gpuhost) status on the cluster. + +```bat + ksatzke@node1:~$ kubectl get nodes + +NAME STATUS ROLES AGE VERSION +gpuhost NotReady 2m12s v1.16.6 +node1 Ready 19h v1.16.6 +node2 Ready 19h v1.16.6 +node3 Ready 19h v1.16.6 +node4 Ready 19h v1.16.6 +``` + +11. After a while, the node is ready. 
+ +```bat +gpuhost Ready 7m v1.16.6 +``` + +12. Now we have a GPU node ready in our KNIX Kubernetes cluster. We can label this recently added node (gpuhost) with the "accelerator" type by running following command on the master. + +```bat +kubectl label nodes gpuhost accelerator=nvidia-gtx-1050 +``` + +13. To check nodes for accelerator label, run +```bat kubectl get nodes -L accelerator +``` +on Kubernetes master. + +```bat +ksatzke@gpuhost:~/kubernetes$ kubectl get nodes -L accelerator + +NAME STATUS ROLES AGE VERSION ACCELERATOR + +gpuhost Ready master 18m v1.16.6 nvidia-gtx-1050 +node1 Ready 19h v1.16.6 +node2 Ready 19h v1.16.6 +node3 Ready 19h v1.16.6 +node4 Ready 19h v1.16.6 +``` + +14. To test the GPU nodes, go to the master and create a yml file with the following content and execute it. + +**gpu-test.yml** + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: cuda-vector-add +spec: + restartPolicy: OnFailure + containers: + - name: cuda-vector-add + # https://github.com/kubernetes/kubernetes/blob/v1.7.11/test/images/nvidia-cuda/Dockerfile + image: "k8s.gcr.io/cuda-vector-add:v0.1" + resources: + limits: + nvidia.com/gpu: 1 # requesting 1 GPU per container +nodeSelector: + accelerator: nvidia-gtx-1050 # or other nvidia GPU type etc. 
+``` + +```bat +ksatzke@node1:~/kubernetes$ kubectl create -f gpu-test.yml +pod "cuda-vector-add" created + +ksatzke@node1:~/kubernetes$ kubectl get pods +NAME READY STATUS RESTARTS AGE +cuda-vector-add 0/1 Completed 0 19s + +ksatzke@node1:~/kubernetes$ kubectl logs cuda-vector-add +[Vector addition of 50000 elements] +Copy input data from the host memory to the CUDA device +CUDA kernel launch with 196 blocks of 256 threads +Copy output data from the CUDA device to the host memory +Test PASSED +Done +``` + + From 977ee013e73ea7bb8db5b7e6cc4907cf01c97eb1 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Tue, 11 Aug 2020 16:31:35 +0200 Subject: [PATCH 06/64] add GPU sandbox type to Makefiles and helm charts --- Sandbox/Dockerfile_gpu | 75 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 Sandbox/Dockerfile_gpu diff --git a/Sandbox/Dockerfile_gpu b/Sandbox/Dockerfile_gpu new file mode 100644 index 00000000..52ca2ef1 --- /dev/null +++ b/Sandbox/Dockerfile_gpu @@ -0,0 +1,75 @@ +# Copyright 2020 The KNIX Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +#FROM ubuntu:18.04 +FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04 + +# Install (as root) +# Base +RUN apt-get update --fix-missing +RUN apt-get -y --no-install-recommends install build-essential +RUN apt-get -y --no-install-recommends install netbase unzip file libmagic1 + +# CUDA 10.1 dependencies and tools to build dlib +RUN apt-get -y --no-install-recommends install libsm6 libxrender1 libxrender-dev libxext6 libglib2.0-0 git cmake +RUN apt-get install -y --no-install-recommends libnvinfer6=6.0.1-1+cuda10.1 libnvinfer-dev=6.0.1-1+cuda10.1 libnvinfer-plugin6=6.0.1-1+cuda10.1 + +# Python +RUN apt-get -y --no-install-recommends install python3 python3-dev +RUN apt-get -y --no-install-recommends install python3-pip +RUN apt-get -y --no-install-recommends install zlib1g libssl1.0 libsasl2-2 ca-certificates + +RUN /usr/bin/python3 -m pip install --upgrade pip + +RUN /usr/bin/python3 -m pip install setuptools +RUN /usr/bin/python3 -m pip install thrift>=0.12.0 +RUN /usr/bin/python3 -m pip install anytree +RUN /usr/bin/python3 -m pip install ujsonpath +RUN /usr/bin/python3 -m pip install requests +RUN /usr/bin/python3 -m pip install retry +# remove warnings from anytree package +RUN /usr/bin/python3 -m pip install fastcache +# Needed for multi-language support (currently just Java) +RUN /usr/bin/python3 -m pip install thriftpy2 + +# Install dlib for CUDA +RUN git clone https://github.com/davisking/dlib.git +RUN mkdir -p /dlib/build + +RUN cmake -H/dlib -B/dlib/build -DDLIB_USE_CUDA=1 -DUSE_AVX_INSTRUCTIONS=1 +RUN cmake --build /dlib/build + +RUN cd /dlib; python3 /dlib/setup.py install + +# Install the face recognition package and tensorflow +RUN pip3 install face_recognition +RUN pip3 install tensorflow==2.1.0 + +# Java (for queue service) +RUN apt-get -y --no-install-recommends install openjdk-8-jdk-headless + +# Add components (as mfn) +RUN groupadd -o -g 1000 -r mfn && useradd -d /opt/mfn -u 1000 -m -r -g mfn mfn +RUN mkdir /opt/mfn/logs + +COPY 
build/queueservice.jar /opt/mfn/ +ADD frontend/frontend /opt/mfn/frontend +ADD build/SandboxAgent.tar.gz /opt/mfn/ +ADD build/FunctionWorker.tar.gz /opt/mfn/ +ADD build/LoggingService.tar.gz /opt/mfn/ + +RUN chown mfn:mfn -R /opt/mfn +USER mfn +WORKDIR /opt/mfn +CMD ["python3", "/opt/mfn/SandboxAgent/sandboxagent.py"] From 87fd7926083141172fc1e739a68d82edfa272580 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Tue, 11 Aug 2020 16:31:53 +0200 Subject: [PATCH 07/64] add GPU sandbox type to Makefiles and helm charts --- Sandbox/Dockerfile.gpu | 75 -------------------------- Sandbox/Makefile | 12 +++++ deploy/helm/microfunctions/values.yaml | 1 + 3 files changed, 13 insertions(+), 75 deletions(-) delete mode 100644 Sandbox/Dockerfile.gpu diff --git a/Sandbox/Dockerfile.gpu b/Sandbox/Dockerfile.gpu deleted file mode 100644 index 52ca2ef1..00000000 --- a/Sandbox/Dockerfile.gpu +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright 2020 The KNIX Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -#FROM ubuntu:18.04 -FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04 - -# Install (as root) -# Base -RUN apt-get update --fix-missing -RUN apt-get -y --no-install-recommends install build-essential -RUN apt-get -y --no-install-recommends install netbase unzip file libmagic1 - -# CUDA 10.1 dependencies and tools to build dlib -RUN apt-get -y --no-install-recommends install libsm6 libxrender1 libxrender-dev libxext6 libglib2.0-0 git cmake -RUN apt-get install -y --no-install-recommends libnvinfer6=6.0.1-1+cuda10.1 libnvinfer-dev=6.0.1-1+cuda10.1 libnvinfer-plugin6=6.0.1-1+cuda10.1 - -# Python -RUN apt-get -y --no-install-recommends install python3 python3-dev -RUN apt-get -y --no-install-recommends install python3-pip -RUN apt-get -y --no-install-recommends install zlib1g libssl1.0 libsasl2-2 ca-certificates - -RUN /usr/bin/python3 -m pip install --upgrade pip - -RUN /usr/bin/python3 -m pip install setuptools -RUN /usr/bin/python3 -m pip install thrift>=0.12.0 -RUN /usr/bin/python3 -m pip install anytree -RUN /usr/bin/python3 -m pip install ujsonpath -RUN /usr/bin/python3 -m pip install requests -RUN /usr/bin/python3 -m pip install retry -# remove warnings from anytree package -RUN /usr/bin/python3 -m pip install fastcache -# Needed for multi-language support (currently just Java) -RUN /usr/bin/python3 -m pip install thriftpy2 - -# Install dlib for CUDA -RUN git clone https://github.com/davisking/dlib.git -RUN mkdir -p /dlib/build - -RUN cmake -H/dlib -B/dlib/build -DDLIB_USE_CUDA=1 -DUSE_AVX_INSTRUCTIONS=1 -RUN cmake --build /dlib/build - -RUN cd /dlib; python3 /dlib/setup.py install - -# Install the face recognition package and tensorflow -RUN pip3 install face_recognition -RUN pip3 install tensorflow==2.1.0 - -# Java (for queue service) -RUN apt-get -y --no-install-recommends install openjdk-8-jdk-headless - -# Add components (as mfn) -RUN groupadd -o -g 1000 -r mfn && useradd -d /opt/mfn -u 1000 -m -r -g mfn mfn -RUN mkdir /opt/mfn/logs - -COPY 
build/queueservice.jar /opt/mfn/ -ADD frontend/frontend /opt/mfn/frontend -ADD build/SandboxAgent.tar.gz /opt/mfn/ -ADD build/FunctionWorker.tar.gz /opt/mfn/ -ADD build/LoggingService.tar.gz /opt/mfn/ - -RUN chown mfn:mfn -R /opt/mfn -USER mfn -WORKDIR /opt/mfn -CMD ["python3", "/opt/mfn/SandboxAgent/sandboxagent.py"] diff --git a/Sandbox/Makefile b/Sandbox/Makefile index 82f829c9..0a30cdc6 100644 --- a/Sandbox/Makefile +++ b/Sandbox/Makefile @@ -20,6 +20,7 @@ include ../build_env.mk default: build_thrift \ image \ + image_gpu \ image_java clean: @@ -91,6 +92,16 @@ image: \ build/SandboxAgent.tar.gz $(call build_image,Dockerfile,microfn/sandbox) +image_gpu: \ + Dockerfile_gpu \ + build/queueservice.jar \ + frontend/frontend \ + build/LoggingService.tar.gz \ + build/FunctionWorker.tar.gz \ + build/SandboxAgent.tar.gz + $(call build_image,Dockerfile_gpu,microfn/sandbox_gpu) + + image_java: \ Dockerfile_java \ build/queueservice.jar \ @@ -103,6 +114,7 @@ image_java: \ push: image image_java $(call push_image,microfn/sandbox) + $(call push_image,microfn/sandbox_gpu) $(call push_image,microfn/sandbox_java) diff --git a/deploy/helm/microfunctions/values.yaml b/deploy/helm/microfunctions/values.yaml index 310cbcb1..634d1f8f 100644 --- a/deploy/helm/microfunctions/values.yaml +++ b/deploy/helm/microfunctions/values.yaml @@ -45,6 +45,7 @@ manager: memory: 1Gi sandbox: imagePathPython: "/microfn/sandbox" + imagePathPythonGPU: "/microfn/sandbox_gpu" imagePathJava: "/microfn/sandbox_java" imageTag: "latest" imagePullPolicy: "Always" From 32dd9042666cca68a37628ef5e268781e19bd782 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Wed, 12 Aug 2020 15:44:41 +0200 Subject: [PATCH 08/64] adding logig to spin up a GPU sandbox on demand --- ManagementService/python/addWorkflow.py | 2 ++ ManagementService/python/deployWorkflow.py | 9 ++++++++- deploy/helm/microfunctions/templates/management.yaml | 1 + 3 files changed, 11 insertions(+), 1 deletion(-) diff --git 
a/ManagementService/python/addWorkflow.py b/ManagementService/python/addWorkflow.py index 5f9d4b03..911b3b83 100644 --- a/ManagementService/python/addWorkflow.py +++ b/ManagementService/python/addWorkflow.py @@ -41,6 +41,8 @@ def handle(value, sapi): wf["id"] = hashlib.md5(str(uuid.uuid4()).encode()).hexdigest() + #wf["on_gpu"] = True # add metadata on GPU requirements for this workflow. ToDo: make this configurable via GUI + sapi.put(email + "_workflow_" + wf["id"], json.dumps(wf), True, True) #sapi.put(email + "_workflow_json_" + wf["id"], "", True, True) #sapi.put(email + "_workflow_requirements_" + wf["id"], "", True, True) diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index 7e2484cb..180f662e 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -132,6 +132,8 @@ def compile_resource_info_map(resource_names, uploaded_resources, email, sapi, d resource_metadata = json.loads(resource_metadata) if "runtime" in resource_metadata: resource_info["runtime"] = resource_metadata["runtime"] + if "on_gpu" in resource_metadata: + resource_info["on_gpu"] = True num_chunks_str = dlc.get("grain_source_zip_num_chunks_" + resource_id) try: @@ -468,7 +470,12 @@ def handle(value, sapi): else: # We're running BARE METAL mode # _XXX_: due to the queue service still being in java in the sandbox - sandbox_image_name = "microfn/sandbox" + + sandbox_image_name = "microfn/sandbox" # default value + if "on_gpu" in resource_info_map.keys(): # sandbox_gpu image should be used for ths workflow + if resource_info_map["on_gpu"] == True: + sandbox_image_name = "microfn/sandbox_gpu" + if any(resource_info_map[res_name]["runtime"] == "Java" for res_name in resource_info_map): sandbox_image_name = "microfn/sandbox_java" diff --git a/deploy/helm/microfunctions/templates/management.yaml b/deploy/helm/microfunctions/templates/management.yaml index 69734298..75d25688 100644 --- 
a/deploy/helm/microfunctions/templates/management.yaml +++ b/deploy/helm/microfunctions/templates/management.yaml @@ -188,6 +188,7 @@ data: { "app.fullname.prefix": "wf-{{ .Release.Name }}", "image.Python": "{{ .Values.imageRepo }}{{ .Values.manager.sandbox.imagePathPython }}", + "image.PythonGPU": "{{ .Values.imageRepo }}{{ .Values.manager.sandbox.imagePathPythonGPU }}", "image.Java": "{{ .Values.imageRepo }}{{ .Values.manager.sandbox.imagePathJava }}", {{- if (.Values.manager.createServiceAccounts) -}} "mgmtserviceaccount": "{{ template "manager.fullname" . }}", From cdd0faf8d2dd1468910d0ac5458ee9d5d61450b0 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Thu, 20 Aug 2020 11:13:36 +0200 Subject: [PATCH 09/64] fixing typos in README --- .../helm/microfunctions/README_GPU_Installation.md | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/deploy/helm/microfunctions/README_GPU_Installation.md b/deploy/helm/microfunctions/README_GPU_Installation.md index c29330d0..908c4fec 100644 --- a/deploy/helm/microfunctions/README_GPU_Installation.md +++ b/deploy/helm/microfunctions/README_GPU_Installation.md @@ -132,7 +132,7 @@ EOF apt-get update apt-get install -y kubelet kubeadm kubectl # turn off swap or comment the swap line in /etc/fstab -sudo swapoff -a +sudo swapoff -a ``` **Specific version installation; e.g., 1.****16****.****6****-00** @@ -152,7 +152,7 @@ p 1.16.6-00 kubernetes-xenial 500 ... # install specific version of kubelet, kubeadm and kubectl -ksatzke@gpuhost:~$: apt-get install -y kubelet=1.16.6-00 kubeadm=1.16.6-00 kubectl=1.16.6-00 +ksatzke@gpuhost:~$: apt-get install -y kubelet=1.16.6-00 kubeadm=1.16.6-00 kubectl=1.16.6-00 ``` 6. On the GPU node, edit the /etc/systemd/system/kubelet.service.d/10-kubeadm.conf file and add the following environment argument to enable the DevicePlugins feature gate. If there is already Accelerators feature gate set, remove it. 
@@ -230,7 +230,7 @@ Run 'kubectl get nodes' on the control-plane to see this node join the cluster. NAME STATUS ROLES AGE VERSION gpuhost NotReady 2m12s v1.16.6 -node1 Ready 19h v1.16.6 +node1 Ready master 19h v1.16.6 node2 Ready 19h v1.16.6 node3 Ready 19h v1.16.6 node4 Ready 19h v1.16.6 @@ -258,8 +258,8 @@ ksatzke@gpuhost:~/kubernetes$ kubectl get nodes -L accelerator NAME STATUS ROLES AGE VERSION ACCELERATOR -gpuhost Ready master 18m v1.16.6 nvidia-gtx-1050 -node1 Ready 19h v1.16.6 +gpuhost Ready 18m v1.16.6 nvidia-gtx-1050 +node1 Ready master 19h v1.16.6 node2 Ready 19h v1.16.6 node3 Ready 19h v1.16.6 node4 Ready 19h v1.16.6 @@ -303,5 +303,3 @@ Copy output data from the CUDA device to the host memory Test PASSED Done ``` - - From 9fba58372564b9bd1a30d9d1dae3dcd4413a1a91 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Fri, 21 Aug 2020 11:51:58 +0000 Subject: [PATCH 10/64] configure separate GPU support for management and common workflow kscv's --- ManagementService/python/deployWorkflow.py | 13 +++++ Sandbox/Dockerfile | 20 +------- Sandbox/Dockerfile.cpu | 57 ---------------------- deploy/helm/Makefile | 2 +- deploy/helm/microfunctions/values.yaml | 7 +-- tests/mfn_test_utils.py | 12 ++--- 6 files changed, 25 insertions(+), 86 deletions(-) delete mode 100644 Sandbox/Dockerfile.cpu diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index 180f662e..c4256666 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -301,6 +301,19 @@ def create_k8s_deployment(email, workflow_info, runtime, management=False): kservice['spec']['template']['spec']['volumes'] = [{ 'name': 'new-workflow-conf', 'configMap': {'name': new_workflow_conf['configmap']}}] kservice['spec']['template']['spec']['containers'][0]['volumeMounts'] = [{'name': 'new-workflow-conf', 'mountPath': '/opt/mfn/SandboxAgent/conf'}] kservice['spec']['template']['spec']['serviceAccountName'] = 
new_workflow_conf['mgmtserviceaccount'] + + # management container should not consume a CPU + #kservice['spec']['template']['spec']['containers'][0]['image'] = new_workflow_conf['image.Python'] + #if ("nvidia.com/gpu" in kservice['spec']['template']['spec']['containers'][0]['resources']['limits'].keys()): + # overwrite limits entry, generate new k/v pair + #print("RESOURCES: " + str(kservice['spec']['template']['spec']['containers'][0]['resources'])) # just testin... + #print("RESOURCES: " + str(kservice['spec']['template']['spec']['containers'][0]['resources']['limits'])) # just testin... + if (labels['workflowid'] == "Management"): + kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['nvidia.com/gpu'] = "0" + kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['nvidia.com/gpu'] = "0" + #kservice['spec']['template']['spec']['containers'][0]['resources']['limits'] = {{"cpu": 1, "memory": "2Gi"}, "requests": {"cpu": 1, "memory": "1Gi"}} + #kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['nvidia.com/gpu'] = 0 + if 'HTTP_GATEWAYPORT' in new_workflow_conf: env.append({'name': 'HTTP_GATEWAYPORT', 'value': new_workflow_conf['HTTP_GATEWAYPORT']}) if 'HTTPS_GATEWAYPORT' in new_workflow_conf: diff --git a/Sandbox/Dockerfile b/Sandbox/Dockerfile index 52ca2ef1..54c74044 100644 --- a/Sandbox/Dockerfile +++ b/Sandbox/Dockerfile @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-#FROM ubuntu:18.04 -FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04 +FROM ubuntu:18.04 # Install (as root) # Base @@ -21,10 +20,6 @@ RUN apt-get update --fix-missing RUN apt-get -y --no-install-recommends install build-essential RUN apt-get -y --no-install-recommends install netbase unzip file libmagic1 -# CUDA 10.1 dependencies and tools to build dlib -RUN apt-get -y --no-install-recommends install libsm6 libxrender1 libxrender-dev libxext6 libglib2.0-0 git cmake -RUN apt-get install -y --no-install-recommends libnvinfer6=6.0.1-1+cuda10.1 libnvinfer-dev=6.0.1-1+cuda10.1 libnvinfer-plugin6=6.0.1-1+cuda10.1 - # Python RUN apt-get -y --no-install-recommends install python3 python3-dev RUN apt-get -y --no-install-recommends install python3-pip @@ -43,19 +38,6 @@ RUN /usr/bin/python3 -m pip install fastcache # Needed for multi-language support (currently just Java) RUN /usr/bin/python3 -m pip install thriftpy2 -# Install dlib for CUDA -RUN git clone https://github.com/davisking/dlib.git -RUN mkdir -p /dlib/build - -RUN cmake -H/dlib -B/dlib/build -DDLIB_USE_CUDA=1 -DUSE_AVX_INSTRUCTIONS=1 -RUN cmake --build /dlib/build - -RUN cd /dlib; python3 /dlib/setup.py install - -# Install the face recognition package and tensorflow -RUN pip3 install face_recognition -RUN pip3 install tensorflow==2.1.0 - # Java (for queue service) RUN apt-get -y --no-install-recommends install openjdk-8-jdk-headless diff --git a/Sandbox/Dockerfile.cpu b/Sandbox/Dockerfile.cpu deleted file mode 100644 index 54c74044..00000000 --- a/Sandbox/Dockerfile.cpu +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2020 The KNIX Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -FROM ubuntu:18.04 - -# Install (as root) -# Base -RUN apt-get update --fix-missing -RUN apt-get -y --no-install-recommends install build-essential -RUN apt-get -y --no-install-recommends install netbase unzip file libmagic1 - -# Python -RUN apt-get -y --no-install-recommends install python3 python3-dev -RUN apt-get -y --no-install-recommends install python3-pip -RUN apt-get -y --no-install-recommends install zlib1g libssl1.0 libsasl2-2 ca-certificates - -RUN /usr/bin/python3 -m pip install --upgrade pip - -RUN /usr/bin/python3 -m pip install setuptools -RUN /usr/bin/python3 -m pip install thrift>=0.12.0 -RUN /usr/bin/python3 -m pip install anytree -RUN /usr/bin/python3 -m pip install ujsonpath -RUN /usr/bin/python3 -m pip install requests -RUN /usr/bin/python3 -m pip install retry -# remove warnings from anytree package -RUN /usr/bin/python3 -m pip install fastcache -# Needed for multi-language support (currently just Java) -RUN /usr/bin/python3 -m pip install thriftpy2 - -# Java (for queue service) -RUN apt-get -y --no-install-recommends install openjdk-8-jdk-headless - -# Add components (as mfn) -RUN groupadd -o -g 1000 -r mfn && useradd -d /opt/mfn -u 1000 -m -r -g mfn mfn -RUN mkdir /opt/mfn/logs - -COPY build/queueservice.jar /opt/mfn/ -ADD frontend/frontend /opt/mfn/frontend -ADD build/SandboxAgent.tar.gz /opt/mfn/ -ADD build/FunctionWorker.tar.gz /opt/mfn/ -ADD build/LoggingService.tar.gz /opt/mfn/ - -RUN chown mfn:mfn -R /opt/mfn -USER mfn -WORKDIR /opt/mfn -CMD ["python3", "/opt/mfn/SandboxAgent/sandboxagent.py"] diff --git 
a/deploy/helm/Makefile b/deploy/helm/Makefile index 10adac49..d0f7e8e8 100644 --- a/deploy/helm/Makefile +++ b/deploy/helm/Makefile @@ -41,4 +41,4 @@ push: make -C ../../GUI push deploy: push - helm install --name mfn microfunctions/ \ No newline at end of file + helm install mfn microfunctions/ diff --git a/deploy/helm/microfunctions/values.yaml b/deploy/helm/microfunctions/values.yaml index 634d1f8f..fa658c2c 100644 --- a/deploy/helm/microfunctions/values.yaml +++ b/deploy/helm/microfunctions/values.yaml @@ -19,7 +19,8 @@ #------------------------------------------------------------------------------ # MicroFunction management workflow #------------------------------------------------------------------------------ -imageRepo: "registry.kube-system.svc.cluster.local" +#imageRepo: "registry.kube-system.svc.cluster.local" +imageRepo: "localhost:5000" manager: #httpProxy: "http://:" #httpsProxy: "http://:" @@ -64,7 +65,7 @@ manager: # MicroFunction Datalayer #------------------------------------------------------------------------------ datalayer: - replicas: 3 + replicas: 1 imagePath: "/microfn/datalayer" imageTag: "latest" imagePullPolicy: "Always" @@ -82,7 +83,7 @@ datalayer: # Riak global data storage #------------------------------------------------------------------------------ riak: - replicas: 3 + replicas: 1 imagePath: "/microfn/riak" imageTag: "latest" imagePullPolicy: "Always" diff --git a/tests/mfn_test_utils.py b/tests/mfn_test_utils.py index b6d65dae..b19dd8ce 100644 --- a/tests/mfn_test_utils.py +++ b/tests/mfn_test_utils.py @@ -326,10 +326,10 @@ def get_test_workflow_endpoints(self): if self._workflow.status == "deployed": return self._workflow.endpoints - def execute(self, message, timeout=None, check_duration=False, async=False): + def execute(self, message, timeout=None, check_duration=False, async_=False): if timeout is None: timeout = self._settings["timeout"] - if async: + if async_: return self._workflow.execute_async(message, timeout) else: 
return self._workflow.execute(message, timeout, check_duration) @@ -365,7 +365,7 @@ def exec_only(self, inp): if any_failed_tests: self._print_logs(self._workflow.logs()) - def exec_tests(self, testtuplelist, check_just_keys=False, check_duration=False, should_undeploy=True, async=False): + def exec_tests(self, testtuplelist, check_just_keys=False, check_duration=False, should_undeploy=True, async_=False): any_failed_tests = False durations = [] @@ -378,7 +378,7 @@ def exec_tests(self, testtuplelist, check_just_keys=False, check_duration=False, if check_duration: rn, t_total = self.execute(json.loads(inp), check_duration=check_duration) else: - rn = self.execute(json.loads(inp), async=async) + rn = self.execute(json.loads(inp), async_=async_) if check_duration: durations.append(t_total) @@ -390,7 +390,7 @@ def exec_tests(self, testtuplelist, check_just_keys=False, check_duration=False, res_to_check = [] # hold on to the Execution object, so that we can retrieve more results if needed - if async: + if async_: rn_async = rn if not isinstance(res, list): @@ -404,7 +404,7 @@ def exec_tests(self, testtuplelist, check_just_keys=False, check_duration=False, for cur_res in res_to_check: # before we can compare results, we need to ensure that we get the actual result # if we executed asynchronously, we'll have to wait until we get the result - if async: + if async_: rn = rn_async.get() if check_just_keys: From 0124123561adc64e8e703960c3d5a25731b06893 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Tue, 29 Sep 2020 09:33:55 +0000 Subject: [PATCH 11/64] added configuration of sandox_gpu container image for wf pods to run GPU related tests --- ManagementService/python/deployWorkflow.py | 20 +++++++------------- deploy/helm/microfunctions/values.yaml | 4 ++-- mfn_sdk/mfn_sdk/workflow.py | 2 ++ tests/mfn_test_utils.py | 4 ++++ 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index 
c4256666..88905043 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -132,8 +132,8 @@ def compile_resource_info_map(resource_names, uploaded_resources, email, sapi, d resource_metadata = json.loads(resource_metadata) if "runtime" in resource_metadata: resource_info["runtime"] = resource_metadata["runtime"] - if "on_gpu" in resource_metadata: - resource_info["on_gpu"] = True + #if "on_gpu" in resource_metadata: + # resource_info["on_gpu"] = True num_chunks_str = dlc.get("grain_source_zip_num_chunks_" + resource_id) try: @@ -302,17 +302,11 @@ def create_k8s_deployment(email, workflow_info, runtime, management=False): kservice['spec']['template']['spec']['containers'][0]['volumeMounts'] = [{'name': 'new-workflow-conf', 'mountPath': '/opt/mfn/SandboxAgent/conf'}] kservice['spec']['template']['spec']['serviceAccountName'] = new_workflow_conf['mgmtserviceaccount'] - # management container should not consume a CPU - #kservice['spec']['template']['spec']['containers'][0]['image'] = new_workflow_conf['image.Python'] - #if ("nvidia.com/gpu" in kservice['spec']['template']['spec']['containers'][0]['resources']['limits'].keys()): - # overwrite limits entry, generate new k/v pair - #print("RESOURCES: " + str(kservice['spec']['template']['spec']['containers'][0]['resources'])) # just testin... - #print("RESOURCES: " + str(kservice['spec']['template']['spec']['containers'][0]['resources']['limits'])) # just testin... 
+ # management container should not consume a CPU and use standard sandbox image if (labels['workflowid'] == "Management"): kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['nvidia.com/gpu'] = "0" kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['nvidia.com/gpu'] = "0" - #kservice['spec']['template']['spec']['containers'][0]['resources']['limits'] = {{"cpu": 1, "memory": "2Gi"}, "requests": {"cpu": 1, "memory": "1Gi"}} - #kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['nvidia.com/gpu'] = 0 + kservice['spec']['template']['spec']['containers'][0]['image'] = "localhost:5000/microfn/sandbox" if 'HTTP_GATEWAYPORT' in new_workflow_conf: env.append({'name': 'HTTP_GATEWAYPORT', 'value': new_workflow_conf['HTTP_GATEWAYPORT']}) @@ -485,9 +479,9 @@ def handle(value, sapi): # _XXX_: due to the queue service still being in java in the sandbox sandbox_image_name = "microfn/sandbox" # default value - if "on_gpu" in resource_info_map.keys(): # sandbox_gpu image should be used for ths workflow - if resource_info_map["on_gpu"] == True: - sandbox_image_name = "microfn/sandbox_gpu" + #if "on_gpu" in resource_info_map.keys(): # sandbox_gpu image should be used for ths workflow + # if resource_info_map["on_gpu"] == True: + # sandbox_image_name = "microfn/sandbox_gpu" if any(resource_info_map[res_name]["runtime"] == "Java" for res_name in resource_info_map): sandbox_image_name = "microfn/sandbox_java" diff --git a/deploy/helm/microfunctions/values.yaml b/deploy/helm/microfunctions/values.yaml index fa658c2c..c3de1908 100644 --- a/deploy/helm/microfunctions/values.yaml +++ b/deploy/helm/microfunctions/values.yaml @@ -45,8 +45,8 @@ manager: cpu: 100m memory: 1Gi sandbox: - imagePathPython: "/microfn/sandbox" - imagePathPythonGPU: "/microfn/sandbox_gpu" + #imagePathPython: "/microfn/sandbox" + imagePathPython: "/microfn/sandbox_gpu" imagePathJava: "/microfn/sandbox_java" imageTag: "latest" 
imagePullPolicy: "Always" diff --git a/mfn_sdk/mfn_sdk/workflow.py b/mfn_sdk/mfn_sdk/workflow.py index bbacdb20..7870674a 100644 --- a/mfn_sdk/mfn_sdk/workflow.py +++ b/mfn_sdk/mfn_sdk/workflow.py @@ -279,6 +279,8 @@ def execute(self,data,timeout=60, check_duration=False): # we are already deployed and have the endpoints stored in self._endpoints url = random.choice(self._endpoints) + print(url) + url=url+":30336" try: #postdata = {} #postdata["value"] = json.dumps(data) diff --git a/tests/mfn_test_utils.py b/tests/mfn_test_utils.py index b19dd8ce..87343599 100644 --- a/tests/mfn_test_utils.py +++ b/tests/mfn_test_utils.py @@ -219,6 +219,10 @@ def _get_resource_info_map(self, workflow_description=None, resource_info_map=No print("ERROR: invalid workflow description.") assert False + #resource_info_map[resource_name]['on_gpu'] = True + + #print("resource_info_map: " + str(resource_info_map)) + return resource_info_map def _delete_resource_if_existing(self, existing_resources, resource_name): From cf047f0e5c4be80d665fd49763dea600c8a39dbc Mon Sep 17 00:00:00 2001 From: ksatzke Date: Tue, 29 Sep 2020 15:47:33 +0000 Subject: [PATCH 12/64] improved configuration for workflows calling for GPUs --- ManagementService/python/deployWorkflow.py | 25 +++++++++++++++++++--- mfn_sdk/mfn_sdk/workflow.py | 6 +++++- tests/asl_DLIB/test.py | 2 +- tests/mfn_test_utils.py | 17 +++++++++------ 4 files changed, 39 insertions(+), 11 deletions(-) diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index 88905043..c5c4ee75 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -132,8 +132,9 @@ def compile_resource_info_map(resource_names, uploaded_resources, email, sapi, d resource_metadata = json.loads(resource_metadata) if "runtime" in resource_metadata: resource_info["runtime"] = resource_metadata["runtime"] - #if "on_gpu" in resource_metadata: - # resource_info["on_gpu"] = True + 
print("RESOURCE_INFO_ALL: " +str(resource_info)) + #if "num_gpu" in resource_metadata: + # print("RESOURCE_INFO: " + str(resource_info["num_gpu"])) num_chunks_str = dlc.get("grain_source_zip_num_chunks_" + resource_id) try: @@ -296,12 +297,23 @@ def create_k8s_deployment(email, workflow_info, runtime, management=False): env.append({'name': 'WORKFLOWID', 'value': workflow_info["workflowId"]}) env.append({'name': 'WORKFLOWNAME', 'value': workflow_info["workflowName"]}) + """ + if "num_gpu" in workflow_info.keys(): + print("INSIDE K8S Deploy, num_gpu: " + str(workflow_info['num_gpu'])) + num_gpu = int(workflow_info['num_gpu']) + # overwrite values from values.yaml for new workflows + kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['nvidia.com/gpu'] = str(num_gpu) + kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['nvidia.com/gpu'] = str(num_gpu) + kservice['spec']['template']['spec']['containers'][0]['image'] = "localhost:5000/microfn/sandbox" + if num_gpu > 0: + kservice['spec']['template']['spec']['containers'][0]['image'] = "localhost:5000/microfn/sandbox" + """ # Special handling for the management container if management: kservice['spec']['template']['spec']['volumes'] = [{ 'name': 'new-workflow-conf', 'configMap': {'name': new_workflow_conf['configmap']}}] kservice['spec']['template']['spec']['containers'][0]['volumeMounts'] = [{'name': 'new-workflow-conf', 'mountPath': '/opt/mfn/SandboxAgent/conf'}] kservice['spec']['template']['spec']['serviceAccountName'] = new_workflow_conf['mgmtserviceaccount'] - + # management container should not consume a CPU and use standard sandbox image if (labels['workflowid'] == "Management"): kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['nvidia.com/gpu'] = "0" @@ -464,6 +476,13 @@ def handle(value, sapi): runtime = "Java" else: runtime = "Python" + + if "num_gpu" in resource_info_map.keys(): + print ("RESOURCE_INFO_MAP: " + 
str(resource_info_map)) + workflow_info['num_gpu'] = resource_info_map['num_gpu'] + else: + workflow_info['num_gpu'] = 0 + url, endpoint_key = create_k8s_deployment(email, workflow_info, runtime) if url is not None and len(url) > 0: status = "deploying" diff --git a/mfn_sdk/mfn_sdk/workflow.py b/mfn_sdk/mfn_sdk/workflow.py index 7870674a..8c756b55 100644 --- a/mfn_sdk/mfn_sdk/workflow.py +++ b/mfn_sdk/mfn_sdk/workflow.py @@ -127,7 +127,7 @@ def json(self,json): self.client.action('uploadWorkflowJSON',{'workflow':{'id':self.id,'json':base64.b64encode(self._json.encode()).decode()}}) - def deploy(self, timeout=None): + def deploy(self, timeout=None): #, num_gpu=None): """ deploy a workflow and optionally wait in linearly increasing multiples of 1000ms :timeout: By default returns after calling deploy on the workflow without waiting for it to be actually deployed. If timeout is set to a numeric <= 0, it waits indefinitely in intervals of 1000ms, 2000ms, 3000ms, ... @@ -145,6 +145,10 @@ def deploy(self, timeout=None): else: self.client.action('deployWorkflow',{'workflow':{'id':self.id}}) + #if num_gpu is not None: + # print("NUM_GPU:" + str(num_gpu)) + + # if timeout is None, do not wait but return immediately even if it's not yet deployed if timeout is None: return diff --git a/tests/asl_DLIB/test.py b/tests/asl_DLIB/test.py index 281da20d..fd990152 100644 --- a/tests/asl_DLIB/test.py +++ b/tests/asl_DLIB/test.py @@ -34,6 +34,6 @@ def test_dlib(self): testtuplelist =[(inp1, res1)] - test = MFNTest(test_name = "Dlib__Test") + test = MFNTest(test_name = "Dlib_Test", num_gpu = 1) test.exec_tests(testtuplelist) diff --git a/tests/mfn_test_utils.py b/tests/mfn_test_utils.py index 87343599..40a7c8be 100644 --- a/tests/mfn_test_utils.py +++ b/tests/mfn_test_utils.py @@ -43,7 +43,7 @@ class MfnAppTextFormat(): mfntestfailed = MfnAppTextFormat.STYLE_BOLD + MfnAppTextFormat.COLOR_RED + 'FAILED' + MfnAppTextFormat.END + MfnAppTextFormat.END class MFNTest(): - def __init__(self, 
test_name=None, timeout=None, workflow_filename=None, new_user=False, delete_user=False): + def __init__(self, test_name=None, timeout=None, workflow_filename=None, new_user=False, delete_user=False, num_gpu=None): self._settings = self._get_settings() @@ -84,6 +84,9 @@ def __init__(self, test_name=None, timeout=None, workflow_filename=None, new_use if timeout is not None: self._settings["timeout"] = timeout + if num_gpu is not None: + self._settings["num_gpu"] = num_gpu + self._log_clear_timestamp = int(time.time() * 1000.0 * 1000.0) # will be the deployed workflow object in self._client @@ -190,6 +193,9 @@ def _get_resource_info_map(self, workflow_description=None, resource_info_map=No resource_info["resource_req_filename"] = "requirements/" + resource_ref + "_requirements.txt" resource_info["resource_env_filename"] = "environment_variables/" + resource_ref + "_environment_variables.txt" resource_info_map[resource_ref] = resource_info + resource_info_map[resource_ref]['num_gpu'] = self._settings['num_gpu'] + print("resource_info_map: " + json.dumps(resource_info_map)) + elif "States" in workflow_description: states = workflow_description["States"] @@ -203,6 +209,9 @@ def _get_resource_info_map(self, workflow_description=None, resource_info_map=No resource_info["resource_req_filename"] = "requirements/" + resource_name + "_requirements.txt" resource_info["resource_env_filename"] = "environment_variables/" + resource_name + "_environment_variables.txt" resource_info_map[resource_name] = resource_info + resource_info_map[resource_name]['num_gpu'] = self._settings['num_gpu'] + print("resource_info_map: " + json.dumps(resource_info_map)) + if "Type" in state and state["Type"] == "Parallel": branches = state['Branches'] @@ -219,10 +228,6 @@ def _get_resource_info_map(self, workflow_description=None, resource_info_map=No print("ERROR: invalid workflow description.") assert False - #resource_info_map[resource_name]['on_gpu'] = True - - #print("resource_info_map: " + 
str(resource_info_map)) - return resource_info_map def _delete_resource_if_existing(self, existing_resources, resource_name): @@ -299,7 +304,7 @@ def deploy_workflow(self): try: wf = self._client.add_workflow(self._workflow_name) wf.json = json.dumps(self._workflow_description) - wf.deploy(self._settings["timeout"]) + wf.deploy(self._settings["timeout"]) #, num_gpu=self._settings['num_gpu']) self._workflow = wf if self._workflow.status != "failed": print("MFN workflow " + self._workflow_name + " deployed.") From 89e8d24d73203701d828651a2eebed93990b1704 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Fri, 2 Oct 2020 10:01:42 +0000 Subject: [PATCH 13/64] first cut on extending Workflow class with GPU properties --- ManagementService/python/addWorkflow.py | 4 ++ ManagementService/python/deployWorkflow.py | 50 ++++++++++++++++------ deploy/helm/microfunctions/values.yaml | 4 +- mfn_sdk/mfn_sdk/mfnclient.py | 7 +-- mfn_sdk/mfn_sdk/workflow.py | 26 ++++++++--- tests/mfn_test_utils.py | 30 +++++++++++-- 6 files changed, 93 insertions(+), 28 deletions(-) diff --git a/ManagementService/python/addWorkflow.py b/ManagementService/python/addWorkflow.py index 911b3b83..2c730c13 100644 --- a/ManagementService/python/addWorkflow.py +++ b/ManagementService/python/addWorkflow.py @@ -27,6 +27,7 @@ def handle(value, sapi): success = False email = data["email"] + if "workflow" in data: workflow = data["workflow"] @@ -38,6 +39,9 @@ def handle(value, sapi): wf["status"] = "undeployed" wf["modified"] = time.time() wf["endpoints"] = [] + wf["gpu_usage"] = None + if "gpu_usage" in workflow: + wf["gpu_usage"] = workflow["gpu_usage"] wf["id"] = hashlib.md5(str(uuid.uuid4()).encode()).hexdigest() diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index c5c4ee75..7d7057e0 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -26,6 +26,23 @@ WF_TYPE_SAND = 0 WF_TYPE_ASL = 1 +def 
get_kv_pairs(testdict, keys, dicts=None): + # find and return kv pairs with particular keys in testdict + if not dicts: + dicts = [testdict] + testdict = [testdict] + data = testdict.pop(0) + if isinstance(data, dict): + data = data.values() + for d in data: + if isinstance(d, dict) or isinstance(d, list): # check d for type + testdict.append(d) + if isinstance(d, dict): + dicts.append(d) + if testdict: # no more data to search + return get_kv_pairs(testdict, keys, dicts) + return [(k, v) for d in dicts for k, v in d.items() if k in keys] + def is_asl_workflow(wfobj): return 'StartAt' in wfobj and 'States' in wfobj and isinstance(wfobj['States'], dict) @@ -132,7 +149,6 @@ def compile_resource_info_map(resource_names, uploaded_resources, email, sapi, d resource_metadata = json.loads(resource_metadata) if "runtime" in resource_metadata: resource_info["runtime"] = resource_metadata["runtime"] - print("RESOURCE_INFO_ALL: " +str(resource_info)) #if "num_gpu" in resource_metadata: # print("RESOURCE_INFO: " + str(resource_info["num_gpu"])) @@ -245,7 +261,7 @@ def get_workflow_host_port(host_to_deploy, sid): return success, host_port -def create_k8s_deployment(email, workflow_info, runtime, management=False): +def create_k8s_deployment(email, workflow_info, runtime, management=False, use_gpus=0): # KUBERNETES MODE new_workflow_conf = {} conf_file = '/opt/mfn/SandboxAgent/conf/new_workflow.conf' @@ -297,17 +313,15 @@ def create_k8s_deployment(email, workflow_info, runtime, management=False): env.append({'name': 'WORKFLOWID', 'value': workflow_info["workflowId"]}) env.append({'name': 'WORKFLOWNAME', 'value': workflow_info["workflowName"]}) - """ - if "num_gpu" in workflow_info.keys(): - print("INSIDE K8S Deploy, num_gpu: " + str(workflow_info['num_gpu'])) - num_gpu = int(workflow_info['num_gpu']) + if use_gpus >= 0: + #print("INSIDE K8S Deploy, num_gpu: " + str(workflow_info['num_gpu'])) + #num_gpu = int(workflow_info['num_gpu']) # overwrite values from values.yaml for new 
workflows - kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['nvidia.com/gpu'] = str(num_gpu) - kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['nvidia.com/gpu'] = str(num_gpu) - kservice['spec']['template']['spec']['containers'][0]['image'] = "localhost:5000/microfn/sandbox" - if num_gpu > 0: - kservice['spec']['template']['spec']['containers'][0]['image'] = "localhost:5000/microfn/sandbox" - """ + kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['nvidia.com/gpu'] = str(use_gpus) + kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['nvidia.com/gpu'] = str(use_gpus) + #kservice['spec']['template']['spec']['containers'][0]['image'] = "localhost:5000/microfn/sandbox" + kservice['spec']['template']['spec']['containers'][0]['image'] = "localhost:5000/microfn/sandbox_gpu" + # Special handling for the management container if management: kservice['spec']['template']['spec']['volumes'] = [{ 'name': 'new-workflow-conf', 'configMap': {'name': new_workflow_conf['configmap']}}] @@ -407,6 +421,11 @@ def handle(value, sapi): workflow = data["workflow"] if "id" not in workflow: raise Exception("malformed input") + """ + if "gpu_usage" not in workflow: + raise Exception("malformed input: no gpu_usage") + use_gpus = int(data['gpu_usage']) + """ sapi.log(json.dumps(workflow)) wfmeta = sapi.get(email + "_workflow_" + workflow["id"], True) if wfmeta is None or wfmeta == "": @@ -437,6 +456,8 @@ def handle(value, sapi): if is_asl_workflow(wfobj): wf_type = WF_TYPE_ASL + #use_gpus = int(wfmeta._gpu_usage) + success, errmsg, resource_names, uploaded_resources = check_workflow_functions(wf_type, wfobj, email, sapi) if not success: raise Exception("Couldn't deploy workflow; " + errmsg) @@ -477,13 +498,16 @@ def handle(value, sapi): else: runtime = "Python" + """ if "num_gpu" in resource_info_map.keys(): print ("RESOURCE_INFO_MAP: " + str(resource_info_map)) 
workflow_info['num_gpu'] = resource_info_map['num_gpu'] else: workflow_info['num_gpu'] = 0 + """ + use_gpus = 0 - url, endpoint_key = create_k8s_deployment(email, workflow_info, runtime) + url, endpoint_key = create_k8s_deployment(email, workflow_info, runtime, use_gpus) if url is not None and len(url) > 0: status = "deploying" sapi.addSetEntry(workflow_info["workflowId"] + "_workflow_endpoints", str(url), is_private=True) diff --git a/deploy/helm/microfunctions/values.yaml b/deploy/helm/microfunctions/values.yaml index c3de1908..ec9a642b 100644 --- a/deploy/helm/microfunctions/values.yaml +++ b/deploy/helm/microfunctions/values.yaml @@ -24,8 +24,8 @@ imageRepo: "localhost:5000" manager: #httpProxy: "http://:" #httpsProxy: "http://:" - #httpGatewayPort: 80 - #httpsGatewayPort: 443 + httpGatewayPort: 30336 + #httpsGatewayPort: 32533 nameOverride: "microfunctions" newWorkflow: hpa: diff --git a/mfn_sdk/mfn_sdk/mfnclient.py b/mfn_sdk/mfn_sdk/mfnclient.py index ab4082d5..3d94a69a 100644 --- a/mfn_sdk/mfn_sdk/mfnclient.py +++ b/mfn_sdk/mfn_sdk/mfnclient.py @@ -266,6 +266,7 @@ def action(self,action,data=None): r.raise_for_status() log.debug("%s: %s <- %s", self.user, action, r.text[:256]+(r.text[256:] and '...')) resp = r.json() + print(str(resp)) if resp.get('status','') != 'success': if resp.get('has_error',False): raise Exception(f"MicroFunctions Error for action {action}: {resp['error_type']}") @@ -449,7 +450,7 @@ def _get_state_names_and_resource(self, desired_state_type, wf_dict): return state_list - def add_workflow(self,name,filename=None): + def add_workflow(self,name,filename=None, gpu_usage=None): """ add a workflow returns an existing workflow if the name exists, registers a new workflow name if it doesn't exist @@ -458,7 +459,7 @@ def add_workflow(self,name,filename=None): for wf in self._workflows: if wf._name == name: return wf - data = self.action('addWorkflow',{'workflow':{'name':name}}) + data = self.action('addWorkflow',{'workflow':{'name':name, 
"gpu_usage":gpu_usage}}) wfd = data['workflow'] wf = Workflow(self,wfd) self._workflows.append(wf) @@ -475,6 +476,7 @@ def add_workflow(self,name,filename=None): # parse the WF json to find required functions fnames = [] wfjson = json.loads(wfdesc) + #print("wfjson: "+ str(wfjson)) if 'States' in wfjson: state_list = self._get_state_names_and_resource('Task', wfjson) for state_info in state_list: @@ -505,7 +507,6 @@ def add_workflow(self,name,filename=None): with open(fpyname, 'r') as f: fcode = f.read() f.code = fcode - return wf diff --git a/mfn_sdk/mfn_sdk/workflow.py b/mfn_sdk/mfn_sdk/workflow.py index 8c756b55..447afa12 100644 --- a/mfn_sdk/mfn_sdk/workflow.py +++ b/mfn_sdk/mfn_sdk/workflow.py @@ -52,9 +52,13 @@ class Workflow(object): """ def __init__(self,client,wf): + print(str(wf)) self.client=client self.id=wf["id"] self._name=wf["name"] + self._gpu_usage=None + if "gpu_usage" in wf: + self._gpu_usage=wf["gpu_usage"] self._modified=wf["modified"] self._status=wf.get("status",None) self._endpoints=wf.get("endpoints",None) @@ -68,6 +72,19 @@ def __str__(self): else: return f"{self.id} ({self._name}, status: {self._status})" + @property + def gpu_usage(self): + # TODO: workflow GPU usage could have been updated, decide if we should fetch workflow status + return self._gpu_usage + + """ + @gpu_usage.setter + def gpu_usage(self,gpu_usage): + # TODO: workflow GPU could have been updated, decide if we should fetch workflow status + res = self.client.action('modifyWorkflow',{'workflow':{'id':self.id,'name':name,'gpu_usage':self._gpu_usage}}) + self.gpu_usage = gpu_usage + """ + @property def name(self): # TODO: workflow name could have been updated, decide if we should fetch workflow status @@ -124,15 +141,17 @@ def json(self): def json(self,json): if json != self.json: self._json = json + print ("uploaded workflow JOSN"+ str( json)) self.client.action('uploadWorkflowJSON',{'workflow':{'id':self.id,'json':base64.b64encode(self._json.encode()).decode()}}) - def 
deploy(self, timeout=None): #, num_gpu=None): + def deploy(self, timeout=None): """ deploy a workflow and optionally wait in linearly increasing multiples of 1000ms :timeout: By default returns after calling deploy on the workflow without waiting for it to be actually deployed. If timeout is set to a numeric <= 0, it waits indefinitely in intervals of 1000ms, 2000ms, 3000ms, ... If timeout is set to a numeric > 0, it waits for the workflow to be deployed in increasing multiples of 100ms, but no longer than the timeout. When the timeout expires and the workflow is not deployed, the function raises an Exception """ + s = self.status if s == 'deployed': log.debug("deploy: wf %s already deployed",self.name) @@ -145,9 +164,6 @@ def deploy(self, timeout=None): #, num_gpu=None): else: self.client.action('deployWorkflow',{'workflow':{'id':self.id}}) - #if num_gpu is not None: - # print("NUM_GPU:" + str(num_gpu)) - # if timeout is None, do not wait but return immediately even if it's not yet deployed if timeout is None: @@ -283,8 +299,6 @@ def execute(self,data,timeout=60, check_duration=False): # we are already deployed and have the endpoints stored in self._endpoints url = random.choice(self._endpoints) - print(url) - url=url+":30336" try: #postdata = {} #postdata["value"] = json.dumps(data) diff --git a/tests/mfn_test_utils.py b/tests/mfn_test_utils.py index 40a7c8be..945e6d05 100644 --- a/tests/mfn_test_utils.py +++ b/tests/mfn_test_utils.py @@ -84,8 +84,17 @@ def __init__(self, test_name=None, timeout=None, workflow_filename=None, new_use if timeout is not None: self._settings["timeout"] = timeout + """ + else: + #self._gpu_usage = None + #self._workflow_description['num_gpu'] = self._settings["num_gpu"] + #print("Workflow_description:" + str(self._workflow_description)) + + self.gpu_usage = 0 # hardcoded for now if num_gpu is not None: self._settings["num_gpu"] = num_gpu + self._gpu_usage = self._settings["num_gpu"] + """ self._log_clear_timestamp = int(time.time() * 
1000.0 * 1000.0) @@ -118,6 +127,7 @@ def _get_settings(self): # Defaults settings.setdefault("timeout", 60) + settings.setdefault("num_gpu", 0) return settings @@ -174,6 +184,7 @@ def _get_resource_info(self, resource_ref): return retval def _get_resource_info_map(self, workflow_description=None, resource_info_map=None): + #print(str("wf description: " + str(workflow_description))) if workflow_description is None: workflow_description = self._workflow_description if resource_info_map is None: @@ -194,8 +205,8 @@ def _get_resource_info_map(self, workflow_description=None, resource_info_map=No resource_info["resource_env_filename"] = "environment_variables/" + resource_ref + "_environment_variables.txt" resource_info_map[resource_ref] = resource_info resource_info_map[resource_ref]['num_gpu'] = self._settings['num_gpu'] - print("resource_info_map: " + json.dumps(resource_info_map)) - + #resource_info_map['num_gpu'] = self._settings['num_gpu'] + #print("resource_info_map: " + json.dumps(resource_info_map)) elif "States" in workflow_description: states = workflow_description["States"] @@ -210,8 +221,8 @@ def _get_resource_info_map(self, workflow_description=None, resource_info_map=No resource_info["resource_env_filename"] = "environment_variables/" + resource_name + "_environment_variables.txt" resource_info_map[resource_name] = resource_info resource_info_map[resource_name]['num_gpu'] = self._settings['num_gpu'] - print("resource_info_map: " + json.dumps(resource_info_map)) - + #resource_info_map['num_gpu'] = self._settings['num_gpu'] + #print("resource_info_map: " + json.dumps(resource_info_map)) if "Type" in state and state["Type"] == "Parallel": branches = state['Branches'] @@ -228,6 +239,7 @@ def _get_resource_info_map(self, workflow_description=None, resource_info_map=No print("ERROR: invalid workflow description.") assert False + #resource_info_map['num_gpu'] = self._settings['num_gpu'] return resource_info_map def _delete_resource_if_existing(self, 
existing_resources, resource_name): @@ -239,6 +251,7 @@ def _delete_resource_if_existing(self, existing_resources, resource_name): def _create_and_upload_resource(self, resource_name, resource_info): print("Deploying resource: " + resource_name) + #print(str (resource_info)) resource_filename = resource_info["resource_filename"] is_zip = resource_info["is_zip"] @@ -287,10 +300,13 @@ def upload_workflow(self): self.undeploy_workflow() resource_info_map = self._get_resource_info_map() + #resource_info_map['num_gpu'] = 1 + #print(str(resource_info_map)) existing_resources = self._client.functions for resource_name in resource_info_map.keys(): + #if not resource_name == 'num_gpu': self._delete_resource_if_existing(existing_resources, resource_name) resource_info = resource_info_map[resource_name] @@ -304,8 +320,14 @@ def deploy_workflow(self): try: wf = self._client.add_workflow(self._workflow_name) wf.json = json.dumps(self._workflow_description) + #print (wf.json) + #wf._use_gpu=self._settings["num_gpu"] + wf._gpu_usage = "teststringgpu" # _use_gpu=self._settings["num_gpu"] wf.deploy(self._settings["timeout"]) #, num_gpu=self._settings['num_gpu']) self._workflow = wf + #print ("WF: " + str(wf._use_gpu)) + #print ("WF1: " + str(wf.gpu_usage)) + #wf.gpu_usage = "teststring" if self._workflow.status != "failed": print("MFN workflow " + self._workflow_name + " deployed.") else: From dfc7cd7337927139de78fb9bacefe5e243aa801a Mon Sep 17 00:00:00 2001 From: ksatzke Date: Fri, 2 Oct 2020 15:29:51 +0000 Subject: [PATCH 14/64] fixing bug on addWorkflow --- ManagementService/python/addWorkflow.py | 2 +- deploy/helm/microfunctions/values.yaml | 1 + mfn_sdk/mfn_sdk/mfnclient.py | 4 +-- mfn_sdk/mfn_sdk/workflow.py | 3 +- tests/asl_DLIB/test.py | 2 +- tests/mfn_test_utils.py | 37 +++++++++---------------- 6 files changed, 20 insertions(+), 29 deletions(-) diff --git a/ManagementService/python/addWorkflow.py b/ManagementService/python/addWorkflow.py index 2c730c13..b6b5394c 100644 
--- a/ManagementService/python/addWorkflow.py +++ b/ManagementService/python/addWorkflow.py @@ -41,7 +41,7 @@ def handle(value, sapi): wf["endpoints"] = [] wf["gpu_usage"] = None if "gpu_usage" in workflow: - wf["gpu_usage"] = workflow["gpu_usage"] + wf["gpu_usage"] = str(workflow["gpu_usage"]) wf["id"] = hashlib.md5(str(uuid.uuid4()).encode()).hexdigest() diff --git a/deploy/helm/microfunctions/values.yaml b/deploy/helm/microfunctions/values.yaml index ec9a642b..2b560b59 100644 --- a/deploy/helm/microfunctions/values.yaml +++ b/deploy/helm/microfunctions/values.yaml @@ -141,6 +141,7 @@ nginx: # Please use --set nginx.managementService to specify the FQDN that the service will get from your Knative setup # _helpers.tpl would default it to wf-mfn1-management.knix.example.com #managementService: wf-mfn1-management.knix.example.com + managementService: wf-mfn-management.default.192.168.1.162.xip.io:30336 frontendImagePath: "/microfn/frontend" frontendImageTag: "latest" imagePath: "/microfn/nginx" diff --git a/mfn_sdk/mfn_sdk/mfnclient.py b/mfn_sdk/mfn_sdk/mfnclient.py index 3d94a69a..68e0c174 100644 --- a/mfn_sdk/mfn_sdk/mfnclient.py +++ b/mfn_sdk/mfn_sdk/mfnclient.py @@ -266,7 +266,7 @@ def action(self,action,data=None): r.raise_for_status() log.debug("%s: %s <- %s", self.user, action, r.text[:256]+(r.text[256:] and '...')) resp = r.json() - print(str(resp)) + #print(str(resp)) if resp.get('status','') != 'success': if resp.get('has_error',False): raise Exception(f"MicroFunctions Error for action {action}: {resp['error_type']}") @@ -450,7 +450,7 @@ def _get_state_names_and_resource(self, desired_state_type, wf_dict): return state_list - def add_workflow(self,name,filename=None, gpu_usage=None): + def add_workflow(self,name,filename=None, gpu_usage="None"): """ add a workflow returns an existing workflow if the name exists, registers a new workflow name if it doesn't exist diff --git a/mfn_sdk/mfn_sdk/workflow.py b/mfn_sdk/mfn_sdk/workflow.py index 447afa12..04dacaad 
100644 --- a/mfn_sdk/mfn_sdk/workflow.py +++ b/mfn_sdk/mfn_sdk/workflow.py @@ -104,6 +104,7 @@ def modified(self): def status(self): data = self.client.action('getWorkflows',{'workflow':{'id':self.id}}) self._status = data['workflow']['status'] + #self._status == "undeployed" if self._status == "deployed": self._endpoints = data['workflow']['endpoints'] else: @@ -141,7 +142,7 @@ def json(self): def json(self,json): if json != self.json: self._json = json - print ("uploaded workflow JOSN"+ str( json)) + #print ("uploaded workflow JSON"+ str( json)) self.client.action('uploadWorkflowJSON',{'workflow':{'id':self.id,'json':base64.b64encode(self._json.encode()).decode()}}) diff --git a/tests/asl_DLIB/test.py b/tests/asl_DLIB/test.py index fd990152..600233ee 100644 --- a/tests/asl_DLIB/test.py +++ b/tests/asl_DLIB/test.py @@ -34,6 +34,6 @@ def test_dlib(self): testtuplelist =[(inp1, res1)] - test = MFNTest(test_name = "Dlib_Test", num_gpu = 1) + test = MFNTest(test_name = "Dlib_Test", gpu_usage = "1") test.exec_tests(testtuplelist) diff --git a/tests/mfn_test_utils.py b/tests/mfn_test_utils.py index 945e6d05..8ff2f34b 100644 --- a/tests/mfn_test_utils.py +++ b/tests/mfn_test_utils.py @@ -43,7 +43,7 @@ class MfnAppTextFormat(): mfntestfailed = MfnAppTextFormat.STYLE_BOLD + MfnAppTextFormat.COLOR_RED + 'FAILED' + MfnAppTextFormat.END + MfnAppTextFormat.END class MFNTest(): - def __init__(self, test_name=None, timeout=None, workflow_filename=None, new_user=False, delete_user=False, num_gpu=None): + def __init__(self, test_name=None, timeout=None, workflow_filename=None, new_user=False, delete_user=False, gpu_usage=None): self._settings = self._get_settings() @@ -84,17 +84,9 @@ def __init__(self, test_name=None, timeout=None, workflow_filename=None, new_use if timeout is not None: self._settings["timeout"] = timeout - """ - else: - #self._gpu_usage = None - #self._workflow_description['num_gpu'] = self._settings["num_gpu"] - #print("Workflow_description:" + 
str(self._workflow_description)) - - self.gpu_usage = 0 # hardcoded for now - if num_gpu is not None: - self._settings["num_gpu"] = num_gpu - self._gpu_usage = self._settings["num_gpu"] - """ + if gpu_usage is not None: + self._settings["gpu_usage"] = gpu_usage + #self._gpu_usage = self._settings["num_gpu"] self._log_clear_timestamp = int(time.time() * 1000.0 * 1000.0) @@ -105,7 +97,7 @@ def __init__(self, test_name=None, timeout=None, workflow_filename=None, new_use self._workflow_resources = [] self.upload_workflow() - self.deploy_workflow() + self.deploy_workflow() # gpu usage def _get_json_file(self, filename): json_data = {} @@ -127,7 +119,7 @@ def _get_settings(self): # Defaults settings.setdefault("timeout", 60) - settings.setdefault("num_gpu", 0) + settings.setdefault("gpu_usage", "None") return settings @@ -204,7 +196,7 @@ def _get_resource_info_map(self, workflow_description=None, resource_info_map=No resource_info["resource_req_filename"] = "requirements/" + resource_ref + "_requirements.txt" resource_info["resource_env_filename"] = "environment_variables/" + resource_ref + "_environment_variables.txt" resource_info_map[resource_ref] = resource_info - resource_info_map[resource_ref]['num_gpu'] = self._settings['num_gpu'] + #resource_info_map[resource_ref]['num_gpu'] = self._settings['num_gpu'] #resource_info_map['num_gpu'] = self._settings['num_gpu'] #print("resource_info_map: " + json.dumps(resource_info_map)) @@ -220,7 +212,7 @@ def _get_resource_info_map(self, workflow_description=None, resource_info_map=No resource_info["resource_req_filename"] = "requirements/" + resource_name + "_requirements.txt" resource_info["resource_env_filename"] = "environment_variables/" + resource_name + "_environment_variables.txt" resource_info_map[resource_name] = resource_info - resource_info_map[resource_name]['num_gpu'] = self._settings['num_gpu'] + #resource_info_map[resource_name]['num_gpu'] = self._settings['num_gpu'] #resource_info_map['num_gpu'] = 
self._settings['num_gpu'] #print("resource_info_map: " + json.dumps(resource_info_map)) @@ -318,16 +310,13 @@ def get_deployment_error(self): def deploy_workflow(self): try: - wf = self._client.add_workflow(self._workflow_name) + gpu_usage=self._settings["gpu_usage"] + wf = self._client.add_workflow(self._workflow_name, None, gpu_usage) + #print ("retuned from add_workflow: " + str(wf)) wf.json = json.dumps(self._workflow_description) - #print (wf.json) - #wf._use_gpu=self._settings["num_gpu"] - wf._gpu_usage = "teststringgpu" # _use_gpu=self._settings["num_gpu"] - wf.deploy(self._settings["timeout"]) #, num_gpu=self._settings['num_gpu']) + wf.deploy(self._settings["timeout"]) self._workflow = wf - #print ("WF: " + str(wf._use_gpu)) - #print ("WF1: " + str(wf.gpu_usage)) - #wf.gpu_usage = "teststring" + print ("transformed wf with gpu usage" + str(wf.gpu_usage)) if self._workflow.status != "failed": print("MFN workflow " + self._workflow_name + " deployed.") else: From 4508ea64519b2f78f475fb93a11b0671a1625802 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Mon, 5 Oct 2020 12:59:11 +0000 Subject: [PATCH 15/64] adding support for dynamic config of helm deployments on GPU to ManagementService --- ManagementService/management_init.py | 2 +- ManagementService/python/addWorkflow.py | 2 +- ManagementService/python/deployWorkflow.py | 37 ++++++++-------------- 3 files changed, 16 insertions(+), 25 deletions(-) diff --git a/ManagementService/management_init.py b/ManagementService/management_init.py index 6ed4442f..891af55a 100755 --- a/ManagementService/management_init.py +++ b/ManagementService/management_init.py @@ -355,7 +355,7 @@ def printUsage(): sys.path.append(workflowdir) if os.getenv("KUBERNETES_PORT", None) != None: import deployWorkflow - url, endpoint_key = deployWorkflow.create_k8s_deployment(email, workflow_info, "Python", management=True) + url, endpoint_key = deployWorkflow.create_k8s_deployment(email, workflow_info, "Python", 0, management=True) 
DLCLIENT_MANAGEMENT.putMapEntry("Management_workflow_endpoint_map", endpoint_key, url) # Kubernetes mode only has one url endpoint_list = [url] diff --git a/ManagementService/python/addWorkflow.py b/ManagementService/python/addWorkflow.py index b6b5394c..9731c1d1 100644 --- a/ManagementService/python/addWorkflow.py +++ b/ManagementService/python/addWorkflow.py @@ -39,7 +39,7 @@ def handle(value, sapi): wf["status"] = "undeployed" wf["modified"] = time.time() wf["endpoints"] = [] - wf["gpu_usage"] = None + #wf["gpu_usage"] = None if "gpu_usage" in workflow: wf["gpu_usage"] = str(workflow["gpu_usage"]) diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index 7d7057e0..3be50565 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -149,8 +149,6 @@ def compile_resource_info_map(resource_names, uploaded_resources, email, sapi, d resource_metadata = json.loads(resource_metadata) if "runtime" in resource_metadata: resource_info["runtime"] = resource_metadata["runtime"] - #if "num_gpu" in resource_metadata: - # print("RESOURCE_INFO: " + str(resource_info["num_gpu"])) num_chunks_str = dlc.get("grain_source_zip_num_chunks_" + resource_id) try: @@ -261,7 +259,7 @@ def get_workflow_host_port(host_to_deploy, sid): return success, host_port -def create_k8s_deployment(email, workflow_info, runtime, management=False, use_gpus=0): +def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, management=False): # KUBERNETES MODE new_workflow_conf = {} conf_file = '/opt/mfn/SandboxAgent/conf/new_workflow.conf' @@ -313,16 +311,17 @@ def create_k8s_deployment(email, workflow_info, runtime, management=False, use_g env.append({'name': 'WORKFLOWID', 'value': workflow_info["workflowId"]}) env.append({'name': 'WORKFLOWNAME', 'value': workflow_info["workflowName"]}) - if use_gpus >= 0: - #print("INSIDE K8S Deploy, num_gpu: " + str(workflow_info['num_gpu'])) - #num_gpu = 
int(workflow_info['num_gpu']) + # apply gpu_usage fraction to k8s deployment configuration + use_gpus = gpu_usage + + if not management and use_gpus >= 0: # overwrite values from values.yaml for new workflows kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['nvidia.com/gpu'] = str(use_gpus) kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['nvidia.com/gpu'] = str(use_gpus) #kservice['spec']['template']['spec']['containers'][0]['image'] = "localhost:5000/microfn/sandbox" kservice['spec']['template']['spec']['containers'][0]['image'] = "localhost:5000/microfn/sandbox_gpu" - # Special handling for the management container + # Special handling for the management container: never run on gpu if management: kservice['spec']['template']['spec']['volumes'] = [{ 'name': 'new-workflow-conf', 'configMap': {'name': new_workflow_conf['configmap']}}] kservice['spec']['template']['spec']['containers'][0]['volumeMounts'] = [{'name': 'new-workflow-conf', 'mountPath': '/opt/mfn/SandboxAgent/conf'}] @@ -421,13 +420,10 @@ def handle(value, sapi): workflow = data["workflow"] if "id" not in workflow: raise Exception("malformed input") - """ - if "gpu_usage" not in workflow: - raise Exception("malformed input: no gpu_usage") - use_gpus = int(data['gpu_usage']) - """ sapi.log(json.dumps(workflow)) wfmeta = sapi.get(email + "_workflow_" + workflow["id"], True) + print("WFMETA in deployWorkflow: "+ str(wfmeta)) + if wfmeta is None or wfmeta == "": raise Exception("workflow metadata is not valid.") try: @@ -498,16 +494,14 @@ def handle(value, sapi): else: runtime = "Python" - """ - if "num_gpu" in resource_info_map.keys(): - print ("RESOURCE_INFO_MAP: " + str(resource_info_map)) - workflow_info['num_gpu'] = resource_info_map['num_gpu'] + if "gpu_usage" in wfmeta and wfmeta["gpu_usage"] != "None": + gpu_usage = float(wfmeta["gpu_usage"]) else: - workflow_info['num_gpu'] = 0 - """ - use_gpus = 0 + gpu_usage = 0. 
+ + #print("deduced gpu_usage: " + str(gpu_usage)) - url, endpoint_key = create_k8s_deployment(email, workflow_info, runtime, use_gpus) + url, endpoint_key = create_k8s_deployment(email, workflow_info, runtime, gpu_usage) if url is not None and len(url) > 0: status = "deploying" sapi.addSetEntry(workflow_info["workflowId"] + "_workflow_endpoints", str(url), is_private=True) @@ -522,9 +516,6 @@ def handle(value, sapi): # _XXX_: due to the queue service still being in java in the sandbox sandbox_image_name = "microfn/sandbox" # default value - #if "on_gpu" in resource_info_map.keys(): # sandbox_gpu image should be used for ths workflow - # if resource_info_map["on_gpu"] == True: - # sandbox_image_name = "microfn/sandbox_gpu" if any(resource_info_map[res_name]["runtime"] == "Java" for res_name in resource_info_map): sandbox_image_name = "microfn/sandbox_java" From e251bf251b3cb33bcb3464d34f30553ff987d687 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Tue, 6 Oct 2020 07:33:53 +0000 Subject: [PATCH 16/64] removing bug on java function executions --- ManagementService/python/deployWorkflow.py | 2 +- Sandbox/Dockerfile_java_gpu | 66 +++++++++++++++++++ .../microfunctions/templates/management.yaml | 6 +- mfn_sdk/mfn_sdk/workflow.py | 6 +- 4 files changed, 75 insertions(+), 5 deletions(-) create mode 100644 Sandbox/Dockerfile_java_gpu diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index 3be50565..a0976a11 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -314,7 +314,7 @@ def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, management=F # apply gpu_usage fraction to k8s deployment configuration use_gpus = gpu_usage - if not management and use_gpus >= 0: + if not management and use_gpus >= 0 and runtime=="Python": # overwrite values from values.yaml for new workflows 
kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['nvidia.com/gpu'] = str(use_gpus) kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['nvidia.com/gpu'] = str(use_gpus) diff --git a/Sandbox/Dockerfile_java_gpu b/Sandbox/Dockerfile_java_gpu new file mode 100644 index 00000000..507d0975 --- /dev/null +++ b/Sandbox/Dockerfile_java_gpu @@ -0,0 +1,66 @@ +# Copyright 2020 The KNIX Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#FROM ubuntu:18.04 +FROM nvidia/cuda:10.1-cudnn-devel-ubuntu18.04 + +# Install (as root) +# Base +RUN apt-get update --fix-missing +RUN apt-get -y --no-install-recommends install build-essential +RUN apt-get -y --no-install-recommends install netbase unzip file libmagic1 + +# Python +RUN apt-get -y --no-install-recommends install python3 python3-dev +RUN apt-get -y --no-install-recommends install python3-pip +RUN apt-get -y --no-install-recommends install zlib1g libssl1.0 libsasl2-2 ca-certificates + +RUN /usr/bin/python3 -m pip install --upgrade pip + +RUN /usr/bin/python3 -m pip install setuptools +RUN /usr/bin/python3 -m pip install thrift>=0.12.0 +RUN /usr/bin/python3 -m pip install anytree +RUN /usr/bin/python3 -m pip install ujsonpath +RUN /usr/bin/python3 -m pip install requests +RUN /usr/bin/python3 -m pip install retry +# remove warnings from anytree package +RUN /usr/bin/python3 -m pip install fastcache +# Needed for multi-language support (currently just Java) +RUN 
/usr/bin/python3 -m pip install thriftpy2 + +# Java +RUN apt-get -y --no-install-recommends install openjdk-8-jdk-headless + +RUN apt-get -y --no-install-recommends install maven + +# Add components (as mfn) +RUN groupadd -o -g 1000 -r mfn && useradd -d /opt/mfn -u 1000 -m -r -g mfn mfn +RUN mkdir /opt/mfn/logs + +COPY build/queueservice.jar /opt/mfn/ +ADD frontend/frontend /opt/mfn/frontend +ADD build/SandboxAgent.tar.gz /opt/mfn/ +ADD build/FunctionWorker.tar.gz /opt/mfn/ +ADD build/LoggingService.tar.gz /opt/mfn/ + +ADD build/JavaRequestHandler.tar.gz /opt/mfn/ + +RUN chmod +x /opt/mfn/JavaRequestHandler/setup_maven.sh +RUN /opt/mfn/JavaRequestHandler/./setup_maven.sh True +RUN mvn -Duser.home=/tmp -DskipTests -gs /opt/mfn/JavaRequestHandler/maven/sandbox-mvn-settings.xml -f /opt/mfn/JavaRequestHandler/maven/init-mvn.pom.xml dependency:resolve-plugins + +RUN chown mfn:mfn -R /opt/mfn +USER mfn +WORKDIR /opt/mfn +CMD ["python3", "/opt/mfn/SandboxAgent/sandboxagent.py"] diff --git a/deploy/helm/microfunctions/templates/management.yaml b/deploy/helm/microfunctions/templates/management.yaml index 75d25688..ec84b3de 100644 --- a/deploy/helm/microfunctions/templates/management.yaml +++ b/deploy/helm/microfunctions/templates/management.yaml @@ -125,7 +125,11 @@ data: {{- /* Disable scale to zero with a minScale of 1. */ -}} "autoscaling.knative.dev/minScale": "1", {{- /* Limit scaling to 100 pods. 
*/ -}} - "autoscaling.knative.dev/maxScale": "5" + "autoscaling.knative.dev/maxScale": "5", + {{- /* KubeShare GPU-related configurations */ -}} + "kubeshare/gpu_request": "0.4", + "kubeshare/gpu_limit": "1.0", + "kubeshare/gpu_mem": "3145728000" }, "labels": { "app": "microfunctions-workflow", diff --git a/mfn_sdk/mfn_sdk/workflow.py b/mfn_sdk/mfn_sdk/workflow.py index 04dacaad..f278aa6d 100644 --- a/mfn_sdk/mfn_sdk/workflow.py +++ b/mfn_sdk/mfn_sdk/workflow.py @@ -102,9 +102,9 @@ def modified(self): @property def status(self): - data = self.client.action('getWorkflows',{'workflow':{'id':self.id}}) - self._status = data['workflow']['status'] - #self._status == "undeployed" + #data = self.client.action('getWorkflows',{'workflow':{'id':self.id}}) + #self._status = data['workflow']['status'] + self._status == "undeployed" if self._status == "deployed": self._endpoints = data['workflow']['endpoints'] else: From 5508abca3132aa074c69ad9666557eff7d9f2ef3 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Tue, 6 Oct 2020 13:00:51 +0000 Subject: [PATCH 17/64] fixing a bug on asl_Map state tests with helm deployment --- ManagementService/python/deployWorkflow.py | 2 +- mfn_sdk/mfn_sdk/workflow.py | 6 +- tests/asl_Map_short/CallLambda.py | 6 ++ tests/asl_Map_short/test.py | 81 +++++++++++++++++ tests/asl_Map_short/wfms_context_test.data | 11 +++ .../wfms_context_test/wfms_context.json | 23 +++++ tests/asl_Map_short/wfms_delivery_test.data | 52 +++++++++++ .../wfms_delivery_test/checkAvailability.py | 5 + .../wfms_delivery_test.json | 40 ++++++++ tests/asl_Map_short/wfms_example_test.data | 21 +++++ .../wfms_example_test/ship-val.py | 23 +++++ .../wfms_example_test/wfms_example_test.json | 22 +++++ tests/asl_Map_short/wfms_hardcoded_test.data | 6 ++ .../wfms_hardcoded_test.json | 51 +++++++++++ .../wfms_iro_paths_processing_test.data | 50 ++++++++++ .../wfms_iro_paths_test/checkAvailability.py | 6 ++ .../wfms_iro_paths_test/paymentFailed.py | 6 ++ 
.../wfms_iro_paths_test/prepareForDelivery.py | 6 ++ .../wfms_iro_paths_test/sendOrderSummary.py | 10 ++ .../wfms_iro_paths_test/startDelivery.py | 6 ++ .../asl_Map_short/wfms_iro_paths_test/test.py | 91 +++++++++++++++++++ .../wfms_iro_paths_test/validatePayment.py | 6 ++ .../wfms_iro_paths_processing_test.json | 79 ++++++++++++++++ tests/asl_Map_short/wfms_parameters_test.data | 21 +++++ .../wfms_parameters_test.json | 24 +++++ tests/asl_Map_short/wfms_test_mc0.json | 21 +++++ tests/asl_Map_short/wfms_test_mc1.json | 21 +++++ tests/asl_Map_short/wfms_test_mc2.json | 21 +++++ tests/asl_Map_short/wfms_test_mc3.json | 21 +++++ .../wfms_thingspiratessay_test.data | 18 ++++ .../wfms_thingspiratessay_test.json | 19 ++++ 31 files changed, 771 insertions(+), 4 deletions(-) create mode 100644 tests/asl_Map_short/CallLambda.py create mode 100644 tests/asl_Map_short/test.py create mode 100644 tests/asl_Map_short/wfms_context_test.data create mode 100644 tests/asl_Map_short/wfms_context_test/wfms_context.json create mode 100644 tests/asl_Map_short/wfms_delivery_test.data create mode 100644 tests/asl_Map_short/wfms_delivery_test/checkAvailability.py create mode 100644 tests/asl_Map_short/wfms_delivery_test/wfms_delivery_test.json create mode 100644 tests/asl_Map_short/wfms_example_test.data create mode 100644 tests/asl_Map_short/wfms_example_test/ship-val.py create mode 100644 tests/asl_Map_short/wfms_example_test/wfms_example_test.json create mode 100644 tests/asl_Map_short/wfms_hardcoded_test.data create mode 100644 tests/asl_Map_short/wfms_hardcoded_test/wfms_hardcoded_test.json create mode 100644 tests/asl_Map_short/wfms_iro_paths_processing_test.data create mode 100644 tests/asl_Map_short/wfms_iro_paths_test/checkAvailability.py create mode 100644 tests/asl_Map_short/wfms_iro_paths_test/paymentFailed.py create mode 100644 tests/asl_Map_short/wfms_iro_paths_test/prepareForDelivery.py create mode 100644 tests/asl_Map_short/wfms_iro_paths_test/sendOrderSummary.py create 
mode 100644 tests/asl_Map_short/wfms_iro_paths_test/startDelivery.py create mode 100644 tests/asl_Map_short/wfms_iro_paths_test/test.py create mode 100644 tests/asl_Map_short/wfms_iro_paths_test/validatePayment.py create mode 100644 tests/asl_Map_short/wfms_iro_paths_test/wfms_iro_paths_processing_test.json create mode 100644 tests/asl_Map_short/wfms_parameters_test.data create mode 100644 tests/asl_Map_short/wfms_parameters_test/wfms_parameters_test.json create mode 100644 tests/asl_Map_short/wfms_test_mc0.json create mode 100644 tests/asl_Map_short/wfms_test_mc1.json create mode 100644 tests/asl_Map_short/wfms_test_mc2.json create mode 100644 tests/asl_Map_short/wfms_test_mc3.json create mode 100644 tests/asl_Map_short/wfms_thingspiratessay_test.data create mode 100644 tests/asl_Map_short/wfms_thingspiratessay_test/wfms_thingspiratessay_test.json diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index a0976a11..59302483 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -277,7 +277,7 @@ def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, management=F # Kubernetes labels cannot contain @ or _ and should start and end with alphanumeric characters wfNameSanitized = 'wf-' + workflow_info["workflowId"].replace('@', '-').replace('_', '-').lower() + '-wf' - wfActualNameSanitized = 'wf-' + workflow_info["workflowName"].replace('@', '-').replace('_', '-').lower() + '-wf' + wfActualNameSanitized = 'wf-' + workflow_info["workflowName"].replace('@', '-').replace('_', '-').replace('/','-').lower() + '-wf' emailSanitized = 'u-' + email.replace('@', '-').replace('_', '-').lower() + '-u' # Pod, Deployment and Hpa names for the new workflow will have a prefix containing the workflow name and user name app_fullname_prefix = '' diff --git a/mfn_sdk/mfn_sdk/workflow.py b/mfn_sdk/mfn_sdk/workflow.py index f278aa6d..04dacaad 100644 --- 
a/mfn_sdk/mfn_sdk/workflow.py +++ b/mfn_sdk/mfn_sdk/workflow.py @@ -102,9 +102,9 @@ def modified(self): @property def status(self): - #data = self.client.action('getWorkflows',{'workflow':{'id':self.id}}) - #self._status = data['workflow']['status'] - self._status == "undeployed" + data = self.client.action('getWorkflows',{'workflow':{'id':self.id}}) + self._status = data['workflow']['status'] + #self._status == "undeployed" if self._status == "deployed": self._endpoints = data['workflow']['endpoints'] else: diff --git a/tests/asl_Map_short/CallLambda.py b/tests/asl_Map_short/CallLambda.py new file mode 100644 index 00000000..efaf655f --- /dev/null +++ b/tests/asl_Map_short/CallLambda.py @@ -0,0 +1,6 @@ +import json +import time +def handle(event, context): + name = event["who"] + time.sleep(1.0) + return "Hello, %s!" % name diff --git a/tests/asl_Map_short/test.py b/tests/asl_Map_short/test.py new file mode 100644 index 00000000..ab18b490 --- /dev/null +++ b/tests/asl_Map_short/test.py @@ -0,0 +1,81 @@ +# Copyright 2020 The microfunctions Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import unittest +import os, sys +import json +import time +#import ast + +sys.path.append("../") +from mfn_test_utils import MFNTest + +class MapStateTest(unittest.TestCase): + """ + event = '[{"who": "bob"},{"who": "meg"},{"who": "joe"}]' + expectedResponse = '[{"ContextValue": {"who": "bob"}, "ContextIndex": 0},{"ContextValue": {"who": "meg"}, "ContextIndex": 1}, {"ContextValue": {"who": "joe"}, "ContextIndex": 2 }]' + test_map = [("asl_Map_State_Context_Data", "wfms_context_test/wfms_context_test.json", [(event, expectedResponse)])] + """ + + def test_map_state(self): + file_list = ["wfms_delivery_test.data", + "wfms_context_test.data", + "wfms_example_test.data", + "wfms_parameters_test.data", + "wfms_thingspiratessay_test.data", + "wfms_iro_paths_processing_test.data", + "wfms_hardcoded_test.data"] + + for file in file_list: + with open(file) as json_input: + testtuplelist = [] + data = json.load(json_input) + testtuplelist.append((json.dumps(data["event"]), json.dumps(data["expectedResponse"]))) + print("CHECK: " +str(data["test_name"])+ " " + str(data["workflow_name"])) + test = MFNTest(test_name=data["test_name"], workflow_filename=data["workflow_name"]) + st = time.time() + test.exec_tests(testtuplelist) + et = time.time() + print ("test duration (s): %s" % str(et-st)) + + for mc in range(4): # set maxConcurrency parameter + """ creates and executes the Map state test workflow from the ASL description """ + + testtuplelist = [] + + event = [{"who": "bob"}, {"who": "meg"}, {"who": "joe"}] + expectedResponse = ["Hello, bob!", "Hello, meg!", "Hello, joe!"] + testtuplelist.append((json.dumps(event), json.dumps(expectedResponse))) + + event = [{"who": "meg"}, {"who": "joe"}, {"who": "bob"}] + expectedResponse = ["Hello, meg!", "Hello, joe!", "Hello, bob!"] + testtuplelist.append((json.dumps(event), json.dumps(expectedResponse))) + + event = [{"who": "joe"}, {"who": "bob"}, {"who": "meg"}] + expectedResponse = ["Hello, joe!", "Hello, bob!", "Hello, meg!"] + 
testtuplelist.append((json.dumps(event), json.dumps(expectedResponse))) + + event = [{"who": "joe"}, {"who": "bob"}, {"who": "meg"}, {"who":"dave"}, {"who":"tom"}, {"who":"ray"}] + expectedResponse = ["Hello, joe!", "Hello, bob!", "Hello, meg!", "Hello, dave!", "Hello, tom!", "Hello, ray!"] + testtuplelist.append((json.dumps(event), json.dumps(expectedResponse))) + + test = MFNTest(test_name="Map State Test", workflow_filename=("wfms_test_mc%s.json" % mc)) + + print("MaxConcurrency level: %i " % mc) + + st = time.time() + test.exec_tests(testtuplelist) + et = time.time() + + print ("test duration (s): %s" % str(et-st)) diff --git a/tests/asl_Map_short/wfms_context_test.data b/tests/asl_Map_short/wfms_context_test.data new file mode 100644 index 00000000..1c4c1116 --- /dev/null +++ b/tests/asl_Map_short/wfms_context_test.data @@ -0,0 +1,11 @@ + +{ +"test_name": "Map State Context Test", + +"workflow_name": "wfms_context_test/wfms_context.json", + +"event": [{"who": "bob"},{"who": "meg"},{"who": "joe"}], + +"expectedResponse": [{"ContextValue": {"who": "bob"}, "ContextIndex": 0},{"ContextValue": {"who": "meg"}, "ContextIndex": 1}, {"ContextValue": {"who": "joe"}, "ContextIndex": 2 }] + +} diff --git a/tests/asl_Map_short/wfms_context_test/wfms_context.json b/tests/asl_Map_short/wfms_context_test/wfms_context.json new file mode 100644 index 00000000..2586a44e --- /dev/null +++ b/tests/asl_Map_short/wfms_context_test/wfms_context.json @@ -0,0 +1,23 @@ +{ + "StartAt": "ExampleMapState", + "States": { + "ExampleMapState": { + "Type": "Map", + "Parameters": { + "ContextIndex.$": "$$.Map.Item.Index", + "ContextValue.$": "$$.Map.Item.Value" + }, + "Iterator": { + "StartAt": "TestPass", + "States": { + "TestPass": { + "Type": "Pass", + "End": true + } + } + }, + "End": true + } + } +} + diff --git a/tests/asl_Map_short/wfms_delivery_test.data b/tests/asl_Map_short/wfms_delivery_test.data new file mode 100644 index 00000000..6cb77f4a --- /dev/null +++ 
b/tests/asl_Map_short/wfms_delivery_test.data @@ -0,0 +1,52 @@ + +{"test_name": "Map State Delivery Test", + +"workflow_name": "wfms_delivery_test/wfms_delivery_test.json", + +"event": + {"orderId": "12345678", + "orderDate": "20190820101213", + "detail": { + "customerId": "1234", + "deliveryAddress": "123, Seattle, WA", + "deliverySpeed": "1-day", + "paymentMethod": "aCreditCard", + "items": [ + { + "productName": "Agile Software Development", + "category": "book", + "price": 60.0, + "quantity": 1 + }, + { + "productName": "Domain-Driven Design", + "category": "book", + "price": 32.0, + "quantity": 1 + }, + { + "productName": "The Mythical Man Month", + "category": "book", + "price": 18.0, + "quantity": 1 + }, + { + "productName": "The Art of Computer Programming", + "category": "book", + "price": 180.0, + "quantity": 1 + }, + { + "productName": "Ground Coffee, Dark Roast", + "category": "grocery", + "price": 8.0, + "quantity": 6 + } + ] + } +}, + +"expectedResponse": +[{"productName": "Agile Software Development", "category": "book", "price": 60.0, "quantity": 1}, {"productName": "Domain-Driven Design", "category": "book", "price": 32.0, "quantity": 1}, {"productName": "The Mythical Man Month", "category": "book", "price": 18.0, "quantity": 1}, {"productName": "The Art of Computer Programming", "category": "book", "price": 180.0, "quantity": 1}, {"productName": "Ground Coffee, Dark Roast", "category": "grocery", "price": 8.0, "quantity": 6}] +} + diff --git a/tests/asl_Map_short/wfms_delivery_test/checkAvailability.py b/tests/asl_Map_short/wfms_delivery_test/checkAvailability.py new file mode 100644 index 00000000..9440b45a --- /dev/null +++ b/tests/asl_Map_short/wfms_delivery_test/checkAvailability.py @@ -0,0 +1,5 @@ +#!/usr/bin/python + +def handle(event, context): + + return event diff --git a/tests/asl_Map_short/wfms_delivery_test/wfms_delivery_test.json b/tests/asl_Map_short/wfms_delivery_test/wfms_delivery_test.json new file mode 100644 index 
00000000..c92b20b2 --- /dev/null +++ b/tests/asl_Map_short/wfms_delivery_test/wfms_delivery_test.json @@ -0,0 +1,40 @@ +{ + "StartAt": "ProcessAllItems", + "States": { + "ProcessAllItems": { + "Type": "Map", + "InputPath": "$.detail", + "ItemsPath": "$.items", + "MaxConcurrency": 3, + "Iterator": { + "StartAt": "CheckAvailability", + "States": { + "CheckAvailability": { + "Type": "Task", + "Resource": "checkAvailability", + "Retry": [ + { + "ErrorEquals": [ + "TimeOut" + ], + "IntervalSeconds": 1, + "BackoffRate": 2, + "MaxAttempts": 3 + } + ], + "Next": "PrepareForDelivery" + }, + "PrepareForDelivery": { + "Type": "Pass", + "Next": "StartDelivery" + }, + "StartDelivery": { + "Type": "Pass", + "End": true + } + } + }, + "End": true + } + } +} diff --git a/tests/asl_Map_short/wfms_example_test.data b/tests/asl_Map_short/wfms_example_test.data new file mode 100644 index 00000000..0a94747d --- /dev/null +++ b/tests/asl_Map_short/wfms_example_test.data @@ -0,0 +1,21 @@ + +{ +"test_name": "Map State Example Test", + +"workflow_name": "wfms_example_test/wfms_example_test.json", + +"event": {"ship-date": "2016-03-14T01:59:00Z", + "detail": + {"delivery-partner": "UQS", + "shipped": [ + { "prod": "R31", "dest-code": 9511, "quantity": 1344 }, + { "prod": "S39", "dest-code": 9511, "quantit_y": 40 }, + { "prod": "R31", "dest-code": 9833, "quantity": 12 }, + { "prod": "R40", "dest-code": 9860, "quantity": 887 }, + { "prod": "R40", "dest-code": 9511, "quantity": 1220 } + ] + } + }, +"expectedResponse": {"detail": {"shipped": ["All keys are OK!", "item OK!", "All keys are OK!", "All keys are OK!", "All keys are OK!"]}} +} + diff --git a/tests/asl_Map_short/wfms_example_test/ship-val.py b/tests/asl_Map_short/wfms_example_test/ship-val.py new file mode 100644 index 00000000..493a0e3a --- /dev/null +++ b/tests/asl_Map_short/wfms_example_test/ship-val.py @@ -0,0 +1,23 @@ +#!/usr/bin/python +""" +{ + "parcel": { + "prod": "R31", + "dest-code": 9511, + "quantity": 1344 + }, + 
"courier": "UQS" +} +""" + +def handle(event, context): + + ret = "item NOK!" + if ("courier" in event and "parcel" in event): # just check for the keys + ret = "item OK!" + par = event["parcel"] + if "prod" in par.keys() and "dest-code" in par.keys() and "quantity" in par.keys(): + ret = "All keys are OK!" + + return ret + diff --git a/tests/asl_Map_short/wfms_example_test/wfms_example_test.json b/tests/asl_Map_short/wfms_example_test/wfms_example_test.json new file mode 100644 index 00000000..6602a549 --- /dev/null +++ b/tests/asl_Map_short/wfms_example_test/wfms_example_test.json @@ -0,0 +1,22 @@ +{ + "StartAt": "Validate-All", + "States": { + "Validate-All": { + "End": true, + "Type": "Map", + "InputPath": "$.detail", + "ItemsPath": "$.shipped", + "ResultPath": "$.detail.shipped", + "Parameters": { + "parcel.$": "$$.Map.Item.Value", + "courier.$": "$.delivery-partner" + }, + "MaxConcurrency": 0, + "Iterator": { + "StartAt": "ship-val", + "States": { + "ship-val": {"Type": "Task", "Resource": "ship-val", "End": true}} +} +} +} +} diff --git a/tests/asl_Map_short/wfms_hardcoded_test.data b/tests/asl_Map_short/wfms_hardcoded_test.data new file mode 100644 index 00000000..5f8bb520 --- /dev/null +++ b/tests/asl_Map_short/wfms_hardcoded_test.data @@ -0,0 +1,6 @@ +{ +"test_name": "Map State Hardcoded Input Test", +"workflow_name": "wfms_hardcoded_test/wfms_hardcoded_test.json", +"event" : "trigger map state input", +"expectedResponse" : [{"Value": "Iterate", "Index": 0}, {"Value": "Over", "Index": 1}, {"Value": "This", "Index": 2}, {"Value": "Array", "Index": 3}] +} diff --git a/tests/asl_Map_short/wfms_hardcoded_test/wfms_hardcoded_test.json b/tests/asl_Map_short/wfms_hardcoded_test/wfms_hardcoded_test.json new file mode 100644 index 00000000..efa81e28 --- /dev/null +++ b/tests/asl_Map_short/wfms_hardcoded_test/wfms_hardcoded_test.json @@ -0,0 +1,51 @@ +{ + "Comment": "An example of the Amazon States Language using a map state to process elements of an array with a 
max concurrency of 2.", + "StartAt": "HardcodedInputsState", + "States": { + "HardcodedInputsState": { + "Type": "Pass", + "Result": { + "array": [ + "Iterate", + "Over", + "This", + "Array" + ] + }, + "Next": "Map" + }, + "Map": { + "Type": "Map", + "ItemsPath": "$.array", + "MaxConcurrency": 0, + "Parameters": { + "Value.$": "$$.Map.Item.Value", + "Index.$": "$$.Map.Item.Index" + }, + "Next": "FinalState", + "Iterator": { + "StartAt": "You", + "States": { + "You": { + "Type": "Pass", + "Next": "Can" + }, + "Can": { + "Type": "Pass", + "Next": "Do" + }, + "Do": { + "Type": "Pass", + "Next": "Anything" + }, + "Anything": { + "Type": "Pass", + "End": true + } + } + } + }, + "FinalState": {"Type": "Pass", "End": true} + } +} + diff --git a/tests/asl_Map_short/wfms_iro_paths_processing_test.data b/tests/asl_Map_short/wfms_iro_paths_processing_test.data new file mode 100644 index 00000000..3894a118 --- /dev/null +++ b/tests/asl_Map_short/wfms_iro_paths_processing_test.data @@ -0,0 +1,50 @@ +{ +"test_name": "Map State IRO Paths Processing Test", + +"workflow_name": "wfms_iro_paths_test/wfms_iro_paths_processing_test.json", + +"event": { + "orderId": "12345678", + "orderDate": "20190820101213", + "detail": { + "customerId": "1234", + "deliveryAddress": "123, Seattle, WA", + "deliverySpeed": "1-day", + "paymentMethod": "aCreditCard", + "items": [ + { + "productName": "Agile Software Development", + "category": "book", + "price": 60.0, + "quantity": 1 + }, + { + "productName": "Domain-Driven Design", + "category": "book", + "price": 32.0, + "quantity": 1 + }, + { + "productName": "The Mythical Man Month", + "category": "book", + "price": 18.0, + "quantity": 1 + }, + { + "productName": "The Art of Computer Programming", + "category": "book", + "price": 180.0, + "quantity": 1 + }, + { + "productName": "Ground Coffee, Dark Roast", + "category": "grocery", + "price": 8.0, + "quantity": 6 + } + ] + } +}, + +"expectedResponse" : {"summary": "This is a test summary!", "detail": 
{"processedItems": [{"deliveryAddress": "123, Seattle, WA", "item": {"productName": "Agile Software Development", "category": "book", "price": 60.0, "quantity": 1}}, {"deliveryAddress": "123, Seattle, WA", "item": {"productName": "Domain-Driven Design", "category": "book", "price": 32.0, "quantity": 1}}, {"deliveryAddress": "123, Seattle, WA", "item": {"productName": "The Mythical Man Month", "category": "book", "price": 18.0, "quantity": 1}}, {"deliveryAddress": "123, Seattle, WA", "item": {"productName": "The Art of Computer Programming", "category": "book", "price": 180.0, "quantity": 1}}, {"deliveryAddress": "123, Seattle, WA", "item": {"productName": "Ground Coffee, Dark Roast", "category": "grocery", "price": 8.0, "quantity": 6}}]}} +} diff --git a/tests/asl_Map_short/wfms_iro_paths_test/checkAvailability.py b/tests/asl_Map_short/wfms_iro_paths_test/checkAvailability.py new file mode 100644 index 00000000..9816febc --- /dev/null +++ b/tests/asl_Map_short/wfms_iro_paths_test/checkAvailability.py @@ -0,0 +1,6 @@ +#!/usr/bin/python + +def handle(event, context): + + return event + diff --git a/tests/asl_Map_short/wfms_iro_paths_test/paymentFailed.py b/tests/asl_Map_short/wfms_iro_paths_test/paymentFailed.py new file mode 100644 index 00000000..4498557c --- /dev/null +++ b/tests/asl_Map_short/wfms_iro_paths_test/paymentFailed.py @@ -0,0 +1,6 @@ +#!/usr/bin/python + +def handle(event, context): + + return "payment failed!" 
+ diff --git a/tests/asl_Map_short/wfms_iro_paths_test/prepareForDelivery.py b/tests/asl_Map_short/wfms_iro_paths_test/prepareForDelivery.py new file mode 100644 index 00000000..9816febc --- /dev/null +++ b/tests/asl_Map_short/wfms_iro_paths_test/prepareForDelivery.py @@ -0,0 +1,6 @@ +#!/usr/bin/python + +def handle(event, context): + + return event + diff --git a/tests/asl_Map_short/wfms_iro_paths_test/sendOrderSummary.py b/tests/asl_Map_short/wfms_iro_paths_test/sendOrderSummary.py new file mode 100644 index 00000000..357a4d63 --- /dev/null +++ b/tests/asl_Map_short/wfms_iro_paths_test/sendOrderSummary.py @@ -0,0 +1,10 @@ +#!/usr/bin/python + +def handle(event, context): + context.log(str(event)) + + #for it in event: + # it["summary"] = "test" + + return "This is a test summary!" + diff --git a/tests/asl_Map_short/wfms_iro_paths_test/startDelivery.py b/tests/asl_Map_short/wfms_iro_paths_test/startDelivery.py new file mode 100644 index 00000000..9816febc --- /dev/null +++ b/tests/asl_Map_short/wfms_iro_paths_test/startDelivery.py @@ -0,0 +1,6 @@ +#!/usr/bin/python + +def handle(event, context): + + return event + diff --git a/tests/asl_Map_short/wfms_iro_paths_test/test.py b/tests/asl_Map_short/wfms_iro_paths_test/test.py new file mode 100644 index 00000000..61367b76 --- /dev/null +++ b/tests/asl_Map_short/wfms_iro_paths_test/test.py @@ -0,0 +1,91 @@ +# Copyright 2020 The microfunctions Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# Map state test motivated by stackovercloud article "New Step Functions Support for Dynamic Parallelism" +# https://www.stackovercloud.com/2019/09/18/new-step-functions-support-for-dynamic-parallelism/ + +import unittest +import os, sys +import json +import time + +sys.path.append("../") +from mfn_test_utils import MFNTest + +class MapStateHardcodedTest(unittest.TestCase): + + def test_map_state(self): + """ creates and executes the Map state test workflow from the ASL description """ + + testtuplelist = [] + + event = { + "orderId": "12345678", + "orderDate": "20190820101213", + "detail": { + "customerId": "1234", + "deliveryAddress": "123, Seattle, WA", + "deliverySpeed": "1-day", + "paymentMethod": "aCreditCard", + "items": [ + { + "productName": "Agile Software Development", + "category": "book", + "price": 60.0, + "quantity": 1 + }, + { + "productName": "Domain-Driven Design", + "category": "book", + "price": 32.0, + "quantity": 1 + }, + { + "productName": "The Mythical Man Month", + "category": "book", + "price": 18.0, + "quantity": 1 + }, + { + "productName": "The Art of Computer Programming", + "category": "book", + "price": 180.0, + "quantity": 1 + }, + { + "productName": "Ground Coffee, Dark Roast", + "category": "grocery", + "price": 8.0, + "quantity": 6 + } + ] + } +} + """ + expectedResponse = {"summary": "This is a test summary!", "processedItems": [{"deliveryAddress": "123, Seattle, WA", "item": {"productName": "Agile Software Development", "category": "book", "price": 60.0, "quantity": 1}}, {"deliveryAddress": "123, Seattle, WA", "item": {"productName": "Domain-Driven Design", "category": "book", "price": 32.0, "quantity": 1}}, {"deliveryAddress": "123, Seattle, WA", "item": {"productName": "The Mythical Man Month", "category": "book", "price": 18.0, "quantity": 1}}, {"deliveryAddress": "123, Seattle, WA", "item": {"productName": "The Art of Computer Programming", "category": "book", "price": 180.0, "quantity": 1}}, {"deliveryAddress": "123, 
Seattle, WA", "item": {"productName": "Ground Coffee, Dark Roast", "category": "grocery", "price": 8.0, "quantity": 6}}]} + """ + + expectedResponse = {"summary": "This is a test summary!", "detail": {"processedItems": [{"deliveryAddress": "123, Seattle, WA", "item": {"productName": "Agile Software Development", "category": "book", "price": 60.0, "quantity": 1}}, {"deliveryAddress": "123, Seattle, WA", "item": {"productName": "Domain-Driven Design", "category": "book", "price": 32.0, "quantity": 1}}, {"deliveryAddress": "123, Seattle, WA", "item": {"productName": "The Mythical Man Month", "category": "book", "price": 18.0, "quantity": 1}}, {"deliveryAddress": "123, Seattle, WA", "item": {"productName": "The Art of Computer Programming", "category": "book", "price": 180.0, "quantity": 1}}, {"deliveryAddress": "123, Seattle, WA", "item": {"productName": "Ground Coffee, Dark Roast", "category": "grocery", "price": 8.0, "quantity": 6}}]}} + + testtuplelist.append((json.dumps(event), json.dumps(expectedResponse))) + + test = MFNTest(test_name="Map State Input Items Result Output Path Test", workflow_filename="workflow_map_state_ior_path_test.json" ) + + st = time.time() + test.exec_tests(testtuplelist) + et = time.time() + + print ("test duration (s): %s" % str(et-st)) + diff --git a/tests/asl_Map_short/wfms_iro_paths_test/validatePayment.py b/tests/asl_Map_short/wfms_iro_paths_test/validatePayment.py new file mode 100644 index 00000000..79363aff --- /dev/null +++ b/tests/asl_Map_short/wfms_iro_paths_test/validatePayment.py @@ -0,0 +1,6 @@ +#!/usr/bin/python + +def handle(event, context): + event["payment"] = "Ok" + return event + diff --git a/tests/asl_Map_short/wfms_iro_paths_test/wfms_iro_paths_processing_test.json b/tests/asl_Map_short/wfms_iro_paths_test/wfms_iro_paths_processing_test.json new file mode 100644 index 00000000..00979d69 --- /dev/null +++ b/tests/asl_Map_short/wfms_iro_paths_test/wfms_iro_paths_processing_test.json @@ -0,0 +1,79 @@ +{ + "StartAt": 
"ValidatePayment", + "States": { + "ValidatePayment": { + "Type": "Task", + "Resource": "validatePayment", + "Next": "CheckPayment" + }, + "CheckPayment": { + "Type": "Choice", + "Choices": [ + { + "Not": { + "Variable": "$.payment", + "StringEquals": "Ok" + }, + "Next": "PaymentFailed" + } + ], + "Default": "ProcessAllItems" + }, + "PaymentFailed": { + "Type": "Task", + "Resource": "paymentFailed", + "End": true + }, + "ProcessAllItems": { + "Type": "Map", + "ItemsPath": "$.items", + "InputPath": "$.detail", + "MaxConcurrency": 3, + "Parameters": { + "index.$": "$$.Map.Item.Index", + "item.$": "$$.Map.Item.Value", + "deliveryAddress.$": "$.deliveryAddress" + }, + "Iterator": { + "StartAt": "CheckAvailability", + "States": { + "CheckAvailability": { + "Type": "Task", + "Resource": "checkAvailability", + "Retry": [ + { + "ErrorEquals": [ + "TimeOut" + ], + "IntervalSeconds": 1, + "BackoffRate": 2, + "MaxAttempts": 3 + } + ], + "Next": "PrepareForDelivery" + }, + "PrepareForDelivery": { + "Type": "Task", + "Resource": "prepareForDelivery", + "Next": "StartDelivery" + }, + "StartDelivery": { + "Type": "Task", + "Resource": "startDelivery", + "End": true + } + } + }, + "ResultPath": "$.detail.processedItems", + "Next": "SendOrderSummary" + }, + "SendOrderSummary": { + "Type": "Task", + "Resource": "sendOrderSummary", + "ResultPath": "$.summary", + "InputPath": "$.processedItems", + "End": true + } + } +} + diff --git a/tests/asl_Map_short/wfms_parameters_test.data b/tests/asl_Map_short/wfms_parameters_test.data new file mode 100644 index 00000000..4dbed3e9 --- /dev/null +++ b/tests/asl_Map_short/wfms_parameters_test.data @@ -0,0 +1,21 @@ + +{ +"test_name": "Map State Parameters Test", + +"workflow_name": "wfms_parameters_test/wfms_parameters_test.json", + +"event": [{"who": "bob"},{"who": "meg"},{"who": "joe"}], + +"expectedResponse" : {"result" :[{ + "ContextValue": { + "who": "bob" + },"ContextIndex": 0}, + { + "ContextValue": { + "who": "meg" + },"ContextIndex": 1}, 
+ { + "ContextValue": { + "who": "joe" + }, "ContextIndex": 2}]} +} diff --git a/tests/asl_Map_short/wfms_parameters_test/wfms_parameters_test.json b/tests/asl_Map_short/wfms_parameters_test/wfms_parameters_test.json new file mode 100644 index 00000000..fae88533 --- /dev/null +++ b/tests/asl_Map_short/wfms_parameters_test/wfms_parameters_test.json @@ -0,0 +1,24 @@ +{ + "StartAt": "ExampleMapState", + "States": { + "ExampleMapState": { + "Type": "Map", + "ResultPath": "$.result", + "Parameters": { + "ContextIndex.$": "$$.Map.Item.Index", + "ContextValue.$": "$$.Map.Item.Value" + }, + "Iterator": { + "StartAt": "TestPass", + "States": { + "TestPass": { + "Type": "Pass", + "End": true + } + } + }, + "End": true + } + } +} + diff --git a/tests/asl_Map_short/wfms_test_mc0.json b/tests/asl_Map_short/wfms_test_mc0.json new file mode 100644 index 00000000..924b8bd2 --- /dev/null +++ b/tests/asl_Map_short/wfms_test_mc0.json @@ -0,0 +1,21 @@ +{ + "StartAt": "PassTest1", + "States": { + "PassTest2": {"Type":"Pass", "End": true}, + "PassTest1": {"Type":"Pass", "Next":"ExampleMapState"}, + "ExampleMapState": { + "Next": "PassTest2", + "Type": "Map", + "MaxConcurrency": 0, + "Iterator": { + "StartAt": "PassTest5", + "States": { + "PassTest5": {"Type": "Pass", "Next":"PassTest3"}, + "PassTest3": {"Type": "Pass", "Next":"CallLambda"}, + "CallLambda": {"Type": "Task", "Resource": "CallLambda", "Next": "PassTest4"}, + "PassTest4": {"Type": "Pass", "End": true} +} +} +} +} +} diff --git a/tests/asl_Map_short/wfms_test_mc1.json b/tests/asl_Map_short/wfms_test_mc1.json new file mode 100644 index 00000000..a62ee7ab --- /dev/null +++ b/tests/asl_Map_short/wfms_test_mc1.json @@ -0,0 +1,21 @@ +{ + "StartAt": "PassTest1", + "States": { + "PassTest2": {"Type":"Pass", "End": true}, + "PassTest1": {"Type":"Pass", "Next":"ExampleMapState"}, + "ExampleMapState": { + "Next": "PassTest2", + "Type": "Map", + "MaxConcurrency": 1, + "Iterator": { + "StartAt": "PassTest5", + "States": { + "PassTest5": 
{"Type": "Pass", "Next":"PassTest3"}, + "PassTest3": {"Type": "Pass", "Next":"CallLambda"}, + "CallLambda": {"Type": "Task", "Resource": "CallLambda", "Next": "PassTest4"}, + "PassTest4": {"Type": "Pass", "End": true} +} +} +} +} +} diff --git a/tests/asl_Map_short/wfms_test_mc2.json b/tests/asl_Map_short/wfms_test_mc2.json new file mode 100644 index 00000000..e69edffe --- /dev/null +++ b/tests/asl_Map_short/wfms_test_mc2.json @@ -0,0 +1,21 @@ +{ + "StartAt": "PassTest1", + "States": { + "PassTest2": {"Type":"Pass", "End": true}, + "PassTest1": {"Type":"Pass", "Next":"ExampleMapState"}, + "ExampleMapState": { + "Next": "PassTest2", + "Type": "Map", + "MaxConcurrency": 2, + "Iterator": { + "StartAt": "PassTest5", + "States": { + "PassTest5": {"Type": "Pass", "Next":"PassTest3"}, + "PassTest3": {"Type": "Pass", "Next":"CallLambda"}, + "CallLambda": {"Type": "Task", "Resource": "CallLambda", "Next": "PassTest4"}, + "PassTest4": {"Type": "Pass", "End": true} +} +} +} +} +} diff --git a/tests/asl_Map_short/wfms_test_mc3.json b/tests/asl_Map_short/wfms_test_mc3.json new file mode 100644 index 00000000..d19b09b1 --- /dev/null +++ b/tests/asl_Map_short/wfms_test_mc3.json @@ -0,0 +1,21 @@ +{ + "StartAt": "PassTest1", + "States": { + "PassTest2": {"Type":"Pass", "End": true}, + "PassTest1": {"Type":"Pass", "Next":"ExampleMapState"}, + "ExampleMapState": { + "Next": "PassTest2", + "Type": "Map", + "MaxConcurrency": 3, + "Iterator": { + "StartAt": "PassTest5", + "States": { + "PassTest5": {"Type": "Pass", "Next":"PassTest3"}, + "PassTest3": {"Type": "Pass", "Next":"CallLambda"}, + "CallLambda": {"Type": "Task", "Resource": "CallLambda", "Next": "PassTest4"}, + "PassTest4": {"Type": "Pass", "End": true} +} +} +} +} +} diff --git a/tests/asl_Map_short/wfms_thingspiratessay_test.data b/tests/asl_Map_short/wfms_thingspiratessay_test.data new file mode 100644 index 00000000..9af524e0 --- /dev/null +++ b/tests/asl_Map_short/wfms_thingspiratessay_test.data @@ -0,0 +1,18 @@ +{ 
+"test_name": "Map State Pirates Say Test", + +"workflow_name": "wfms_thingspiratessay_test/wfms_thingspiratessay_test.json", + +"event" : {"ThingsPiratesSay": [{ + "say": "Avast!"},{ + "say": "Yar!"},{ + "say": "Walk the Plank!"}], + "ThingsGiantsSay": [{ + "say": "Fee!"},{ + "say": "Fi!"},{ + "say": "Fo!"},{ + "say": "Fum!"}] + }, +"expectedResponse" : [ {"say": "Avast!" },{ "say": "Yar!" }, { "say": "Walk the Plank!" } ] +} + diff --git a/tests/asl_Map_short/wfms_thingspiratessay_test/wfms_thingspiratessay_test.json b/tests/asl_Map_short/wfms_thingspiratessay_test/wfms_thingspiratessay_test.json new file mode 100644 index 00000000..98b3cbec --- /dev/null +++ b/tests/asl_Map_short/wfms_thingspiratessay_test/wfms_thingspiratessay_test.json @@ -0,0 +1,19 @@ +{ + "StartAt": "PiratesSay", + "States": { + "PiratesSay": { + "Type": "Map", + "ItemsPath": "$.ThingsPiratesSay", + "Iterator": { + "StartAt": "SayWord", + "States": { + "SayWord": { + "Type": "Pass", + "End": true + } + } + }, + "End": true + } + } +} From a9ad92052029e44d3248907d19c572c96b0d2ba1 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Wed, 7 Oct 2020 17:08:46 +0000 Subject: [PATCH 18/64] adding first cut on gpu node selection logic for ansible multi-host deployments --- ManagementService/python/deployWorkflow.py | 40 ++++++++++++++++------ mfn_sdk/mfn_sdk/mfnclient.py | 2 -- mfn_sdk/mfn_sdk/workflow.py | 2 -- tests/mfn_test_utils.py | 10 ------ 4 files changed, 30 insertions(+), 24 deletions(-) diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index 59302483..9ea8ea70 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -486,6 +486,14 @@ def handle(value, sapi): #dlc.put("deployment_info_workflow_" + workflow["id"], json.dumps(deployment_info)) # _XXX_: important! 
# put must not be queued as the function currently waits for the container to become ready + + if "gpu_usage" in wfmeta and wfmeta["gpu_usage"] != "None": + gpu_usage = float(wfmeta["gpu_usage"]) + else: + gpu_usage = 0. + + #print("deduced gpu_usage: " + str(gpu_usage)) + sapi.put("deployment_info_workflow_" + workflow["id"], json.dumps(deployment_info), True, False) if 'KUBERNETES_SERVICE_HOST' in os.environ: @@ -494,13 +502,6 @@ def handle(value, sapi): else: runtime = "Python" - if "gpu_usage" in wfmeta and wfmeta["gpu_usage"] != "None": - gpu_usage = float(wfmeta["gpu_usage"]) - else: - gpu_usage = 0. - - #print("deduced gpu_usage: " + str(gpu_usage)) - url, endpoint_key = create_k8s_deployment(email, workflow_info, runtime, gpu_usage) if url is not None and len(url) > 0: status = "deploying" @@ -515,8 +516,11 @@ def handle(value, sapi): # We're running BARE METAL mode # _XXX_: due to the queue service still being in java in the sandbox - sandbox_image_name = "microfn/sandbox" # default value - + if gpu_usage == 0: + sandbox_image_name = "microfn/sandbox" # default value + elif gpu_usage != 0 and runtime == "Python": + sandbox_image_name = "microfn/sandbox_gpu" # sandbox uses GPU + if any(resource_info_map[res_name]["runtime"] == "Java" for res_name in resource_info_map): sandbox_image_name = "microfn/sandbox_java" @@ -526,8 +530,24 @@ def handle(value, sapi): if hosts is not None and hosts != "": hosts = json.loads(hosts) deployed_hosts = {} - # instruct hosts to start the sandbox and deploy workflow + gpu_hosts = {} + picked_hosts = {} + for hostname in hosts: + if hostname.endswith("_gpu"): + hostip = hosts[hostname] + gpu_hosts[hostname] = hostip + + # instruct hosts to start the sandbox and deploy workflow + if runtime=="Java" or sandbox_image_name == "microfn/sandbox": # can use any host + picked_hosts = hosts + elif len(gpu_hosts) > 0: + picked_hosts = gpu_hosts + else: + picked_hosts = hosts # fallback as there are no gpu hosts available + 
print("available GPU hosts is empty. Deploying on general purpose host") + + for hostname in picked_hosts: # loop over all hosts, need to pich gpu hosts for python/gpu workflows hostip = hosts[hostname] host_to_deploy = (hostname, hostip) success, endpoint_key = start_docker_sandbox(host_to_deploy, email, workflow_info["sandboxId"], workflow_info["workflowId"], workflow_info["workflowName"], sandbox_image_name) diff --git a/mfn_sdk/mfn_sdk/mfnclient.py b/mfn_sdk/mfn_sdk/mfnclient.py index 68e0c174..7e5ed4ee 100644 --- a/mfn_sdk/mfn_sdk/mfnclient.py +++ b/mfn_sdk/mfn_sdk/mfnclient.py @@ -266,7 +266,6 @@ def action(self,action,data=None): r.raise_for_status() log.debug("%s: %s <- %s", self.user, action, r.text[:256]+(r.text[256:] and '...')) resp = r.json() - #print(str(resp)) if resp.get('status','') != 'success': if resp.get('has_error',False): raise Exception(f"MicroFunctions Error for action {action}: {resp['error_type']}") @@ -476,7 +475,6 @@ def add_workflow(self,name,filename=None, gpu_usage="None"): # parse the WF json to find required functions fnames = [] wfjson = json.loads(wfdesc) - #print("wfjson: "+ str(wfjson)) if 'States' in wfjson: state_list = self._get_state_names_and_resource('Task', wfjson) for state_info in state_list: diff --git a/mfn_sdk/mfn_sdk/workflow.py b/mfn_sdk/mfn_sdk/workflow.py index 04dacaad..424d8e92 100644 --- a/mfn_sdk/mfn_sdk/workflow.py +++ b/mfn_sdk/mfn_sdk/workflow.py @@ -52,7 +52,6 @@ class Workflow(object): """ def __init__(self,client,wf): - print(str(wf)) self.client=client self.id=wf["id"] self._name=wf["name"] @@ -104,7 +103,6 @@ def modified(self): def status(self): data = self.client.action('getWorkflows',{'workflow':{'id':self.id}}) self._status = data['workflow']['status'] - #self._status == "undeployed" if self._status == "deployed": self._endpoints = data['workflow']['endpoints'] else: diff --git a/tests/mfn_test_utils.py b/tests/mfn_test_utils.py index 8ff2f34b..7d5b0e45 100644 --- a/tests/mfn_test_utils.py +++ 
b/tests/mfn_test_utils.py @@ -67,7 +67,6 @@ def __init__(self, test_name=None, timeout=None, workflow_filename=None, new_use self._workflow_folder = self._workflow_filename[:ind+1] else: self._workflow_folder = "./" - #print("Workflow folder: " + self._workflow_folder) self._workflow_description = self._get_json_file(self._workflow_filename) @@ -176,7 +175,6 @@ def _get_resource_info(self, resource_ref): return retval def _get_resource_info_map(self, workflow_description=None, resource_info_map=None): - #print(str("wf description: " + str(workflow_description))) if workflow_description is None: workflow_description = self._workflow_description if resource_info_map is None: @@ -223,15 +221,12 @@ def _get_resource_info_map(self, workflow_description=None, resource_info_map=No if "Type" in state and state["Type"] == "Map": branch = state['Iterator'] - #print(str(branch)) resource_info_map = self._get_resource_info_map(branch, resource_info_map) - #print(str(resource_info_map)) else: print("ERROR: invalid workflow description.") assert False - #resource_info_map['num_gpu'] = self._settings['num_gpu'] return resource_info_map def _delete_resource_if_existing(self, existing_resources, resource_name): @@ -243,7 +238,6 @@ def _delete_resource_if_existing(self, existing_resources, resource_name): def _create_and_upload_resource(self, resource_name, resource_info): print("Deploying resource: " + resource_name) - #print(str (resource_info)) resource_filename = resource_info["resource_filename"] is_zip = resource_info["is_zip"] @@ -292,8 +286,6 @@ def upload_workflow(self): self.undeploy_workflow() resource_info_map = self._get_resource_info_map() - #resource_info_map['num_gpu'] = 1 - #print(str(resource_info_map)) existing_resources = self._client.functions @@ -312,11 +304,9 @@ def deploy_workflow(self): try: gpu_usage=self._settings["gpu_usage"] wf = self._client.add_workflow(self._workflow_name, None, gpu_usage) - #print ("retuned from add_workflow: " + str(wf)) wf.json = 
json.dumps(self._workflow_description) wf.deploy(self._settings["timeout"]) self._workflow = wf - print ("transformed wf with gpu usage" + str(wf.gpu_usage)) if self._workflow.status != "failed": print("MFN workflow " + self._workflow_name + " deployed.") else: From ef7ed75a8756d834cb2c628354249ad99dab1301 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Thu, 8 Oct 2020 10:55:15 +0000 Subject: [PATCH 19/64] fixing bugs on SDK and GPU test configurations --- tests/asl_Face_Recognition/test.py | 2 +- tests/asl_Tensorflow_HelloWorld/test.py | 2 +- tests/mfn_test_utils.py | 4 ---- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/tests/asl_Face_Recognition/test.py b/tests/asl_Face_Recognition/test.py index 4caf93c8..4487e4ba 100644 --- a/tests/asl_Face_Recognition/test.py +++ b/tests/asl_Face_Recognition/test.py @@ -34,7 +34,7 @@ def test_tensorflow(self): res1 = '"[(68, 497, 175, 390)]"' testtuplelist =[(inp1, res1)] - test = MFNTest(test_name = "FaceRecognition__Test") + test = MFNTest(test_name = "FaceRecognition__Test", gpu_usage="1") #time.sleep(10) # wait for deployment test.exec_tests(testtuplelist) diff --git a/tests/asl_Tensorflow_HelloWorld/test.py b/tests/asl_Tensorflow_HelloWorld/test.py index 0c4337ad..0b47f872 100644 --- a/tests/asl_Tensorflow_HelloWorld/test.py +++ b/tests/asl_Tensorflow_HelloWorld/test.py @@ -34,7 +34,7 @@ def test_tensorflow(self): res1 = '"GPU available: True"' testtuplelist =[(inp1, res1)] - test = MFNTest(test_name = "Tensorflow__Test") + test = MFNTest(test_name = "Tensorflow__Test", gpu_usage = "1") #time.sleep(10) # wait for deployment test.exec_tests(testtuplelist) diff --git a/tests/mfn_test_utils.py b/tests/mfn_test_utils.py index 9ebc1850..39589fe7 100644 --- a/tests/mfn_test_utils.py +++ b/tests/mfn_test_utils.py @@ -376,11 +376,7 @@ def exec_only(self, inp): if any_failed_tests: self._print_logs(self._workflow.logs()) -<<<<<<< HEAD - def exec_tests(self, testtuplelist, check_just_keys=False, check_duration=False, 
should_undeploy=True, async_=False): -======= def exec_tests(self, testtuplelist, check_just_keys=False, check_duration=False, should_undeploy=True, async_=False, print_report=True): ->>>>>>> develop any_failed_tests = False durations = [] From f7571f238bf14422a29c507ab0420afee10b3488 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Fri, 9 Oct 2020 12:48:45 +0000 Subject: [PATCH 20/64] adding logic to configure gpu hosts, fixing bug on deployWorkflow on k8s label names --- ManagementService/python/deployWorkflow.py | 4 ++++ deploy/ansible/scripts/available_hosts.py | 11 +++++++++- deploy/helm/microfunctions/values.yaml | 11 +++++----- tests/asl_Map/test.py | 20 ++++++++++--------- ...ntext_test.data => wfms_context_test.data} | 2 +- .../wfms_context.json} | 0 ...very_test.data => wfms_delivery_test.data} | 2 +- .../checkAvailability.py | 0 .../wfms_delivery_test.json} | 0 ...ample_test.data => wfms_example_test.data} | 2 +- .../ship-val.py | 0 .../wfms_example_test.json} | 0 ...ded_test.data => wfms_hardcoded_test.data} | 3 +-- .../wfms_hardcoded_test.json} | 0 ...ta => wfms_iro_paths_processing_test.data} | 2 +- .../checkAvailability.py | 0 .../paymentFailed.py | 0 .../prepareForDelivery.py | 0 .../sendOrderSummary.py | 0 .../startDelivery.py | 0 .../test.py | 0 .../validatePayment.py | 0 .../wfms_iro_paths_processing_test.json} | 0 ...rs_test.data => wfms_parameters_test.data} | 2 +- .../wfms_parameters_test.json} | 0 ...state_test_mc0.json => wfms_test_mc0.json} | 0 ...state_test_mc1.json => wfms_test_mc1.json} | 0 ...state_test_mc2.json => wfms_test_mc2.json} | 0 ...state_test_mc3.json => wfms_test_mc3.json} | 0 ...t.data => wfms_thingspiratessay_test.data} | 2 +- .../wfms_thingspiratessay_test.json} | 0 31 files changed, 38 insertions(+), 23 deletions(-) rename tests/asl_Map/{workflow_map_state_context_test.data => wfms_context_test.data} (76%) rename tests/asl_Map/{workflow_map_state_context_test/workflow_map_state_context_test.json => 
wfms_context_test/wfms_context.json} (100%) rename tests/asl_Map/{workflow_map_state_delivery_test.data => wfms_delivery_test.data} (94%) rename tests/asl_Map/{workflow_map_state_delivery_test => wfms_delivery_test}/checkAvailability.py (100%) rename tests/asl_Map/{workflow_map_state_delivery_test/workflow_map_state_delivery_test.json => wfms_delivery_test/wfms_delivery_test.json} (100%) rename tests/asl_Map/{workflow_map_state_example_test.data => wfms_example_test.data} (90%) rename tests/asl_Map/{workflow_map_state_example_test => wfms_example_test}/ship-val.py (100%) rename tests/asl_Map/{workflow_map_state_example_test/workflow_map_state_example_test.json => wfms_example_test/wfms_example_test.json} (100%) rename tests/asl_Map/{workflow_map_state_hardcoded_test.data => wfms_hardcoded_test.data} (72%) rename tests/asl_Map/{workflow_map_state_hardcoded_test/workflow_map_state_hardcoded_test.json => wfms_hardcoded_test/wfms_hardcoded_test.json} (100%) rename tests/asl_Map/{workflow_map_state_iro_paths_processing_test.data => wfms_iro_paths_processing_test.data} (94%) rename tests/asl_Map/{workflow_map_state_iro_paths_processing_test => wfms_iro_paths_test}/checkAvailability.py (100%) rename tests/asl_Map/{workflow_map_state_iro_paths_processing_test => wfms_iro_paths_test}/paymentFailed.py (100%) rename tests/asl_Map/{workflow_map_state_iro_paths_processing_test => wfms_iro_paths_test}/prepareForDelivery.py (100%) rename tests/asl_Map/{workflow_map_state_iro_paths_processing_test => wfms_iro_paths_test}/sendOrderSummary.py (100%) rename tests/asl_Map/{workflow_map_state_iro_paths_processing_test => wfms_iro_paths_test}/startDelivery.py (100%) rename tests/asl_Map/{workflow_map_state_iro_paths_processing_test => wfms_iro_paths_test}/test.py (100%) rename tests/asl_Map/{workflow_map_state_iro_paths_processing_test => wfms_iro_paths_test}/validatePayment.py (100%) rename 
tests/asl_Map/{workflow_map_state_iro_paths_processing_test/workflow_map_state_iro_paths_processing_test.json => wfms_iro_paths_test/wfms_iro_paths_processing_test.json} (100%) rename tests/asl_Map/{workflow_map_state_parameters_test.data => wfms_parameters_test.data} (87%) rename tests/asl_Map/{workflow_map_state_parameters_test/workflow_map_state_parameters_test.json => wfms_parameters_test/wfms_parameters_test.json} (100%) rename tests/asl_Map/{workflow_map_state_test_mc0.json => wfms_test_mc0.json} (100%) rename tests/asl_Map/{workflow_map_state_test_mc1.json => wfms_test_mc1.json} (100%) rename tests/asl_Map/{workflow_map_state_test_mc2.json => wfms_test_mc2.json} (100%) rename tests/asl_Map/{workflow_map_state_test_mc3.json => wfms_test_mc3.json} (100%) rename tests/asl_Map/{workflow_map_state_thingspiratessay_test.data => wfms_thingspiratessay_test.data} (82%) rename tests/asl_Map/{workflow_map_state_thingspiratessay_test/workflow_map_state_thingspiratessay_test.json => wfms_thingspiratessay_test/wfms_thingspiratessay_test.json} (100%) diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index c2d29c77..5d7f07b8 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -278,6 +278,10 @@ def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, management=F # Kubernetes labels cannot contain @ or _ and should start and end with alphanumeric characters wfNameSanitized = 'wf-' + workflow_info["workflowId"].replace('@', '-').replace('_', '-').lower() + '-wf' wfActualNameSanitized = 'wf-' + workflow_info["workflowName"].replace('@', '-').replace('_', '-').replace('/','-').lower() + '-wf' + if len(wfActualNameSanitized) > 63: + print("Error creating kubernetes deployment for "+email+" "+workflow_info["workflowId"] + ", workflow name too long") + + emailSanitized = 'u-' + email.replace('@', '-').replace('_', '-').lower() + '-u' # Pod, Deployment and Hpa names 
for the new workflow will have a prefix containing the workflow name and user name app_fullname_prefix = '' diff --git a/deploy/ansible/scripts/available_hosts.py b/deploy/ansible/scripts/available_hosts.py index df94cb00..c893d08f 100755 --- a/deploy/ansible/scripts/available_hosts.py +++ b/deploy/ansible/scripts/available_hosts.py @@ -21,6 +21,7 @@ import sys import riak import socket +import subprocess ### global variables set at runtime DLCLIENT=None @@ -60,7 +61,14 @@ def dl_get(key): def add_host(hostname,hostip=None): if hostip is None: hostip = socket.gethostbyname(hostname) - print("Adding host: " + str(hostname)) + has_gpu = False + try: + has_gpu = ("NVIDIA" in subprocess.check_output('nvcc --version'.split(' ')).decode()) + except Exception: + pass + + print("Adding host: " + str(hostname) + ", has gpu: "+ str(has_gpu)) + + v = dl_get("available_hosts") if v.encoded_data is not None and len(v.encoded_data) > 0: hosts = json.loads((v.encoded_data).decode()) @@ -71,6 +79,7 @@ def add_host(hostname,hostip=None): hosts = {} if hostname != None and hostname not in hosts: hosts[hostname] = hostip + # hosts[hostname]["has_gpu"] = True v.encoded_data = json.dumps(hosts).encode() v.store() return hosts diff --git a/deploy/helm/microfunctions/values.yaml b/deploy/helm/microfunctions/values.yaml index 02e2cfb6..0b298cfe 100644 --- a/deploy/helm/microfunctions/values.yaml +++ b/deploy/helm/microfunctions/values.yaml @@ -20,12 +20,11 @@ # MicroFunction management workflow #------------------------------------------------------------------------------ #imageRepo: "registry.kube-system.svc.cluster.local" -imageRepo: "localhost:5000" +imageRepo: "localhost:5000" # kubespray deployment: imageRepo: "localhost:5000" manager: #httpProxy: "http://:" #httpsProxy: "http://:" - httpGatewayPort: 30336 - #httpsGatewayPort: 32533 + httpGatewayPort: 30336 # kubespray deployment nameOverride: "microfunctions" newWorkflow: hpa: @@ -65,7 +64,8 @@ manager: # MicroFunction Datalayer
#------------------------------------------------------------------------------ datalayer: - replicas: 1 + replicas: 1 # kubespray deployment + #replicas: 3 imagePath: "/microfn/datalayer" imageTag: "latest" imagePullPolicy: "Always" @@ -83,7 +83,8 @@ datalayer: # Riak global data storage #------------------------------------------------------------------------------ riak: - replicas: 1 + replicas: 1 # kubespray deployment + #replicas: 3 imagePath: "/microfn/riak" imageTag: "latest" imagePullPolicy: "Always" diff --git a/tests/asl_Map/test.py b/tests/asl_Map/test.py index b69a1c7e..463b71e6 100644 --- a/tests/asl_Map/test.py +++ b/tests/asl_Map/test.py @@ -25,23 +25,25 @@ class MapStateTest(unittest.TestCase): """ event = '[{"who": "bob"},{"who": "meg"},{"who": "joe"}]' expectedResponse = '[{"ContextValue": {"who": "bob"}, "ContextIndex": 0},{"ContextValue": {"who": "meg"}, "ContextIndex": 1}, {"ContextValue": {"who": "joe"}, "ContextIndex": 2 }]' - test_map = [("asl_Map_State_Context_Data", "workflow_map_state_context_test/workflow_map_state_context_test.json", [(event, expectedResponse)])] + test_map = [("asl_Map_State_Context_Data", "wfms_context_test/wfms_context_test.json", [(event, expectedResponse)])] """ def test_map_state(self): - file_list = ["workflow_map_state_delivery_test.data", - "workflow_map_state_context_test.data", - "workflow_map_state_example_test.data", - "workflow_map_state_parameters_test.data", - "workflow_map_state_thingspiratessay_test.data", - "workflow_map_state_iro_paths_processing_test.data", - "workflow_map_state_hardcoded_test.data"] + file_list = ["wfms_delivery_test.data", + "wfms_context_test.data", + "wfms_example_test.data", + "wfms_parameters_test.data", + "wfms_thingspiratessay_test.data", + #"wfms_iro_paths_processing_test.data", + #"wfms_hardcoded_test.data" + ] for file in file_list: with open(file) as json_input: testtuplelist = [] data = json.load(json_input) testtuplelist.append((json.dumps(data["event"]), 
json.dumps(data["expectedResponse"]))) + print("CHECK: " +str(data["test_name"])+ " " + str(data["workflow_name"])) test = MFNTest(test_name=data["test_name"], workflow_filename=data["workflow_name"]) st = time.time() test.exec_tests(testtuplelist) @@ -69,7 +71,7 @@ def test_map_state(self): expectedResponse = ["Hello, joe!", "Hello, bob!", "Hello, meg!", "Hello, dave!", "Hello, tom!", "Hello, ray!"] testtuplelist.append((json.dumps(event), json.dumps(expectedResponse))) - test = MFNTest(test_name="Map State Test", workflow_filename=("workflow_map_state_test_mc%s.json" % mc)) + test = MFNTest(test_name="Map State Test", workflow_filename=("wfms_test_mc%s.json" % mc)) print("MaxConcurrency level: %i " % mc) diff --git a/tests/asl_Map/workflow_map_state_context_test.data b/tests/asl_Map/wfms_context_test.data similarity index 76% rename from tests/asl_Map/workflow_map_state_context_test.data rename to tests/asl_Map/wfms_context_test.data index 7efbb4e7..1c4c1116 100644 --- a/tests/asl_Map/workflow_map_state_context_test.data +++ b/tests/asl_Map/wfms_context_test.data @@ -2,7 +2,7 @@ { "test_name": "Map State Context Test", -"workflow_name": "workflow_map_state_context_test/workflow_map_state_context_test.json", +"workflow_name": "wfms_context_test/wfms_context.json", "event": [{"who": "bob"},{"who": "meg"},{"who": "joe"}], diff --git a/tests/asl_Map/workflow_map_state_context_test/workflow_map_state_context_test.json b/tests/asl_Map/wfms_context_test/wfms_context.json similarity index 100% rename from tests/asl_Map/workflow_map_state_context_test/workflow_map_state_context_test.json rename to tests/asl_Map/wfms_context_test/wfms_context.json diff --git a/tests/asl_Map/workflow_map_state_delivery_test.data b/tests/asl_Map/wfms_delivery_test.data similarity index 94% rename from tests/asl_Map/workflow_map_state_delivery_test.data rename to tests/asl_Map/wfms_delivery_test.data index 7f8e7c9a..6cb77f4a 100644 --- a/tests/asl_Map/workflow_map_state_delivery_test.data +++ 
b/tests/asl_Map/wfms_delivery_test.data @@ -1,7 +1,7 @@ {"test_name": "Map State Delivery Test", -"workflow_name": "workflow_map_state_delivery_test/workflow_map_state_delivery_test.json", +"workflow_name": "wfms_delivery_test/wfms_delivery_test.json", "event": {"orderId": "12345678", diff --git a/tests/asl_Map/workflow_map_state_delivery_test/checkAvailability.py b/tests/asl_Map/wfms_delivery_test/checkAvailability.py similarity index 100% rename from tests/asl_Map/workflow_map_state_delivery_test/checkAvailability.py rename to tests/asl_Map/wfms_delivery_test/checkAvailability.py diff --git a/tests/asl_Map/workflow_map_state_delivery_test/workflow_map_state_delivery_test.json b/tests/asl_Map/wfms_delivery_test/wfms_delivery_test.json similarity index 100% rename from tests/asl_Map/workflow_map_state_delivery_test/workflow_map_state_delivery_test.json rename to tests/asl_Map/wfms_delivery_test/wfms_delivery_test.json diff --git a/tests/asl_Map/workflow_map_state_example_test.data b/tests/asl_Map/wfms_example_test.data similarity index 90% rename from tests/asl_Map/workflow_map_state_example_test.data rename to tests/asl_Map/wfms_example_test.data index 7253d596..0a94747d 100644 --- a/tests/asl_Map/workflow_map_state_example_test.data +++ b/tests/asl_Map/wfms_example_test.data @@ -2,7 +2,7 @@ { "test_name": "Map State Example Test", -"workflow_name": "workflow_map_state_example_test/workflow_map_state_example_test.json", +"workflow_name": "wfms_example_test/wfms_example_test.json", "event": {"ship-date": "2016-03-14T01:59:00Z", "detail": diff --git a/tests/asl_Map/workflow_map_state_example_test/ship-val.py b/tests/asl_Map/wfms_example_test/ship-val.py similarity index 100% rename from tests/asl_Map/workflow_map_state_example_test/ship-val.py rename to tests/asl_Map/wfms_example_test/ship-val.py diff --git a/tests/asl_Map/workflow_map_state_example_test/workflow_map_state_example_test.json b/tests/asl_Map/wfms_example_test/wfms_example_test.json similarity index 
100% rename from tests/asl_Map/workflow_map_state_example_test/workflow_map_state_example_test.json rename to tests/asl_Map/wfms_example_test/wfms_example_test.json diff --git a/tests/asl_Map/workflow_map_state_hardcoded_test.data b/tests/asl_Map/wfms_hardcoded_test.data similarity index 72% rename from tests/asl_Map/workflow_map_state_hardcoded_test.data rename to tests/asl_Map/wfms_hardcoded_test.data index d3e24b18..5f8bb520 100644 --- a/tests/asl_Map/workflow_map_state_hardcoded_test.data +++ b/tests/asl_Map/wfms_hardcoded_test.data @@ -1,7 +1,6 @@ - { "test_name": "Map State Hardcoded Input Test", -"workflow_name": "workflow_map_state_hardcoded_test/workflow_map_state_hardcoded_test.json", +"workflow_name": "wfms_hardcoded_test/wfms_hardcoded_test.json", "event" : "trigger map state input", "expectedResponse" : [{"Value": "Iterate", "Index": 0}, {"Value": "Over", "Index": 1}, {"Value": "This", "Index": 2}, {"Value": "Array", "Index": 3}] } diff --git a/tests/asl_Map/workflow_map_state_hardcoded_test/workflow_map_state_hardcoded_test.json b/tests/asl_Map/wfms_hardcoded_test/wfms_hardcoded_test.json similarity index 100% rename from tests/asl_Map/workflow_map_state_hardcoded_test/workflow_map_state_hardcoded_test.json rename to tests/asl_Map/wfms_hardcoded_test/wfms_hardcoded_test.json diff --git a/tests/asl_Map/workflow_map_state_iro_paths_processing_test.data b/tests/asl_Map/wfms_iro_paths_processing_test.data similarity index 94% rename from tests/asl_Map/workflow_map_state_iro_paths_processing_test.data rename to tests/asl_Map/wfms_iro_paths_processing_test.data index 456045bf..3894a118 100644 --- a/tests/asl_Map/workflow_map_state_iro_paths_processing_test.data +++ b/tests/asl_Map/wfms_iro_paths_processing_test.data @@ -1,7 +1,7 @@ { "test_name": "Map State IRO Paths Processing Test", -"workflow_name": "workflow_map_state_iro_paths_processing_test/workflow_map_state_iro_paths_processing_test.json", +"workflow_name": 
"wfms_iro_paths_test/wfms_iro_paths_processing_test.json", "event": { "orderId": "12345678", diff --git a/tests/asl_Map/workflow_map_state_iro_paths_processing_test/checkAvailability.py b/tests/asl_Map/wfms_iro_paths_test/checkAvailability.py similarity index 100% rename from tests/asl_Map/workflow_map_state_iro_paths_processing_test/checkAvailability.py rename to tests/asl_Map/wfms_iro_paths_test/checkAvailability.py diff --git a/tests/asl_Map/workflow_map_state_iro_paths_processing_test/paymentFailed.py b/tests/asl_Map/wfms_iro_paths_test/paymentFailed.py similarity index 100% rename from tests/asl_Map/workflow_map_state_iro_paths_processing_test/paymentFailed.py rename to tests/asl_Map/wfms_iro_paths_test/paymentFailed.py diff --git a/tests/asl_Map/workflow_map_state_iro_paths_processing_test/prepareForDelivery.py b/tests/asl_Map/wfms_iro_paths_test/prepareForDelivery.py similarity index 100% rename from tests/asl_Map/workflow_map_state_iro_paths_processing_test/prepareForDelivery.py rename to tests/asl_Map/wfms_iro_paths_test/prepareForDelivery.py diff --git a/tests/asl_Map/workflow_map_state_iro_paths_processing_test/sendOrderSummary.py b/tests/asl_Map/wfms_iro_paths_test/sendOrderSummary.py similarity index 100% rename from tests/asl_Map/workflow_map_state_iro_paths_processing_test/sendOrderSummary.py rename to tests/asl_Map/wfms_iro_paths_test/sendOrderSummary.py diff --git a/tests/asl_Map/workflow_map_state_iro_paths_processing_test/startDelivery.py b/tests/asl_Map/wfms_iro_paths_test/startDelivery.py similarity index 100% rename from tests/asl_Map/workflow_map_state_iro_paths_processing_test/startDelivery.py rename to tests/asl_Map/wfms_iro_paths_test/startDelivery.py diff --git a/tests/asl_Map/workflow_map_state_iro_paths_processing_test/test.py b/tests/asl_Map/wfms_iro_paths_test/test.py similarity index 100% rename from tests/asl_Map/workflow_map_state_iro_paths_processing_test/test.py rename to tests/asl_Map/wfms_iro_paths_test/test.py diff --git 
a/tests/asl_Map/workflow_map_state_iro_paths_processing_test/validatePayment.py b/tests/asl_Map/wfms_iro_paths_test/validatePayment.py similarity index 100% rename from tests/asl_Map/workflow_map_state_iro_paths_processing_test/validatePayment.py rename to tests/asl_Map/wfms_iro_paths_test/validatePayment.py diff --git a/tests/asl_Map/workflow_map_state_iro_paths_processing_test/workflow_map_state_iro_paths_processing_test.json b/tests/asl_Map/wfms_iro_paths_test/wfms_iro_paths_processing_test.json similarity index 100% rename from tests/asl_Map/workflow_map_state_iro_paths_processing_test/workflow_map_state_iro_paths_processing_test.json rename to tests/asl_Map/wfms_iro_paths_test/wfms_iro_paths_processing_test.json diff --git a/tests/asl_Map/workflow_map_state_parameters_test.data b/tests/asl_Map/wfms_parameters_test.data similarity index 87% rename from tests/asl_Map/workflow_map_state_parameters_test.data rename to tests/asl_Map/wfms_parameters_test.data index e26d5f41..4dbed3e9 100644 --- a/tests/asl_Map/workflow_map_state_parameters_test.data +++ b/tests/asl_Map/wfms_parameters_test.data @@ -2,7 +2,7 @@ { "test_name": "Map State Parameters Test", -"workflow_name": "workflow_map_state_parameters_test/workflow_map_state_parameters_test.json", +"workflow_name": "wfms_parameters_test/wfms_parameters_test.json", "event": [{"who": "bob"},{"who": "meg"},{"who": "joe"}], diff --git a/tests/asl_Map/workflow_map_state_parameters_test/workflow_map_state_parameters_test.json b/tests/asl_Map/wfms_parameters_test/wfms_parameters_test.json similarity index 100% rename from tests/asl_Map/workflow_map_state_parameters_test/workflow_map_state_parameters_test.json rename to tests/asl_Map/wfms_parameters_test/wfms_parameters_test.json diff --git a/tests/asl_Map/workflow_map_state_test_mc0.json b/tests/asl_Map/wfms_test_mc0.json similarity index 100% rename from tests/asl_Map/workflow_map_state_test_mc0.json rename to tests/asl_Map/wfms_test_mc0.json diff --git 
a/tests/asl_Map/workflow_map_state_test_mc1.json b/tests/asl_Map/wfms_test_mc1.json similarity index 100% rename from tests/asl_Map/workflow_map_state_test_mc1.json rename to tests/asl_Map/wfms_test_mc1.json diff --git a/tests/asl_Map/workflow_map_state_test_mc2.json b/tests/asl_Map/wfms_test_mc2.json similarity index 100% rename from tests/asl_Map/workflow_map_state_test_mc2.json rename to tests/asl_Map/wfms_test_mc2.json diff --git a/tests/asl_Map/workflow_map_state_test_mc3.json b/tests/asl_Map/wfms_test_mc3.json similarity index 100% rename from tests/asl_Map/workflow_map_state_test_mc3.json rename to tests/asl_Map/wfms_test_mc3.json diff --git a/tests/asl_Map/workflow_map_state_thingspiratessay_test.data b/tests/asl_Map/wfms_thingspiratessay_test.data similarity index 82% rename from tests/asl_Map/workflow_map_state_thingspiratessay_test.data rename to tests/asl_Map/wfms_thingspiratessay_test.data index e40ae570..9af524e0 100644 --- a/tests/asl_Map/workflow_map_state_thingspiratessay_test.data +++ b/tests/asl_Map/wfms_thingspiratessay_test.data @@ -1,7 +1,7 @@ { "test_name": "Map State Pirates Say Test", -"workflow_name": "workflow_map_state_thingspiratessay_test/workflow_map_state_thingspiratessay_test.json", +"workflow_name": "wfms_thingspiratessay_test/wfms_thingspiratessay_test.json", "event" : {"ThingsPiratesSay": [{ "say": "Avast!"},{ diff --git a/tests/asl_Map/workflow_map_state_thingspiratessay_test/workflow_map_state_thingspiratessay_test.json b/tests/asl_Map/wfms_thingspiratessay_test/wfms_thingspiratessay_test.json similarity index 100% rename from tests/asl_Map/workflow_map_state_thingspiratessay_test/workflow_map_state_thingspiratessay_test.json rename to tests/asl_Map/wfms_thingspiratessay_test/wfms_thingspiratessay_test.json From a250fb39c884ef741bd21b3dd8a66f49fccf2c1d Mon Sep 17 00:00:00 2001 From: ksatzke Date: Mon, 12 Oct 2020 12:11:56 +0000 Subject: [PATCH 21/64] cleanup tests and values.yaml --- ManagementService/python/deployWorkflow.py | 
19 +++- deploy/helm/microfunctions/values.yaml | 6 +- tests/asl_Map/test.py | 5 +- tests/asl_Map_short/CallLambda.py | 6 -- tests/asl_Map_short/test.py | 81 ----------------- tests/asl_Map_short/wfms_context_test.data | 11 --- .../wfms_context_test/wfms_context.json | 23 ----- tests/asl_Map_short/wfms_delivery_test.data | 52 ----------- .../wfms_delivery_test/checkAvailability.py | 5 - .../wfms_delivery_test.json | 40 -------- tests/asl_Map_short/wfms_example_test.data | 21 ----- .../wfms_example_test/ship-val.py | 23 ----- .../wfms_example_test/wfms_example_test.json | 22 ----- tests/asl_Map_short/wfms_hardcoded_test.data | 6 -- .../wfms_hardcoded_test.json | 51 ----------- .../wfms_iro_paths_processing_test.data | 50 ---------- .../wfms_iro_paths_test/checkAvailability.py | 6 -- .../wfms_iro_paths_test/paymentFailed.py | 6 -- .../wfms_iro_paths_test/prepareForDelivery.py | 6 -- .../wfms_iro_paths_test/sendOrderSummary.py | 10 -- .../wfms_iro_paths_test/startDelivery.py | 6 -- .../asl_Map_short/wfms_iro_paths_test/test.py | 91 ------------------- .../wfms_iro_paths_test/validatePayment.py | 6 -- .../wfms_iro_paths_processing_test.json | 79 ---------------- tests/asl_Map_short/wfms_parameters_test.data | 21 ----- .../wfms_parameters_test.json | 24 ----- tests/asl_Map_short/wfms_test_mc0.json | 21 ----- tests/asl_Map_short/wfms_test_mc1.json | 21 ----- tests/asl_Map_short/wfms_test_mc2.json | 21 ----- tests/asl_Map_short/wfms_test_mc3.json | 21 ----- .../wfms_thingspiratessay_test.data | 18 ---- .../wfms_thingspiratessay_test.json | 19 ---- tests/mfn_test_utils.py | 3 +- 33 files changed, 21 insertions(+), 779 deletions(-) delete mode 100644 tests/asl_Map_short/CallLambda.py delete mode 100644 tests/asl_Map_short/test.py delete mode 100644 tests/asl_Map_short/wfms_context_test.data delete mode 100644 tests/asl_Map_short/wfms_context_test/wfms_context.json delete mode 100644 tests/asl_Map_short/wfms_delivery_test.data delete mode 100644 
tests/asl_Map_short/wfms_delivery_test/checkAvailability.py delete mode 100644 tests/asl_Map_short/wfms_delivery_test/wfms_delivery_test.json delete mode 100644 tests/asl_Map_short/wfms_example_test.data delete mode 100644 tests/asl_Map_short/wfms_example_test/ship-val.py delete mode 100644 tests/asl_Map_short/wfms_example_test/wfms_example_test.json delete mode 100644 tests/asl_Map_short/wfms_hardcoded_test.data delete mode 100644 tests/asl_Map_short/wfms_hardcoded_test/wfms_hardcoded_test.json delete mode 100644 tests/asl_Map_short/wfms_iro_paths_processing_test.data delete mode 100644 tests/asl_Map_short/wfms_iro_paths_test/checkAvailability.py delete mode 100644 tests/asl_Map_short/wfms_iro_paths_test/paymentFailed.py delete mode 100644 tests/asl_Map_short/wfms_iro_paths_test/prepareForDelivery.py delete mode 100644 tests/asl_Map_short/wfms_iro_paths_test/sendOrderSummary.py delete mode 100644 tests/asl_Map_short/wfms_iro_paths_test/startDelivery.py delete mode 100644 tests/asl_Map_short/wfms_iro_paths_test/test.py delete mode 100644 tests/asl_Map_short/wfms_iro_paths_test/validatePayment.py delete mode 100644 tests/asl_Map_short/wfms_iro_paths_test/wfms_iro_paths_processing_test.json delete mode 100644 tests/asl_Map_short/wfms_parameters_test.data delete mode 100644 tests/asl_Map_short/wfms_parameters_test/wfms_parameters_test.json delete mode 100644 tests/asl_Map_short/wfms_test_mc0.json delete mode 100644 tests/asl_Map_short/wfms_test_mc1.json delete mode 100644 tests/asl_Map_short/wfms_test_mc2.json delete mode 100644 tests/asl_Map_short/wfms_test_mc3.json delete mode 100644 tests/asl_Map_short/wfms_thingspiratessay_test.data delete mode 100644 tests/asl_Map_short/wfms_thingspiratessay_test/wfms_thingspiratessay_test.json diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index 5d7f07b8..8b279735 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ 
-316,13 +316,25 @@ def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, management=F env.append({'name': 'WORKFLOWNAME', 'value': workflow_info["workflowName"]}) # apply gpu_usage fraction to k8s deployment configuration + print("GPU sage in create_k8s_service: "+ str(gpu_usage)) use_gpus = gpu_usage - if not management and use_gpus >= 0 and runtime=="Python": + if runtime=="Java": # non gpu python function + # overwrite values from values.yaml for new workflows + #kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['nvidia.com/gpu'] = str(use_gpus) + #kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['nvidia.com/gpu'] = str(use_gpus) + kservice['spec']['template']['spec']['containers'][0]['image'] = "localhost:5000/microfn/sandbox_java" + + if not management and use_gpus == 0. and runtime=="Python": # non gpu python function + # overwrite values from values.yaml for new workflows + kservice['spec']['template']['spec']['containers'][0]['resources']['limits'].pop('nvidia.com/gpu', None) # ['nvidia.com/gpu'] = str(use_gpus) + kservice['spec']['template']['spec']['containers'][0]['resources']['requests'].pop('nvidia.com/gpu', None) # ['nvidia.com/gpu'] = str(use_gpus) + kservice['spec']['template']['spec']['containers'][0]['image'] = "localhost:5000/microfn/sandbox" + + if not management and use_gpus > 0. 
and runtime=="Python": # gpu using python function # overwrite values from values.yaml for new workflows kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['nvidia.com/gpu'] = str(use_gpus) kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['nvidia.com/gpu'] = str(use_gpus) - #kservice['spec']['template']['spec']['containers'][0]['image'] = "localhost:5000/microfn/sandbox" kservice['spec']['template']['spec']['containers'][0]['image'] = "localhost:5000/microfn/sandbox_gpu" # Special handling for the management container: never run on gpu @@ -366,6 +378,7 @@ def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, management=F print("ERROR deleting existing kservice") print(resp.text) + # no change for Java function print('Creating new kservice') resp = requests.post( "https://kubernetes.default:"+os.getenv("KUBERNETES_SERVICE_PORT_HTTPS")+"/apis/serving.knative.dev/v1/namespaces/"+namespace+"/services", @@ -496,7 +509,7 @@ def handle(value, sapi): else: gpu_usage = 0. 
- #print("deduced gpu_usage: " + str(gpu_usage)) + print("deduced gpu_usage: " + str(gpu_usage)) sapi.put("deployment_info_workflow_" + workflow["id"], json.dumps(deployment_info), True, False) diff --git a/deploy/helm/microfunctions/values.yaml b/deploy/helm/microfunctions/values.yaml index 0b298cfe..42ea4ea9 100644 --- a/deploy/helm/microfunctions/values.yaml +++ b/deploy/helm/microfunctions/values.yaml @@ -44,8 +44,8 @@ manager: cpu: 100m memory: 1Gi sandbox: - #imagePathPython: "/microfn/sandbox" - imagePathPython: "/microfn/sandbox_gpu" + imagePathPython: "/microfn/sandbox" + #imagePathPython: "/microfn/sandbox_gpu" imagePathJava: "/microfn/sandbox_java" imageTag: "latest" imagePullPolicy: "Always" @@ -53,11 +53,9 @@ manager: limits: cpu: 1 memory: 2Gi - nvidia.com/gpu: 1 requests: cpu: 1 memory: 1Gi - nvidia.com/gpu: 1 createServiceAccounts: true #------------------------------------------------------------------------------ diff --git a/tests/asl_Map/test.py b/tests/asl_Map/test.py index 463b71e6..2dba98c6 100644 --- a/tests/asl_Map/test.py +++ b/tests/asl_Map/test.py @@ -34,8 +34,8 @@ def test_map_state(self): "wfms_example_test.data", "wfms_parameters_test.data", "wfms_thingspiratessay_test.data", - #"wfms_iro_paths_processing_test.data", - #"wfms_hardcoded_test.data" + "wfms_iro_paths_processing_test.data", + "wfms_hardcoded_test.data" ] for file in file_list: @@ -43,7 +43,6 @@ def test_map_state(self): testtuplelist = [] data = json.load(json_input) testtuplelist.append((json.dumps(data["event"]), json.dumps(data["expectedResponse"]))) - print("CHECK: " +str(data["test_name"])+ " " + str(data["workflow_name"])) test = MFNTest(test_name=data["test_name"], workflow_filename=data["workflow_name"]) st = time.time() test.exec_tests(testtuplelist) diff --git a/tests/asl_Map_short/CallLambda.py b/tests/asl_Map_short/CallLambda.py deleted file mode 100644 index efaf655f..00000000 --- a/tests/asl_Map_short/CallLambda.py +++ /dev/null @@ -1,6 +0,0 @@ -import json 
-import time -def handle(event, context): - name = event["who"] - time.sleep(1.0) - return "Hello, %s!" % name diff --git a/tests/asl_Map_short/test.py b/tests/asl_Map_short/test.py deleted file mode 100644 index ab18b490..00000000 --- a/tests/asl_Map_short/test.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright 2020 The microfunctions Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import unittest -import os, sys -import json -import time -#import ast - -sys.path.append("../") -from mfn_test_utils import MFNTest - -class MapStateTest(unittest.TestCase): - """ - event = '[{"who": "bob"},{"who": "meg"},{"who": "joe"}]' - expectedResponse = '[{"ContextValue": {"who": "bob"}, "ContextIndex": 0},{"ContextValue": {"who": "meg"}, "ContextIndex": 1}, {"ContextValue": {"who": "joe"}, "ContextIndex": 2 }]' - test_map = [("asl_Map_State_Context_Data", "wfms_context_test/wfms_context_test.json", [(event, expectedResponse)])] - """ - - def test_map_state(self): - file_list = ["wfms_delivery_test.data", - "wfms_context_test.data", - "wfms_example_test.data", - "wfms_parameters_test.data", - "wfms_thingspiratessay_test.data", - "wfms_iro_paths_processing_test.data", - "wfms_hardcoded_test.data"] - - for file in file_list: - with open(file) as json_input: - testtuplelist = [] - data = json.load(json_input) - testtuplelist.append((json.dumps(data["event"]), json.dumps(data["expectedResponse"]))) - print("CHECK: " +str(data["test_name"])+ " " + str(data["workflow_name"])) - 
test = MFNTest(test_name=data["test_name"], workflow_filename=data["workflow_name"]) - st = time.time() - test.exec_tests(testtuplelist) - et = time.time() - print ("test duration (s): %s" % str(et-st)) - - for mc in range(4): # set maxConcurrency parameter - """ creates and executes the Map state test workflow from the ASL description """ - - testtuplelist = [] - - event = [{"who": "bob"}, {"who": "meg"}, {"who": "joe"}] - expectedResponse = ["Hello, bob!", "Hello, meg!", "Hello, joe!"] - testtuplelist.append((json.dumps(event), json.dumps(expectedResponse))) - - event = [{"who": "meg"}, {"who": "joe"}, {"who": "bob"}] - expectedResponse = ["Hello, meg!", "Hello, joe!", "Hello, bob!"] - testtuplelist.append((json.dumps(event), json.dumps(expectedResponse))) - - event = [{"who": "joe"}, {"who": "bob"}, {"who": "meg"}] - expectedResponse = ["Hello, joe!", "Hello, bob!", "Hello, meg!"] - testtuplelist.append((json.dumps(event), json.dumps(expectedResponse))) - - event = [{"who": "joe"}, {"who": "bob"}, {"who": "meg"}, {"who":"dave"}, {"who":"tom"}, {"who":"ray"}] - expectedResponse = ["Hello, joe!", "Hello, bob!", "Hello, meg!", "Hello, dave!", "Hello, tom!", "Hello, ray!"] - testtuplelist.append((json.dumps(event), json.dumps(expectedResponse))) - - test = MFNTest(test_name="Map State Test", workflow_filename=("wfms_test_mc%s.json" % mc)) - - print("MaxConcurrency level: %i " % mc) - - st = time.time() - test.exec_tests(testtuplelist) - et = time.time() - - print ("test duration (s): %s" % str(et-st)) diff --git a/tests/asl_Map_short/wfms_context_test.data b/tests/asl_Map_short/wfms_context_test.data deleted file mode 100644 index 1c4c1116..00000000 --- a/tests/asl_Map_short/wfms_context_test.data +++ /dev/null @@ -1,11 +0,0 @@ - -{ -"test_name": "Map State Context Test", - -"workflow_name": "wfms_context_test/wfms_context.json", - -"event": [{"who": "bob"},{"who": "meg"},{"who": "joe"}], - -"expectedResponse": [{"ContextValue": {"who": "bob"}, "ContextIndex": 
0},{"ContextValue": {"who": "meg"}, "ContextIndex": 1}, {"ContextValue": {"who": "joe"}, "ContextIndex": 2 }] - -} diff --git a/tests/asl_Map_short/wfms_context_test/wfms_context.json b/tests/asl_Map_short/wfms_context_test/wfms_context.json deleted file mode 100644 index 2586a44e..00000000 --- a/tests/asl_Map_short/wfms_context_test/wfms_context.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "StartAt": "ExampleMapState", - "States": { - "ExampleMapState": { - "Type": "Map", - "Parameters": { - "ContextIndex.$": "$$.Map.Item.Index", - "ContextValue.$": "$$.Map.Item.Value" - }, - "Iterator": { - "StartAt": "TestPass", - "States": { - "TestPass": { - "Type": "Pass", - "End": true - } - } - }, - "End": true - } - } -} - diff --git a/tests/asl_Map_short/wfms_delivery_test.data b/tests/asl_Map_short/wfms_delivery_test.data deleted file mode 100644 index 6cb77f4a..00000000 --- a/tests/asl_Map_short/wfms_delivery_test.data +++ /dev/null @@ -1,52 +0,0 @@ - -{"test_name": "Map State Delivery Test", - -"workflow_name": "wfms_delivery_test/wfms_delivery_test.json", - -"event": - {"orderId": "12345678", - "orderDate": "20190820101213", - "detail": { - "customerId": "1234", - "deliveryAddress": "123, Seattle, WA", - "deliverySpeed": "1-day", - "paymentMethod": "aCreditCard", - "items": [ - { - "productName": "Agile Software Development", - "category": "book", - "price": 60.0, - "quantity": 1 - }, - { - "productName": "Domain-Driven Design", - "category": "book", - "price": 32.0, - "quantity": 1 - }, - { - "productName": "The Mythical Man Month", - "category": "book", - "price": 18.0, - "quantity": 1 - }, - { - "productName": "The Art of Computer Programming", - "category": "book", - "price": 180.0, - "quantity": 1 - }, - { - "productName": "Ground Coffee, Dark Roast", - "category": "grocery", - "price": 8.0, - "quantity": 6 - } - ] - } -}, - -"expectedResponse": -[{"productName": "Agile Software Development", "category": "book", "price": 60.0, "quantity": 1}, {"productName": 
"Domain-Driven Design", "category": "book", "price": 32.0, "quantity": 1}, {"productName": "The Mythical Man Month", "category": "book", "price": 18.0, "quantity": 1}, {"productName": "The Art of Computer Programming", "category": "book", "price": 180.0, "quantity": 1}, {"productName": "Ground Coffee, Dark Roast", "category": "grocery", "price": 8.0, "quantity": 6}] -} - diff --git a/tests/asl_Map_short/wfms_delivery_test/checkAvailability.py b/tests/asl_Map_short/wfms_delivery_test/checkAvailability.py deleted file mode 100644 index 9440b45a..00000000 --- a/tests/asl_Map_short/wfms_delivery_test/checkAvailability.py +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/python - -def handle(event, context): - - return event diff --git a/tests/asl_Map_short/wfms_delivery_test/wfms_delivery_test.json b/tests/asl_Map_short/wfms_delivery_test/wfms_delivery_test.json deleted file mode 100644 index c92b20b2..00000000 --- a/tests/asl_Map_short/wfms_delivery_test/wfms_delivery_test.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "StartAt": "ProcessAllItems", - "States": { - "ProcessAllItems": { - "Type": "Map", - "InputPath": "$.detail", - "ItemsPath": "$.items", - "MaxConcurrency": 3, - "Iterator": { - "StartAt": "CheckAvailability", - "States": { - "CheckAvailability": { - "Type": "Task", - "Resource": "checkAvailability", - "Retry": [ - { - "ErrorEquals": [ - "TimeOut" - ], - "IntervalSeconds": 1, - "BackoffRate": 2, - "MaxAttempts": 3 - } - ], - "Next": "PrepareForDelivery" - }, - "PrepareForDelivery": { - "Type": "Pass", - "Next": "StartDelivery" - }, - "StartDelivery": { - "Type": "Pass", - "End": true - } - } - }, - "End": true - } - } -} diff --git a/tests/asl_Map_short/wfms_example_test.data b/tests/asl_Map_short/wfms_example_test.data deleted file mode 100644 index 0a94747d..00000000 --- a/tests/asl_Map_short/wfms_example_test.data +++ /dev/null @@ -1,21 +0,0 @@ - -{ -"test_name": "Map State Example Test", - -"workflow_name": "wfms_example_test/wfms_example_test.json", - -"event": 
{"ship-date": "2016-03-14T01:59:00Z", - "detail": - {"delivery-partner": "UQS", - "shipped": [ - { "prod": "R31", "dest-code": 9511, "quantity": 1344 }, - { "prod": "S39", "dest-code": 9511, "quantit_y": 40 }, - { "prod": "R31", "dest-code": 9833, "quantity": 12 }, - { "prod": "R40", "dest-code": 9860, "quantity": 887 }, - { "prod": "R40", "dest-code": 9511, "quantity": 1220 } - ] - } - }, -"expectedResponse": {"detail": {"shipped": ["All keys are OK!", "item OK!", "All keys are OK!", "All keys are OK!", "All keys are OK!"]}} -} - diff --git a/tests/asl_Map_short/wfms_example_test/ship-val.py b/tests/asl_Map_short/wfms_example_test/ship-val.py deleted file mode 100644 index 493a0e3a..00000000 --- a/tests/asl_Map_short/wfms_example_test/ship-val.py +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/python -""" -{ - "parcel": { - "prod": "R31", - "dest-code": 9511, - "quantity": 1344 - }, - "courier": "UQS" -} -""" - -def handle(event, context): - - ret = "item NOK!" - if ("courier" in event and "parcel" in event): # just check for the keys - ret = "item OK!" - par = event["parcel"] - if "prod" in par.keys() and "dest-code" in par.keys() and "quantity" in par.keys(): - ret = "All keys are OK!" 
- - return ret - diff --git a/tests/asl_Map_short/wfms_example_test/wfms_example_test.json b/tests/asl_Map_short/wfms_example_test/wfms_example_test.json deleted file mode 100644 index 6602a549..00000000 --- a/tests/asl_Map_short/wfms_example_test/wfms_example_test.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "StartAt": "Validate-All", - "States": { - "Validate-All": { - "End": true, - "Type": "Map", - "InputPath": "$.detail", - "ItemsPath": "$.shipped", - "ResultPath": "$.detail.shipped", - "Parameters": { - "parcel.$": "$$.Map.Item.Value", - "courier.$": "$.delivery-partner" - }, - "MaxConcurrency": 0, - "Iterator": { - "StartAt": "ship-val", - "States": { - "ship-val": {"Type": "Task", "Resource": "ship-val", "End": true}} -} -} -} -} diff --git a/tests/asl_Map_short/wfms_hardcoded_test.data b/tests/asl_Map_short/wfms_hardcoded_test.data deleted file mode 100644 index 5f8bb520..00000000 --- a/tests/asl_Map_short/wfms_hardcoded_test.data +++ /dev/null @@ -1,6 +0,0 @@ -{ -"test_name": "Map State Hardcoded Input Test", -"workflow_name": "wfms_hardcoded_test/wfms_hardcoded_test.json", -"event" : "trigger map state input", -"expectedResponse" : [{"Value": "Iterate", "Index": 0}, {"Value": "Over", "Index": 1}, {"Value": "This", "Index": 2}, {"Value": "Array", "Index": 3}] -} diff --git a/tests/asl_Map_short/wfms_hardcoded_test/wfms_hardcoded_test.json b/tests/asl_Map_short/wfms_hardcoded_test/wfms_hardcoded_test.json deleted file mode 100644 index efa81e28..00000000 --- a/tests/asl_Map_short/wfms_hardcoded_test/wfms_hardcoded_test.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "Comment": "An example of the Amazon States Language using a map state to process elements of an array with a max concurrency of 2.", - "StartAt": "HardcodedInputsState", - "States": { - "HardcodedInputsState": { - "Type": "Pass", - "Result": { - "array": [ - "Iterate", - "Over", - "This", - "Array" - ] - }, - "Next": "Map" - }, - "Map": { - "Type": "Map", - "ItemsPath": "$.array", - "MaxConcurrency": 0, 
- "Parameters": { - "Value.$": "$$.Map.Item.Value", - "Index.$": "$$.Map.Item.Index" - }, - "Next": "FinalState", - "Iterator": { - "StartAt": "You", - "States": { - "You": { - "Type": "Pass", - "Next": "Can" - }, - "Can": { - "Type": "Pass", - "Next": "Do" - }, - "Do": { - "Type": "Pass", - "Next": "Anything" - }, - "Anything": { - "Type": "Pass", - "End": true - } - } - } - }, - "FinalState": {"Type": "Pass", "End": true} - } -} - diff --git a/tests/asl_Map_short/wfms_iro_paths_processing_test.data b/tests/asl_Map_short/wfms_iro_paths_processing_test.data deleted file mode 100644 index 3894a118..00000000 --- a/tests/asl_Map_short/wfms_iro_paths_processing_test.data +++ /dev/null @@ -1,50 +0,0 @@ -{ -"test_name": "Map State IRO Paths Processing Test", - -"workflow_name": "wfms_iro_paths_test/wfms_iro_paths_processing_test.json", - -"event": { - "orderId": "12345678", - "orderDate": "20190820101213", - "detail": { - "customerId": "1234", - "deliveryAddress": "123, Seattle, WA", - "deliverySpeed": "1-day", - "paymentMethod": "aCreditCard", - "items": [ - { - "productName": "Agile Software Development", - "category": "book", - "price": 60.0, - "quantity": 1 - }, - { - "productName": "Domain-Driven Design", - "category": "book", - "price": 32.0, - "quantity": 1 - }, - { - "productName": "The Mythical Man Month", - "category": "book", - "price": 18.0, - "quantity": 1 - }, - { - "productName": "The Art of Computer Programming", - "category": "book", - "price": 180.0, - "quantity": 1 - }, - { - "productName": "Ground Coffee, Dark Roast", - "category": "grocery", - "price": 8.0, - "quantity": 6 - } - ] - } -}, - -"expectedResponse" : {"summary": "This is a test summary!", "detail": {"processedItems": [{"deliveryAddress": "123, Seattle, WA", "item": {"productName": "Agile Software Development", "category": "book", "price": 60.0, "quantity": 1}}, {"deliveryAddress": "123, Seattle, WA", "item": {"productName": "Domain-Driven Design", "category": "book", "price": 32.0, 
"quantity": 1}}, {"deliveryAddress": "123, Seattle, WA", "item": {"productName": "The Mythical Man Month", "category": "book", "price": 18.0, "quantity": 1}}, {"deliveryAddress": "123, Seattle, WA", "item": {"productName": "The Art of Computer Programming", "category": "book", "price": 180.0, "quantity": 1}}, {"deliveryAddress": "123, Seattle, WA", "item": {"productName": "Ground Coffee, Dark Roast", "category": "grocery", "price": 8.0, "quantity": 6}}]}} -} diff --git a/tests/asl_Map_short/wfms_iro_paths_test/checkAvailability.py b/tests/asl_Map_short/wfms_iro_paths_test/checkAvailability.py deleted file mode 100644 index 9816febc..00000000 --- a/tests/asl_Map_short/wfms_iro_paths_test/checkAvailability.py +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/python - -def handle(event, context): - - return event - diff --git a/tests/asl_Map_short/wfms_iro_paths_test/paymentFailed.py b/tests/asl_Map_short/wfms_iro_paths_test/paymentFailed.py deleted file mode 100644 index 4498557c..00000000 --- a/tests/asl_Map_short/wfms_iro_paths_test/paymentFailed.py +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/python - -def handle(event, context): - - return "payment failed!" - diff --git a/tests/asl_Map_short/wfms_iro_paths_test/prepareForDelivery.py b/tests/asl_Map_short/wfms_iro_paths_test/prepareForDelivery.py deleted file mode 100644 index 9816febc..00000000 --- a/tests/asl_Map_short/wfms_iro_paths_test/prepareForDelivery.py +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/python - -def handle(event, context): - - return event - diff --git a/tests/asl_Map_short/wfms_iro_paths_test/sendOrderSummary.py b/tests/asl_Map_short/wfms_iro_paths_test/sendOrderSummary.py deleted file mode 100644 index 357a4d63..00000000 --- a/tests/asl_Map_short/wfms_iro_paths_test/sendOrderSummary.py +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/python - -def handle(event, context): - context.log(str(event)) - - #for it in event: - # it["summary"] = "test" - - return "This is a test summary!" 
- diff --git a/tests/asl_Map_short/wfms_iro_paths_test/startDelivery.py b/tests/asl_Map_short/wfms_iro_paths_test/startDelivery.py deleted file mode 100644 index 9816febc..00000000 --- a/tests/asl_Map_short/wfms_iro_paths_test/startDelivery.py +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/python - -def handle(event, context): - - return event - diff --git a/tests/asl_Map_short/wfms_iro_paths_test/test.py b/tests/asl_Map_short/wfms_iro_paths_test/test.py deleted file mode 100644 index 61367b76..00000000 --- a/tests/asl_Map_short/wfms_iro_paths_test/test.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2020 The microfunctions Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -# Map state test motivated by stackovercloud article "New Step Functions Support for Dynamic Parallelism" -# https://www.stackovercloud.com/2019/09/18/new-step-functions-support-for-dynamic-parallelism/ - -import unittest -import os, sys -import json -import time - -sys.path.append("../") -from mfn_test_utils import MFNTest - -class MapStateHardcodedTest(unittest.TestCase): - - def test_map_state(self): - """ creates and executes the Map state test workflow from the ASL description """ - - testtuplelist = [] - - event = { - "orderId": "12345678", - "orderDate": "20190820101213", - "detail": { - "customerId": "1234", - "deliveryAddress": "123, Seattle, WA", - "deliverySpeed": "1-day", - "paymentMethod": "aCreditCard", - "items": [ - { - "productName": "Agile Software Development", - "category": "book", - "price": 60.0, - "quantity": 1 - }, - { - "productName": "Domain-Driven Design", - "category": "book", - "price": 32.0, - "quantity": 1 - }, - { - "productName": "The Mythical Man Month", - "category": "book", - "price": 18.0, - "quantity": 1 - }, - { - "productName": "The Art of Computer Programming", - "category": "book", - "price": 180.0, - "quantity": 1 - }, - { - "productName": "Ground Coffee, Dark Roast", - "category": "grocery", - "price": 8.0, - "quantity": 6 - } - ] - } -} - """ - expectedResponse = {"summary": "This is a test summary!", "processedItems": [{"deliveryAddress": "123, Seattle, WA", "item": {"productName": "Agile Software Development", "category": "book", "price": 60.0, "quantity": 1}}, {"deliveryAddress": "123, Seattle, WA", "item": {"productName": "Domain-Driven Design", "category": "book", "price": 32.0, "quantity": 1}}, {"deliveryAddress": "123, Seattle, WA", "item": {"productName": "The Mythical Man Month", "category": "book", "price": 18.0, "quantity": 1}}, {"deliveryAddress": "123, Seattle, WA", "item": {"productName": "The Art of Computer Programming", "category": "book", "price": 180.0, "quantity": 1}}, {"deliveryAddress": "123, 
Seattle, WA", "item": {"productName": "Ground Coffee, Dark Roast", "category": "grocery", "price": 8.0, "quantity": 6}}]} - """ - - expectedResponse = {"summary": "This is a test summary!", "detail": {"processedItems": [{"deliveryAddress": "123, Seattle, WA", "item": {"productName": "Agile Software Development", "category": "book", "price": 60.0, "quantity": 1}}, {"deliveryAddress": "123, Seattle, WA", "item": {"productName": "Domain-Driven Design", "category": "book", "price": 32.0, "quantity": 1}}, {"deliveryAddress": "123, Seattle, WA", "item": {"productName": "The Mythical Man Month", "category": "book", "price": 18.0, "quantity": 1}}, {"deliveryAddress": "123, Seattle, WA", "item": {"productName": "The Art of Computer Programming", "category": "book", "price": 180.0, "quantity": 1}}, {"deliveryAddress": "123, Seattle, WA", "item": {"productName": "Ground Coffee, Dark Roast", "category": "grocery", "price": 8.0, "quantity": 6}}]}} - - testtuplelist.append((json.dumps(event), json.dumps(expectedResponse))) - - test = MFNTest(test_name="Map State Input Items Result Output Path Test", workflow_filename="workflow_map_state_ior_path_test.json" ) - - st = time.time() - test.exec_tests(testtuplelist) - et = time.time() - - print ("test duration (s): %s" % str(et-st)) - diff --git a/tests/asl_Map_short/wfms_iro_paths_test/validatePayment.py b/tests/asl_Map_short/wfms_iro_paths_test/validatePayment.py deleted file mode 100644 index 79363aff..00000000 --- a/tests/asl_Map_short/wfms_iro_paths_test/validatePayment.py +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/python - -def handle(event, context): - event["payment"] = "Ok" - return event - diff --git a/tests/asl_Map_short/wfms_iro_paths_test/wfms_iro_paths_processing_test.json b/tests/asl_Map_short/wfms_iro_paths_test/wfms_iro_paths_processing_test.json deleted file mode 100644 index 00979d69..00000000 --- a/tests/asl_Map_short/wfms_iro_paths_test/wfms_iro_paths_processing_test.json +++ /dev/null @@ -1,79 +0,0 @@ -{ - 
"StartAt": "ValidatePayment", - "States": { - "ValidatePayment": { - "Type": "Task", - "Resource": "validatePayment", - "Next": "CheckPayment" - }, - "CheckPayment": { - "Type": "Choice", - "Choices": [ - { - "Not": { - "Variable": "$.payment", - "StringEquals": "Ok" - }, - "Next": "PaymentFailed" - } - ], - "Default": "ProcessAllItems" - }, - "PaymentFailed": { - "Type": "Task", - "Resource": "paymentFailed", - "End": true - }, - "ProcessAllItems": { - "Type": "Map", - "ItemsPath": "$.items", - "InputPath": "$.detail", - "MaxConcurrency": 3, - "Parameters": { - "index.$": "$$.Map.Item.Index", - "item.$": "$$.Map.Item.Value", - "deliveryAddress.$": "$.deliveryAddress" - }, - "Iterator": { - "StartAt": "CheckAvailability", - "States": { - "CheckAvailability": { - "Type": "Task", - "Resource": "checkAvailability", - "Retry": [ - { - "ErrorEquals": [ - "TimeOut" - ], - "IntervalSeconds": 1, - "BackoffRate": 2, - "MaxAttempts": 3 - } - ], - "Next": "PrepareForDelivery" - }, - "PrepareForDelivery": { - "Type": "Task", - "Resource": "prepareForDelivery", - "Next": "StartDelivery" - }, - "StartDelivery": { - "Type": "Task", - "Resource": "startDelivery", - "End": true - } - } - }, - "ResultPath": "$.detail.processedItems", - "Next": "SendOrderSummary" - }, - "SendOrderSummary": { - "Type": "Task", - "Resource": "sendOrderSummary", - "ResultPath": "$.summary", - "InputPath": "$.processedItems", - "End": true - } - } -} - diff --git a/tests/asl_Map_short/wfms_parameters_test.data b/tests/asl_Map_short/wfms_parameters_test.data deleted file mode 100644 index 4dbed3e9..00000000 --- a/tests/asl_Map_short/wfms_parameters_test.data +++ /dev/null @@ -1,21 +0,0 @@ - -{ -"test_name": "Map State Parameters Test", - -"workflow_name": "wfms_parameters_test/wfms_parameters_test.json", - -"event": [{"who": "bob"},{"who": "meg"},{"who": "joe"}], - -"expectedResponse" : {"result" :[{ - "ContextValue": { - "who": "bob" - },"ContextIndex": 0}, - { - "ContextValue": { - "who": "meg" - 
},"ContextIndex": 1}, - { - "ContextValue": { - "who": "joe" - }, "ContextIndex": 2}]} -} diff --git a/tests/asl_Map_short/wfms_parameters_test/wfms_parameters_test.json b/tests/asl_Map_short/wfms_parameters_test/wfms_parameters_test.json deleted file mode 100644 index fae88533..00000000 --- a/tests/asl_Map_short/wfms_parameters_test/wfms_parameters_test.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "StartAt": "ExampleMapState", - "States": { - "ExampleMapState": { - "Type": "Map", - "ResultPath": "$.result", - "Parameters": { - "ContextIndex.$": "$$.Map.Item.Index", - "ContextValue.$": "$$.Map.Item.Value" - }, - "Iterator": { - "StartAt": "TestPass", - "States": { - "TestPass": { - "Type": "Pass", - "End": true - } - } - }, - "End": true - } - } -} - diff --git a/tests/asl_Map_short/wfms_test_mc0.json b/tests/asl_Map_short/wfms_test_mc0.json deleted file mode 100644 index 924b8bd2..00000000 --- a/tests/asl_Map_short/wfms_test_mc0.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "StartAt": "PassTest1", - "States": { - "PassTest2": {"Type":"Pass", "End": true}, - "PassTest1": {"Type":"Pass", "Next":"ExampleMapState"}, - "ExampleMapState": { - "Next": "PassTest2", - "Type": "Map", - "MaxConcurrency": 0, - "Iterator": { - "StartAt": "PassTest5", - "States": { - "PassTest5": {"Type": "Pass", "Next":"PassTest3"}, - "PassTest3": {"Type": "Pass", "Next":"CallLambda"}, - "CallLambda": {"Type": "Task", "Resource": "CallLambda", "Next": "PassTest4"}, - "PassTest4": {"Type": "Pass", "End": true} -} -} -} -} -} diff --git a/tests/asl_Map_short/wfms_test_mc1.json b/tests/asl_Map_short/wfms_test_mc1.json deleted file mode 100644 index a62ee7ab..00000000 --- a/tests/asl_Map_short/wfms_test_mc1.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "StartAt": "PassTest1", - "States": { - "PassTest2": {"Type":"Pass", "End": true}, - "PassTest1": {"Type":"Pass", "Next":"ExampleMapState"}, - "ExampleMapState": { - "Next": "PassTest2", - "Type": "Map", - "MaxConcurrency": 1, - "Iterator": { - "StartAt": 
"PassTest5", - "States": { - "PassTest5": {"Type": "Pass", "Next":"PassTest3"}, - "PassTest3": {"Type": "Pass", "Next":"CallLambda"}, - "CallLambda": {"Type": "Task", "Resource": "CallLambda", "Next": "PassTest4"}, - "PassTest4": {"Type": "Pass", "End": true} -} -} -} -} -} diff --git a/tests/asl_Map_short/wfms_test_mc2.json b/tests/asl_Map_short/wfms_test_mc2.json deleted file mode 100644 index e69edffe..00000000 --- a/tests/asl_Map_short/wfms_test_mc2.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "StartAt": "PassTest1", - "States": { - "PassTest2": {"Type":"Pass", "End": true}, - "PassTest1": {"Type":"Pass", "Next":"ExampleMapState"}, - "ExampleMapState": { - "Next": "PassTest2", - "Type": "Map", - "MaxConcurrency": 2, - "Iterator": { - "StartAt": "PassTest5", - "States": { - "PassTest5": {"Type": "Pass", "Next":"PassTest3"}, - "PassTest3": {"Type": "Pass", "Next":"CallLambda"}, - "CallLambda": {"Type": "Task", "Resource": "CallLambda", "Next": "PassTest4"}, - "PassTest4": {"Type": "Pass", "End": true} -} -} -} -} -} diff --git a/tests/asl_Map_short/wfms_test_mc3.json b/tests/asl_Map_short/wfms_test_mc3.json deleted file mode 100644 index d19b09b1..00000000 --- a/tests/asl_Map_short/wfms_test_mc3.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "StartAt": "PassTest1", - "States": { - "PassTest2": {"Type":"Pass", "End": true}, - "PassTest1": {"Type":"Pass", "Next":"ExampleMapState"}, - "ExampleMapState": { - "Next": "PassTest2", - "Type": "Map", - "MaxConcurrency": 3, - "Iterator": { - "StartAt": "PassTest5", - "States": { - "PassTest5": {"Type": "Pass", "Next":"PassTest3"}, - "PassTest3": {"Type": "Pass", "Next":"CallLambda"}, - "CallLambda": {"Type": "Task", "Resource": "CallLambda", "Next": "PassTest4"}, - "PassTest4": {"Type": "Pass", "End": true} -} -} -} -} -} diff --git a/tests/asl_Map_short/wfms_thingspiratessay_test.data b/tests/asl_Map_short/wfms_thingspiratessay_test.data deleted file mode 100644 index 9af524e0..00000000 --- 
a/tests/asl_Map_short/wfms_thingspiratessay_test.data +++ /dev/null @@ -1,18 +0,0 @@ -{ -"test_name": "Map State Pirates Say Test", - -"workflow_name": "wfms_thingspiratessay_test/wfms_thingspiratessay_test.json", - -"event" : {"ThingsPiratesSay": [{ - "say": "Avast!"},{ - "say": "Yar!"},{ - "say": "Walk the Plank!"}], - "ThingsGiantsSay": [{ - "say": "Fee!"},{ - "say": "Fi!"},{ - "say": "Fo!"},{ - "say": "Fum!"}] - }, -"expectedResponse" : [ {"say": "Avast!" },{ "say": "Yar!" }, { "say": "Walk the Plank!" } ] -} - diff --git a/tests/asl_Map_short/wfms_thingspiratessay_test/wfms_thingspiratessay_test.json b/tests/asl_Map_short/wfms_thingspiratessay_test/wfms_thingspiratessay_test.json deleted file mode 100644 index 98b3cbec..00000000 --- a/tests/asl_Map_short/wfms_thingspiratessay_test/wfms_thingspiratessay_test.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "StartAt": "PiratesSay", - "States": { - "PiratesSay": { - "Type": "Map", - "ItemsPath": "$.ThingsPiratesSay", - "Iterator": { - "StartAt": "SayWord", - "States": { - "SayWord": { - "Type": "Pass", - "End": true - } - } - }, - "End": true - } - } -} diff --git a/tests/mfn_test_utils.py b/tests/mfn_test_utils.py index 39589fe7..fd65ee03 100644 --- a/tests/mfn_test_utils.py +++ b/tests/mfn_test_utils.py @@ -85,7 +85,6 @@ def __init__(self, test_name=None, timeout=None, workflow_filename=None, new_use if gpu_usage is not None: self._settings["gpu_usage"] = gpu_usage - #self._gpu_usage = self._settings["num_gpu"] self._log_clear_timestamp = int(time.time() * 1000.0 * 1000.0) @@ -97,7 +96,7 @@ def __init__(self, test_name=None, timeout=None, workflow_filename=None, new_use self.upload_workflow() self.deploy_workflow() - time.sleep(5) + time.sleep(15) def _get_json_file(self, filename): json_data = {} From e334db14036fdea29da359d327908504fb0e5b2c Mon Sep 17 00:00:00 2001 From: ksatzke Date: Mon, 12 Oct 2020 15:16:36 +0000 Subject: [PATCH 22/64] final adjustments to values.yaml --- deploy/helm/microfunctions/values.yaml | 
13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/deploy/helm/microfunctions/values.yaml b/deploy/helm/microfunctions/values.yaml index 42ea4ea9..4ac0a205 100644 --- a/deploy/helm/microfunctions/values.yaml +++ b/deploy/helm/microfunctions/values.yaml @@ -19,8 +19,8 @@ #------------------------------------------------------------------------------ # MicroFunction management workflow #------------------------------------------------------------------------------ -#imageRepo: "registry.kube-system.svc.cluster.local" -imageRepo: "localhost:5000" # kubespray deployment: imageRepo: "localhost:5000" +imageRepo: "registry.kube-system.svc.cluster.local" +#imageRepo: "localhost:5000" # kubespray deployment manager: #httpProxy: "http://:" #httpsProxy: "http://:" @@ -45,7 +45,6 @@ manager: memory: 1Gi sandbox: imagePathPython: "/microfn/sandbox" - #imagePathPython: "/microfn/sandbox_gpu" imagePathJava: "/microfn/sandbox_java" imageTag: "latest" imagePullPolicy: "Always" @@ -62,8 +61,8 @@ manager: # MicroFunction Datalayer #------------------------------------------------------------------------------ datalayer: - replicas: 1 # kubespray deployment - #replicas: 3 + #replicas: 1 # kubespray deployment + replicas: 3 imagePath: "/microfn/datalayer" imageTag: "latest" imagePullPolicy: "Always" @@ -81,8 +80,8 @@ datalayer: # Riak global data storage #------------------------------------------------------------------------------ riak: - replicas: 1 # kubespray deployment - #replicas: 3 + #replicas: 1 # kubespray deployment + replicas: 3 imagePath: "/microfn/riak" imageTag: "latest" imagePullPolicy: "Always" From 297a8e0a59aa00dc5d8f2f6fc3781fd31a41f118 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Tue, 13 Oct 2020 14:30:04 +0000 Subject: [PATCH 23/64] addressing comments from PR review, first part --- ManagementService/python/addWorkflow.py | 2 +- Sandbox/Dockerfile_gpu | 4 +- Sandbox/Dockerfile_java_gpu | 66 ------------------ Sandbox/Makefile | 2 +- 
mfn_sdk/mfn_sdk/mfnclient.py | 2 +- .../requirements/facer_requirements.txt | 1 + tests/asl_Face_Recognition/zips/facer.zip | Bin 97352 -> 97181 bytes .../python/requirements.txt | 1 - .../requirements/tensorf_requirements.txt} | 0 tests/mfn_test_utils.py | 4 +- 10 files changed, 8 insertions(+), 74 deletions(-) delete mode 100644 Sandbox/Dockerfile_java_gpu create mode 100644 tests/asl_Face_Recognition/requirements/facer_requirements.txt delete mode 100644 tests/asl_Tensorflow_HelloWorld/python/requirements.txt rename tests/{asl_DLIB/python/requirements.txt => asl_Tensorflow_HelloWorld/requirements/tensorf_requirements.txt} (100%) diff --git a/ManagementService/python/addWorkflow.py b/ManagementService/python/addWorkflow.py index 9731c1d1..52a22c36 100644 --- a/ManagementService/python/addWorkflow.py +++ b/ManagementService/python/addWorkflow.py @@ -19,7 +19,7 @@ def handle(value, sapi): assert isinstance(value, dict) - data = value + data = value # data may contain "null" value response = {} response_data = {} diff --git a/Sandbox/Dockerfile_gpu b/Sandbox/Dockerfile_gpu index 52ca2ef1..66b189d1 100644 --- a/Sandbox/Dockerfile_gpu +++ b/Sandbox/Dockerfile_gpu @@ -53,8 +53,8 @@ RUN cmake --build /dlib/build RUN cd /dlib; python3 /dlib/setup.py install # Install the face recognition package and tensorflow -RUN pip3 install face_recognition -RUN pip3 install tensorflow==2.1.0 +#RUN pip3 install face_recognition +#RUN pip3 install tensorflow==2.1.0 # Java (for queue service) RUN apt-get -y --no-install-recommends install openjdk-8-jdk-headless diff --git a/Sandbox/Dockerfile_java_gpu b/Sandbox/Dockerfile_java_gpu deleted file mode 100644 index 507d0975..00000000 --- a/Sandbox/Dockerfile_java_gpu +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2020 The KNIX Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -#FROM ubuntu:18.04 -FROM nvidia/cuda:10.1-cudnn-devel-ubuntu18.04 - -# Install (as root) -# Base -RUN apt-get update --fix-missing -RUN apt-get -y --no-install-recommends install build-essential -RUN apt-get -y --no-install-recommends install netbase unzip file libmagic1 - -# Python -RUN apt-get -y --no-install-recommends install python3 python3-dev -RUN apt-get -y --no-install-recommends install python3-pip -RUN apt-get -y --no-install-recommends install zlib1g libssl1.0 libsasl2-2 ca-certificates - -RUN /usr/bin/python3 -m pip install --upgrade pip - -RUN /usr/bin/python3 -m pip install setuptools -RUN /usr/bin/python3 -m pip install thrift>=0.12.0 -RUN /usr/bin/python3 -m pip install anytree -RUN /usr/bin/python3 -m pip install ujsonpath -RUN /usr/bin/python3 -m pip install requests -RUN /usr/bin/python3 -m pip install retry -# remove warnings from anytree package -RUN /usr/bin/python3 -m pip install fastcache -# Needed for multi-language support (currently just Java) -RUN /usr/bin/python3 -m pip install thriftpy2 - -# Java -RUN apt-get -y --no-install-recommends install openjdk-8-jdk-headless - -RUN apt-get -y --no-install-recommends install maven - -# Add components (as mfn) -RUN groupadd -o -g 1000 -r mfn && useradd -d /opt/mfn -u 1000 -m -r -g mfn mfn -RUN mkdir /opt/mfn/logs - -COPY build/queueservice.jar /opt/mfn/ -ADD frontend/frontend /opt/mfn/frontend -ADD build/SandboxAgent.tar.gz /opt/mfn/ -ADD build/FunctionWorker.tar.gz /opt/mfn/ -ADD build/LoggingService.tar.gz /opt/mfn/ - -ADD build/JavaRequestHandler.tar.gz /opt/mfn/ - -RUN chmod +x 
/opt/mfn/JavaRequestHandler/setup_maven.sh -RUN /opt/mfn/JavaRequestHandler/./setup_maven.sh True -RUN mvn -Duser.home=/tmp -DskipTests -gs /opt/mfn/JavaRequestHandler/maven/sandbox-mvn-settings.xml -f /opt/mfn/JavaRequestHandler/maven/init-mvn.pom.xml dependency:resolve-plugins - -RUN chown mfn:mfn -R /opt/mfn -USER mfn -WORKDIR /opt/mfn -CMD ["python3", "/opt/mfn/SandboxAgent/sandboxagent.py"] diff --git a/Sandbox/Makefile b/Sandbox/Makefile index 54454208..749940c1 100644 --- a/Sandbox/Makefile +++ b/Sandbox/Makefile @@ -116,7 +116,7 @@ image_java: \ build/SandboxAgent.tar.gz $(call build_image,Dockerfile_java,microfn/sandbox_java) -push: image image_java +push: image image_gpu image_java $(call push_image,microfn/sandbox) $(call push_image,microfn/sandbox_gpu) $(call push_image,microfn/sandbox_java) diff --git a/mfn_sdk/mfn_sdk/mfnclient.py b/mfn_sdk/mfn_sdk/mfnclient.py index 394b2933..463b6837 100644 --- a/mfn_sdk/mfn_sdk/mfnclient.py +++ b/mfn_sdk/mfn_sdk/mfnclient.py @@ -449,7 +449,7 @@ def _get_state_names_and_resource(self, desired_state_type, wf_dict): return state_list - def add_workflow(self,name,filename=None, gpu_usage="None"): + def add_workflow(self,name,filename=None, gpu_usage=None): """ add a workflow returns an existing workflow if the name exists, registers a new workflow name if it doesn't exist diff --git a/tests/asl_Face_Recognition/requirements/facer_requirements.txt b/tests/asl_Face_Recognition/requirements/facer_requirements.txt new file mode 100644 index 00000000..23d75aa7 --- /dev/null +++ b/tests/asl_Face_Recognition/requirements/facer_requirements.txt @@ -0,0 +1 @@ +face_recognition diff --git a/tests/asl_Face_Recognition/zips/facer.zip b/tests/asl_Face_Recognition/zips/facer.zip index aca9d5b9a8e3b514369f478cfb926512e5694822..d7e982551c80a57021597e4434a255855526fc39 100644 GIT binary patch delta 37 ncmX@{gLUqA)`l&NZ>pv{Rx{2OVP#_g0wy4w3#4CFF#<^d0Nn}@ delta 207 zcmbRHo%O^I)`l&NZ>oZsS-2RO7#J9g-A)GhIagV81KA)fz#zj=lv-GtS(KWanpaY+ 
zS5i?D8p6rItmJz?&NKRcTxkV210%~TW(Ec@QIeWhoL`idlV8p?{Z$p?{47Q$Ic8i& nO8_lkU| Date: Tue, 20 Oct 2020 14:17:15 +0200 Subject: [PATCH 24/64] adding ansible inventory group for GPU host configuration --- deploy/ansible/README.md | 1 + deploy/ansible/inventory.cfg.sample | 2 ++ deploy/ansible/scripts/install-nvidia-docker.sh | 2 +- 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/deploy/ansible/README.md b/deploy/ansible/README.md index 4057560e..fdfed5b2 100644 --- a/deploy/ansible/README.md +++ b/deploy/ansible/README.md @@ -110,6 +110,7 @@ To setup KNIX on localhost, or a single remote host, or a cluster of hosts # For a cluster of hosts (preferably 3 or more), all host names must be added to [riak] group. # Only one host name (referred to as the ) should be added for other groups [elasticsearch], [management], [frontend], and [nginx]. + # Hosts with a NVIDIA GPU should be added to group [gpu_workstations] so that they can be used by KNIX workflow deployments. ``` 3. Update `settings.json` diff --git a/deploy/ansible/inventory.cfg.sample b/deploy/ansible/inventory.cfg.sample index b02de809..524d6bbd 100644 --- a/deploy/ansible/inventory.cfg.sample +++ b/deploy/ansible/inventory.cfg.sample @@ -10,3 +10,5 @@ knix-test [nginx] knix-test +[gpu_workstations] +gpuhost diff --git a/deploy/ansible/scripts/install-nvidia-docker.sh b/deploy/ansible/scripts/install-nvidia-docker.sh index 7149ecf8..c60d6e89 100755 --- a/deploy/ansible/scripts/install-nvidia-docker.sh +++ b/deploy/ansible/scripts/install-nvidia-docker.sh @@ -16,7 +16,7 @@ git clone https://github.com/NVIDIA/ansible-role-nvidia-docker.git ansible-role-nvidia-docker cd ansible-role-nvidia-docker cp ../installnvidiadocker.yaml tests/installnvidiadocker.yaml -ansible-playbook --inventory ../inventory.cfg tests/installnvidiadocker.yaml +ansible-playbook gpu_workstations --inventory ../inventory.cfg tests/installnvidiadocker.yaml cd .. 
rm -r -f ansible-role-nvidia-docker From 6c3efd5a73909c23dea4926e2ffc562c44827c08 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Thu, 5 Nov 2020 10:16:53 +0000 Subject: [PATCH 25/64] fixing errors in GPU deployment description --- deploy/helm/microfunctions/README_GPU_Installation.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/deploy/helm/microfunctions/README_GPU_Installation.md b/deploy/helm/microfunctions/README_GPU_Installation.md index 908c4fec..23238259 100644 --- a/deploy/helm/microfunctions/README_GPU_Installation.md +++ b/deploy/helm/microfunctions/README_GPU_Installation.md @@ -166,7 +166,7 @@ Note: This drop-in only works with kubeadm and kubelet v1.11+ ```bat [Service] -Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf –kubeconfig=/etc/kubernetes/kubelet.conf" +Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf -–kubeconfig=/etc/kubernetes/kubelet.conf" Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml" Environment="KUBELET_EXTRA_ARGS=--feature-gates=DevicePlugins=true" @@ -179,8 +179,12 @@ EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env EnvironmentFile=-/etc/default/kubelet ExecStart= -ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGSOn the GPU node, reload and restart kubelet to apply previous changes to the configuration. +ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS +``` + +On the GPU node, reload and restart kubelet to apply previous changes to the configuration. 
+```bat sudo systemctl daemon-reload sudo systemctl restart kubelet ``` From eaae6be3e7b1c6e5ccff96ebce16d978b44cdf85 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Thu, 5 Nov 2020 10:26:31 +0000 Subject: [PATCH 26/64] fixing errors in GPU deployment description --- deploy/helm/microfunctions/README_GPU_Installation.md | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy/helm/microfunctions/README_GPU_Installation.md b/deploy/helm/microfunctions/README_GPU_Installation.md index 23238259..2589c817 100644 --- a/deploy/helm/microfunctions/README_GPU_Installation.md +++ b/deploy/helm/microfunctions/README_GPU_Installation.md @@ -180,6 +180,7 @@ EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env EnvironmentFile=-/etc/default/kubelet ExecStart= ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS + ``` On the GPU node, reload and restart kubelet to apply previous changes to the configuration. From f7070105e25c3d5ae3ffb28028bc39bd67c894c4 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Thu, 5 Nov 2020 10:36:46 +0000 Subject: [PATCH 27/64] fixing errors in GPU deployment description --- deploy/helm/microfunctions/README_GPU_Installation.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/deploy/helm/microfunctions/README_GPU_Installation.md b/deploy/helm/microfunctions/README_GPU_Installation.md index 2589c817..578ce849 100644 --- a/deploy/helm/microfunctions/README_GPU_Installation.md +++ b/deploy/helm/microfunctions/README_GPU_Installation.md @@ -254,7 +254,8 @@ kubectl label nodes gpuhost accelerator=nvidia-gtx-1050 ``` 13. To check nodes for accelerator label, run -```bat kubectl get nodes -L accelerator +```bat +kubectl get nodes -L accelerator ``` on Kubernetes master. 
From 46577ada8cc0edb70d16d1613a89069b159f42a6 Mon Sep 17 00:00:00 2001 From: li80211 Date: Wed, 11 Nov 2020 12:18:50 +0100 Subject: [PATCH 28/64] 1st cut on API modifications to allow configuration of mfn GPU requirements with GUI --- GUI/app/pages/functions/FunctionTableCtrl.js | 6 +++--- GUI/app/pages/workflows/WorkflowImportCtrl.js | 2 +- ManagementService/python/addFunction.py | 1 + .../schema/mfndata-workflow-example.json | 4 +++- mfn_sdk/mfn_sdk/mfnclient.py | 12 ++++++------ tests/mfn_test_utils.py | 2 +- 6 files changed, 15 insertions(+), 12 deletions(-) diff --git a/GUI/app/pages/functions/FunctionTableCtrl.js b/GUI/app/pages/functions/FunctionTableCtrl.js index ae824cf9..d06e741b 100644 --- a/GUI/app/pages/functions/FunctionTableCtrl.js +++ b/GUI/app/pages/functions/FunctionTableCtrl.js @@ -636,7 +636,7 @@ }, - data: JSON.stringify({ "action" : "addFunction", "data" : { "user" : { "token" : token } , "function" : { "name" : mFunction.name, "runtime" : mFunction.runtime } } }) + data: JSON.stringify({ "action" : "addFunction", "data" : { "user" : { "token" : token } , "function" : { "name" : mFunction.name, "runtime" : mFunction.runtime, "gpu_usage": mFunction.gpu_usage } } }) } $http(req).then(function successCallback(response) { @@ -646,7 +646,7 @@ console.log('new function id:' + response.data.data.function.id); toastr.success('Your function has been created successfully!'); $scope.reloadFunctions(); - $scope.open('app/pages/functions/modals/codeEditorModal.html', 'lg', mFunction.id, mFunction.name, mFunction.status, mFunction.runtime); + $scope.open('app/pages/functions/modals/codeEditorModal.html', 'lg', mFunction.id, mFunction.name, mFunction.status, mFunction.runtime, mFunction.gpu_usage); } else { console.log("Failure status returned by addFunction"); @@ -690,7 +690,7 @@ }, - data: JSON.stringify({ "action" : "modifyFunction", "data" : { "user" : { "token" : token } , "function" : { "id": mFunction.id, "name" : mFunction.name, "runtime" : 
mFunction.runtime } } }) + data: JSON.stringify({ "action" : "modifyFunction", "data" : { "user" : { "token" : token } , "function" : { "id": mFunction.id, "name" : mFunction.name, "runtime" : mFunction.runtime, "gpu_usage" : mFunction.gpu_usage } } }) } $http(req).then(function successCallback(response) { diff --git a/GUI/app/pages/workflows/WorkflowImportCtrl.js b/GUI/app/pages/workflows/WorkflowImportCtrl.js index 5c5f43e0..8842e251 100644 --- a/GUI/app/pages/workflows/WorkflowImportCtrl.js +++ b/GUI/app/pages/workflows/WorkflowImportCtrl.js @@ -658,7 +658,7 @@ }, - data: JSON.stringify({ "action" : "addFunction", "data" : { "user" : { "token" : token } , "function" : { "name" : functionName, "runtime" : functionRuntime } } }) + data: JSON.stringify({ "action" : "addFunction", "data" : { "user" : { "token" : token } , "function" : { "name" : functionName, "runtime" : functionRuntime, "gpu_usage": functionGpuusage } } }) } $http(req).then(function successCallback(response) { diff --git a/ManagementService/python/addFunction.py b/ManagementService/python/addFunction.py index 8e7c52ae..2ce4f372 100644 --- a/ManagementService/python/addFunction.py +++ b/ManagementService/python/addFunction.py @@ -42,6 +42,7 @@ def handle(value, sapi): else: f["name"] = function["name"] f["runtime"] = function["runtime"] + f["gpu_usage"] = function["gpu_usage"] f["modified"] = time.time() f["id"] = hashlib.md5(str(uuid.uuid4()).encode()).hexdigest() diff --git a/ManagementService/schema/mfndata-workflow-example.json b/ManagementService/schema/mfndata-workflow-example.json index 6f0a0933..97696a55 100644 --- a/ManagementService/schema/mfndata-workflow-example.json +++ b/ManagementService/schema/mfndata-workflow-example.json @@ -52,6 +52,7 @@ "addFunction": { "name": "addFunction", "runtime": "Python 3.7", + "gpu_usage": "0.", "id": "00000000-0000-0000-0000-222222222222", "modified": 1581498771.096527 } @@ -92,6 +93,7 @@ "name":"addFunction", 
"id":"00000000-0000-0000-0000-222222222222", "runtime":"Python 3.7", + "gpu_usage":"0.", "type": "code", "ref": "grain_source_00000000-0000-0000-0000-222222222222" } @@ -160,4 +162,4 @@ "sbox_11111111-1111-1111-1111-111111111111;wf_maps_11111111-1111-1111-1111-111111111111": {}, "sbox_11111111-1111-1111-1111-111111111111;wf_sets_11111111-1111-1111-1111-111111111111": {}, "sbox_11111111-1111-1111-1111-111111111111;wf_counters_11111111-1111-1111-1111-111111111111": {} -} \ No newline at end of file +} diff --git a/mfn_sdk/mfn_sdk/mfnclient.py b/mfn_sdk/mfn_sdk/mfnclient.py index 463b6837..43030c23 100644 --- a/mfn_sdk/mfn_sdk/mfnclient.py +++ b/mfn_sdk/mfn_sdk/mfnclient.py @@ -330,14 +330,14 @@ def find_function(self,name): return res[0] @deprecated(reason="Grains have been renamed to functions, use add_function(..) instead") - def addGrain(self,name,runtime='Python 3.6'): - return self.add_function(name,runtime) + def addGrain(self,name,runtime='Python 3.6',gpu_usage="0."): + return self.add_function(name,runtime,gpu_usage) @deprecated(reason="Grains have been renamed to functions, use add_function(..) 
instead") - def add_grain(self,name,runtime='Python 3.6'): - return self.add_function(name, runtime) + def add_grain(self,name,runtime='Python 3.6',gpu_usage="0."): + return self.add_function(name, runtime, gpu_usage) - def add_function(self,name,runtime='Python 3.6'): + def add_function(self,name,runtime='Python 3.6',gpu_usage="0."): """ add a function returns an existing function if the name exists, registers a new function name if it doesn't exist @@ -346,7 +346,7 @@ def add_function(self,name,runtime='Python 3.6'): for f in self.functions: if f._name == name: return f - data = self.action('addFunction',{'function':{'name':name,'runtime':runtime}}) + data = self.action('addFunction',{'function':{'name':name,'runtime':runtime, 'gpu_usage': gpu_usage}}) gd = data['function'] f = Function(self,gd) self._functions.append(f) diff --git a/tests/mfn_test_utils.py b/tests/mfn_test_utils.py index 10f4f5db..4703975e 100644 --- a/tests/mfn_test_utils.py +++ b/tests/mfn_test_utils.py @@ -226,7 +226,7 @@ def _get_resource_info_map(self, workflow_description=None, resource_info_map=No else: print("ERROR: invalid workflow description.") assert False - + print("RESOURCE_INFO_MAPi: " + json.dumps(resource_info_map)) return resource_info_map def _delete_resource_if_existing(self, existing_resources, resource_name): From dd3a1c381a794923691a8b96f8628a237871bcf5 Mon Sep 17 00:00:00 2001 From: abeckn Date: Wed, 11 Nov 2020 17:54:54 +0000 Subject: [PATCH 29/64] adding GUI support for indicating assigned GPU cores in function table --- GUI/app/pages/functions/FunctionTableCtrl.js | 6 +++--- GUI/app/pages/functions/widgets/editableRowTable.html | 8 +++++++- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/GUI/app/pages/functions/FunctionTableCtrl.js b/GUI/app/pages/functions/FunctionTableCtrl.js index d06e741b..0ef7094d 100644 --- a/GUI/app/pages/functions/FunctionTableCtrl.js +++ b/GUI/app/pages/functions/FunctionTableCtrl.js @@ -171,7 +171,6 @@ return selected.length ? 
mFunction.runtime : 'Not set'; }; - function createTemporaryWorkflow(functionIndex) { var req = { method: 'POST', @@ -636,7 +635,7 @@ }, - data: JSON.stringify({ "action" : "addFunction", "data" : { "user" : { "token" : token } , "function" : { "name" : mFunction.name, "runtime" : mFunction.runtime, "gpu_usage": mFunction.gpu_usage } } }) + data: JSON.stringify({ "action" : "addFunction", "data" : { "user" : { "token" : token } , "function" : { "name" : mFunction.name, "runtime" : mFunction.runtime, "gpu_usage": mFunction.gpu_cores } } }) } $http(req).then(function successCallback(response) { @@ -690,7 +689,7 @@ }, - data: JSON.stringify({ "action" : "modifyFunction", "data" : { "user" : { "token" : token } , "function" : { "id": mFunction.id, "name" : mFunction.name, "runtime" : mFunction.runtime, "gpu_usage" : mFunction.gpu_usage } } }) + data: JSON.stringify({ "action" : "modifyFunction", "data" : { "user" : { "token" : token } , "function" : { "id": mFunction.id, "name" : mFunction.name, "runtime" : mFunction.runtime, "gpu_usage" : mFunction.gpu_cores } } }) } $http(req).then(function successCallback(response) { @@ -736,6 +735,7 @@ name: '', status: 'undeployed', runtime: 'Python 3.6', + gpu_usage: '0', modified: '0' }; $scope.functions.push($scope.inserted); diff --git a/GUI/app/pages/functions/widgets/editableRowTable.html b/GUI/app/pages/functions/widgets/editableRowTable.html index 8712e8ab..c7dd3f84 100644 --- a/GUI/app/pages/functions/widgets/editableRowTable.html +++ b/GUI/app/pages/functions/widgets/editableRowTable.html @@ -24,6 +24,7 @@ Name Runtime + Assigned GPU Cores Last Modified Actions @@ -45,6 +46,11 @@ {{ showRuntime(mFunction) }} + + + {{ mFunction.gpu_usage || 'Not set' }} + + {{ showLastModified(mFunction) }} @@ -66,7 +72,7 @@

- + From 4d734faf14a7ac5db315a96f40efd91ff0e5b980 Mon Sep 17 00:00:00 2001 From: abeckn Date: Wed, 11 Nov 2020 17:59:42 +0000 Subject: [PATCH 30/64] adding GUI support for indicating assigned GPU cores in function table (bugfix) --- GUI/app/pages/functions/FunctionTableCtrl.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/GUI/app/pages/functions/FunctionTableCtrl.js b/GUI/app/pages/functions/FunctionTableCtrl.js index 0ef7094d..f3b16a23 100644 --- a/GUI/app/pages/functions/FunctionTableCtrl.js +++ b/GUI/app/pages/functions/FunctionTableCtrl.js @@ -635,7 +635,7 @@ }, - data: JSON.stringify({ "action" : "addFunction", "data" : { "user" : { "token" : token } , "function" : { "name" : mFunction.name, "runtime" : mFunction.runtime, "gpu_usage": mFunction.gpu_cores } } }) + data: JSON.stringify({ "action" : "addFunction", "data" : { "user" : { "token" : token } , "function" : { "name" : mFunction.name, "runtime" : mFunction.runtime, "gpu_usage": mFunction.gpu_usage } } }) } $http(req).then(function successCallback(response) { @@ -689,7 +689,7 @@ }, - data: JSON.stringify({ "action" : "modifyFunction", "data" : { "user" : { "token" : token } , "function" : { "id": mFunction.id, "name" : mFunction.name, "runtime" : mFunction.runtime, "gpu_usage" : mFunction.gpu_cores } } }) + data: JSON.stringify({ "action" : "modifyFunction", "data" : { "user" : { "token" : token } , "function" : { "id": mFunction.id, "name" : mFunction.name, "runtime" : mFunction.runtime, "gpu_usage" : mFunction.gpu_usage } } }) } $http(req).then(function successCallback(response) { From 232b3c0f3acc0667b6d47d3684bffe28b5040280 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Mon, 16 Nov 2020 12:55:00 +0000 Subject: [PATCH 31/64] fixing bug in deployWorkflow choosing the wrong sandbox image --- ManagementService/python/deployWorkflow.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ManagementService/python/deployWorkflow.py 
b/ManagementService/python/deployWorkflow.py index 4a6350e8..4a2bf1ed 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -221,11 +221,11 @@ def start_docker_sandbox(host_to_deploy, uid, sid, wid, wname, sandbox_image_nam try: print("Starting sandbox docker container for: " + uid + " " + sid + " " + wid + " " + sandbox_image_name) print("Docker daemon: " + "tcp://" + host_to_deploy[1] + ":2375" + ", environment variables: " + str(env_vars)) - #client.containers.run(sandbox_image_name, init=True, detach=True, ports={"8080/tcp": None}, ulimits=ulimit_list, auto_remove=True, name=sid, environment=env_vars, extra_hosts={host_to_deploy[0]:host_to_deploy[1]}, log_config=lc, runtime="nvidia") - client.containers.run(sandbox_image_name, init=True, detach=True, ports={"8080/tcp": None}, ulimits=ulimit_list, auto_remove=True, name=sid, environment=env_vars, extra_hosts={host_to_deploy[0]:host_to_deploy[1]}, log_config=lc) - #client.containers.run(sandbox_image_name, init=True, detach=True, ports={"8080/tcp": None}, ulimits=ulimit_list, auto_remove=True, name=sid, environment=env_vars, extra_hosts={host_to_deploy[0]:host_to_deploy[1]}, log_config=lc) + if sandbox_image_name.endswith("gpu"): + client.containers.run(sandbox_image_name, init=True, detach=True, ports={"8080/tcp": None}, ulimits=ulimit_list, auto_remove=True, name=sid, environment=env_vars, extra_hosts={host_to_deploy[0]:host_to_deploy[1]}, log_config=lc, runtime="nvidia") + else: + client.containers.run(sandbox_image_name, init=True, detach=True, ports={"8080/tcp": None}, ulimits=ulimit_list, auto_remove=True, name=sid, environment=env_vars, extra_hosts={host_to_deploy[0]:host_to_deploy[1]}, log_config=lc) # TEST/DEVELOPMENT: no auto_remove to access sandbox logs - #client.containers.run(sandbox_image_name, init=True, detach=True, ports={"8080/tcp": None}, ulimits=ulimit_list, name=sid, environment=env_vars, extra_hosts={host_to_deploy[0]:host_to_deploy[1]}, 
log_config=lc) except Exception as exc: print("Error launching sandbox: " + str(host_to_deploy) + " " + uid + " " + sid + " " + wid) print(traceback.format_exc()) From d494c862c201f39a16325ee120141823a3f91787 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Mon, 16 Nov 2020 13:45:36 +0000 Subject: [PATCH 32/64] fixing bugs on GPU configuration in Management functions --- ManagementService/python/deployWorkflow.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index 4a2bf1ed..b3d7765f 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -537,14 +537,11 @@ def handle(value, sapi): else: # We're running BARE METAL mode # _XXX_: due to the queue service still being in java in the sandbox - gpu_usage=0 - runtime="Python" # HARDCODED if gpu_usage == 0: sandbox_image_name = "microfn/sandbox" # default value elif gpu_usage != 0 and runtime == "Python": sandbox_image_name = "microfn/sandbox_gpu" # sandbox uses GPU - if any(resource_info_map[res_name]["runtime"] == "Java" for res_name in resource_info_map): sandbox_image_name = "microfn/sandbox_java" @@ -564,7 +561,7 @@ def handle(value, sapi): gpu_hosts[hostname] = hostip # instruct hosts to start the sandbox and deploy workflow - if runtime=="Java" or sandbox_image_name == "microfn/sandbox": # can use any host + if sandbox_image_name == "microfn/sandbox" or sandbox_image_name=="microfn/sandbox_java": # can use any host picked_hosts = hosts elif len(gpu_hosts) > 0: picked_hosts = gpu_hosts From a6f84a8802fdf37f918a0d001a294e8c3a6e0b6c Mon Sep 17 00:00:00 2001 From: ksatzke Date: Tue, 17 Nov 2020 08:35:02 +0000 Subject: [PATCH 33/64] debug GPU parameter modifucation via GUI --- ManagementService/python/modifyFunction.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ManagementService/python/modifyFunction.py 
b/ManagementService/python/modifyFunction.py index 17d03fbe..52ed8f3f 100644 --- a/ManagementService/python/modifyFunction.py +++ b/ManagementService/python/modifyFunction.py @@ -49,7 +49,8 @@ def handle(value, sapi): f["name"] = function["name"] f["runtime"] = function["runtime"] f["modified"] = time.time() - + f["gpu_usage"] = function["gpu_usage"] + sapi.put(email + "_grain_" + function["id"], json.dumps(f), True, True) sapi.put(email + "_list_grains", json.dumps(functions), True, True) From 9055a308b7a93cf9cdcf0d35bbf44e776d7566d6 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Wed, 18 Nov 2020 11:03:21 +0100 Subject: [PATCH 34/64] cleanup, tests are passing on gpu machine --- ManagementService/python/deployWorkflow.py | 2 +- deploy/ansible/init_once.yaml | 14 +++++++------- .../helm/microfunctions/README_GPU_Installation.md | 6 +++--- tests/mfn_test_utils.py | 2 +- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index b3d7765f..75707910 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -540,7 +540,7 @@ def handle(value, sapi): if gpu_usage == 0: sandbox_image_name = "microfn/sandbox" # default value - elif gpu_usage != 0 and runtime == "Python": + elif gpu_usage > 0: sandbox_image_name = "microfn/sandbox_gpu" # sandbox uses GPU if any(resource_info_map[res_name]["runtime"] == "Java" for res_name in resource_info_map): sandbox_image_name = "microfn/sandbox_java" diff --git a/deploy/ansible/init_once.yaml b/deploy/ansible/init_once.yaml index 0e17e8ba..3d7db84f 100644 --- a/deploy/ansible/init_once.yaml +++ b/deploy/ansible/init_once.yaml @@ -153,13 +153,13 @@ ExecStart= ExecStart=/usr/bin/dockerd -H unix:// -H tcp://0.0.0.0:2375 - - name: override configuration for docker daemon to disable tls - copy: - dest: /etc/docker/daemon.json - content: | - { - "tls": false - } +# - name: override configuration for 
docker daemon to disable tls +# copy: +# dest: /etc/docker/daemon.json +# content: | +# { +# "tls": false +# } - name: configure docker proxy copy: diff --git a/deploy/helm/microfunctions/README_GPU_Installation.md b/deploy/helm/microfunctions/README_GPU_Installation.md index 578ce849..6ff132dc 100644 --- a/deploy/helm/microfunctions/README_GPU_Installation.md +++ b/deploy/helm/microfunctions/README_GPU_Installation.md @@ -55,17 +55,17 @@ sudo apt-get install -y \ ca-certificates \ curl \ software-properties-common -sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - sudo add-apt-repository \ "deb https://download.docker.com/linux/$(. /etc/os-release; echo "$ID") \ $(lsb_release -cs) \ stable" -sudo apt-get update && apt-get install -y docker-ce=$(apt-cache madison docker-ce | grep 19.03 | head -1 | awk '{print $3}') +sudo apt-get update && sudo apt-get install -y docker-ce=$(apt-cache madison docker-ce | grep 19.03 | head -1 | awk '{print $3}') ``` **Docker installation test** ```bat -ksatzke@gpuhost:~$ docker –version +ksatzke@gpuhost:~$ docker -–version Docker version 19.03.11, build 42e35e61f3 ``` diff --git a/tests/mfn_test_utils.py b/tests/mfn_test_utils.py index 0d2b5c62..2d105e7e 100644 --- a/tests/mfn_test_utils.py +++ b/tests/mfn_test_utils.py @@ -226,7 +226,7 @@ def _get_resource_info_map(self, workflow_description=None, resource_info_map=No else: print("ERROR: invalid workflow description.") assert False - print("RESOURCE_INFO_MAPi: " + json.dumps(resource_info_map)) + #print("RESOURCE_INFO_MAP: " + json.dumps(resource_info_map)) return resource_info_map def _delete_resource_if_existing(self, existing_resources, resource_name): From ecc1b8fe62c28652055e66e3c6cd98a3ab712eab Mon Sep 17 00:00:00 2001 From: ksatzke Date: Wed, 18 Nov 2020 17:31:49 +0100 Subject: [PATCH 35/64] first cut on logic to deduce sandbox GPU requirements from function deployment 
info via GUI --- ManagementService/python/deployWorkflow.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index 75707910..e67f2ca2 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -152,6 +152,9 @@ def compile_resource_info_map(resource_names, uploaded_resources, email, sapi, d if "runtime" in resource_metadata: resource_info["runtime"] = resource_metadata["runtime"] + if "gpu_usage" in resource_metadata: + resource_info["gpu_usage"] = resource_metadata["gpu_usage"] + num_chunks_str = dlc.get("grain_source_zip_num_chunks_" + resource_id) try: num_chunks = int(num_chunks_str) @@ -504,13 +507,21 @@ def handle(value, sapi): #dlc.put("deployment_info_workflow_" + workflow["id"], json.dumps(deployment_info)) # _XXX_: important! # put must not be queued as the function currently waits for the container to become ready - + # case 1: gpu_usage is explicitly set in workflow metadata if "gpu_usage" in wfmeta and wfmeta["gpu_usage"] != "None": gpu_usage = float(wfmeta["gpu_usage"]) else: gpu_usage = 0. 
- print("deduced gpu_usage: " + str(gpu_usage)) + print("deduced gpu_usage from workflow metadata: " + str(gpu_usage)) + + print("print deployment_info[resources] to evaluate: " + str(deployment_info["resources"])) + # case 2: gpu_usage is set in deployment info + for res in deployment_info["resources"]: + if "gpu_usage" in deployment_info["resources"][res].keys(): + result_gpu = float(deployment_info["resources"][res]["gpu_usage"]) + if result_gpu > 0.: + gpu_usage = result_gpu sapi.put("deployment_info_workflow_" + workflow["id"], json.dumps(deployment_info), True, False) @@ -537,7 +548,7 @@ def handle(value, sapi): else: # We're running BARE METAL mode # _XXX_: due to the queue service still being in java in the sandbox - + print("gpu_usage before decision:" + str(gpu_usage)) if gpu_usage == 0: sandbox_image_name = "microfn/sandbox" # default value elif gpu_usage > 0: @@ -555,6 +566,7 @@ def handle(value, sapi): picked_hosts = {} for hostname in hosts: + print("current host: " + str(hosts[hostname])) #if hostname.endswith("_gpu"): if "has_gpu" in hosts[hostname]: hostip = hosts[hostname] From 3fbcfb1d541f9ffc6d8d5f4e767664b69e9d9ed8 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Wed, 18 Nov 2020 18:10:07 +0100 Subject: [PATCH 36/64] cleaning asl_Tensorflow_HelloWorld test --- tests/asl_Tensorflow_HelloWorld/python/tensorf.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/asl_Tensorflow_HelloWorld/python/tensorf.py b/tests/asl_Tensorflow_HelloWorld/python/tensorf.py index fa131a3a..731e510b 100644 --- a/tests/asl_Tensorflow_HelloWorld/python/tensorf.py +++ b/tests/asl_Tensorflow_HelloWorld/python/tensorf.py @@ -12,15 +12,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-#import json +import tensorflow as tf + def handle(event, context): - import tensorflow as tf # Simple hello world using TensorFlow x = [[2.]] + hello = tf.constant('Hello, TensorFlow!') print('tensorflow version', tf.__version__) print('hello, {}'.format(tf.matmul(x, x))) + #return "Hello from Tensorflow " + str(tf.__version__) #return "GPU available: " + str(tf.test.is_gpu_available(cuda_only=False, min_cuda_compute_capability=None)) return "GPU available: " + str(tf.test.is_built_with_cuda()) From 99e04b030945c46b276849dbe47ee7fb5aa7c0d1 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Mon, 30 Nov 2020 09:21:37 +0000 Subject: [PATCH 37/64] adapted values.yaml to lab testbed vagrant conf --- deploy/helm/microfunctions/values.yaml | 28 ++++++++++++++------------ 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/deploy/helm/microfunctions/values.yaml b/deploy/helm/microfunctions/values.yaml index da6715d3..fd6b6822 100644 --- a/deploy/helm/microfunctions/values.yaml +++ b/deploy/helm/microfunctions/values.yaml @@ -19,12 +19,12 @@ #------------------------------------------------------------------------------ # MicroFunction management workflow #------------------------------------------------------------------------------ -imageRepo: "registry.kube-system.svc.cluster.local" -#imageRepo: "localhost:5000" # kubespray deployment +#imageRepo: "registry.kube-system.svc.cluster.local" +imageRepo: "localhost:5000" # kubespray deployment manager: - #httpProxy: "http://:" - #httpsProxy: "http://:" - httpGatewayPort: 30336 # kubespray deployment + #httpProxy: "http://192.109.76.93:8080" + httpsProxy: "http://192.109.76.93:8080" + #httpGatewayPort: 31380 # kubespray deployment nameOverride: "microfunctions" newWorkflow: hpa: @@ -61,8 +61,8 @@ manager: # MicroFunction Datalayer #------------------------------------------------------------------------------ datalayer: - #replicas: 1 # kubespray deployment - replicas: 3 + replicas: 1 # kubespray deployment + #replicas: 
3 imagePath: "/microfn/datalayer" imageTag: "latest" imagePullPolicy: "Always" @@ -80,8 +80,8 @@ datalayer: # Riak global data storage #------------------------------------------------------------------------------ riak: - #replicas: 1 # kubespray deployment - replicas: 3 + replicas: 1 # kubespray deployment + #replicas: 3 imagePath: "/microfn/riak" imageTag: "latest" imagePullPolicy: "Always" @@ -90,10 +90,10 @@ riak: ClientPortProtobuf: 8087 resources: limits: - cpu: 4 + cpu: 2 memory: 8Gi requests: - cpu: 4 + cpu: 2 memory: 8Gi DataStorage: 8Gi #DataStorageClass: local-volume @@ -106,8 +106,8 @@ riak: LevelDbMaximumMemory: 4294967296 AntiEntropy: "passive" # Should be <= resources.limits.cpu - ErlangSchedulersTotal: 4 - ErlangSchedulersOnline: 4 + ErlangSchedulersTotal: 2 + ErlangSchedulersOnline: 2 ErlangSchedulersForceWakeupInterval: 500 ErlangSchedulersCompactionOfLoad: "false" @@ -139,6 +139,8 @@ nginx: # Please use --set nginx.managementService to specify the FQDN that the service will get from your Knative setup # _helpers.tpl would default it to wf-mfn1-management.knix.example.com #managementService: wf-mfn1-management.knix.example.com + managementService: wf-mfn-management.default.192.168.8.161.xip.io + # wf-mfn-management.default.192.168.8.161.xip.io imagePath: "/microfn/nginx" imageTag: "latest" httpPort: 32180 From 5db4cfbde0e5e5fdd7144a62c9b36c864dfc99f9 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Tue, 1 Dec 2020 11:30:41 +0000 Subject: [PATCH 38/64] updated values for kubespray setup --- deploy/helm/microfunctions/values.yaml | 43 ++++++++++++-------------- 1 file changed, 20 insertions(+), 23 deletions(-) diff --git a/deploy/helm/microfunctions/values.yaml b/deploy/helm/microfunctions/values.yaml index 8ff5b86e..a1400ab0 100644 --- a/deploy/helm/microfunctions/values.yaml +++ b/deploy/helm/microfunctions/values.yaml @@ -19,12 +19,13 @@ #------------------------------------------------------------------------------ # MicroFunction management 
workflow #------------------------------------------------------------------------------ +imageRepo: "localhost:5000" #imageRepo: "registry.kube-system.svc.cluster.local" -imageRepo: "localhost:5000" # kubespray deployment manager: - #httpProxy: "http://192.109.76.93:8080" + #httpProxy: "http://:" httpsProxy: "http://192.109.76.93:8080" - #httpGatewayPort: 31380 # kubespray deployment + #httpGatewayPort: 80 + #httpsGatewayPort: 443 nameOverride: "microfunctions" newWorkflow: hpa: @@ -58,10 +59,10 @@ manager: managementSandbox: resources: limits: - cpu: 4 + cpu: 1 memory: 4Gi requests: - cpu: 2 + cpu: 1 memory: 2Gi createServiceAccounts: true @@ -69,8 +70,7 @@ manager: # MicroFunction Datalayer #------------------------------------------------------------------------------ datalayer: - replicas: 1 # kubespray deployment - #replicas: 3 + replicas: 1 imagePath: "/microfn/datalayer" imageTag: "latest" imagePullPolicy: "Always" @@ -79,18 +79,17 @@ datalayer: javaOpts: "-Xmx4096m" resources: limits: - cpu: 4 + cpu: 1 memory: 8Gi requests: - cpu: 4 + cpu: 1 memory: 8Gi #------------------------------------------------------------------------------ # Riak global data storage #------------------------------------------------------------------------------ riak: - replicas: 1 # kubespray deployment - #replicas: 3 + replicas: 1 imagePath: "/microfn/riak" imageTag: "latest" imagePullPolicy: "Always" @@ -99,12 +98,12 @@ riak: ClientPortProtobuf: 8087 resources: limits: - cpu: 2 - memory: 8Gi + cpu: 1 + memory: 4Gi requests: - cpu: 2 - memory: 8Gi - DataStorage: 8Gi + cpu: 1 + memory: 4Gi + DataStorage: 4Gi #DataStorageClass: local-volume ClusterName: "riak" WaitForErlang: 180 @@ -115,8 +114,8 @@ riak: LevelDbMaximumMemory: 4294967296 AntiEntropy: "passive" # Should be <= resources.limits.cpu - ErlangSchedulersTotal: 2 - ErlangSchedulersOnline: 2 + ErlangSchedulersTotal: 1 + ErlangSchedulersOnline: 1 ErlangSchedulersForceWakeupInterval: 500 ErlangSchedulersCompactionOfLoad: 
"false" @@ -147,9 +146,7 @@ nginx: # In most installations, it would be {ksvc}.{namespace}.example.com # Please use --set nginx.managementService to specify the FQDN that the service will get from your Knative setup # _helpers.tpl would default it to wf-mfn1-management.knix.example.com - #managementService: wf-mfn1-management.knix.example.com - managementService: wf-mfn-management.default.192.168.8.161.xip.io - # wf-mfn-management.default.192.168.8.161.xip.io + managementService: wf-mfn-management.default.192.168.8.151.xip.io imagePath: "/microfn/nginx" imageTag: "latest" httpPort: 32180 @@ -180,8 +177,8 @@ triggersFrontend: statusReportIntervalSec: 30 resources: limits: - cpu: 4 + cpu: 1 memory: 8Gi requests: - cpu: 2 + cpu: 1 memory: 1Gi From ff2527487241856b33c76b55f7b5130043565df3 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Thu, 10 Dec 2020 14:46:20 +0000 Subject: [PATCH 39/64] remove blocking of concurrent gpu pods in k8s deployment caused by 'limits' definition for sandbox_gpu --- ManagementService/python/deployWorkflow.py | 8 ++++---- .../microfunctions/templates/management.yaml | 6 +----- deploy/helm/microfunctions/values.yaml | 20 ++++++++++--------- tests/asl_Map/test.py | 3 ++- 4 files changed, 18 insertions(+), 19 deletions(-) diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index 9eee1d3f..f6e50d3e 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -345,8 +345,8 @@ def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, management=F if not management and use_gpus > 0. 
and runtime=="Python": # gpu using python function # overwrite values from values.yaml for new workflows - kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['nvidia.com/gpu'] = str(use_gpus) - kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['nvidia.com/gpu'] = str(use_gpus) + ###kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['nvidia.com/gpu'] = str(use_gpus) + ###kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['nvidia.com/gpu'] = str(use_gpus) imageName = kservice['spec']['template']['spec']['containers'][0]['image'] imageRepoName = imageName.split("/")[0] # kservice['spec']['template']['spec']['containers'][0]['image'] = "192.168.8.161:5000/microfn/sandbox_gpu" @@ -369,8 +369,8 @@ def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, management=F # management container should not consume a CPU and use standard sandbox image if (labels['workflowid'] == "Management"): - kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['nvidia.com/gpu'] = "0" - kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['nvidia.com/gpu'] = "0" + ###kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['nvidia.com/gpu'] = "0" + ###kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['nvidia.com/gpu'] = "0" imageName = kservice['spec']['template']['spec']['containers'][0]['image'] imageRepoName = imageName.split("/")[0] # kservice['spec']['template']['spec']['containers'][0]['image'] = "192.168.8.161:5000/microfn/sandbox" diff --git a/deploy/helm/microfunctions/templates/management.yaml b/deploy/helm/microfunctions/templates/management.yaml index 01a74e63..2a481a4d 100644 --- a/deploy/helm/microfunctions/templates/management.yaml +++ b/deploy/helm/microfunctions/templates/management.yaml @@ -125,11 +125,7 @@ data: {{- /* Disable scale to zero with a minScale of 
1. */ -}} "autoscaling.knative.dev/minScale": "1", {{- /* Limit scaling to 100 pods. */ -}} - "autoscaling.knative.dev/maxScale": "5", - {{- /* KubeShare GPU-related configurations */ -}} - "kubeshare/gpu_request": "0.4", - "kubeshare/gpu_limit": "1.0", - "kubeshare/gpu_mem": "3145728000" + "autoscaling.knative.dev/maxScale": "5" }, "labels": { "app": "microfunctions-workflow", diff --git a/deploy/helm/microfunctions/values.yaml b/deploy/helm/microfunctions/values.yaml index 8e0bd6a2..ffff8f40 100644 --- a/deploy/helm/microfunctions/values.yaml +++ b/deploy/helm/microfunctions/values.yaml @@ -19,13 +19,12 @@ #------------------------------------------------------------------------------ # MicroFunction management workflow #------------------------------------------------------------------------------ -imageRepo: "192.168.8.161:5000" #imageRepo: "registry.kube-system.svc.cluster.local" +imageRepo: "192.168.8.161:5000" # kubespray deployment manager: - #httpProxy: "http://:" + #httpProxy: "http://192.109.76.93:8080" httpsProxy: "http://192.109.76.93:8080" - #httpGatewayPort: 80 - #httpsGatewayPort: 443 + #httpGatewayPort: 31380 # kubespray deployment nameOverride: "microfunctions" newWorkflow: hpa: @@ -62,7 +61,7 @@ manager: cpu: 4 memory: 4Gi requests: - cpu: 1 + cpu: 2 memory: 2Gi createServiceAccounts: true @@ -70,7 +69,8 @@ manager: # MicroFunction Datalayer #------------------------------------------------------------------------------ datalayer: - replicas: 1 + replicas: 1 # kubespray deployment + #replicas: 3 imagePath: "/microfn/datalayer" imageTag: "latest" imagePullPolicy: "Always" @@ -89,7 +89,8 @@ datalayer: # Riak global data storage #------------------------------------------------------------------------------ riak: - replicas: 1 + replicas: 1 # kubespray deployment + #replicas: 3 imagePath: "/microfn/riak" imageTag: "latest" imagePullPolicy: "Always" @@ -98,7 +99,7 @@ riak: ClientPortProtobuf: 8087 resources: limits: - cpu: 2 + cpu: 2 memory: 8Gi 
requests: cpu: 2 @@ -146,7 +147,8 @@ nginx: # In most installations, it would be {ksvc}.{namespace}.example.com # Please use --set nginx.managementService to specify the FQDN that the service will get from your Knative setup # _helpers.tpl would default it to wf-mfn1-management.knix.example.com - managementService: wf-mfn-management.default.192.168.8.151.xip.io + #managementService: wf-mfn1-management.knix.example.com + managementService: wf-mfn-management.default.192.168.8.161.xip.io imagePath: "/microfn/nginx" imageTag: "latest" httpPort: 32180 diff --git a/tests/asl_Map/test.py b/tests/asl_Map/test.py index 2dba98c6..0b1d57f1 100644 --- a/tests/asl_Map/test.py +++ b/tests/asl_Map/test.py @@ -49,7 +49,8 @@ def test_map_state(self): et = time.time() print ("test duration (s): %s" % str(et-st)) - for mc in range(4): # set maxConcurrency parameter + + for mc in range(0): # set maxConcurrency parameter """ creates and executes the Map state test workflow from the ASL description """ testtuplelist = [] From 5163edeb105429302001c1c75064289f7de68ed9 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Tue, 12 Jan 2021 13:51:50 +0000 Subject: [PATCH 40/64] adding support for configurable GPU core+memory sharing based on gpu-manager --- ManagementService/python/deployWorkflow.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index 50c10b56..f6388d92 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -350,8 +350,18 @@ def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, management=F imageName = kservice['spec']['template']['spec']['containers'][0]['image'] imageRepoName = imageName.split("/")[0] # kservice['spec']['template']['spec']['containers'][0]['image'] = "192.168.8.161:5000/microfn/sandbox_gpu" + # calculate requests resource parameters for gpu-manager + + gpu_core_request = 
str(use_gpus*100) # derived from GUI float input parameter giving core percentage + gpu_memory_request = "4" # hardoded to 4 x 245MB = 1GB kservice['spec']['template']['spec']['containers'][0]['image'] = imageRepoName+"/microfn/sandbox_gpu" - + kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['tencent.com/vcuda-core'] = gpu_core_request #str(use_gpus) + kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['tencent.com/vcuda-memory'] = gpu_memory_request #str(use_gpus) + # calculate limits resource parameters for gpu-manager, need to identical to requests parameter + kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['tencent.com/vcuda-core'] = gpu_core_request #str(use_gpus) + kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['tencent.com/vcuda-memory'] = gpu_memory_request #str(use_gpus) + kservice['spec']['template']['metadata']['annotations']['tencent.com/vcuda-core-limit'] = "20" + # Special handling for the management container: never run on gpu if management: management_workflow_conf = {} From defacaf3324ef338543aa129eca6fb85709c9cf4 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Fri, 15 Jan 2021 11:34:15 +0000 Subject: [PATCH 41/64] fixing bug on GPU parameter calculations --- ManagementService/python/deployWorkflow.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index f6388d92..9d2bd338 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -351,16 +351,19 @@ def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, management=F imageRepoName = imageName.split("/")[0] # kservice['spec']['template']['spec']['containers'][0]['image'] = "192.168.8.161:5000/microfn/sandbox_gpu" # calculate requests resource parameters for gpu-manager + + # gpu_total_memory = 7800 # hardcoded 
info (gtx1070), should give free GPU memory + gpu_core_request = str(int(use_gpus*100)) # derived from GUI float input parameter giving core percentage + gpu_memory_request = str(int(30 * use_gpus)) # adapted to gpu-manager memory parameter definition - gpu_core_request = str(use_gpus*100) # derived from GUI float input parameter giving core percentage - gpu_memory_request = "4" # hardoded to 4 x 245MB = 1GB kservice['spec']['template']['spec']['containers'][0]['image'] = imageRepoName+"/microfn/sandbox_gpu" + kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['tencent.com/vcuda-core'] = gpu_core_request #str(use_gpus) kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['tencent.com/vcuda-memory'] = gpu_memory_request #str(use_gpus) # calculate limits resource parameters for gpu-manager, need to identical to requests parameter kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['tencent.com/vcuda-core'] = gpu_core_request #str(use_gpus) kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['tencent.com/vcuda-memory'] = gpu_memory_request #str(use_gpus) - kservice['spec']['template']['metadata']['annotations']['tencent.com/vcuda-core-limit'] = "20" + kservice['spec']['template']['metadata']['annotations']['tencent.com/vcuda-core-limit'] = str(int(30 * use_gpus)) #gpu_core_request #ToDo: check value # Special handling for the management container: never run on gpu if management: @@ -613,7 +616,7 @@ def handle(value, sapi): picked_hosts = gpu_hosts else: picked_hosts = hosts # fallback as there are no gpu hosts available - print("available GPU hosts is empty. Deploying on general purpose host") + print("available GPU hosts list is empty. 
Deploying on general purpose host") for hostname in picked_hosts: # loop over all hosts, need to pich gpu hosts for python/gpu workflows hostip = hosts[hostname] From 730e0f73af4ca5c76d03d3668998bd1cdf31651b Mon Sep 17 00:00:00 2001 From: ksatzke Date: Tue, 26 Jan 2021 16:28:56 +0000 Subject: [PATCH 42/64] WIP: adding logic for node GPU capacity queries to ManagementService --- ManagementService/python/deployWorkflow.py | 39 +++++++++++++++++-- Sandbox/Dockerfile_gpu | 4 +- .../microfunctions/templates/management.yaml | 10 ++--- 3 files changed, 42 insertions(+), 11 deletions(-) diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index 9d2bd338..426bdb13 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -319,7 +319,7 @@ def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, management=F env.append({'name': 'WORKFLOWNAME', 'value': workflow_info["workflowName"]}) # apply gpu_usage fraction to k8s deployment configuration - print("GPU sage in create_k8s_service: "+ str(gpu_usage)) + print("GPU usage in create_k8s_service: "+ str(gpu_usage)) use_gpus = gpu_usage if runtime=="Java": # non gpu python function @@ -344,6 +344,38 @@ def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, management=F kservice['spec']['template']['spec']['containers'][0]['image'] = imageRepoName+"/microfn/sandbox" if not management and use_gpus > 0. 
and runtime=="Python": # gpu using python function + + + print('getting cluster node capacity info with token' + str(token)) + new_token = "eyJhbGciOiJSUzI1NiIsImtpZCI6IkZidDdPX0hyUVdRRDdob2VnU25fWkx4YWhaV1FtX29yREpUYV9iUTJhZlUifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6ImRlZmF1bHQtdG9rZW4tYm16dnYiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGVmYXVsdCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjI1MmFkNWNjLWQzNGYtNGQzMC1hZmE0LWJhODA4YzVlMGFlZiIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpkZWZhdWx0OmRlZmF1bHQifQ.oMuC-xkaEcbf66B89F_fsBCV-2-5YebJffaFOPLi3_T59l7GpSD0155i_WFDKybgxxZO7uqpWA560lgdeBKfiEB9ugS66g-FKaglfqW-wHx9VIlxEwCYKhOWEORfBn3ArSe9CH2XkmC--NL12zx-2gveKHLH7pXNLzAYRywxoXMlSTsqpjE-mDa9jPrmhiyyk2nLdYWct5QwqSxSg94DRJ43FuaMwI7PXXqD1yLZLm8NEtEOSCrUgTCpOIyCis5qwjXdTAnEnu54pBd8wAV66kSez8ebXM-E-4e7K6gZQvJOITMxg-qMvA3EvuetgfADl5wTf_YLfUbv3cx2SASKMQ" + + resp = requests.get( + "https://kubernetes.default:"+os.getenv("KUBERNETES_SERVICE_PORT_HTTPS")+"/api/v1/nodes", + headers={"Authorization": "Bearer "+new_token, "Accept": "application/json"}, + verify="/var/run/secrets/kubernetes.io/serviceaccount/ca.crt", + proxies={"https":""}) + if resp.status_code == 200: + print('queried cluster node capacities, result: ' + json.dumps(resp.json())) + data = json.loads(resp.text) + vmemory = 0 + vcore = 0 + print("received JSON data items: " + str(data["items"]) + " " + str(type(data["items"]))) + + #assert data["items"] is list # is list of dicts + for d in data["items"]: # iterate over the cluster nodes + print("item: " + str(d) + str(type(d)) ) + #assert type(d) is dict + res_capacity = d["status"]["capacity"] + print("res_capacity: " + str(res_capacity)) + if "tencent.com/vcuda-memory" in res_capacity.keys(): + vmemory += int(d["status"]["capacity"]["tencent.com/vcuda-memory"]) + vcore += 
int(d["status"]["capacity"]["tencent.com/vcuda-core"]) + print("found vcuda capability: " + str(vmemory) + " " + str(vcore)) + else: + print("this node has no vcuda capability!") + + print('queried cluster node capacities: vuda-memory: %s, vcuda-core: %s' % (str(vmemory), str(vcore))) + # overwrite values from values.yaml for new workflows ###kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['nvidia.com/gpu'] = str(use_gpus) ###kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['nvidia.com/gpu'] = str(use_gpus) @@ -355,15 +387,16 @@ def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, management=F # gpu_total_memory = 7800 # hardcoded info (gtx1070), should give free GPU memory gpu_core_request = str(int(use_gpus*100)) # derived from GUI float input parameter giving core percentage gpu_memory_request = str(int(30 * use_gpus)) # adapted to gpu-manager memory parameter definition - - kservice['spec']['template']['spec']['containers'][0]['image'] = imageRepoName+"/microfn/sandbox_gpu" + kservice['spec']['template']['spec']['containers'][0]['image'] = imageRepoName+"/microfn/sandbox_gpu" kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['tencent.com/vcuda-core'] = gpu_core_request #str(use_gpus) kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['tencent.com/vcuda-memory'] = gpu_memory_request #str(use_gpus) # calculate limits resource parameters for gpu-manager, need to identical to requests parameter kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['tencent.com/vcuda-core'] = gpu_core_request #str(use_gpus) kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['tencent.com/vcuda-memory'] = gpu_memory_request #str(use_gpus) kservice['spec']['template']['metadata']['annotations']['tencent.com/vcuda-core-limit'] = str(int(30 * use_gpus)) #gpu_core_request #ToDo: check value + 
#kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['aliyun.com/gpu-mem'] = "2" #str(use_gpus) + # Special handling for the management container: never run on gpu if management: diff --git a/Sandbox/Dockerfile_gpu b/Sandbox/Dockerfile_gpu index 66b189d1..fed18aaa 100644 --- a/Sandbox/Dockerfile_gpu +++ b/Sandbox/Dockerfile_gpu @@ -17,9 +17,7 @@ FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04 # Install (as root) # Base -RUN apt-get update --fix-missing -RUN apt-get -y --no-install-recommends install build-essential -RUN apt-get -y --no-install-recommends install netbase unzip file libmagic1 +RUN apt-get update --fix-missing && apt-get -y --no-install-recommends install build-essential netbase unzip file libmagic1 # CUDA 10.1 dependencies and tools to build dlib RUN apt-get -y --no-install-recommends install libsm6 libxrender1 libxrender-dev libxext6 libglib2.0-0 git cmake diff --git a/deploy/helm/microfunctions/templates/management.yaml b/deploy/helm/microfunctions/templates/management.yaml index afddce89..6a2f40ef 100644 --- a/deploy/helm/microfunctions/templates/management.yaml +++ b/deploy/helm/microfunctions/templates/management.yaml @@ -34,9 +34,9 @@ rules: - apiGroups: ["serving.knative.dev"] resources: ["configurations", "routes", "revisions"] verbs: ["get", "list"] - # Allow reading/writing "pods" + # Allow reading/writing "pods" and "nodes" - apiGroups: [""] - resources: ["pods", "pods/status", "pods/log"] + resources: ["pods", "pods/status", "pods/log", "nodes"] verbs: ["*"] --- # Role binding for the management account @@ -66,10 +66,10 @@ metadata: name: "wf-{{ .Release.Name }}-role" namespace: {{ .Release.Namespace }} rules: - # Allow reading "endpoints" + # Allow reading "endpoints" and "nodes" - apiGroups: [""] - resources: ["endpoints","pods"] - verbs: ["get","list"] + resources: ["endpoints","pods","nodes"] + verbs: ["get", "watch", "list"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding From 
cd779afd387585fc69b1cb7e04b32d10094b51b1 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Wed, 27 Jan 2021 16:17:38 +0000 Subject: [PATCH 43/64] use vgpu parameters in kservice setup --- ManagementService/python/deployWorkflow.py | 58 +++++++++++----------- 1 file changed, 30 insertions(+), 28 deletions(-) diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index 426bdb13..3ea02400 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -348,34 +348,36 @@ def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, management=F print('getting cluster node capacity info with token' + str(token)) new_token = "eyJhbGciOiJSUzI1NiIsImtpZCI6IkZidDdPX0hyUVdRRDdob2VnU25fWkx4YWhaV1FtX29yREpUYV9iUTJhZlUifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6ImRlZmF1bHQtdG9rZW4tYm16dnYiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGVmYXVsdCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjI1MmFkNWNjLWQzNGYtNGQzMC1hZmE0LWJhODA4YzVlMGFlZiIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpkZWZhdWx0OmRlZmF1bHQifQ.oMuC-xkaEcbf66B89F_fsBCV-2-5YebJffaFOPLi3_T59l7GpSD0155i_WFDKybgxxZO7uqpWA560lgdeBKfiEB9ugS66g-FKaglfqW-wHx9VIlxEwCYKhOWEORfBn3ArSe9CH2XkmC--NL12zx-2gveKHLH7pXNLzAYRywxoXMlSTsqpjE-mDa9jPrmhiyyk2nLdYWct5QwqSxSg94DRJ43FuaMwI7PXXqD1yLZLm8NEtEOSCrUgTCpOIyCis5qwjXdTAnEnu54pBd8wAV66kSez8ebXM-E-4e7K6gZQvJOITMxg-qMvA3EvuetgfADl5wTf_YLfUbv3cx2SASKMQ" - - resp = requests.get( - "https://kubernetes.default:"+os.getenv("KUBERNETES_SERVICE_PORT_HTTPS")+"/api/v1/nodes", - headers={"Authorization": "Bearer "+new_token, "Accept": "application/json"}, - verify="/var/run/secrets/kubernetes.io/serviceaccount/ca.crt", - proxies={"https":""}) - if resp.status_code == 200: - print('queried cluster node capacities, result: ' + 
json.dumps(resp.json())) - data = json.loads(resp.text) - vmemory = 0 - vcore = 0 - print("received JSON data items: " + str(data["items"]) + " " + str(type(data["items"]))) + try: + resp = requests.get( + "https://kubernetes.default:"+os.getenv("KUBERNETES_SERVICE_PORT_HTTPS")+"/api/v1/nodes", + headers={"Authorization": "Bearer "+new_token, "Accept": "application/json"}, + verify="/var/run/secrets/kubernetes.io/serviceaccount/ca.crt", + proxies={"https":""}) + if resp.status_code == 200: + #print('queried cluster node capacities, result: ' + json.dumps(resp.json())) + data = json.loads(resp.text) + vmemory = 0 + vcore = 0 + #print("received JSON data items: " + str(data["items"]) + " " + str(type(data["items"]))) - #assert data["items"] is list # is list of dicts - for d in data["items"]: # iterate over the cluster nodes - print("item: " + str(d) + str(type(d)) ) - #assert type(d) is dict - res_capacity = d["status"]["capacity"] - print("res_capacity: " + str(res_capacity)) - if "tencent.com/vcuda-memory" in res_capacity.keys(): - vmemory += int(d["status"]["capacity"]["tencent.com/vcuda-memory"]) - vcore += int(d["status"]["capacity"]["tencent.com/vcuda-core"]) - print("found vcuda capability: " + str(vmemory) + " " + str(vcore)) - else: - print("this node has no vcuda capability!") - + #assert data["items"] is list # is list of dicts + for d in data["items"]: # iterate over the cluster nodes + #print("item: " + str(d) + str(type(d)) ) + #assert type(d) is dict + res_capacity = d["status"]["capacity"] + #print("res_capacity: " + str(res_capacity)) + if "tencent.com/vcuda-memory" in res_capacity.keys(): + vmemory += int(d["status"]["capacity"]["tencent.com/vcuda-memory"]) + vcore += int(d["status"]["capacity"]["tencent.com/vcuda-core"]) + print("found vcuda capability: " + str(vmemory) + " " + str(vcore)) + else: + print("this node has no vcuda capability!") print('queried cluster node capacities: vuda-memory: %s, vcuda-core: %s' % (str(vmemory), str(vcore))) - + 
except requests.exceptions.HTTPError as e: + print(e) + print(resp.text) + # overwrite values from values.yaml for new workflows ###kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['nvidia.com/gpu'] = str(use_gpus) ###kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['nvidia.com/gpu'] = str(use_gpus) @@ -386,7 +388,7 @@ def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, management=F # gpu_total_memory = 7800 # hardcoded info (gtx1070), should give free GPU memory gpu_core_request = str(int(use_gpus*100)) # derived from GUI float input parameter giving core percentage - gpu_memory_request = str(int(30 * use_gpus)) # adapted to gpu-manager memory parameter definition + gpu_memory_request = str(int(vmemory * use_gpus)) # adapted to gpu-manager memory parameter definition kservice['spec']['template']['spec']['containers'][0]['image'] = imageRepoName+"/microfn/sandbox_gpu" kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['tencent.com/vcuda-core'] = gpu_core_request #str(use_gpus) @@ -394,7 +396,7 @@ def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, management=F # calculate limits resource parameters for gpu-manager, need to identical to requests parameter kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['tencent.com/vcuda-core'] = gpu_core_request #str(use_gpus) kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['tencent.com/vcuda-memory'] = gpu_memory_request #str(use_gpus) - kservice['spec']['template']['metadata']['annotations']['tencent.com/vcuda-core-limit'] = str(int(30 * use_gpus)) #gpu_core_request #ToDo: check value + kservice['spec']['template']['metadata']['annotations']['tencent.com/vcuda-core-limit'] = str(int(vmemory * use_gpus)) #gpu_core_request #ToDo: check value #kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['aliyun.com/gpu-mem'] = "2" 
#str(use_gpus) From 09f58e8896cf86441e82339a3d291339ac151cac Mon Sep 17 00:00:00 2001 From: ksatzke Date: Tue, 2 Feb 2021 09:45:12 +0000 Subject: [PATCH 44/64] adding capability to handle secret token for k8s core API --- ManagementService/python/deployWorkflow.py | 24 +++++++++++-------- deploy/helm/helm_deploy_with_gpu.sh | 18 ++++++++++++++ .../microfunctions/templates/management.yaml | 13 +++++----- deploy/helm/microfunctions/values.yaml | 15 ++++++++---- 4 files changed, 49 insertions(+), 21 deletions(-) create mode 100755 deploy/helm/helm_deploy_with_gpu.sh diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index 3ea02400..d4f391f5 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -344,10 +344,18 @@ def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, management=F kservice['spec']['template']['spec']['containers'][0]['image'] = imageRepoName+"/microfn/sandbox" if not management and use_gpus > 0. 
and runtime=="Python": # gpu using python function - - print('getting cluster node capacity info with token' + str(token)) - new_token = "eyJhbGciOiJSUzI1NiIsImtpZCI6IkZidDdPX0hyUVdRRDdob2VnU25fWkx4YWhaV1FtX29yREpUYV9iUTJhZlUifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6ImRlZmF1bHQtdG9rZW4tYm16dnYiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGVmYXVsdCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjI1MmFkNWNjLWQzNGYtNGQzMC1hZmE0LWJhODA4YzVlMGFlZiIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpkZWZhdWx0OmRlZmF1bHQifQ.oMuC-xkaEcbf66B89F_fsBCV-2-5YebJffaFOPLi3_T59l7GpSD0155i_WFDKybgxxZO7uqpWA560lgdeBKfiEB9ugS66g-FKaglfqW-wHx9VIlxEwCYKhOWEORfBn3ArSe9CH2XkmC--NL12zx-2gveKHLH7pXNLzAYRywxoXMlSTsqpjE-mDa9jPrmhiyyk2nLdYWct5QwqSxSg94DRJ43FuaMwI7PXXqD1yLZLm8NEtEOSCrUgTCpOIyCis5qwjXdTAnEnu54pBd8wAV66kSez8ebXM-E-4e7K6gZQvJOITMxg-qMvA3EvuetgfADl5wTf_YLfUbv3cx2SASKMQ" + # first set default values + vcore = 100 + vmemory = 31 + # use token obtained from kubernetes master to update cluster node properties + + if os.getenv("API_TOKEN") is not None: + new_token=os.getenv("API_TOKEN") + print('getting cluster node capacity info with token' + str(new_token)) + else: + new_token="default" + try: resp = requests.get( "https://kubernetes.default:"+os.getenv("KUBERNETES_SERVICE_PORT_HTTPS")+"/api/v1/nodes", @@ -355,16 +363,11 @@ def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, management=F verify="/var/run/secrets/kubernetes.io/serviceaccount/ca.crt", proxies={"https":""}) if resp.status_code == 200: - #print('queried cluster node capacities, result: ' + json.dumps(resp.json())) data = json.loads(resp.text) vmemory = 0 vcore = 0 - #print("received JSON data items: " + str(data["items"]) + " " + str(type(data["items"]))) - #assert data["items"] is list # is list of dicts for d in data["items"]: # 
iterate over the cluster nodes - #print("item: " + str(d) + str(type(d)) ) - #assert type(d) is dict res_capacity = d["status"]["capacity"] #print("res_capacity: " + str(res_capacity)) if "tencent.com/vcuda-memory" in res_capacity.keys(): @@ -372,12 +375,13 @@ def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, management=F vcore += int(d["status"]["capacity"]["tencent.com/vcuda-core"]) print("found vcuda capability: " + str(vmemory) + " " + str(vcore)) else: - print("this node has no vcuda capability!") + print("this node has no vcuda capability, skipping") print('queried cluster node capacities: vuda-memory: %s, vcuda-core: %s' % (str(vmemory), str(vcore))) except requests.exceptions.HTTPError as e: + print("Error: could not get cluster node vcuda capacities!") print(e) print(resp.text) - + # overwrite values from values.yaml for new workflows ###kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['nvidia.com/gpu'] = str(use_gpus) ###kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['nvidia.com/gpu'] = str(use_gpus) diff --git a/deploy/helm/helm_deploy_with_gpu.sh b/deploy/helm/helm_deploy_with_gpu.sh new file mode 100755 index 00000000..9dc70672 --- /dev/null +++ b/deploy/helm/helm_deploy_with_gpu.sh @@ -0,0 +1,18 @@ +# Check all possible clusters, as your .KUBECONFIG may have multiple contexts: +#kubectl config view -o jsonpath='{"Cluster name\tServer\n"}{range .clusters[*]}{.name}{"\t"}{.cluster.server}{"\n"}{end}' + +# Select name of cluster you want to interact with from above output: +#export CLUSTER_NAME="cluster.local" +CLUSTER_NAME=$(kubectl config view -o jsonpath="{.clusters[0].name}") + +# Point to the API server referring the cluster name +APISERVER=$(kubectl config view -o jsonpath="{.clusters[?(@.name==\"$CLUSTER_NAME\")].cluster.server}") + +# Gets the token value a access api/v1/nodes +TOKEN=$(kubectl get secrets -o 
jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='default')].data.token}"|base64 --decode) + +echo $TOKEN + +helm install mfn $PWD/microfunctions --set apikey=$TOKEN +helm upgrade mfn $PWD/microfunctions --set apikey=$TOKEN + diff --git a/deploy/helm/microfunctions/templates/management.yaml b/deploy/helm/microfunctions/templates/management.yaml index 6a2f40ef..fe4ab6ee 100644 --- a/deploy/helm/microfunctions/templates/management.yaml +++ b/deploy/helm/microfunctions/templates/management.yaml @@ -34,9 +34,9 @@ rules: - apiGroups: ["serving.knative.dev"] resources: ["configurations", "routes", "revisions"] verbs: ["get", "list"] - # Allow reading/writing "pods" and "nodes" + # Allow reading/writing "pods" - apiGroups: [""] - resources: ["pods", "pods/status", "pods/log", "nodes"] + resources: ["pods", "pods/status", "pods/log"] verbs: ["*"] --- # Role binding for the management account @@ -68,8 +68,8 @@ metadata: rules: # Allow reading "endpoints" and "nodes" - apiGroups: [""] - resources: ["endpoints","pods","nodes"] - verbs: ["get", "watch", "list"] + resources: ["endpoints","pods"] + verbs: ["get", "list"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding @@ -167,10 +167,11 @@ data: "env": [ {{/* not allowed in KNative {"name": "MFN_HOSTNAME", "valueFrom": { "fieldRef": { "fieldPath": "spec.nodeName" }}}, - */}} + */}} {"name": "MFN_DATALAYER", "value": "datalayer.{{ .Release.Namespace }}.svc:{{ .Values.datalayer.port }}" }, {"name": "MFN_ELASTICSEARCH", "value": {{ include "esConnect.url" . | quote }} }, - {"name": "MFN_MANAGEMENT", "value": "http://{{ include "nginx.mgmtService" . | regexFind "^[^.]+" }}.{{ .Release.Namespace }}.svc" } + {"name": "MFN_MANAGEMENT", "value": "http://{{ include "nginx.mgmtService" . 
| regexFind "^[^.]+" }}.{{ .Release.Namespace }}.svc" }, + {"name": "API_TOKEN", "value": "{{.Values.apiKey}}" } ], {{/* not allowed in KNative "lifecycle": {"preStop": {"exec":{"command":["python3","/sand/SandboxAgent/shutdown.py"]}}}, diff --git a/deploy/helm/microfunctions/values.yaml b/deploy/helm/microfunctions/values.yaml index ffff8f40..9ba9bc03 100644 --- a/deploy/helm/microfunctions/values.yaml +++ b/deploy/helm/microfunctions/values.yaml @@ -21,6 +21,8 @@ #------------------------------------------------------------------------------ #imageRepo: "registry.kube-system.svc.cluster.local" imageRepo: "192.168.8.161:5000" # kubespray deployment +apiKey: "abcdef" +#imageRepo: "localhost:5000" # kubespray deployment manager: #httpProxy: "http://192.109.76.93:8080" httpsProxy: "http://192.109.76.93:8080" @@ -48,6 +50,9 @@ manager: imagePathJava: "/microfn/sandbox_java" imageTag: "latest" imagePullPolicy: "Always" + env: + name: API_TOKEN + value: "123test" resources: limits: cpu: 1 @@ -100,11 +105,11 @@ riak: resources: limits: cpu: 2 - memory: 8Gi + memory: 4Gi requests: cpu: 2 - memory: 8Gi - DataStorage: 8Gi + memory: 4Gi + DataStorage: 4Gi #DataStorageClass: local-volume ClusterName: "riak" WaitForErlang: 180 @@ -115,8 +120,8 @@ riak: LevelDbMaximumMemory: 4294967296 AntiEntropy: "passive" # Should be <= resources.limits.cpu - ErlangSchedulersTotal: 2 - ErlangSchedulersOnline: 2 + ErlangSchedulersTotal: 4 + ErlangSchedulersOnline: 4 ErlangSchedulersForceWakeupInterval: 500 ErlangSchedulersCompactionOfLoad: "false" From 7311aece50eb887688c5e0c7d6e8fd21015bc0dd Mon Sep 17 00:00:00 2001 From: ksatzke Date: Tue, 2 Feb 2021 16:10:08 +0000 Subject: [PATCH 45/64] adding GUI and ManagementService changes for GPU parameters --- GUI/app/pages/functions/FunctionTableCtrl.js | 7 ++++--- .../functions/widgets/editableRowTable.html | 8 +++++++- GUI/app/pages/workflows/WorkflowImportCtrl.js | 2 +- ManagementService/management_init.py | 2 +- 
ManagementService/python/addFunction.py | 1 + ManagementService/python/addWorkflow.py | 2 ++ ManagementService/python/deployWorkflow.py | 20 +++++++++++++++++-- ManagementService/python/modifyFunction.py | 1 + .../schema/mfndata-workflow-example.json | 2 ++ deploy/helm/helm_deploy_with_gpu.sh | 2 +- 10 files changed, 38 insertions(+), 9 deletions(-) diff --git a/GUI/app/pages/functions/FunctionTableCtrl.js b/GUI/app/pages/functions/FunctionTableCtrl.js index f3b16a23..34635650 100644 --- a/GUI/app/pages/functions/FunctionTableCtrl.js +++ b/GUI/app/pages/functions/FunctionTableCtrl.js @@ -635,7 +635,7 @@ }, - data: JSON.stringify({ "action" : "addFunction", "data" : { "user" : { "token" : token } , "function" : { "name" : mFunction.name, "runtime" : mFunction.runtime, "gpu_usage": mFunction.gpu_usage } } }) + data: JSON.stringify({ "action" : "addFunction", "data" : { "user" : { "token" : token } , "function" : { "name" : mFunction.name, "runtime" : mFunction.runtime, "gpu_usage": mFunction.gpu_usage, "gpu_mem_usage": mFunction.gpu_mem_usage } } }) } $http(req).then(function successCallback(response) { @@ -645,7 +645,7 @@ console.log('new function id:' + response.data.data.function.id); toastr.success('Your function has been created successfully!'); $scope.reloadFunctions(); - $scope.open('app/pages/functions/modals/codeEditorModal.html', 'lg', mFunction.id, mFunction.name, mFunction.status, mFunction.runtime, mFunction.gpu_usage); + $scope.open('app/pages/functions/modals/codeEditorModal.html', 'lg', mFunction.id, mFunction.name, mFunction.status, mFunction.runtime, mFunction.gpu_usage, mFunction.gpu_mem_usage); } else { console.log("Failure status returned by addFunction"); @@ -689,7 +689,7 @@ }, - data: JSON.stringify({ "action" : "modifyFunction", "data" : { "user" : { "token" : token } , "function" : { "id": mFunction.id, "name" : mFunction.name, "runtime" : mFunction.runtime, "gpu_usage" : mFunction.gpu_usage } } }) + data: JSON.stringify({ "action" : 
"modifyFunction", "data" : { "user" : { "token" : token } , "function" : { "id": mFunction.id, "name" : mFunction.name, "runtime" : mFunction.runtime, "gpu_usage" : mFunction.gpu_usage, "gpu_mem_usage": mFunction.gpu_mem_usage } } }) } $http(req).then(function successCallback(response) { @@ -736,6 +736,7 @@ status: 'undeployed', runtime: 'Python 3.6', gpu_usage: '0', + gpu_mem_usage: '0', modified: '0' }; $scope.functions.push($scope.inserted); diff --git a/GUI/app/pages/functions/widgets/editableRowTable.html b/GUI/app/pages/functions/widgets/editableRowTable.html index c7dd3f84..920d1087 100644 --- a/GUI/app/pages/functions/widgets/editableRowTable.html +++ b/GUI/app/pages/functions/widgets/editableRowTable.html @@ -24,7 +24,8 @@ Name Runtime - Assigned GPU Cores + GPU Cores (%) + GPU Memory (GB) Last Modified Actions @@ -50,6 +51,11 @@ {{ mFunction.gpu_usage || 'Not set' }} + + + + {{ mFunction.gpu_mem_usage || 'Not set' }} + diff --git a/GUI/app/pages/workflows/WorkflowImportCtrl.js b/GUI/app/pages/workflows/WorkflowImportCtrl.js index 8842e251..6fdc0aa7 100644 --- a/GUI/app/pages/workflows/WorkflowImportCtrl.js +++ b/GUI/app/pages/workflows/WorkflowImportCtrl.js @@ -658,7 +658,7 @@ }, - data: JSON.stringify({ "action" : "addFunction", "data" : { "user" : { "token" : token } , "function" : { "name" : functionName, "runtime" : functionRuntime, "gpu_usage": functionGpuusage } } }) + data: JSON.stringify({ "action" : "addFunction", "data" : { "user" : { "token" : token } , "function" : { "name" : functionName, "runtime" : functionRuntime, "gpu_usage": functionGpuusage, "gpu_mem_usage": functionGpuMemusage } } }) } $http(req).then(function successCallback(response) { diff --git a/ManagementService/management_init.py b/ManagementService/management_init.py index 4cb83de6..880a1473 100755 --- a/ManagementService/management_init.py +++ b/ManagementService/management_init.py @@ -416,7 +416,7 @@ def printUsage(): sys.path.append(workflowdir) if 
os.getenv("KUBERNETES_PORT", None) != None: import deployWorkflow - url, endpoint_key = deployWorkflow.create_k8s_deployment(email, workflow_info, "Python", 0, management=True) + url, endpoint_key = deployWorkflow.create_k8s_deployment(email, workflow_info, "Python", 0, 0, management=True) DLCLIENT_MANAGEMENT.putMapEntry("Management_workflow_endpoint_map", endpoint_key, url) # Kubernetes mode only has one url endpoint_list = [url] diff --git a/ManagementService/python/addFunction.py b/ManagementService/python/addFunction.py index 2ce4f372..ec36e54c 100644 --- a/ManagementService/python/addFunction.py +++ b/ManagementService/python/addFunction.py @@ -43,6 +43,7 @@ def handle(value, sapi): f["name"] = function["name"] f["runtime"] = function["runtime"] f["gpu_usage"] = function["gpu_usage"] + f["gpu_mem_usage"] = function["gpu_mem_usage"] f["modified"] = time.time() f["id"] = hashlib.md5(str(uuid.uuid4()).encode()).hexdigest() diff --git a/ManagementService/python/addWorkflow.py b/ManagementService/python/addWorkflow.py index 9e0fc87d..800232cf 100644 --- a/ManagementService/python/addWorkflow.py +++ b/ManagementService/python/addWorkflow.py @@ -86,6 +86,8 @@ def handle(value, sapi): #wf["gpu_usage"] = None if "gpu_usage" in workflow: wf["gpu_usage"] = str(workflow["gpu_usage"]) + #if "gpu_mem_usage" in workflow: + wf["gpu_mem_usage"] = str(workflow["gpu_mem_usage"]) wf['associatedTriggerableTables'] = {} wf['associatedTriggers'] = {} wf["id"] = hashlib.md5(str(uuid.uuid4()).encode()).hexdigest().lower() diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index d4f391f5..50223654 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -155,6 +155,9 @@ def compile_resource_info_map(resource_names, uploaded_resources, email, sapi, d if "gpu_usage" in resource_metadata: resource_info["gpu_usage"] = resource_metadata["gpu_usage"] + if "gpu_mem_usage" in resource_metadata: 
+ resource_info["gpu_mem_usage"] = resource_metadata["gpu_mem_usage"] + num_chunks_str = dlc.get("grain_source_zip_num_chunks_" + resource_id) try: num_chunks = int(num_chunks_str) @@ -265,7 +268,7 @@ def get_workflow_host_port(host_to_deploy, sid): return success, host_port -def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, management=False): +def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, gpu_mem_usage, management=False): # KUBERNETES MODE new_workflow_conf = {} conf_file = '/opt/mfn/SandboxAgent/conf/new_workflow.conf' @@ -320,6 +323,8 @@ def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, management=F # apply gpu_usage fraction to k8s deployment configuration print("GPU usage in create_k8s_service: "+ str(gpu_usage)) + print("GPU mem usage in create_k8s_service: "+ str(gpu_mem_usage)) + use_gpus = gpu_usage if runtime=="Java": # non gpu python function @@ -589,7 +594,13 @@ def handle(value, sapi): else: gpu_usage = 0. + if "gpu_mem_usage" in wfmeta and wfmeta["gpu_mem_usage"] != "None": + gpu_mem_usage = float(wfmeta["gpu_mem_usage"]) + else: + gpu_mem_usage = 0. 
+ print("deduced gpu_usage from workflow metadata: " + str(gpu_usage)) + print("deduced gpu_mem_usage from workflow metadata: " + str(gpu_mem_usage)) print("print deployment_info[resources] to evaluate: " + str(deployment_info["resources"])) # case 2: gpu_usage is set in deployment info @@ -599,6 +610,11 @@ def handle(value, sapi): if result_gpu > 0.: gpu_usage = result_gpu + if "gpu_mem_usage" in deployment_info["resources"][res].keys(): + result_mem_gpu = float(deployment_info["resources"][res]["gpu_mem_usage"]) + if result_mem_gpu > 0.: + gpu_mem_usage = result_mem_gpu + sapi.put("deployment_info_workflow_" + workflow["id"], json.dumps(deployment_info), True, False) status = "deploying" @@ -611,7 +627,7 @@ def handle(value, sapi): else: runtime = "Python" - url, endpoint_key = create_k8s_deployment(email, workflow_info, runtime, gpu_usage) + url, endpoint_key = create_k8s_deployment(email, workflow_info, runtime, gpu_usage, gpu_mem_usage) if url is not None and len(url) > 0: status = "deploying" sapi.addSetEntry(workflow_info["workflowId"] + "_workflow_endpoints", str(url), is_private=True) diff --git a/ManagementService/python/modifyFunction.py b/ManagementService/python/modifyFunction.py index 52ed8f3f..a10d5eac 100644 --- a/ManagementService/python/modifyFunction.py +++ b/ManagementService/python/modifyFunction.py @@ -50,6 +50,7 @@ def handle(value, sapi): f["runtime"] = function["runtime"] f["modified"] = time.time() f["gpu_usage"] = function["gpu_usage"] + f["gpu_mem_usage"] = function["gpu_mem_usage"] sapi.put(email + "_grain_" + function["id"], json.dumps(f), True, True) diff --git a/ManagementService/schema/mfndata-workflow-example.json b/ManagementService/schema/mfndata-workflow-example.json index 97696a55..7de810a7 100644 --- a/ManagementService/schema/mfndata-workflow-example.json +++ b/ManagementService/schema/mfndata-workflow-example.json @@ -53,6 +53,7 @@ "name": "addFunction", "runtime": "Python 3.7", "gpu_usage": "0.", + "gpu_mem_usage": "0.", 
"id": "00000000-0000-0000-0000-222222222222", "modified": 1581498771.096527 } @@ -94,6 +95,7 @@ "id":"00000000-0000-0000-0000-222222222222", "runtime":"Python 3.7", "gpu_usage":"0.", + "gpu_mem_usage":"0.", "type": "code", "ref": "grain_source_00000000-0000-0000-0000-222222222222" } diff --git a/deploy/helm/helm_deploy_with_gpu.sh b/deploy/helm/helm_deploy_with_gpu.sh index 9dc70672..20e96a46 100755 --- a/deploy/helm/helm_deploy_with_gpu.sh +++ b/deploy/helm/helm_deploy_with_gpu.sh @@ -10,7 +10,7 @@ APISERVER=$(kubectl config view -o jsonpath="{.clusters[?(@.name==\"$CLUSTER_NAM # Gets the token value a access api/v1/nodes TOKEN=$(kubectl get secrets -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='default')].data.token}"|base64 --decode) - +echo "This is the found api access token:" echo $TOKEN helm install mfn $PWD/microfunctions --set apikey=$TOKEN From 8520d5f99b4d404f81cbd4eec7e8cdc65373fc6c Mon Sep 17 00:00:00 2001 From: ksatzke Date: Wed, 3 Feb 2021 15:07:12 +0000 Subject: [PATCH 46/64] fixing bugs on GPU memory parameter calculations --- ManagementService/python/deployWorkflow.py | 28 +++++++++++----------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index 50223654..050c7520 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -326,25 +326,22 @@ def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, gpu_mem_usag print("GPU mem usage in create_k8s_service: "+ str(gpu_mem_usage)) use_gpus = gpu_usage + use_mem_gpus = gpu_mem_usage if runtime=="Java": # non gpu python function # overwrite values from values.yaml for new workflows - #kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['nvidia.com/gpu'] = str(use_gpus) - #kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['nvidia.com/gpu'] = 
str(use_gpus) # only change the image name imageName = kservice['spec']['template']['spec']['containers'][0]['image'] imageRepoName = imageName.split("/")[0] - #kservice['spec']['template']['spec']['containers'][0]['image'] = "192.168.8.161:5000/microfn/sandbox_java" kservice['spec']['template']['spec']['containers'][0]['image'] = imageRepoName+"/microfn/sandbox_java" if not management and use_gpus == 0. and runtime=="Python": # non gpu python function # overwrite values from values.yaml for new workflows - kservice['spec']['template']['spec']['containers'][0]['resources']['limits'].pop('nvidia.com/gpu', None) # ['nvidia.com/gpu'] = str(use_gpus) - kservice['spec']['template']['spec']['containers'][0]['resources']['requests'].pop('nvidia.com/gpu', None) # ['nvidia.com/gpu'] = str(use_gpus) + #kservice['spec']['template']['spec']['containers'][0]['resources']['limits'].pop('nvidia.com/gpu', None) # ['nvidia.com/gpu'] = str(use_gpus) + #kservice['spec']['template']['spec']['containers'][0]['resources']['requests'].pop('nvidia.com/gpu', None) # ['nvidia.com/gpu'] = str(use_gpus) imageName = kservice['spec']['template']['spec']['containers'][0]['image'] imageRepoName = imageName.split("/")[0] - #kservice['spec']['template']['spec']['containers'][0]['image'] = "192.168.8.161:5000/microfn/sandbox" kservice['spec']['template']['spec']['containers'][0]['image'] = imageRepoName+"/microfn/sandbox" @@ -381,23 +378,26 @@ def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, gpu_mem_usag print("found vcuda capability: " + str(vmemory) + " " + str(vcore)) else: print("this node has no vcuda capability, skipping") - print('queried cluster node capacities: vuda-memory: %s, vcuda-core: %s' % (str(vmemory), str(vcore))) + print('queried cluster node capacities: vcuda-memory: %s, vcuda-core: %s' % (str(vmemory), str(vcore))) except requests.exceptions.HTTPError as e: print("Error: could not get cluster node vcuda capacities!") print(e) print(resp.text) # overwrite 
values from values.yaml for new workflows - ###kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['nvidia.com/gpu'] = str(use_gpus) - ###kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['nvidia.com/gpu'] = str(use_gpus) imageName = kservice['spec']['template']['spec']['containers'][0]['image'] imageRepoName = imageName.split("/")[0] - # kservice['spec']['template']['spec']['containers'][0]['image'] = "192.168.8.161:5000/microfn/sandbox_gpu" - # calculate requests resource parameters for gpu-manager # gpu_total_memory = 7800 # hardcoded info (gtx1070), should give free GPU memory - gpu_core_request = str(int(use_gpus*100)) # derived from GUI float input parameter giving core percentage - gpu_memory_request = str(int(vmemory * use_gpus)) # adapted to gpu-manager memory parameter definition + gpu_core_request = str(int(use_gpus*100)) # derived from GUI float input parameter, yielding core percentage as required by gpu-manager + #gpu_memory_request = str(int(vmemory * use_gpus)) # adapted to gpu-manager memory parameter definition + gpu_memory_request = str(int(use_mem_gpus*4.0)) # gpu-manager requires gpu memory parameter in units of 256 MB + print ("memory request set to %s vcuda units " % gpu_memory_request) + + if int(gpu_memory_request) > int(vmemory): + print("only up to %s GB GPU memory available on the cluster nodes!" 
% str(int(vmemory))) + gpu_memory_request = str(int(vmemory)) # limit to max available memory + print ("memory set to %s GB " % gpu_memory_request) kservice['spec']['template']['spec']['containers'][0]['image'] = imageRepoName+"/microfn/sandbox_gpu" kservice['spec']['template']['spec']['containers'][0]['resources']['requests']['tencent.com/vcuda-core'] = gpu_core_request #str(use_gpus) @@ -405,7 +405,7 @@ def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, gpu_mem_usag # calculate limits resource parameters for gpu-manager, need to identical to requests parameter kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['tencent.com/vcuda-core'] = gpu_core_request #str(use_gpus) kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['tencent.com/vcuda-memory'] = gpu_memory_request #str(use_gpus) - kservice['spec']['template']['metadata']['annotations']['tencent.com/vcuda-core-limit'] = str(int(vmemory * use_gpus)) #gpu_core_request #ToDo: check value + kservice['spec']['template']['metadata']['annotations']['tencent.com/vcuda-core-limit'] = str(int(vmemory)) #gpu_core_request #ToDo: check value #kservice['spec']['template']['spec']['containers'][0]['resources']['limits']['aliyun.com/gpu-mem'] = "2" #str(use_gpus) From 338fcd37e3dcf9850323e187ff565f81cc782ba8 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Mon, 8 Feb 2021 08:34:47 +0000 Subject: [PATCH 47/64] fixing more bugs on GPU memory parameter calculations --- ManagementService/python/deployWorkflow.py | 2 +- SandboxAgent/deployment.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index 050c7520..3ac97094 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -389,7 +389,7 @@ def create_k8s_deployment(email, workflow_info, runtime, gpu_usage, gpu_mem_usag imageRepoName = imageName.split("/")[0] # 
gpu_total_memory = 7800 # hardcoded info (gtx1070), should give free GPU memory - gpu_core_request = str(int(use_gpus*100)) # derived from GUI float input parameter, yielding core percentage as required by gpu-manager + gpu_core_request = str(int(use_gpus)) # derived from GUI float input parameter, yielding core percentage as required by gpu-manager #gpu_memory_request = str(int(vmemory * use_gpus)) # adapted to gpu-manager memory parameter definition gpu_memory_request = str(int(use_mem_gpus*4.0)) # gpu-manager requires gpu memory parameter in units of 256 MB print ("memory request set to %s vcuda units " % gpu_memory_request) diff --git a/SandboxAgent/deployment.py b/SandboxAgent/deployment.py index 146e49b1..675b1454 100644 --- a/SandboxAgent/deployment.py +++ b/SandboxAgent/deployment.py @@ -373,7 +373,7 @@ def _install_sandbox_requirements(self, parameters): else: cmd = "python " cmd = cmd + "-m pip install --user" - cmd += " --no-compile --no-clean" + cmd += " --no-compile --no-clean --no-cache-dir" for opt in additional_installer_options: cmd = cmd + " " + opt + " " + additional_installer_options[opt] From 56974468fb3e5635218ecd51aad47253f62fb3dd Mon Sep 17 00:00:00 2001 From: ksatzke Date: Tue, 16 Feb 2021 09:12:53 +0000 Subject: [PATCH 48/64] fixing bugs in deployment script, adjusting values --- deploy/helm/helm_deploy_with_gpu.sh | 4 ++-- deploy/helm/microfunctions/values.yaml | 28 +++++++++++++------------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/deploy/helm/helm_deploy_with_gpu.sh b/deploy/helm/helm_deploy_with_gpu.sh index 20e96a46..0921bd16 100755 --- a/deploy/helm/helm_deploy_with_gpu.sh +++ b/deploy/helm/helm_deploy_with_gpu.sh @@ -13,6 +13,6 @@ TOKEN=$(kubectl get secrets -o jsonpath="{.items[?(@.metadata.annotations['kuber echo "This is the found api access token:" echo $TOKEN -helm install mfn $PWD/microfunctions --set apikey=$TOKEN -helm upgrade mfn $PWD/microfunctions --set apikey=$TOKEN +helm install mfn 
$PWD/microfunctions --set apiKey=$TOKEN --set persistence.storageClass=manual +helm upgrade mfn $PWD/microfunctions --set apiKey=$TOKEN --set persistence.storageClass=manual diff --git a/deploy/helm/microfunctions/values.yaml b/deploy/helm/microfunctions/values.yaml index 9ba9bc03..ed46d94d 100644 --- a/deploy/helm/microfunctions/values.yaml +++ b/deploy/helm/microfunctions/values.yaml @@ -56,10 +56,10 @@ manager: resources: limits: cpu: 1 - memory: 2Gi + memory: 4Gi requests: cpu: 1 - memory: 1Gi + memory: 2Gi managementSandbox: resources: limits: @@ -84,10 +84,10 @@ datalayer: javaOpts: "-Xmx4096m" resources: limits: - cpu: 4 + cpu: 1 memory: 8Gi requests: - cpu: 4 + cpu: 1 memory: 8Gi #------------------------------------------------------------------------------ @@ -104,13 +104,13 @@ riak: ClientPortProtobuf: 8087 resources: limits: - cpu: 2 - memory: 4Gi + cpu: 1 + memory: 8Gi requests: - cpu: 2 - memory: 4Gi - DataStorage: 4Gi - #DataStorageClass: local-volume + cpu: 1 + memory: 8Gi + DataStorage: 8Gi + DataStorageClass: manual ClusterName: "riak" WaitForErlang: 180 ErlangDistributionPortRangeMinimum: 6000 @@ -120,8 +120,8 @@ riak: LevelDbMaximumMemory: 4294967296 AntiEntropy: "passive" # Should be <= resources.limits.cpu - ErlangSchedulersTotal: 4 - ErlangSchedulersOnline: 4 + ErlangSchedulersTotal: 2 + ErlangSchedulersOnline: 2 ErlangSchedulersForceWakeupInterval: 500 ErlangSchedulersCompactionOfLoad: "false" @@ -184,8 +184,8 @@ triggersFrontend: statusReportIntervalSec: 30 resources: limits: - cpu: 2 + cpu: 1 memory: 8Gi requests: - cpu: 2 + cpu: 1 memory: 1Gi From 051b32796f72338d38bca17fbff7fd9023ac8988 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Fri, 26 Feb 2021 12:05:41 +0100 Subject: [PATCH 49/64] fixing bugs in available_hosts scripts --- deploy/ansible/sandbox.yaml | 9 +++++++++ deploy/ansible/scripts/available_hosts.py | 21 +++++++++++++-------- deploy/helm/microfunctions/Chart.yaml | 2 +- 3 files changed, 23 insertions(+), 9 deletions(-) diff 
--git a/deploy/ansible/sandbox.yaml b/deploy/ansible/sandbox.yaml index f9e4c788..76083b02 100644 --- a/deploy/ansible/sandbox.yaml +++ b/deploy/ansible/sandbox.yaml @@ -43,6 +43,11 @@ - sandbox installation directory {{ install_dir }} - riak_connect = {{ riak_connect }} + - name: get GPU environment + shell: "su - -c 'echo $KNIX_node_hasGPU'" + become: true + register: KNIX_node_hasGPU + - name: get http_proxy shell: "su - -c 'echo $http_proxy'" become: true @@ -62,12 +67,14 @@ http_proxy: "{{ http_proxy.stdout }}" https_proxy: "{{ https_proxy.stdout }}" no_proxy: "{{ no_proxy.stdout }}" + KNIX_node_hasGPU: "{{ KNIX_node_hasGPU.stdout }}" - debug: msg: - http_proxy = {{ http_proxy }} - https_proxy = {{ https_proxy }} - no_proxy = {{ no_proxy }} + - KNIX_node_hasGPU = {{KNIX_node_hasGPU }} ################# - name: create sandbox folder @@ -161,6 +168,7 @@ no_proxy={{ no_proxy }} HTTP_PROXY={{ http_proxy }} HTTPS_PROXY={{ https_proxy }} + KNIX_node_hasGPU={{ KNIX_node_hasGPU }} mode: '0755' - name: create available_hosts.sh @@ -194,6 +202,7 @@ no_proxy: "{{ no_proxy }}" HTTP_PROXY: "{{ http_proxy }}" HTTPS_PROXY: "{{ https_proxy }}" + KNIX_node_hasGPU: "{{ KNIX_node_hasGPU }}" register: availablehosts - debug: diff --git a/deploy/ansible/scripts/available_hosts.py b/deploy/ansible/scripts/available_hosts.py index 5020264f..432b42e9 100755 --- a/deploy/ansible/scripts/available_hosts.py +++ b/deploy/ansible/scripts/available_hosts.py @@ -22,6 +22,7 @@ import riak import socket import subprocess +#import platform ### global variables set at runtime DLCLIENT=None @@ -61,14 +62,13 @@ def dl_get(key): def add_host(hostname,hostip=None): if hostip is None: hostip = socket.gethostbyname(hostname) - has_gpu = False - try: - has_gpu = ("NVIDIA" in subprocess.check_output('nvcc --version.split(' ')).decode()')) - except Exception: - print("No suitable GPU available on this host!") - pass + print("Adding host: " + str(hostname)) - print("Adding host: " + str(hostname) + ", 
has gpu: "+ str(has_gpu)) + hasGPU = False + # get environment of current hostname + if os.environ['KNIX_node_hasGPU'] == "True": + print("found GPU Environent: " +str(os.environ['KNIX_node_hasGPU']) ) + hasGPU = True v = dl_get("available_hosts") if v.encoded_data is not None and len(v.encoded_data) > 0: @@ -76,13 +76,18 @@ def add_host(hostname,hostip=None): print("existing hosts: " + str(hosts)) if isinstance(hosts,list): hosts = {host: socket.gethostbyname(host) for host in hosts} + + hosts['has_GPU'] = hasGPU + else: hosts = {} if hostname != None and hostname not in hosts: hosts[hostname] = hostip - hosts[hostname]["has_gpu"] = True + if hasGPU == True: + hosts[hostname]["has_gpu"] = hasGPU v.encoded_data = json.dumps(hosts).encode() v.store() + print("found hosts: " + str(hosts)) return hosts diff --git a/deploy/helm/microfunctions/Chart.yaml b/deploy/helm/microfunctions/Chart.yaml index d2fbd8c6..0b489fb8 100644 --- a/deploy/helm/microfunctions/Chart.yaml +++ b/deploy/helm/microfunctions/Chart.yaml @@ -16,4 +16,4 @@ apiVersion: v1 appVersion: "1.0" description: High Performance Serverless system name: MicroFunctions -version: auto +version: 0.8.8-8-g4813e44-dirty From 70ce3ec313f9f33c3f4603d613dea33adb10d414 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Tue, 2 Mar 2021 10:41:04 +0000 Subject: [PATCH 50/64] resolving bugs in host selection logic for deployment --- ManagementService/python/deployWorkflow.py | 41 +++++++++++++++------- deploy/ansible/scripts/available_hosts.py | 4 +-- riak/src/workflow_triggers.erl | 3 +- 3 files changed, 32 insertions(+), 16 deletions(-) mode change 100755 => 100644 riak/src/workflow_triggers.erl diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index 3ac97094..c8c06175 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -650,32 +650,45 @@ def handle(value, sapi): # TODO: intelligence on how to pick hosts hosts = 
sapi.get("available_hosts", True) - print("available_hosts: " + str(hosts)) - if hosts is not None and hosts != "": - hosts = json.loads(hosts) + # hostst is string representation of list or dict + print("available_hosts: " + hosts) + hosts = json.loads(hosts) + + if hosts is not None and hosts != "" and isinstance(hosts,dict): + host_has_gpu = False deployed_hosts = {} gpu_hosts = {} picked_hosts = {} - - for hostname in hosts: - print("current host: " + str(hosts[hostname])) - #if hostname.endswith("_gpu"): - if "has_gpu" in hosts[hostname]: + plain_hosts={} + hostname_t = "" + for hostname in hosts: # individual host dict + nodeHasGPU = hosts["has_gpu"] # check if host has a GPU + if hostname != "has_gpu": # skip this key + hostname_t = hostname + #print("current hostnae: " + str(hostname) + str(hosts[hostname])) hostip = hosts[hostname] - gpu_hosts[hostname] = hostip - + plain_hosts[hostname] = hostip # add to general hosts + if nodeHasGPU: + gpu_hosts[hostname] = hostip # add to GPU hosts + hostname = hostname_t # instruct hosts to start the sandbox and deploy workflow + print("selected host:" + str(hostname) + " " + str(hostip)) + #print("calulated host:" + str(gpu_hosts) + " " + str(plain_hosts)) if sandbox_image_name == "microfn/sandbox" or sandbox_image_name=="microfn/sandbox_java": # can use any host - picked_hosts = hosts + picked_hosts = plain_hosts + #hosts["has_gpu"] = False + #print("picked_hosts: " + str(picked_hosts)) elif len(gpu_hosts) > 0: picked_hosts = gpu_hosts else: - picked_hosts = hosts # fallback as there are no gpu hosts available + picked_hosts = plain_hosts # fallback as there are no gpu hosts available print("available GPU hosts list is empty. 
Deploying on general purpose host") for hostname in picked_hosts: # loop over all hosts, need to pich gpu hosts for python/gpu workflows hostip = hosts[hostname] host_to_deploy = (hostname, hostip) + print("host_to_deploy: " + str(host_to_deploy) ) + #host_to_deploy = ("userslfu99", "192.168.8.99") success, endpoint_key = start_docker_sandbox(host_to_deploy, email, workflow_info["sandboxId"], workflow_info["workflowId"], workflow_info["workflowName"], sandbox_image_name) if success: deployed_hosts[hostname] = hostip @@ -697,7 +710,9 @@ def handle(value, sapi): sapi.putMapEntry(workflow_info["workflowId"] + "_sandbox_status_map", endpoint_key, json.dumps(sbinfo), is_private=True) #endpoints = sapi.retrieveMap(workflow_info["workflowId"] + "_workflow_endpoints", True) #sapi.log(str(endpoints)) - + elif hosts is not None and hosts != "" and isinstance(hosts,list): + print("hosts is not dict type!") + if not bool(deployed_hosts): status = "failed" else: diff --git a/deploy/ansible/scripts/available_hosts.py b/deploy/ansible/scripts/available_hosts.py index 432b42e9..8171f776 100755 --- a/deploy/ansible/scripts/available_hosts.py +++ b/deploy/ansible/scripts/available_hosts.py @@ -77,14 +77,14 @@ def add_host(hostname,hostip=None): if isinstance(hosts,list): hosts = {host: socket.gethostbyname(host) for host in hosts} - hosts['has_GPU'] = hasGPU + hosts['has_gpu'] = hasGPU else: hosts = {} if hostname != None and hostname not in hosts: hosts[hostname] = hostip if hasGPU == True: - hosts[hostname]["has_gpu"] = hasGPU + hosts["has_gpu"] = hasGPU v.encoded_data = json.dumps(hosts).encode() v.store() print("found hosts: " + str(hosts)) diff --git a/riak/src/workflow_triggers.erl b/riak/src/workflow_triggers.erl old mode 100755 new mode 100644 index acbdf9b8..9895c83b --- a/riak/src/workflow_triggers.erl +++ b/riak/src/workflow_triggers.erl @@ -242,7 +242,7 @@ handle_nometadata() -> generate_trigger_message(Key, Value, Table) -> Message = {[{<<"trigger_type">>, 
<<"storage">>}, - {<<"key">>, Key}, {<<"value">>, Value}, + {<<"key">>, Key}, {<<"source">>, list_to_binary(Table)}]}, MessageEncoded = jiffy:encode(Message), MessageEncoded. @@ -305,3 +305,4 @@ test() -> io:format((?LOG_PREFIX) ++ " ~p~n", [Message]), io:format((?LOG_PREFIX) ++ " ~p~n", [MessageEncoded]), io:format((?LOG_PREFIX) ++ "Execid ~p~n", [Execid]). + From e0e4a86521636a4948f91f5f3677b31d7c954876 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Thu, 25 Mar 2021 12:51:09 +0000 Subject: [PATCH 51/64] fixing a bug in workflow GPU resource calculation --- ManagementService/python/deployWorkflow.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index c8c06175..8af42ab9 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -607,14 +607,17 @@ def handle(value, sapi): for res in deployment_info["resources"]: if "gpu_usage" in deployment_info["resources"][res].keys(): result_gpu = float(deployment_info["resources"][res]["gpu_usage"]) + print("gpu_usage in loop: " + str(result_gpu)) if result_gpu > 0.: - gpu_usage = result_gpu + gpu_usage += result_gpu if "gpu_mem_usage" in deployment_info["resources"][res].keys(): result_mem_gpu = float(deployment_info["resources"][res]["gpu_mem_usage"]) if result_mem_gpu > 0.: - gpu_mem_usage = result_mem_gpu - + gpu_mem_usage += result_mem_gpu + print("gpu_mem_usage in loop: " + str(result_mem_gpu)) + + print("GPUINFO" + str(gpu_usage)+ " " + str(gpu_mem_usage)) sapi.put("deployment_info_workflow_" + workflow["id"], json.dumps(deployment_info), True, False) status = "deploying" From d92a6041f67bf20a78ec35d483c5860780999462 Mon Sep 17 00:00:00 2001 From: ksatzke Date: Thu, 1 Apr 2021 07:55:45 +0000 Subject: [PATCH 52/64] extending mfn SDK to handle GPU parameters --- deploy/helm/helm_deploy_with_gpu.sh | 2 ++ deploy/helm/microfunctions/values.yaml | 2 +- 
mfn_sdk/mfn_sdk/mfnclient.py | 16 ++++++++-------- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/deploy/helm/helm_deploy_with_gpu.sh b/deploy/helm/helm_deploy_with_gpu.sh index 0921bd16..911d00c6 100755 --- a/deploy/helm/helm_deploy_with_gpu.sh +++ b/deploy/helm/helm_deploy_with_gpu.sh @@ -13,6 +13,8 @@ TOKEN=$(kubectl get secrets -o jsonpath="{.items[?(@.metadata.annotations['kuber echo "This is the found api access token:" echo $TOKEN +#REGISTRY=192.168.8.181:5000 + helm install mfn $PWD/microfunctions --set apiKey=$TOKEN --set persistence.storageClass=manual helm upgrade mfn $PWD/microfunctions --set apiKey=$TOKEN --set persistence.storageClass=manual diff --git a/deploy/helm/microfunctions/values.yaml b/deploy/helm/microfunctions/values.yaml index ed46d94d..6f946f29 100644 --- a/deploy/helm/microfunctions/values.yaml +++ b/deploy/helm/microfunctions/values.yaml @@ -20,7 +20,7 @@ # MicroFunction management workflow #------------------------------------------------------------------------------ #imageRepo: "registry.kube-system.svc.cluster.local" -imageRepo: "192.168.8.161:5000" # kubespray deployment +imageRepo: "localhost:5000" # kubespray deployment apiKey: "abcdef" #imageRepo: "localhost:5000" # kubespray deployment manager: diff --git a/mfn_sdk/mfn_sdk/mfnclient.py b/mfn_sdk/mfn_sdk/mfnclient.py index a5a4d376..b90de47b 100644 --- a/mfn_sdk/mfn_sdk/mfnclient.py +++ b/mfn_sdk/mfn_sdk/mfnclient.py @@ -333,14 +333,14 @@ def find_function(self,name): return res[0] @deprecated(reason="Grains have been renamed to functions, use add_function(..) instead") - def addGrain(self,name,runtime='Python 3.6',gpu_usage="0."): - return self.add_function(name,runtime,gpu_usage) + def addGrain(self,name,runtime='Python 3.6',gpu_usage="0.",gpu_mem_usage="0."): + return self.add_function(name,runtime,gpu_usage,gpu_mem_usage) @deprecated(reason="Grains have been renamed to functions, use add_function(..) 
instead") - def add_grain(self,name,runtime='Python 3.6',gpu_usage="0."): - return self.add_function(name, runtime, gpu_usage) + def add_grain(self,name,runtime='Python 3.6',gpu_usage="0.",gpu_mem_usage="0."): + return self.add_function(name, runtime, gpu_usage,gpu_mem_usage) - def add_function(self,name,runtime='Python 3.6',gpu_usage="0."): + def add_function(self,name,runtime='Python 3.6',gpu_usage="0.",gpu_mem_usage="0."): """ add a function returns an existing function if the name exists, registers a new function name if it doesn't exist @@ -349,7 +349,7 @@ def add_function(self,name,runtime='Python 3.6',gpu_usage="0."): for f in self.functions: if f._name == name: return f - data = self.action('addFunction',{'function':{'name':name,'runtime':runtime, 'gpu_usage': gpu_usage}}) + data = self.action('addFunction',{'function':{'name':name,'runtime':runtime, 'gpu_usage': gpu_usage, 'gpu_mem_usage': gpu_mem_usage}}) gd = data['function'] f = Function(self,gd) self._functions.append(f) @@ -452,7 +452,7 @@ def _get_state_names_and_resource(self, desired_state_type, wf_dict): return state_list - def add_workflow(self,name,filename=None, gpu_usage=None): + def add_workflow(self,name,filename=None, gpu_usage=None, gpu_mem_usage=None): """ add a workflow returns an existing workflow if the name exists, registers a new workflow name if it doesn't exist @@ -461,7 +461,7 @@ def add_workflow(self,name,filename=None, gpu_usage=None): for wf in self._workflows: if wf._name == name: return wf - data = self.action('addWorkflow',{'workflow':{'name':name, "gpu_usage":gpu_usage}}) + data = self.action('addWorkflow',{'workflow':{'name':name, "gpu_usage":gpu_usage, "gpu_mem_usage":gpu_mem_usage}}) wfd = data['workflow'] wf = Workflow(self,wfd) self._workflows.append(wf) From 48f7546de1d1a0bec4674487fd8717835046b9ce Mon Sep 17 00:00:00 2001 From: ksatzke Date: Tue, 13 Apr 2021 15:33:22 +0000 Subject: [PATCH 53/64] fixing bugs on ASL tests using GPUs --- 
deploy/helm/helm_deploy_with_gpu.sh | 2 ++ deploy/helm/microfunctions/values.yaml | 7 +++++-- tests/asl_DLIB/test.py | 2 +- tests/asl_Face_Recognition/test.py | 2 +- tests/asl_Tensorflow_HelloWorld/test.py | 2 +- tests/mfn_test_utils.py | 10 ++++++++-- 6 files changed, 18 insertions(+), 7 deletions(-) diff --git a/deploy/helm/helm_deploy_with_gpu.sh b/deploy/helm/helm_deploy_with_gpu.sh index 911d00c6..e37cb3ce 100755 --- a/deploy/helm/helm_deploy_with_gpu.sh +++ b/deploy/helm/helm_deploy_with_gpu.sh @@ -16,5 +16,7 @@ echo $TOKEN #REGISTRY=192.168.8.181:5000 helm install mfn $PWD/microfunctions --set apiKey=$TOKEN --set persistence.storageClass=manual + + helm upgrade mfn $PWD/microfunctions --set apiKey=$TOKEN --set persistence.storageClass=manual diff --git a/deploy/helm/microfunctions/values.yaml b/deploy/helm/microfunctions/values.yaml index 6f946f29..b25f2c32 100644 --- a/deploy/helm/microfunctions/values.yaml +++ b/deploy/helm/microfunctions/values.yaml @@ -19,8 +19,11 @@ #------------------------------------------------------------------------------ # MicroFunction management workflow #------------------------------------------------------------------------------ -#imageRepo: "registry.kube-system.svc.cluster.local" -imageRepo: "localhost:5000" # kubespray deployment +imageRepo: "registry.kube-system.svc.cluster.local:5000" +#imageRepo: "localhost:5000" # kubespray deployment + +#imageRepo: "localhost:5000" # kubespray deployment + apiKey: "abcdef" #imageRepo: "localhost:5000" # kubespray deployment manager: diff --git a/tests/asl_DLIB/test.py b/tests/asl_DLIB/test.py index 600233ee..9705075c 100644 --- a/tests/asl_DLIB/test.py +++ b/tests/asl_DLIB/test.py @@ -34,6 +34,6 @@ def test_dlib(self): testtuplelist =[(inp1, res1)] - test = MFNTest(test_name = "Dlib_Test", gpu_usage = "1") + test = MFNTest(test_name = "Dlib_Test", gpu_usage = "1", gpu_mem_usage="6") test.exec_tests(testtuplelist) diff --git a/tests/asl_Face_Recognition/test.py 
b/tests/asl_Face_Recognition/test.py index 4487e4ba..414c2a1d 100644 --- a/tests/asl_Face_Recognition/test.py +++ b/tests/asl_Face_Recognition/test.py @@ -34,7 +34,7 @@ def test_tensorflow(self): res1 = '"[(68, 497, 175, 390)]"' testtuplelist =[(inp1, res1)] - test = MFNTest(test_name = "FaceRecognition__Test", gpu_usage="1") + test = MFNTest(test_name = "FaceRecognition__Test", gpu_usage="50", gpu_mem_usage="10") #time.sleep(10) # wait for deployment test.exec_tests(testtuplelist) diff --git a/tests/asl_Tensorflow_HelloWorld/test.py b/tests/asl_Tensorflow_HelloWorld/test.py index 0b47f872..eeb730eb 100644 --- a/tests/asl_Tensorflow_HelloWorld/test.py +++ b/tests/asl_Tensorflow_HelloWorld/test.py @@ -34,7 +34,7 @@ def test_tensorflow(self): res1 = '"GPU available: True"' testtuplelist =[(inp1, res1)] - test = MFNTest(test_name = "Tensorflow__Test", gpu_usage = "1") + test = MFNTest(test_name = "Tensorflow__Test", gpu_usage = "50", gpu_mem_usage="10") #time.sleep(10) # wait for deployment test.exec_tests(testtuplelist) diff --git a/tests/mfn_test_utils.py b/tests/mfn_test_utils.py index c67b6be2..54f5de74 100644 --- a/tests/mfn_test_utils.py +++ b/tests/mfn_test_utils.py @@ -43,7 +43,7 @@ class MfnAppTextFormat(): mfntestfailed = MfnAppTextFormat.STYLE_BOLD + MfnAppTextFormat.COLOR_RED + 'FAILED' + MfnAppTextFormat.END + MfnAppTextFormat.END class MFNTest(): - def __init__(self, test_name=None, timeout=None, workflow_filename=None, new_user=False, delete_user=False, gpu_usage=None): + def __init__(self, test_name=None, timeout=None, workflow_filename=None, new_user=False, delete_user=False, gpu_usage=None, gpu_mem_usage=None): self._settings = self._get_settings() @@ -86,6 +86,9 @@ def __init__(self, test_name=None, timeout=None, workflow_filename=None, new_use if gpu_usage is not None: self._settings["gpu_usage"] = gpu_usage + if gpu_mem_usage is not None: + self._settings["gpu_mem_usage"] = gpu_mem_usage + self._log_clear_timestamp = int(time.time() * 1000.0 * 
1000.0) # will be the deployed workflow object in self._client @@ -120,6 +123,8 @@ def _get_settings(self): settings.setdefault("timeout", 60) settings.setdefault("gpu_usage", "None") + settings.setdefault("gpu_mem_usage", "None") + return settings def _get_resource_info(self, resource_ref): @@ -303,7 +308,8 @@ def get_deployment_error(self): def deploy_workflow(self): try: gpu_usage=self._settings["gpu_usage"] - wf = self._client.add_workflow(self._workflow_name, None, gpu_usage) + gpu_mem_usage=self._settings["gpu_mem_usage"] + wf = self._client.add_workflow(self._workflow_name, None, gpu_usage, gpu_mem_usage) wf.json = json.dumps(self._workflow_description) wf.deploy(self._settings["timeout"]) self._workflow = wf From 7a1b157c69ff259486230ee065bbe8e05148e1f0 Mon Sep 17 00:00:00 2001 From: Istemi Ekin Akkus <5419814+iakkus@users.noreply.github.com> Date: Mon, 26 Apr 2021 15:43:33 +0200 Subject: [PATCH 54/64] fix to helm template management.yaml --- deploy/helm/microfunctions/templates/management.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/deploy/helm/microfunctions/templates/management.yaml b/deploy/helm/microfunctions/templates/management.yaml index 300ac1cd..8c77894c 100644 --- a/deploy/helm/microfunctions/templates/management.yaml +++ b/deploy/helm/microfunctions/templates/management.yaml @@ -138,9 +138,9 @@ data: }, "spec": { {{/* not allowed in KNative - "nodeSelector": {{ toJson .Values.manager.nodeSelector }}, - "tolerations": {{ toJson .Values.manager.tolerations }}, - */}} + "nodeSelector": {{ toJson .Values.manager.nodeSelector }}, + "tolerations": {{ toJson .Values.manager.tolerations }}, + */}}, {{/* not allowed in KNative "affinity": { "podAffinity": { From 191e0da13ea47dddc8cbd74ce90ecc9ffa40816c Mon Sep 17 00:00:00 2001 From: Istemi Ekin Akkus <5419814+iakkus@users.noreply.github.com> Date: Mon, 26 Apr 2021 15:51:40 +0200 Subject: [PATCH 55/64] Revert "fix to helm template management.yaml" This reverts commit 
7a1b157c69ff259486230ee065bbe8e05148e1f0. --- deploy/helm/microfunctions/templates/management.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/deploy/helm/microfunctions/templates/management.yaml b/deploy/helm/microfunctions/templates/management.yaml index 8c77894c..300ac1cd 100644 --- a/deploy/helm/microfunctions/templates/management.yaml +++ b/deploy/helm/microfunctions/templates/management.yaml @@ -138,9 +138,9 @@ data: }, "spec": { {{/* not allowed in KNative - "nodeSelector": {{ toJson .Values.manager.nodeSelector }}, - "tolerations": {{ toJson .Values.manager.tolerations }}, - */}}, + "nodeSelector": {{ toJson .Values.manager.nodeSelector }}, + "tolerations": {{ toJson .Values.manager.tolerations }}, + */}} {{/* not allowed in KNative "affinity": { "podAffinity": { From 4528bc6872d852001ce6947fba34e1df7f2614bf Mon Sep 17 00:00:00 2001 From: Istemi Ekin Akkus <5419814+iakkus@users.noreply.github.com> Date: Mon, 26 Apr 2021 17:59:13 +0200 Subject: [PATCH 56/64] fix to helm template management.yaml after merging with develop --- deploy/helm/microfunctions/templates/management.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/helm/microfunctions/templates/management.yaml b/deploy/helm/microfunctions/templates/management.yaml index 300ac1cd..ad1b45ea 100644 --- a/deploy/helm/microfunctions/templates/management.yaml +++ b/deploy/helm/microfunctions/templates/management.yaml @@ -176,7 +176,7 @@ data: */}} {"name": "MFN_DATALAYER", "value": "datalayer.{{ .Release.Namespace }}.svc:{{ .Values.datalayer.port }}" }, {"name": "MFN_ELASTICSEARCH", "value": {{ include "esConnect.url" . 
| quote }} }, - {"name": "MFN_MANAGEMENT", "value": "http://wf-{{ .Release.Name }}-management.{{ .Release.Namespace }}.svc" } + {"name": "MFN_MANAGEMENT", "value": "http://wf-{{ .Release.Name }}-management.{{ .Release.Namespace }}.svc" }, {"name": "API_TOKEN", "value": "{{.Values.apiKey}}" } ], {{/* not allowed in KNative From 6913fbb08650acf70a9eeb5dccd60133c2840ab2 Mon Sep 17 00:00:00 2001 From: Istemi Ekin Akkus <5419814+iakkus@users.noreply.github.com> Date: Fri, 7 May 2021 08:40:41 +0200 Subject: [PATCH 57/64] make Dockerfile installation instructions follow the same order --- Sandbox/Dockerfile_gpu | 37 ++++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/Sandbox/Dockerfile_gpu b/Sandbox/Dockerfile_gpu index 35713988..969ecc7b 100644 --- a/Sandbox/Dockerfile_gpu +++ b/Sandbox/Dockerfile_gpu @@ -17,11 +17,9 @@ FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04 # Install (as root) # Base -RUN apt-get update --fix-missing && apt-get -y --no-install-recommends install build-essential netbase unzip file libmagic1 - -# CUDA 10.1 dependencies and tools to build dlib -RUN apt-get -y --no-install-recommends install libsm6 libxrender1 libxrender-dev libxext6 libglib2.0-0 git cmake -RUN apt-get install -y --no-install-recommends libnvinfer6=6.0.1-1+cuda10.1 libnvinfer-dev=6.0.1-1+cuda10.1 libnvinfer-plugin6=6.0.1-1+cuda10.1 +RUN apt-get update --fix-missing +RUN apt-get -y --no-install-recommends install build-essential +RUN apt-get -y --no-install-recommends install netbase unzip file libmagic1 # Python RUN apt-get -y --no-install-recommends install python3 python3-dev @@ -41,6 +39,21 @@ RUN /usr/bin/python3 -m pip install fastcache # Needed for multi-language support (currently just Java) RUN /usr/bin/python3 -m pip install thriftpy2 +# Add components (as mfn) +RUN groupadd -o -g 1000 -r mfn && useradd -d /opt/mfn -u 1000 -m -r -g mfn mfn +RUN mkdir /opt/mfn/logs + +RUN /usr/bin/python3 -m pip install redis +ADD 
build/redis-server.tar.gz /opt/mfn/ +ADD frontend/frontend /opt/mfn/frontend +ADD build/SandboxAgent.tar.gz /opt/mfn/ +ADD build/FunctionWorker.tar.gz /opt/mfn/ +ADD build/LoggingService.tar.gz /opt/mfn/ + +# CUDA 10.1 dependencies and tools to build dlib +RUN apt-get -y --no-install-recommends install libsm6 libxrender1 libxrender-dev libxext6 libglib2.0-0 git cmake +RUN apt-get install -y --no-install-recommends libnvinfer6=6.0.1-1+cuda10.1 libnvinfer-dev=6.0.1-1+cuda10.1 libnvinfer-plugin6=6.0.1-1+cuda10.1 + # Install dlib for CUDA RUN git clone https://github.com/davisking/dlib.git RUN mkdir -p /dlib/build @@ -51,19 +64,9 @@ RUN cmake --build /dlib/build RUN cd /dlib; python3 /dlib/setup.py install # Install the face recognition package and tensorflow -#RUN pip3 install face_recognition -#RUN pip3 install tensorflow==2.1.0 - -# Add components (as mfn) -RUN groupadd -o -g 1000 -r mfn && useradd -d /opt/mfn -u 1000 -m -r -g mfn mfn -RUN mkdir /opt/mfn/logs +#RUN /usr/bin/python3 -m pip install face_recognition +#RUN /usr/bin/python3 -m pip install tensorflow==2.1.0 -RUN /usr/bin/python3 -m pip install redis -ADD build/redis-server.tar.gz /opt/mfn/ -ADD frontend/frontend /opt/mfn/frontend -ADD build/SandboxAgent.tar.gz /opt/mfn/ -ADD build/FunctionWorker.tar.gz /opt/mfn/ -ADD build/LoggingService.tar.gz /opt/mfn/ RUN chown mfn:mfn -R /opt/mfn USER mfn From 3647851111f0124091f30425d5a8fa3357b5ad48 Mon Sep 17 00:00:00 2001 From: Istemi Ekin Akkus Date: Fri, 7 May 2021 12:01:51 +0000 Subject: [PATCH 58/64] ansible: fix available hosts script --- deploy/ansible/scripts/available_hosts.py | 61 ++++++++++++++--------- 1 file changed, 38 insertions(+), 23 deletions(-) diff --git a/deploy/ansible/scripts/available_hosts.py b/deploy/ansible/scripts/available_hosts.py index 8171f776..375086ef 100755 --- a/deploy/ansible/scripts/available_hosts.py +++ b/deploy/ansible/scripts/available_hosts.py @@ -67,7 +67,7 @@ def add_host(hostname,hostip=None): hasGPU = False # get 
environment of current hostname if os.environ['KNIX_node_hasGPU'] == "True": - print("found GPU Environent: " +str(os.environ['KNIX_node_hasGPU']) ) + print("found GPU environment: " +str(os.environ['KNIX_node_hasGPU']) ) hasGPU = True v = dl_get("available_hosts") @@ -76,17 +76,27 @@ def add_host(hostname,hostip=None): print("existing hosts: " + str(hosts)) if isinstance(hosts,list): hosts = {host: socket.gethostbyname(host) for host in hosts} - - hosts['has_gpu'] = hasGPU - else: hosts = {} - if hostname != None and hostname not in hosts: - hosts[hostname] = hostip - if hasGPU == True: - hosts["has_gpu"] = hasGPU - v.encoded_data = json.dumps(hosts).encode() - v.store() + + cur_entry2 = {} + + if hostname is not None and hostname in hosts: + cur_entry = hosts[hostname] + if isinstance(cur_entry, str): + hostip = cur_entry + del hosts[hostname] + elif isinstance(cur_entry, dict): + cur_entry2 = cur_entry + + cur_entry2["ip"] = hostip + cur_entry2["has_gpu"] = hasGPU + + hosts[hostname] = cur_entry2 + + v.encoded_data = json.dumps(hosts).encode() + v.store() + print("found hosts: " + str(hosts)) return hosts @@ -100,10 +110,13 @@ def remove_host(hostname): hosts = {host: socket.gethostbyname(host) for host in hosts} else: hosts = {} + if hostname != None and hostname in hosts: del hosts[hostname] v.encoded_data = json.dumps(hosts).encode() v.store() + + print("found hosts: " + str(hosts)) return hosts @@ -114,22 +127,24 @@ def remove_host(hostname): workflowid = "Management" hosts = [] set_bucket_name(sandboxid,workflowid) - try: - host=defaulthost - if len(sys.argv) > 2: - host = sys.argv[2] - if sys.argv[1] == "add": - hosts = add_host(host) - elif sys.argv[1] == "remove": - hosts = remove_host(host) - else: - raise Exception() - except Exception as e: - print(e) + + host=defaulthost + if len(sys.argv) > 2: + host = sys.argv[2] + + if len(sys.argv) <= 1: print("usage: python "+sys.argv[0]+" [add|remove] ()") print(" optional defaults to %s" % defaulthost) + 
sys.exit(1) + + if sys.argv[1] == "add": + hosts = add_host(host) + elif sys.argv[1] == "remove": + hosts = remove_host(host) + else: v = dl_get("available_hosts") if v.encoded_data is not None and len(v.encoded_data) > 0: hosts = json.loads((v.encoded_data).decode()) - print("Current available_hosts=" + str(hosts)) + + print("Current available_hosts=" + str(hosts)) From 3187c4945fe2990d84ef1215ff91aafcc16ea7b9 Mon Sep 17 00:00:00 2001 From: Istemi Ekin Akkus <5419814+iakkus@users.noreply.github.com> Date: Fri, 7 May 2021 14:48:08 +0200 Subject: [PATCH 59/64] management: fix deployWorkflow for bare metal with gpu hosts --- ManagementService/python/deployWorkflow.py | 63 ++++++++++------------ 1 file changed, 27 insertions(+), 36 deletions(-) diff --git a/ManagementService/python/deployWorkflow.py b/ManagementService/python/deployWorkflow.py index 3a9c7e04..1c6bd12d 100644 --- a/ManagementService/python/deployWorkflow.py +++ b/ManagementService/python/deployWorkflow.py @@ -642,14 +642,13 @@ def handle(value, sapi): status = "failed" else: # We're running BARE METAL mode - # _XXX_: due to the queue service still being in java in the sandbox print("gpu_usage before decision:" + str(gpu_usage)) - if gpu_usage == 0: - sandbox_image_name = "microfn/sandbox" # default value - elif gpu_usage > 0: + if gpu_usage > 0: sandbox_image_name = "microfn/sandbox_gpu" # sandbox uses GPU - if any(resource_info_map[res_name]["runtime"] == "Java" for res_name in resource_info_map): + elif any(resource_info_map[res_name]["runtime"] == "Java" for res_name in resource_info_map): sandbox_image_name = "microfn/sandbox_java" + else: + sandbox_image_name = "microfn/sandbox" # default value # TODO: intelligence on how to pick hosts hosts = sapi.get("available_hosts", True) @@ -657,38 +656,33 @@ def handle(value, sapi): print("available_hosts: " + hosts) hosts = json.loads(hosts) + deployed_hosts = {} if hosts is not None and hosts != "" and isinstance(hosts,dict): host_has_gpu = False - 
deployed_hosts = {} gpu_hosts = {} - picked_hosts = {} + picked_hosts = None plain_hosts={} - hostname_t = "" for hostname in hosts: # individual host dict - nodeHasGPU = hosts["has_gpu"] # check if host has a GPU - if hostname != "has_gpu": # skip this key - hostname_t = hostname - #print("current hostnae: " + str(hostname) + str(hosts[hostname])) - hostip = hosts[hostname] - plain_hosts[hostname] = hostip # add to general hosts - if nodeHasGPU: - gpu_hosts[hostname] = hostip # add to GPU hosts - hostname = hostname_t + host_has_gpu = hosts[hostname]["has_gpu"] # check if host has a GPU + hostip = hosts[hostname]["ip"] + plain_hosts[hostname] = hostip # add to general hosts + if host_has_gpu: + gpu_hosts[hostname] = hostip # add to GPU hosts # instruct hosts to start the sandbox and deploy workflow print("selected host:" + str(hostname) + " " + str(hostip)) - #print("calulated host:" + str(gpu_hosts) + " " + str(plain_hosts)) - if sandbox_image_name == "microfn/sandbox" or sandbox_image_name=="microfn/sandbox_java": # can use any host - picked_hosts = plain_hosts - #hosts["has_gpu"] = False - #print("picked_hosts: " + str(picked_hosts)) - elif len(gpu_hosts) > 0: + #print("founds hosts:" + str(gpu_hosts) + " " + str(plain_hosts)) + if sandbox_image_name == "microfn/sandbox_gpu" and gpu_hosts: picked_hosts = gpu_hosts - else: - picked_hosts = plain_hosts # fallback as there are no gpu hosts available - print("available GPU hosts list is empty. Deploying on general purpose host") + elif sandbox_image_name == "microfn/sandbox_gpu": + # can't deploy; no gpu hosts available. 
+ picked_hosts = {} + elif sandbox_image_name == "microfn/sandbox" or sandbox_image_name=="microfn/sandbox_java": # can use any host + picked_hosts = plain_hosts + + print("picked_hosts: " + str(picked_hosts)) for hostname in picked_hosts: # loop over all hosts, need to pich gpu hosts for python/gpu workflows - hostip = hosts[hostname] + hostip = hosts[hostname]["ip"] host_to_deploy = (hostname, hostip) print("host_to_deploy: " + str(host_to_deploy) ) #host_to_deploy = ("userslfu99", "192.168.8.99") @@ -713,17 +707,14 @@ def handle(value, sapi): sapi.putMapEntry(workflow_info["workflowId"] + "_sandbox_status_map", endpoint_key, json.dumps(sbinfo), is_private=True) #endpoints = sapi.retrieveMap(workflow_info["workflowId"] + "_workflow_endpoints", True) #sapi.log(str(endpoints)) - elif hosts is not None and hosts != "" and isinstance(hosts,list): - print("hosts is not dict type!") - - if not bool(deployed_hosts): - status = "failed" - else: - #sapi.log("deployed on hosts: " + json.dumps(deployed_hosts)) - sapi.put(email + "_workflow_hosts_" + workflow["id"], json.dumps(deployed_hosts), True) else: - print("available_hosts is empty. 
Not deploying") + print("available_hosts is empty or not a dictionary; not deploying...") + + if not bool(deployed_hosts): status = "failed" + else: + #sapi.log("deployed on hosts: " + json.dumps(deployed_hosts)) + sapi.put(email + "_workflow_hosts_" + workflow["id"], json.dumps(deployed_hosts), True) # Update workflow status wfmeta["status"] = status From 700e29818b215bea4bd87d35245978cafc1b6b9b Mon Sep 17 00:00:00 2001 From: Istemi Ekin Akkus <5419814+iakkus@users.noreply.github.com> Date: Fri, 14 May 2021 10:13:20 +0200 Subject: [PATCH 60/64] update ansible readme; fixes #117 --- deploy/ansible/README.md | 75 ++++++++++++++++++++-------------------- 1 file changed, 38 insertions(+), 37 deletions(-) diff --git a/deploy/ansible/README.md b/deploy/ansible/README.md index b948d800..93f2a78f 100644 --- a/deploy/ansible/README.md +++ b/deploy/ansible/README.md @@ -1,5 +1,5 @@