Dev/mip 802/svm impl scikit #1414

Workflow file for this run

name: Production Env Tests
on:
  push:
    branches:
      - master
  pull_request:
    branches:
      - master
jobs:
  run_tests:
    runs-on: ubuntu-latest
    steps:
      - name: Check out repository
        uses: actions/checkout@v3
      - name: Set up python
        uses: actions/setup-python@v4
        with:
          python-version: 3.8
      - name: Install Poetry
        uses: snok/install-poetry@v1
        with:
          version: 1.3.2 # TODO https://github.com/pgjones/hypercorn/issues/102
          virtualenvs-create: true
          virtualenvs-in-project: true
      - name: Load cached venv
        id: cached-poetry-dependencies
        uses: actions/cache@v3
        with:
          path: .venv
          key: venv-${{ runner.os }}-${{ hashFiles('poetry.lock') }}
      - name: Install dependencies
        if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true'
        run: poetry install --no-interaction --no-root
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
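      # Each service image (MonetDB, MIPDB, RabbitMQ, node, controller) is built
      # with docker/build-push-action using a local buildx cache under
      # /tmp/.buildx-cache/<image>, keyed on the files that feed that image.
      # push: false / load: true keeps the built images in the runner's Docker
      # daemon so they can later be loaded into the kind cluster.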
      - name: Load MONETDB cached image
        uses: actions/cache@v3
        with:
          path: /tmp/.buildx-cache/monetdb
          key: ${{ runner.os }}-buildx-3-monetdb-${{ hashFiles('monetdb/**') }}-${{ hashFiles('exareme2/udfgen/udfio.py') }}
          restore-keys: |
            ${{ runner.os }}-buildx-3-monetdb-
      - name: Build MONETDB docker image
        uses: docker/build-push-action@v3
        with:
          context: .
          file: monetdb/Dockerfile
          push: false
          load: true
          tags: madgik/exareme2_db:dev
          cache-from: type=local,src=/tmp/.buildx-cache/monetdb
          cache-to: type=local,dest=/tmp/.buildx-cache-new/monetdb
      - name: Load MIPDB container cached image
        uses: actions/cache@v3
        with:
          path: /tmp/.buildx-cache/mipdb
          key: ${{ runner.os }}-buildx-mipdb-${{ hashFiles('mipdb/**') }}
          restore-keys: |
            ${{ runner.os }}-buildx-mipdb-
      - name: Build MIPDB container docker image
        uses: docker/build-push-action@v3
        with:
          context: .
          file: mipdb/Dockerfile
          push: false
          load: true
          tags: madgik/exareme2_mipdb:dev
          cache-from: type=local,src=/tmp/.buildx-cache/mipdb
          cache-to: type=local,dest=/tmp/.buildx-cache-new/mipdb
      - name: Load RABBITMQ cached image
        uses: actions/cache@v3
        with:
          path: /tmp/.buildx-cache/rabbitmq
          key: ${{ runner.os }}-buildx-rabbitmq-${{ hashFiles('rabbitmq/**') }}
          restore-keys: |
            ${{ runner.os }}-buildx-rabbitmq-
      - name: Build RABBITMQ docker image
        uses: docker/build-push-action@v3
        with:
          context: .
          file: rabbitmq/Dockerfile
          push: false
          load: true
          tags: madgik/exareme2_rabbitmq:dev
          cache-from: type=local,src=/tmp/.buildx-cache/rabbitmq
          cache-to: type=local,dest=/tmp/.buildx-cache-new/rabbitmq
      - name: Load NODE service cached image
        uses: actions/cache@v3
        with:
          path: /tmp/.buildx-cache/node
          key: ${{ runner.os }}-buildx-node-${{ hashFiles('exareme2/**') }}
          restore-keys: |
            ${{ runner.os }}-buildx-node-
      - name: Build NODE service docker image
        uses: docker/build-push-action@v3
        with:
          context: .
          file: exareme2/node/Dockerfile
          push: false
          load: true
          tags: madgik/exareme2_node:dev
          cache-from: type=local,src=/tmp/.buildx-cache/node
          cache-to: type=local,dest=/tmp/.buildx-cache-new/node
      - name: Load CONTROLLER service cached image
        uses: actions/cache@v3
        with:
          path: /tmp/.buildx-cache/controller
          key: ${{ runner.os }}-buildx-controller-${{ hashFiles('exareme2/**') }}
          restore-keys: |
            ${{ runner.os }}-buildx-controller-
      - name: Build CONTROLLER service docker image
        uses: docker/build-push-action@v3
        with:
          context: .
          file: exareme2/controller/Dockerfile
          push: false
          load: true
          tags: madgik/exareme2_controller:dev
          cache-from: type=local,src=/tmp/.buildx-cache/controller
          cache-to: type=local,dest=/tmp/.buildx-cache-new/controller
      # Temp fix
      # https://github.com/docker/build-push-action/issues/252
      # https://github.com/moby/buildkit/issues/1896
      - name: Move Docker images cache
        run: |
          rm -rf /tmp/.buildx-cache
          mv /tmp/.buildx-cache-new /tmp/.buildx-cache
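      # A multi-node kind cluster is created from the repo's kind configuration.
      # The control-plane taint is removed and the nodes are labelled
      # master=true / worker=true, presumably so the Helm chart's node selectors
      # can place the controller and the local nodes on specific cluster nodes.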
      - name: Create k8s Kind Cluster
        uses: helm/[email protected]
        with:
          cluster_name: kind
          config: tests/prod_env_tests/deployment_configs/kind_configuration/kind_cluster.yaml
      - name: Install Helm
        uses: azure/setup-helm@v3
        with:
          version: 3.9.1
        id: install
      - name: Taint Nodes
        run: |
          kubectl taint nodes master node-role.kubernetes.io/control-plane-
          kubectl label node master master=true
          kubectl label node localnode1 worker=true
          kubectl label node localnode2 worker=true
          kubectl label node localnode3 worker=true
      - name: Get container disk space
        run: df -h
      - name: Free up space by removing unused dotnet, android and haskell libs
        run: |
          rm -rf /usr/share/dotnet
          rm -rf /opt/ghc
          sudo rm -rf /usr/local/lib/android
      - name: Get container disk space
        run: df -h
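      # The freshly built images are loaded into the kind node containers and
      # immediately deleted from the host Docker daemon to save disk space on
      # the runner (hence the df -h checks around these steps).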
      - name: Load docker images to kind containers and delete them locally
        run: |
          kind load docker-image madgik/exareme2_node:dev
          docker image rm madgik/exareme2_node:dev
          kind load docker-image madgik/exareme2_controller:dev --nodes kind-control-plane
          docker image rm madgik/exareme2_controller:dev
          kind load docker-image madgik/exareme2_db:dev
          docker image rm madgik/exareme2_db:dev
          kind load docker-image madgik/exareme2_mipdb:dev
          docker image rm madgik/exareme2_mipdb:dev
          kind load docker-image madgik/exareme2_rabbitmq:dev
          docker image rm madgik/exareme2_rabbitmq:dev
      - name: Get container disk space
        run: df -h
      - name: Copy prod_env_tests values.yaml
        run: cp -r tests/prod_env_tests/deployment_configs/kubernetes_values.yaml kubernetes/values.yaml
      - name: Print Helm Templates
        run: helm template kubernetes/
      - name: Deploy Helm
        run: helm install exareme2 kubernetes/ --debug
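      # After deployment, each localnode pod has a db-importer container that
      # ships the mipdb CLI. The steps below initialise MonetDB on every
      # localnode and then load the data models and dataset CSVs, split across
      # the three localnodes.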
      - name: Wait for MONETDB container to start
        uses: jakejarvis/wait-action@master
        with:
          time: '60s' # https://team-1617704806227.atlassian.net/browse/MIP-248
      - name: Initialize MONETDB from mipdb container
        run: |
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb init --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb init --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[2].metadata.name') -c db-importer -- sh -c 'mipdb init --ip $DB_IP --port $DB_PORT'
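      # kubectl get pods -l=nodeType=localnode piped through jq '.items[N]...'
      # selects the N-th localnode pod; mipdb add-data-model loads a
      # CDEsMetadata.json and mipdb add-dataset loads one CSV into a data model
      # and version. Dataset CSVs with suffix '0' go to localnode 1, odd
      # suffixes to localnode 2 and even suffixes to localnode 3.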
      - name: Load dementia data model into localnodes
        run: |
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-data-model /opt/data/dementia_v_0_1/CDEsMetadata.json --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-data-model /opt/data/dementia_v_0_1/CDEsMetadata.json --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[2].metadata.name') -c db-importer -- sh -c 'mipdb add-data-model /opt/data/dementia_v_0_1/CDEsMetadata.json --ip $DB_IP --port $DB_PORT'
      - name: Load dementia dataset csvs with suffix '0' into localnode 1
        run: |
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/edsd0.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/ppmi0.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/desd-synthdata0.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
      - name: Load dementia dataset csvs with suffix '1,3,5,7,9' into localnode 2
        run: |
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/edsd1.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/edsd3.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/edsd5.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/edsd7.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/edsd9.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/ppmi1.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/ppmi3.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/ppmi5.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/ppmi7.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/ppmi9.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/desd-synthdata1.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/desd-synthdata3.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/desd-synthdata5.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/desd-synthdata7.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/desd-synthdata9.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
      - name: Load dementia dataset csvs with suffix '2,4,6,8' into localnode 3
        run: |
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[2].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/edsd2.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[2].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/edsd4.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[2].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/edsd6.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[2].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/edsd8.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[2].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/ppmi2.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[2].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/ppmi4.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[2].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/ppmi6.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[2].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/ppmi8.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[2].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/desd-synthdata2.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[2].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/desd-synthdata4.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[2].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/desd-synthdata6.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[2].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/desd-synthdata8.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
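      # The same init/load pattern is repeated below for the tbi and
      # longitudinal_dementia data models and their dataset CSVs.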
      - name: Load tbi data model into localnodes
        run: |
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-data-model /opt/data/tbi_v_0_1/CDEsMetadata.json --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-data-model /opt/data/tbi_v_0_1/CDEsMetadata.json --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[2].metadata.name') -c db-importer -- sh -c 'mipdb add-data-model /opt/data/tbi_v_0_1/CDEsMetadata.json --ip $DB_IP --port $DB_PORT'
      - name: Load tbi dataset csvs with suffix '0' into localnode 1
        run: |
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/tbi_v_0_1/dummy_tbi0.csv -d tbi -v 0.1 --ip $DB_IP --port $DB_PORT'
      - name: Load tbi dataset csvs with suffix '1,3,5,7,9' into localnode 2
        run: |
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/tbi_v_0_1/dummy_tbi1.csv -d tbi -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/tbi_v_0_1/dummy_tbi3.csv -d tbi -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/tbi_v_0_1/dummy_tbi5.csv -d tbi -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/tbi_v_0_1/dummy_tbi7.csv -d tbi -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/tbi_v_0_1/dummy_tbi9.csv -d tbi -v 0.1 --ip $DB_IP --port $DB_PORT'
      - name: Load tbi dataset csvs with suffix '2,4,6,8' into localnode 3
        run: |
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[2].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/tbi_v_0_1/dummy_tbi2.csv -d tbi -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[2].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/tbi_v_0_1/dummy_tbi4.csv -d tbi -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[2].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/tbi_v_0_1/dummy_tbi6.csv -d tbi -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[2].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/tbi_v_0_1/dummy_tbi8.csv -d tbi -v 0.1 --ip $DB_IP --port $DB_PORT'
      - name: Load longitudinal dementia data model into localnodes
        run: |
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-data-model /opt/data/longitudinal_dementia_v_0_1/CDEsMetadata.json --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-data-model /opt/data/longitudinal_dementia_v_0_1/CDEsMetadata.json --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[2].metadata.name') -c db-importer -- sh -c 'mipdb add-data-model /opt/data/longitudinal_dementia_v_0_1/CDEsMetadata.json --ip $DB_IP --port $DB_PORT'
      - name: Load longitudinal dementia dataset csvs into localnodes
        run: |
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/longitudinal_dementia_v_0_1/longitudinal_dementia0.csv -d longitudinal_dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/longitudinal_dementia_v_0_1/longitudinal_dementia1.csv -d longitudinal_dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[2].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/longitudinal_dementia_v_0_1/longitudinal_dementia2.csv -d longitudinal_dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
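      # Controller and node logs are printed once before the tests, and the same
      # commands are also registered with webiny/action-post-run so the logs are
      # collected again after the test step has finished.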
      - name: Wait for all services to start
        uses: jakejarvis/wait-action@master
        with:
          time: '60s' # https://team-1617704806227.atlassian.net/browse/MIP-248
      - name: Controller logs
        run: kubectl logs -l app=exareme2-controller --tail -1
      - name: Globalnode logs
        run: kubectl logs -l nodeType=globalnode -c node --tail -1
      - name: Localnode logs
        run: kubectl logs -l nodeType=localnode -c node --tail -1
      - name: Controller logs (post run)
        uses: webiny/[email protected]
        with:
          run: kubectl logs -l app=exareme2-controller --tail -1
      - name: Globalnode logs (post run)
        uses: webiny/[email protected]
        with:
          run: kubectl logs -l nodeType=globalnode -c node --tail -1
      - name: Localnode logs (post run)
        uses: webiny/[email protected]
        with:
          run: kubectl logs -l nodeType=localnode -c node --tail -1
      - name: Run production env tests
        run: poetry run pytest tests/prod_env_tests --verbosity=4