From d7dac43d38671dfbc9f1d0ff3108232cb983430e Mon Sep 17 00:00:00 2001 From: emalinowski Date: Fri, 8 Apr 2022 14:59:51 -0500 Subject: [PATCH 001/106] chore(cluster-autoscaler): Added EKS 1.21 support for the cluster autoscaler (#1885) Co-authored-by: Edward Malinowski --- gen3/bin/kube-setup-autoscaler.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/gen3/bin/kube-setup-autoscaler.sh b/gen3/bin/kube-setup-autoscaler.sh index 1b5a0e0fc..01a6cdd95 100644 --- a/gen3/bin/kube-setup-autoscaler.sh +++ b/gen3/bin/kube-setup-autoscaler.sh @@ -30,6 +30,9 @@ function get_autoscaler_version(){ local casv case ${k8s_version} in + "1.21+") + casv="v1.21.2" + ;; "1.20+") casv="v1.20.0" ;; From 2cc9b0be6c0c52ed50686de0d74ee03f79b1646f Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Fri, 8 Apr 2022 15:00:29 -0500 Subject: [PATCH 002/106] feat(cohort-middleware): increase resources (#1894) --- .../cohort-middleware/cohort-middleware-deploy.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kube/services/cohort-middleware/cohort-middleware-deploy.yaml b/kube/services/cohort-middleware/cohort-middleware-deploy.yaml index 204381ebb..c7395ad1c 100644 --- a/kube/services/cohort-middleware/cohort-middleware-deploy.yaml +++ b/kube/services/cohort-middleware/cohort-middleware-deploy.yaml @@ -99,8 +99,8 @@ spec: imagePullPolicy: Always resources: requests: - cpu: "0.5" - memory: 256Mi + cpu: 500m + memory: 4Gi limits: - cpu: "0.5" - memory: 256Mi + cpu: 500m + memory: 4Gi From be0670e15ceb8ffcec184c7f1c884fa330a5185a Mon Sep 17 00:00:00 2001 From: jawadqur <55899496+jawadqur@users.noreply.github.com> Date: Mon, 11 Apr 2022 10:23:09 -0500 Subject: [PATCH 003/106] Update values.yaml (#1897) --- kube/services/datadog/values.yaml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/kube/services/datadog/values.yaml b/kube/services/datadog/values.yaml index 7cada86cc..03cd70037 100644 --- a/kube/services/datadog/values.yaml +++ 
b/kube/services/datadog/values.yaml @@ -250,6 +250,18 @@ agents: ## get guaranteed delivery of the metrics in Datadog-per-namespace setup! # enabled: true + + # agents.tolerations -- Allow the DaemonSet to schedule on tainted nodes (requires Kubernetes >= 1.6) + tolerations: + - effect: NoSchedule + key: role + operator: Equal + value: workflow + - effect: NoSchedule + key: role + operator: Equal + value: jupyter + ## Define the Datadog image to work with image: From 31e7d5c6e63b9386aeadb4eca7f35b30a52198fb Mon Sep 17 00:00:00 2001 From: rnerella92 <82972483+rnerella92@users.noreply.github.com> Date: Mon, 11 Apr 2022 11:35:47 -0500 Subject: [PATCH 004/106] Bringing DICOM Viewer to cloud automation (#1839) --- .github/workflows/image_build_push.yaml | 11 + .secrets.baseline | 325 +----------------- gen3/bin/kube-roll-all.sh | 12 + gen3/bin/kube-setup-dicom-server.sh | 61 ++++ gen3/bin/kube-setup-dicom-viewer.sh | 9 + .../dicom-server/dicom-server-deploy.yaml | 34 ++ .../dicom-server/dicom-server-service.yaml | 14 + .../dicom-viewer/dicom-viewer-deploy.yaml | 26 ++ .../dicom-viewer/dicom-viewer-service.yaml | 15 + .../gen3.nginx.conf/dicom-server-service.conf | 10 + .../gen3.nginx.conf/dicom-viewer-service.conf | 10 + 11 files changed, 212 insertions(+), 315 deletions(-) create mode 100644 gen3/bin/kube-setup-dicom-server.sh create mode 100644 gen3/bin/kube-setup-dicom-viewer.sh create mode 100644 kube/services/dicom-server/dicom-server-deploy.yaml create mode 100644 kube/services/dicom-server/dicom-server-service.yaml create mode 100644 kube/services/dicom-viewer/dicom-viewer-deploy.yaml create mode 100644 kube/services/dicom-viewer/dicom-viewer-service.yaml create mode 100644 kube/services/revproxy/gen3.nginx.conf/dicom-server-service.conf create mode 100644 kube/services/revproxy/gen3.nginx.conf/dicom-viewer-service.conf diff --git a/.github/workflows/image_build_push.yaml b/.github/workflows/image_build_push.yaml index 46bcfe884..51543f0fe 100644 --- 
a/.github/workflows/image_build_push.yaml +++ b/.github/workflows/image_build_push.yaml @@ -29,3 +29,14 @@ jobs: ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} + awshelper: + name: AwsHelper Build and Push + uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master + with: + DOCKERFILE_LOCATION: "./Docker/awshelper/Dockerfile" + OVERRIDE_REPO_NAME: "awshelper" + secrets: + ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} + ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} + QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} + QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} diff --git a/.secrets.baseline b/.secrets.baseline index bb402e439..5bb288384 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -1,9 +1,9 @@ { "exclude": { - "files": "^.secrets.baseline$|^./.secrets.baseline$", + "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2022-03-29T17:29:39Z", + "generated_at": "2022-04-07T20:39:12Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -61,14 +61,12 @@ "Chef/repo/data_bags/README.md": [ { "hashed_secret": "8a9250639e092d90f164792e35073a9395bff366", - "is_secret": false, "is_verified": false, "line_number": 45, "type": "Secret Keyword" }, { "hashed_secret": "6367c48dd193d56ea7b0baad25b19455e529f5ee", - "is_secret": false, "is_verified": false, "line_number": 51, "type": "Secret Keyword" @@ -77,7 +75,6 @@ "Docker/Jenkins-CI-Worker/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", - "is_secret": false, "is_verified": false, "line_number": 124, "type": "Secret Keyword" @@ -86,7 +83,6 @@ "Docker/Jenkins-Worker/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", - "is_secret": false, "is_verified": false, "line_number": 132, "type": "Secret Keyword" @@ -95,7 +91,6 @@ "Docker/Jenkins/Dockerfile": [ { "hashed_secret": 
"10daf3a26c6a17242a5ab2438a12ebc8276c7603", - "is_secret": false, "is_verified": false, "line_number": 113, "type": "Secret Keyword" @@ -104,7 +99,6 @@ "Docker/sidecar/service.key": [ { "hashed_secret": "1348b145fa1a555461c1b790a2f66614781091e9", - "is_secret": false, "is_verified": false, "line_number": 1, "type": "Private Key" @@ -113,7 +107,6 @@ "Jenkins/Stacks/Jenkins/jenkins.env.sample": [ { "hashed_secret": "eecee33686ac5861c2a7edc8b46bd0e5432bfddd", - "is_secret": false, "is_verified": false, "line_number": 5, "type": "Secret Keyword" @@ -122,7 +115,6 @@ "ansible/roles/awslogs/defaults/main.yaml": [ { "hashed_secret": "9d4e1e23bd5b727046a9e3b4b7db57bd8d6ee684", - "is_secret": false, "is_verified": false, "line_number": 30, "type": "Basic Auth Credentials" @@ -131,14 +123,12 @@ "ansible/roles/slurm/README.md": [ { "hashed_secret": "4acfde1ff9c353ba2ef0dbe0df73bda2743cba42", - "is_secret": false, "is_verified": false, "line_number": 86, "type": "Base64 High Entropy String" }, { "hashed_secret": "579649582303921502d9e6d3f8755f13fdd2b476", - "is_secret": false, "is_verified": false, "line_number": 86, "type": "Secret Keyword" @@ -147,7 +137,6 @@ "apis_configs/config_helper.py": [ { "hashed_secret": "bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f", - "is_secret": false, "is_verified": false, "line_number": 66, "type": "Basic Auth Credentials" @@ -156,7 +145,6 @@ "apis_configs/fence_credentials.json": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, "is_verified": false, "line_number": 23, "type": "Secret Keyword" @@ -165,21 +153,18 @@ "apis_configs/fence_settings.py": [ { "hashed_secret": "3ef0fb8a603abdc0b6caac44a23fdc6792f77ddf", - "is_secret": false, "is_verified": false, "line_number": 6, "type": "Basic Auth Credentials" }, { "hashed_secret": "b60d121b438a380c343d5ec3c2037564b82ffef3", - "is_secret": false, "is_verified": false, "line_number": 58, "type": "Secret Keyword" }, { "hashed_secret": 
"347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", - "is_secret": false, "is_verified": false, "line_number": 80, "type": "Basic Auth Credentials" @@ -188,7 +173,6 @@ "apis_configs/indexd_settings.py": [ { "hashed_secret": "0a0d18c85e096611b5685b62bc60ec534d19bacc", - "is_secret": false, "is_verified": false, "line_number": 59, "type": "Basic Auth Credentials" @@ -197,7 +181,6 @@ "apis_configs/peregrine_settings.py": [ { "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", - "is_secret": false, "is_verified": false, "line_number": 46, "type": "Basic Auth Credentials" @@ -206,7 +189,6 @@ "apis_configs/sheepdog_settings.py": [ { "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", - "is_secret": false, "is_verified": false, "line_number": 46, "type": "Basic Auth Credentials" @@ -215,7 +197,6 @@ "doc/Gen3-data-upload.md": [ { "hashed_secret": "b8bd20d4a2701dc3aba0efbbf325f1359392d93e", - "is_secret": false, "is_verified": false, "line_number": 26, "type": "Secret Keyword" @@ -224,7 +205,6 @@ "doc/api.md": [ { "hashed_secret": "625de83a7517422051911680cc803921ff99db90", - "is_secret": false, "is_verified": false, "line_number": 47, "type": "Hex High Entropy String" @@ -233,28 +213,24 @@ "doc/gen3OnK8s.md": [ { "hashed_secret": "2db6d21d365f544f7ca3bcfb443ac96898a7a069", - "is_secret": false, "is_verified": false, "line_number": 113, "type": "Secret Keyword" }, { "hashed_secret": "ff9ee043d85595eb255c05dfe32ece02a53efbb2", - "is_secret": false, "is_verified": false, "line_number": 143, "type": "Secret Keyword" }, { "hashed_secret": "70374248fd7129088fef42b8f568443f6dce3a48", - "is_secret": false, "is_verified": false, "line_number": 170, "type": "Secret Keyword" }, { "hashed_secret": "bcf22dfc6fb76b7366b1f1675baf2332a0e6a7ce", - "is_secret": false, "is_verified": false, "line_number": 189, "type": "Secret Keyword" @@ -263,7 +239,6 @@ "doc/kube-setup-data-ingestion-job.md": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, 
"is_verified": false, "line_number": 30, "type": "Secret Keyword" @@ -272,7 +247,6 @@ "doc/logs.md": [ { "hashed_secret": "9addbf544119efa4a64223b649750a510f0d463f", - "is_secret": false, "is_verified": false, "line_number": 6, "type": "Secret Keyword" @@ -281,7 +255,6 @@ "doc/slurm_cluster.md": [ { "hashed_secret": "2ace62c1befa19e3ea37dd52be9f6d508c5163e6", - "is_secret": false, "is_verified": false, "line_number": 184, "type": "Secret Keyword" @@ -290,14 +263,12 @@ "files/dashboard/usage-reports/package-lock.json": [ { "hashed_secret": "65ecd0650541b6caecdb6986f1871c2e6a95bdfe", - "is_secret": false, "is_verified": false, "line_number": 10, "type": "Base64 High Entropy String" }, { "hashed_secret": "e35a49e53bb97044b35cc0e4d963b4ac49e9ac7e", - "is_secret": false, "is_verified": false, "line_number": 15, "type": "Base64 High Entropy String" @@ -306,14 +277,12 @@ "gen3/bin/api.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 407, "type": "Secret Keyword" }, { "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a", - "is_secret": false, "is_verified": false, "line_number": 477, "type": "Secret Keyword" @@ -322,7 +291,6 @@ "gen3/bin/kube-dev-namespace.sh": [ { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 135, "type": "Secret Keyword" @@ -331,7 +299,6 @@ "gen3/bin/kube-setup-argo.sh": [ { "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", - "is_secret": false, "is_verified": false, "line_number": 182, "type": "Secret Keyword" @@ -340,7 +307,6 @@ "gen3/bin/kube-setup-certs.sh": [ { "hashed_secret": "2e9ee120fd25e31048598693aca91d5473898a99", - "is_secret": false, "is_verified": false, "line_number": 50, "type": "Secret Keyword" @@ -349,14 +315,12 @@ "gen3/bin/kube-setup-dashboard.sh": [ { "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", - "is_secret": false, "is_verified": false, 
"line_number": 40, "type": "Secret Keyword" }, { "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a", - "is_secret": false, "is_verified": false, "line_number": 41, "type": "Secret Keyword" @@ -365,30 +329,34 @@ "gen3/bin/kube-setup-data-ingestion-job.sh": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, "is_verified": false, "line_number": 37, "type": "Secret Keyword" }, { "hashed_secret": "8695a632956b1b0ea7b66993dcc98732da39148c", - "is_secret": false, "is_verified": false, "line_number": 102, "type": "Secret Keyword" } ], + "gen3/bin/kube-setup-dicom-server.sh": [ + { + "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "is_verified": false, + "line_number": 43, + "type": "Secret Keyword" + } + ], "gen3/bin/kube-setup-jenkins.sh": [ { "hashed_secret": "05ea760643a5c0a9bacb3544dc844ac79938a51f", - "is_secret": false, "is_verified": false, "line_number": 18, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 22, "type": "Secret Keyword" @@ -397,7 +365,6 @@ "gen3/bin/kube-setup-metadata.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 35, "type": "Secret Keyword" @@ -406,21 +373,18 @@ "gen3/bin/kube-setup-revproxy.sh": [ { "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", - "is_secret": false, "is_verified": false, "line_number": 32, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 49, "type": "Secret Keyword" }, { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 51, "type": "Secret Keyword" @@ -429,21 +393,18 @@ "gen3/bin/kube-setup-secrets.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": 
false, "line_number": 79, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 82, "type": "Secret Keyword" }, { "hashed_secret": "6f7531b95bbc99ac25a5cc82edb825f319c5dee8", - "is_secret": false, "is_verified": false, "line_number": 95, "type": "Secret Keyword" @@ -452,14 +413,12 @@ "gen3/bin/kube-setup-sftp.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 36, "type": "Secret Keyword" }, { "hashed_secret": "83d11e3aec005a3b9a2077c6800683e202a95af4", - "is_secret": false, "is_verified": false, "line_number": 51, "type": "Secret Keyword" @@ -468,7 +427,6 @@ "gen3/bin/kube-setup-sheepdog.sh": [ { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 33, "type": "Secret Keyword" @@ -477,28 +435,24 @@ "gen3/bin/kube-setup-sower-jobs.sh": [ { "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", - "is_secret": false, "is_verified": false, "line_number": 25, "type": "Secret Keyword" }, { "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a", - "is_secret": false, "is_verified": false, "line_number": 26, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 120, "type": "Secret Keyword" }, { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 122, "type": "Secret Keyword" @@ -507,21 +461,18 @@ "gen3/bin/kube-setup-ssjdispatcher.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 117, "type": "Secret Keyword" }, { "hashed_secret": "7992309146efaa8da936e34b0bd33242cd0e9f93", - "is_secret": false, "is_verified": false, "line_number": 184, "type": "Secret Keyword" }, { "hashed_secret": 
"d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 197, "type": "Secret Keyword" @@ -530,14 +481,12 @@ "gen3/lib/aws.sh": [ { "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d", - "is_secret": false, "is_verified": false, "line_number": 550, "type": "Secret Keyword" }, { "hashed_secret": "5b4b6c62d3d99d202f095c38c664eded8f640ce8", - "is_secret": false, "is_verified": false, "line_number": 570, "type": "Secret Keyword" @@ -546,14 +495,12 @@ "gen3/lib/bootstrap/templates/Gen3Secrets/apis_configs/fence-config.yaml": [ { "hashed_secret": "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3", - "is_secret": false, "is_verified": false, "line_number": 33, "type": "Basic Auth Credentials" }, { "hashed_secret": "5d07e1b80e448a213b392049888111e1779a52db", - "is_secret": false, "is_verified": false, "line_number": 286, "type": "Secret Keyword" @@ -562,7 +509,6 @@ "gen3/lib/bootstrap/templates/Gen3Secrets/creds.json": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, "is_verified": false, "line_number": 26, "type": "Secret Keyword" @@ -571,7 +517,6 @@ "gen3/lib/bootstrap/templates/Gen3Secrets/g3auto/dbfarm/servers.json": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, "is_verified": false, "line_number": 5, "type": "Secret Keyword" @@ -580,7 +525,6 @@ "gen3/lib/logs/utils.sh": [ { "hashed_secret": "76143b4ffc8aa2a53f9700ce229f904e69f1e8b5", - "is_secret": false, "is_verified": false, "line_number": 3, "type": "Secret Keyword" @@ -589,7 +533,6 @@ "gen3/lib/manifestDefaults/hatchery/hatchery.json": [ { "hashed_secret": "0da0e0005ca04acb407af2681d0bede6d9406039", - "is_secret": false, "is_verified": false, "line_number": 78, "type": "Secret Keyword" @@ -598,14 +541,12 @@ "gen3/lib/onprem.sh": [ { "hashed_secret": "29e52a9bac8f274fa41c51fce9c98eba0dd99cb3", - "is_secret": false, "is_verified": false, "line_number": 68, "type": "Secret Keyword" }, { 
"hashed_secret": "50f013532a9770a2c2cfdc38b7581dd01df69b70", - "is_secret": false, "is_verified": false, "line_number": 84, "type": "Secret Keyword" @@ -614,14 +555,12 @@ "gen3/lib/secrets/rotate-postgres.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 162, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 250, "type": "Secret Keyword" @@ -630,49 +569,42 @@ "gen3/lib/testData/etlconvert/expected2.yaml": [ { "hashed_secret": "fe54e5e937d642307ec155b47ac8a214cb40d474", - "is_secret": false, "is_verified": false, "line_number": 10, "type": "Base64 High Entropy String" }, { "hashed_secret": "cea0e701e53c42bede2212b22f58f9ff8324da55", - "is_secret": false, "is_verified": false, "line_number": 13, "type": "Base64 High Entropy String" }, { "hashed_secret": "d98d72830f08c9a8b96ed11d3d96ae9e71b72a26", - "is_secret": false, "is_verified": false, "line_number": 16, "type": "Base64 High Entropy String" }, { "hashed_secret": "667fd45d415f73f4132cf0ed11452beb51117b12", - "is_secret": false, "is_verified": false, "line_number": 18, "type": "Base64 High Entropy String" }, { "hashed_secret": "c2599d515ba3be74ed58821485ba769fc565e424", - "is_secret": false, "is_verified": false, "line_number": 33, "type": "Base64 High Entropy String" }, { "hashed_secret": "6ec5eb29e2884f0c9731493b38902e37c2d672ba", - "is_secret": false, "is_verified": false, "line_number": 35, "type": "Base64 High Entropy String" }, { "hashed_secret": "99126b74731670a59b663d5320712564ec7b5f22", - "is_secret": false, "is_verified": false, "line_number": 36, "type": "Base64 High Entropy String" @@ -681,7 +613,6 @@ "gen3/test/secretsTest.sh": [ { "hashed_secret": "c2c715092ef59cba22520f109f041efca84b8938", - "is_secret": false, "is_verified": false, "line_number": 25, "type": "Secret Keyword" @@ -690,28 +621,24 @@ 
"gen3/test/terraformTest.sh": [ { "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d", - "is_secret": false, "is_verified": false, "line_number": 156, "type": "Secret Keyword" }, { "hashed_secret": "1cc07dccfdf640eb0e403e490a873a5536759009", - "is_secret": false, "is_verified": false, "line_number": 172, "type": "Base64 High Entropy String" }, { "hashed_secret": "185a71a740ef6b9b21c84e6eaa47b89c7de181ef", - "is_secret": false, "is_verified": false, "line_number": 175, "type": "Base64 High Entropy String" }, { "hashed_secret": "329b7cd8191942bedd337107934d365c43a86e6c", - "is_secret": false, "is_verified": false, "line_number": 175, "type": "Secret Keyword" @@ -720,7 +647,6 @@ "kube/services/datadog/values.yaml": [ { "hashed_secret": "52330dffa4d0795b4199a66428e54eca228e1661", - "is_secret": false, "is_verified": false, "line_number": 7, "type": "Secret Keyword" @@ -729,392 +655,336 @@ "kube/services/fenceshib/fenceshib-configmap.yaml": [ { "hashed_secret": "a985e14b9d6744a2d04f29347693b55c116e478c", - "is_secret": false, "is_verified": false, "line_number": 375, "type": "Base64 High Entropy String" }, { "hashed_secret": "adc747bc5eb82ef4b017f5c3759dcee5aa28c36f", - "is_secret": false, "is_verified": false, "line_number": 376, "type": "Base64 High Entropy String" }, { "hashed_secret": "59b1702ff0eaf92c9271cbd12f587de97df7e13b", - "is_secret": false, "is_verified": false, "line_number": 377, "type": "Base64 High Entropy String" }, { "hashed_secret": "b4a748bbfbbca8925d932a47ab3dcb970d34caf5", - "is_secret": false, "is_verified": false, "line_number": 378, "type": "Base64 High Entropy String" }, { "hashed_secret": "af646701a84f7dd9f0e87753f54def881326e78a", - "is_secret": false, "is_verified": false, "line_number": 379, "type": "Base64 High Entropy String" }, { "hashed_secret": "20c15ad9742124dc06e1612282c49bb443ebcbd9", - "is_secret": false, "is_verified": false, "line_number": 380, "type": "Base64 High Entropy String" }, { "hashed_secret": 
"9caded71b967a11b7a6cd0f20db91f06f3517d12", - "is_secret": false, "is_verified": false, "line_number": 381, "type": "Base64 High Entropy String" }, { "hashed_secret": "8f19501bc9241b71f7b6db929fb35ab12635dcd7", - "is_secret": false, "is_verified": false, "line_number": 382, "type": "Base64 High Entropy String" }, { "hashed_secret": "d6220f6a55df1ed11c4250f42ab07bb9da20541a", - "is_secret": false, "is_verified": false, "line_number": 383, "type": "Base64 High Entropy String" }, { "hashed_secret": "dadd9b96636f9529f2547d05d754dc310ceba0c3", - "is_secret": false, "is_verified": false, "line_number": 384, "type": "Base64 High Entropy String" }, { "hashed_secret": "3074bc66584550e20c3697a28f67a0762394943c", - "is_secret": false, "is_verified": false, "line_number": 385, "type": "Base64 High Entropy String" }, { "hashed_secret": "823131319b4c4b4688f44d3e832bfa9696f16b52", - "is_secret": false, "is_verified": false, "line_number": 386, "type": "Base64 High Entropy String" }, { "hashed_secret": "015b780cbfb76988caf52de8ac974a6781e53110", - "is_secret": false, "is_verified": false, "line_number": 387, "type": "Base64 High Entropy String" }, { "hashed_secret": "5c8fac33207d74d667680ade09447ea8f43b76d7", - "is_secret": false, "is_verified": false, "line_number": 388, "type": "Base64 High Entropy String" }, { "hashed_secret": "c0c4bb09d8394e8f001e337bd27ccac355433d9e", - "is_secret": false, "is_verified": false, "line_number": 389, "type": "Base64 High Entropy String" }, { "hashed_secret": "f95631bcbbbc56e18487dcb242cfb1b3e74b16a1", - "is_secret": false, "is_verified": false, "line_number": 390, "type": "Base64 High Entropy String" }, { "hashed_secret": "01a692ab6232e0882a313d148981bab58ab98f53", - "is_secret": false, "is_verified": false, "line_number": 391, "type": "Base64 High Entropy String" }, { "hashed_secret": "658060a680d415ce6690ad2c3b622ddb33ddd50a", - "is_secret": false, "is_verified": false, "line_number": 392, "type": "Base64 High Entropy String" }, { 
"hashed_secret": "80915b0bd9daa5e1f95cad573892980b1b5a2294", - "is_secret": false, "is_verified": false, "line_number": 393, "type": "Base64 High Entropy String" }, { "hashed_secret": "cc55977b293d8cdca8a2c19dfea6874e70057c41", - "is_secret": false, "is_verified": false, "line_number": 394, "type": "Base64 High Entropy String" }, { "hashed_secret": "e400ed02add75dd5f3a8c212857acf12027437d1", - "is_secret": false, "is_verified": false, "line_number": 395, "type": "Base64 High Entropy String" }, { "hashed_secret": "2e819c8baa3b0508a32b77de258655b3f3a6f7cb", - "is_secret": false, "is_verified": false, "line_number": 396, "type": "Base64 High Entropy String" }, { "hashed_secret": "546ed926d58ea5492ab6adb8be94a67aa44ac433", - "is_secret": false, "is_verified": false, "line_number": 397, "type": "Base64 High Entropy String" }, { "hashed_secret": "f056f2deceed268e7af6dbdaf2577079c76e006a", - "is_secret": false, "is_verified": false, "line_number": 398, "type": "Base64 High Entropy String" }, { "hashed_secret": "d75efee28f4798c3a9c6f44b78a8500513ef28b2", - "is_secret": false, "is_verified": false, "line_number": 399, "type": "Base64 High Entropy String" }, { "hashed_secret": "7803ae08cdc22a5e0b025eff3c9ef0628eedc165", - "is_secret": false, "is_verified": false, "line_number": 419, "type": "Base64 High Entropy String" }, { "hashed_secret": "b8b61e87f5b58b0eeb597b2122ea0cea2ccab3d9", - "is_secret": false, "is_verified": false, "line_number": 420, "type": "Base64 High Entropy String" }, { "hashed_secret": "787745fc904c3bd7eddc3d1aab683a376c13890f", - "is_secret": false, "is_verified": false, "line_number": 423, "type": "Base64 High Entropy String" }, { "hashed_secret": "81361d672f238f505a6246ef9b655ee2f48d67e7", - "is_secret": false, "is_verified": false, "line_number": 424, "type": "Base64 High Entropy String" }, { "hashed_secret": "7c98bff76ac3f273d15ed9bc3dd5294d323ab577", - "is_secret": false, "is_verified": false, "line_number": 425, "type": "Base64 High Entropy String" 
}, { "hashed_secret": "46038fc88daceed8dd46817ca45c72ae0270fdd4", - "is_secret": false, "is_verified": false, "line_number": 426, "type": "Base64 High Entropy String" }, { "hashed_secret": "acad0c57b4f5cbed1b4863ed06d02784180a9f92", - "is_secret": false, "is_verified": false, "line_number": 427, "type": "Base64 High Entropy String" }, { "hashed_secret": "1b57f49a6ee337c16ecd6aabfc0dff3b3821cd09", - "is_secret": false, "is_verified": false, "line_number": 428, "type": "Base64 High Entropy String" }, { "hashed_secret": "5b688158be36e8b3f265a462ed599dcf69290084", - "is_secret": false, "is_verified": false, "line_number": 429, "type": "Base64 High Entropy String" }, { "hashed_secret": "965996e12c8b50b3c325d96003e8984a4ece658a", - "is_secret": false, "is_verified": false, "line_number": 430, "type": "Base64 High Entropy String" }, { "hashed_secret": "584f0c58e764e948af1a35c9e60447aa0f84c6f5", - "is_secret": false, "is_verified": false, "line_number": 431, "type": "Base64 High Entropy String" }, { "hashed_secret": "bcaf897786d060a675ee9d654a84ae8baf96e9d0", - "is_secret": false, "is_verified": false, "line_number": 432, "type": "Base64 High Entropy String" }, { "hashed_secret": "0c09277fa183e06d32065f9386a3b4190b445df3", - "is_secret": false, "is_verified": false, "line_number": 433, "type": "Base64 High Entropy String" }, { "hashed_secret": "5a51be06b305d6664e4afd25f21869b0f8b5039b", - "is_secret": false, "is_verified": false, "line_number": 434, "type": "Base64 High Entropy String" }, { "hashed_secret": "b38404f8853d734e3d03577b2c1084b4540c8708", - "is_secret": false, "is_verified": false, "line_number": 435, "type": "Base64 High Entropy String" }, { "hashed_secret": "126ccc602cffcb8292beb57137f7f6719e317b72", - "is_secret": false, "is_verified": false, "line_number": 436, "type": "Base64 High Entropy String" }, { "hashed_secret": "6681c1d7e1d327642a32cb8864ad51e4b8f981e5", - "is_secret": false, "is_verified": false, "line_number": 437, "type": "Base64 High Entropy 
String" }, { "hashed_secret": "7f7b1f316ece195e5f584fe2faf6f9edc6942c6f", - "is_secret": false, "is_verified": false, "line_number": 439, "type": "Base64 High Entropy String" }, { "hashed_secret": "bb908c7bc655057f2edc42815c5dff82e9dea529", - "is_secret": false, "is_verified": false, "line_number": 440, "type": "Base64 High Entropy String" }, { "hashed_secret": "bc2a0d18e3dd142df7b34e95342d47bf8aadabcb", - "is_secret": false, "is_verified": false, "line_number": 441, "type": "Base64 High Entropy String" }, { "hashed_secret": "d60f0bcea109bb6edb6e45fd387f5f2c86e49e1a", - "is_secret": false, "is_verified": false, "line_number": 442, "type": "Base64 High Entropy String" }, { "hashed_secret": "e549dd40a741557cc1c4e377df0a141354e22688", - "is_secret": false, "is_verified": false, "line_number": 443, "type": "Base64 High Entropy String" }, { "hashed_secret": "2dd2486dae84cad50387c20bf687b6fbc6162b58", - "is_secret": false, "is_verified": false, "line_number": 444, "type": "Base64 High Entropy String" }, { "hashed_secret": "71622010fc7eb09d9273f59c548bde6a5da5dc0e", - "is_secret": false, "is_verified": false, "line_number": 445, "type": "Base64 High Entropy String" }, { "hashed_secret": "6f0115cf53bd49ec990c562ac6cbfc452c83cd46", - "is_secret": false, "is_verified": false, "line_number": 446, "type": "Base64 High Entropy String" }, { "hashed_secret": "70dddd534b2f9bb70871fefe0845b79c3b69363f", - "is_secret": false, "is_verified": false, "line_number": 448, "type": "Base64 High Entropy String" }, { "hashed_secret": "acf3536b0416aa99608b0be17e87655370ece829", - "is_secret": false, "is_verified": false, "line_number": 449, "type": "Base64 High Entropy String" }, { "hashed_secret": "1d13ee35c7279c1fae1c6474ed47611994273e41", - "is_secret": false, "is_verified": false, "line_number": 450, "type": "Base64 High Entropy String" }, { "hashed_secret": "d38cf89b25bd7378cdb4e00b4b59293001dd500b", - "is_secret": false, "is_verified": false, "line_number": 451, "type": "Base64 High 
Entropy String" }, { "hashed_secret": "1648f34ce2f1b563a8ed1c6d5d55b5e76a395903", - "is_secret": false, "is_verified": false, "line_number": 452, "type": "Base64 High Entropy String" }, { "hashed_secret": "9bf63f6f49fb01ff80959bc5a60c8688df92cc02", - "is_secret": false, "is_verified": false, "line_number": 453, "type": "Base64 High Entropy String" @@ -1123,7 +993,6 @@ "kube/services/jobs/indexd-authz-job.yaml": [ { "hashed_secret": "bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f", - "is_secret": false, "is_verified": false, "line_number": 70, "type": "Basic Auth Credentials" @@ -1132,14 +1001,12 @@ "kube/services/monitoring/grafana-values.yaml": [ { "hashed_secret": "2ae868079d293e0a185c671c7bcdac51df36e385", - "is_secret": false, "is_verified": false, "line_number": 162, "type": "Secret Keyword" }, { "hashed_secret": "7a64ff8446b06d38dc271019994f13823a2cbcf4", - "is_secret": false, "is_verified": false, "line_number": 166, "type": "Secret Keyword" @@ -1148,14 +1015,12 @@ "kube/services/ohdsi-atlas/README.md": [ { "hashed_secret": "6e71f9f2b1e96de5a712f899ed26477ebc260a73", - "is_secret": false, "is_verified": false, "line_number": 105, "type": "Secret Keyword" }, { "hashed_secret": "317b889ca9fa8789dc1b85714568b1bdf2c7baf3", - "is_secret": false, "is_verified": false, "line_number": 108, "type": "Secret Keyword" @@ -1164,7 +1029,6 @@ "kube/services/revproxy/helpers.js": [ { "hashed_secret": "1d278d3c888d1a2fa7eed622bfc02927ce4049af", - "is_secret": false, "is_verified": false, "line_number": 10, "type": "Base64 High Entropy String" @@ -1173,7 +1037,6 @@ "kube/services/revproxy/helpersTest.js": [ { "hashed_secret": "e029d4904cc728879d70030572bf37d4510367cb", - "is_secret": false, "is_verified": false, "line_number": 22, "type": "JSON Web Token" @@ -1182,924 +1045,792 @@ "package-lock.json": [ { "hashed_secret": "c95b6bc99445e7ed9177040f5ef94d0cdb38fb21", - "is_secret": false, "is_verified": false, "line_number": 10, "type": "Base64 High Entropy String" }, { 
"hashed_secret": "a896da46c897d3a0d007843006621f78dbcabf51", - "is_secret": false, "is_verified": false, "line_number": 19, "type": "Base64 High Entropy String" }, { "hashed_secret": "84b662fc9a2a275f90d0afafe6ce08a4d0928ac8", - "is_secret": false, "is_verified": false, "line_number": 28, "type": "Base64 High Entropy String" }, { "hashed_secret": "6ebe9724873357aaea25e329efb726fa61b843e7", - "is_secret": false, "is_verified": false, "line_number": 39, "type": "Base64 High Entropy String" }, { "hashed_secret": "f1dbba169db046906924ccd784068a2306096634", - "is_secret": false, "is_verified": false, "line_number": 44, "type": "Base64 High Entropy String" }, { "hashed_secret": "2c7bd6cdc39b5b8a0f32aa11988a0ec769526cdb", - "is_secret": false, "is_verified": false, "line_number": 52, "type": "Base64 High Entropy String" }, { "hashed_secret": "1addd61f68d977408128e530959437821a6d8b66", - "is_secret": false, "is_verified": false, "line_number": 57, "type": "Base64 High Entropy String" }, { "hashed_secret": "9787d966f19a0d8d0021b31d34cfdfcebdb9c28a", - "is_secret": false, "is_verified": false, "line_number": 65, "type": "Base64 High Entropy String" }, { "hashed_secret": "76693e518c3c8123e9a197821b506292322a0a95", - "is_secret": false, "is_verified": false, "line_number": 70, "type": "Base64 High Entropy String" }, { "hashed_secret": "fa83dcbf0f435ee38066d19a2a43815510f96bc4", - "is_secret": false, "is_verified": false, "line_number": 86, "type": "Base64 High Entropy String" }, { "hashed_secret": "017a7eab3d63331ecfe768927c8907a5a31888e5", - "is_secret": false, "is_verified": false, "line_number": 91, "type": "Base64 High Entropy String" }, { "hashed_secret": "92b56edda4f2906f548fe77c015490e6ba2ee4c3", - "is_secret": false, "is_verified": false, "line_number": 96, "type": "Base64 High Entropy String" }, { "hashed_secret": "936b0959aa13f1decc76be1d80acaac0860847b7", - "is_secret": false, "is_verified": false, "line_number": 101, "type": "Base64 High Entropy String" }, { 
"hashed_secret": "4bad86c43b7cd06efc130272d8e4de2b32636371", - "is_secret": false, "is_verified": false, "line_number": 109, "type": "Base64 High Entropy String" }, { "hashed_secret": "d11716ecfa623706b733654d78f4e7af3c117efa", - "is_secret": false, "is_verified": false, "line_number": 143, "type": "Base64 High Entropy String" }, { "hashed_secret": "0cc93dfdf4ae08bc374b99af985b25d2427f71d8", - "is_secret": false, "is_verified": false, "line_number": 148, "type": "Base64 High Entropy String" }, { "hashed_secret": "80f8d53f3fedde239f695d6a4c44c78b4aff0a44", - "is_secret": false, "is_verified": false, "line_number": 153, "type": "Base64 High Entropy String" }, { "hashed_secret": "83307cb75a4a44ba528f4a0aefcec2a8018dc6d8", - "is_secret": false, "is_verified": false, "line_number": 158, "type": "Base64 High Entropy String" }, { "hashed_secret": "c96d81662cc7919208154e7152fa0033391b7bcd", - "is_secret": false, "is_verified": false, "line_number": 166, "type": "Base64 High Entropy String" }, { "hashed_secret": "7156492f40fb2479a45780b3d2959c29b27b6374", - "is_secret": false, "is_verified": false, "line_number": 181, "type": "Base64 High Entropy String" }, { "hashed_secret": "885304335818f51938422166d361cddacfd626d0", - "is_secret": false, "is_verified": false, "line_number": 186, "type": "Base64 High Entropy String" }, { "hashed_secret": "915ca894a8ec19ffcd55555e6c8daac1fe882751", - "is_secret": false, "is_verified": false, "line_number": 191, "type": "Base64 High Entropy String" }, { "hashed_secret": "7ea379a1bf787a21401c8c39f285e4e84b478d72", - "is_secret": false, "is_verified": false, "line_number": 201, "type": "Base64 High Entropy String" }, { "hashed_secret": "8e948a3b773d1a2e4b6f4220216efa734315246d", - "is_secret": false, "is_verified": false, "line_number": 209, "type": "Base64 High Entropy String" }, { "hashed_secret": "1a321d0b0d9b6d75888ce7ae121ac222cec1eddd", - "is_secret": false, "is_verified": false, "line_number": 217, "type": "Base64 High Entropy String" 
}, { "hashed_secret": "1a6bfe25744ad6c6ce27c3a52dbd98c15be12a5c", - "is_secret": false, "is_verified": false, "line_number": 222, "type": "Base64 High Entropy String" }, { "hashed_secret": "04450eaacfa844f84926d04d6a07534cde99b28e", - "is_secret": false, "is_verified": false, "line_number": 227, "type": "Base64 High Entropy String" }, { "hashed_secret": "b4c295435d09bbdfb91ced9040379166d67ccbd2", - "is_secret": false, "is_verified": false, "line_number": 232, "type": "Base64 High Entropy String" }, { "hashed_secret": "bb2bf296d6e086b471d45a26af9fd57f55289a75", - "is_secret": false, "is_verified": false, "line_number": 237, "type": "Base64 High Entropy String" }, { "hashed_secret": "9579b6a23d94d56f2f163233b716d8752e6b3bde", - "is_secret": false, "is_verified": false, "line_number": 256, "type": "Base64 High Entropy String" }, { "hashed_secret": "796925256bc0f4dc43cdfab7fbff852eace18f42", - "is_secret": false, "is_verified": false, "line_number": 287, "type": "Base64 High Entropy String" }, { "hashed_secret": "7e280af4ec2d573144d98e89ed2e1dfd817ca48f", - "is_secret": false, "is_verified": false, "line_number": 295, "type": "Base64 High Entropy String" }, { "hashed_secret": "941b3e7836a6f26d32311893ac5d9ad0a52c45ca", - "is_secret": false, "is_verified": false, "line_number": 300, "type": "Base64 High Entropy String" }, { "hashed_secret": "34743e1f7d9541c4a726b998f20baf828c694213", - "is_secret": false, "is_verified": false, "line_number": 305, "type": "Base64 High Entropy String" }, { "hashed_secret": "c4fea87bd49c4427d7215d57ada9ff3177e0c471", - "is_secret": false, "is_verified": false, "line_number": 310, "type": "Base64 High Entropy String" }, { "hashed_secret": "85324324e21d0dfbfb5248ac92fa0f289d2e25f8", - "is_secret": false, "is_verified": false, "line_number": 315, "type": "Base64 High Entropy String" }, { "hashed_secret": "19eea0e64f6a3311b04e472035df10c23f23dd0a", - "is_secret": false, "is_verified": false, "line_number": 352, "type": "Base64 High Entropy 
String" }, { "hashed_secret": "acce4ef8d841ffa646256da3af7b79ad5cb78158", - "is_secret": false, "is_verified": false, "line_number": 364, "type": "Base64 High Entropy String" }, { "hashed_secret": "22e7ae9b65ade417baac61e6f0d84a54783ba759", - "is_secret": false, "is_verified": false, "line_number": 369, "type": "Base64 High Entropy String" }, { "hashed_secret": "8e71b7828c7c554f05dbbabddd63301b5fc56771", - "is_secret": false, "is_verified": false, "line_number": 374, "type": "Base64 High Entropy String" }, { "hashed_secret": "fea0d9c5b0c53c41e6a0a961a49cccc170847120", - "is_secret": false, "is_verified": false, "line_number": 379, "type": "Base64 High Entropy String" }, { "hashed_secret": "ebe2160ede628e0faeac9fe70c215cd38d28d8f6", - "is_secret": false, "is_verified": false, "line_number": 384, "type": "Base64 High Entropy String" }, { "hashed_secret": "9cb2b0347722893cde39bbe83f9df7c3c6e1b7c3", - "is_secret": false, "is_verified": false, "line_number": 398, "type": "Base64 High Entropy String" }, { "hashed_secret": "344e37e02a35dd31cc7dc945b7fe7b2da88344c0", - "is_secret": false, "is_verified": false, "line_number": 403, "type": "Base64 High Entropy String" }, { "hashed_secret": "31a41817127c8d2b7b304c326b05d7319934e7a6", - "is_secret": false, "is_verified": false, "line_number": 413, "type": "Base64 High Entropy String" }, { "hashed_secret": "150852e9f1e877547306d59618a136fb535b40e3", - "is_secret": false, "is_verified": false, "line_number": 418, "type": "Base64 High Entropy String" }, { "hashed_secret": "277e32c5ba00ef90c6f76c7004fde2ecac6d2e18", - "is_secret": false, "is_verified": false, "line_number": 423, "type": "Base64 High Entropy String" }, { "hashed_secret": "b95e69c7f4328ea641952f875c3b079a1585c9d1", - "is_secret": false, "is_verified": false, "line_number": 431, "type": "Base64 High Entropy String" }, { "hashed_secret": "6b30fe731c8444c0263b57aacbdaedb771ec01a5", - "is_secret": false, "is_verified": false, "line_number": 436, "type": "Base64 High 
Entropy String" }, { "hashed_secret": "98eafa06e0c7e089c19e79dedf5989c3eb2f0568", - "is_secret": false, "is_verified": false, "line_number": 445, "type": "Base64 High Entropy String" }, { "hashed_secret": "bf47364c2d4ad0308ef016fe4a89f6c7dc21ef86", - "is_secret": false, "is_verified": false, "line_number": 464, "type": "Base64 High Entropy String" }, { "hashed_secret": "3e6c18abd5b90c63da0bd8b4c0d3a142e3d5a83d", - "is_secret": false, "is_verified": false, "line_number": 474, "type": "Base64 High Entropy String" }, { "hashed_secret": "209bf9cfe9000c6851cd4f94165d30ee1cd3dca1", - "is_secret": false, "is_verified": false, "line_number": 482, "type": "Base64 High Entropy String" }, { "hashed_secret": "cf09cb791688fe019284bfdc362abc41918645a5", - "is_secret": false, "is_verified": false, "line_number": 487, "type": "Base64 High Entropy String" }, { "hashed_secret": "6c1392daf02b9ba2a21c49c82508048525d5bc4b", - "is_secret": false, "is_verified": false, "line_number": 492, "type": "Base64 High Entropy String" }, { "hashed_secret": "b4e2bf4f3a071b223da2f270d5a2348d65105d3e", - "is_secret": false, "is_verified": false, "line_number": 497, "type": "Base64 High Entropy String" }, { "hashed_secret": "98d583792218c3c06ecbcac66e5bedcdaabd63e7", - "is_secret": false, "is_verified": false, "line_number": 507, "type": "Base64 High Entropy String" }, { "hashed_secret": "575c9b4e0765ae6ab9a4f38eb1186ea361691f73", - "is_secret": false, "is_verified": false, "line_number": 514, "type": "Base64 High Entropy String" }, { "hashed_secret": "16225dde2ec301d038a0bdbda68de4a174fbfdd0", - "is_secret": false, "is_verified": false, "line_number": 519, "type": "Base64 High Entropy String" }, { "hashed_secret": "80d73b6f7e87f07e3ae70ef1e692aa9569574551", - "is_secret": false, "is_verified": false, "line_number": 524, "type": "Base64 High Entropy String" }, { "hashed_secret": "38952752ebde485c02a80bff1d81ebe95664bcca", - "is_secret": false, "is_verified": false, "line_number": 529, "type": "Base64 
High Entropy String" }, { "hashed_secret": "150b60d278251f2470dd690016afe038bc1bb7f1", - "is_secret": false, "is_verified": false, "line_number": 534, "type": "Base64 High Entropy String" }, { "hashed_secret": "535582d92da3a4158e592ec29868bfd8467b8bce", - "is_secret": false, "is_verified": false, "line_number": 539, "type": "Base64 High Entropy String" }, { "hashed_secret": "23b096d9b48ed5d9a778d3db5807c5c7a2357c93", - "is_secret": false, "is_verified": false, "line_number": 544, "type": "Base64 High Entropy String" }, { "hashed_secret": "127f92724797904fb4e6de2dfff2c71c07739612", - "is_secret": false, "is_verified": false, "line_number": 549, "type": "Base64 High Entropy String" }, { "hashed_secret": "f74b21c2fc87ad48118b3723372ecfe25aaae730", - "is_secret": false, "is_verified": false, "line_number": 559, "type": "Base64 High Entropy String" }, { "hashed_secret": "bc788b9febb8e95114c2e78a9d5297f80bbedb2c", - "is_secret": false, "is_verified": false, "line_number": 564, "type": "Base64 High Entropy String" }, { "hashed_secret": "e9fdc3025cd10bd8aa4508611e6b7b7a9d650a2c", - "is_secret": false, "is_verified": false, "line_number": 575, "type": "Base64 High Entropy String" }, { "hashed_secret": "36a64bd1be32f031420a87c448636720426e0072", - "is_secret": false, "is_verified": false, "line_number": 580, "type": "Base64 High Entropy String" }, { "hashed_secret": "06a3dc8802aa9b4f2f48ad081cbe64482ce9f491", - "is_secret": false, "is_verified": false, "line_number": 585, "type": "Base64 High Entropy String" }, { "hashed_secret": "6c8453f18e4aa0280d847454c9a803c12e2d14d7", - "is_secret": false, "is_verified": false, "line_number": 590, "type": "Base64 High Entropy String" }, { "hashed_secret": "3df46004e168f8d8e3422adfbf0b7c237a41f437", - "is_secret": false, "is_verified": false, "line_number": 595, "type": "Base64 High Entropy String" }, { "hashed_secret": "5c270f653b2fcd5b7c700b53f8543df4147a4aba", - "is_secret": false, "is_verified": false, "line_number": 600, "type": 
"Base64 High Entropy String" }, { "hashed_secret": "98a159a135963e5e65a546879c332b2c3942aec3", - "is_secret": false, "is_verified": false, "line_number": 605, "type": "Base64 High Entropy String" }, { "hashed_secret": "58d846ede841bbec0d67a42d03426806635fee2f", - "is_secret": false, "is_verified": false, "line_number": 610, "type": "Base64 High Entropy String" }, { "hashed_secret": "23e42656fba130d56c20abddb94b6b7bfcad69a8", - "is_secret": false, "is_verified": false, "line_number": 618, "type": "Base64 High Entropy String" }, { "hashed_secret": "f883f0bd87d8455814f491e2067bd3f62454c7c2", - "is_secret": false, "is_verified": false, "line_number": 623, "type": "Base64 High Entropy String" }, { "hashed_secret": "8ece0f01da9189bae69a60da116040400bbc10e5", - "is_secret": false, "is_verified": false, "line_number": 628, "type": "Base64 High Entropy String" }, { "hashed_secret": "75a3c0b9934bd460ff7af9763edb25d749ab7b4e", - "is_secret": false, "is_verified": false, "line_number": 633, "type": "Base64 High Entropy String" }, { "hashed_secret": "baac57cb314beab87420d1da6906a1d2377c7d73", - "is_secret": false, "is_verified": false, "line_number": 638, "type": "Base64 High Entropy String" }, { "hashed_secret": "d0a953de593a0a7b26b925a6476d8382cd31cb0e", - "is_secret": false, "is_verified": false, "line_number": 654, "type": "Base64 High Entropy String" }, { "hashed_secret": "8b15238d25347ab18f4cbbe191de9aed597c8ea4", - "is_secret": false, "is_verified": false, "line_number": 659, "type": "Base64 High Entropy String" }, { "hashed_secret": "1e2ab7a2fd9b6afcbe08afcb9dc652b76cf367d8", - "is_secret": false, "is_verified": false, "line_number": 668, "type": "Base64 High Entropy String" }, { "hashed_secret": "ae745d719f97b3ddb9791348b1f29ff8208c0c5c", - "is_secret": false, "is_verified": false, "line_number": 676, "type": "Base64 High Entropy String" }, { "hashed_secret": "b72a53c8bebd6540eeffeba5b0c28965bbb2a664", - "is_secret": false, "is_verified": false, "line_number": 681, 
"type": "Base64 High Entropy String" }, { "hashed_secret": "97cbb7fbdfe498c80489e26bcdc78fce5db9b258", - "is_secret": false, "is_verified": false, "line_number": 686, "type": "Base64 High Entropy String" }, { "hashed_secret": "bc98c415b1c6ee93adf8e97a4a536b6342337c19", - "is_secret": false, "is_verified": false, "line_number": 691, "type": "Base64 High Entropy String" }, { "hashed_secret": "5a6baaacb03a030567b857cb8cfe440407e6385e", - "is_secret": false, "is_verified": false, "line_number": 696, "type": "Base64 High Entropy String" }, { "hashed_secret": "e55a8322e5c7485be2f721155d9ed15afc586a4c", - "is_secret": false, "is_verified": false, "line_number": 705, "type": "Base64 High Entropy String" }, { "hashed_secret": "47709a15a1b02a87f65dfcd5f3e78e0d2206c95f", - "is_secret": false, "is_verified": false, "line_number": 710, "type": "Base64 High Entropy String" }, { "hashed_secret": "5782d0f39536b22f2c6aa29d3b815a57f43e4800", - "is_secret": false, "is_verified": false, "line_number": 719, "type": "Base64 High Entropy String" }, { "hashed_secret": "401f90e6afa890c5ee44071351e4a149e7c1f5e0", - "is_secret": false, "is_verified": false, "line_number": 724, "type": "Base64 High Entropy String" }, { "hashed_secret": "51f38b23af543da8b637a3bd62f5fb2c460e3b3d", - "is_secret": false, "is_verified": false, "line_number": 729, "type": "Base64 High Entropy String" }, { "hashed_secret": "8287678ab8009ae16b02930c9e260d1f28578fbe", - "is_secret": false, "is_verified": false, "line_number": 734, "type": "Base64 High Entropy String" }, { "hashed_secret": "d4c050e6914eb68a5c657fb8bb09f6ac5eae1e86", - "is_secret": false, "is_verified": false, "line_number": 739, "type": "Base64 High Entropy String" }, { "hashed_secret": "922ac7db4914c20910496a41c474631928d6c2f2", - "is_secret": false, "is_verified": false, "line_number": 750, "type": "Base64 High Entropy String" }, { "hashed_secret": "f7f85d9f7c87f1e576dcaf4cf50f35728f9a3265", - "is_secret": false, "is_verified": false, "line_number": 
771, "type": "Base64 High Entropy String" }, { "hashed_secret": "d7966031d8525b080d7234049cbb040ac9a3f908", - "is_secret": false, "is_verified": false, "line_number": 798, "type": "Base64 High Entropy String" }, { "hashed_secret": "ff3d359d573f78d89424e03ec8688eee19305f9f", - "is_secret": false, "is_verified": false, "line_number": 808, "type": "Base64 High Entropy String" }, { "hashed_secret": "949b4ff40f26797f9290fe46eaa8691caef5c5ab", - "is_secret": false, "is_verified": false, "line_number": 817, "type": "Base64 High Entropy String" }, { "hashed_secret": "ce4ea19f66e9140bdb497b19c6ae94c32ee565f0", - "is_secret": false, "is_verified": false, "line_number": 825, "type": "Base64 High Entropy String" }, { "hashed_secret": "f6368525e9e22577efc8d8b737794e845958ba92", - "is_secret": false, "is_verified": false, "line_number": 834, "type": "Base64 High Entropy String" }, { "hashed_secret": "1508bbaf29927b5348d4df62823dab122a0d3b48", - "is_secret": false, "is_verified": false, "line_number": 839, "type": "Base64 High Entropy String" }, { "hashed_secret": "12917e7235ce486ca51a296b896afa5e3b4fda54", - "is_secret": false, "is_verified": false, "line_number": 844, "type": "Base64 High Entropy String" }, { "hashed_secret": "49e05eb75fd04d8f44cf235d4e8eddc30a2b93e5", - "is_secret": false, "is_verified": false, "line_number": 849, "type": "Base64 High Entropy String" }, { "hashed_secret": "aa8ea120ddc5aaa27cb02e0b04ac1c53b249a724", - "is_secret": false, "is_verified": false, "line_number": 869, "type": "Base64 High Entropy String" }, { "hashed_secret": "b3e00452fd69737cc747d0661fa3b3949a4a0805", - "is_secret": false, "is_verified": false, "line_number": 876, "type": "Base64 High Entropy String" }, { "hashed_secret": "af2ceb518ddc689b0e2a03ffebb64d4499817c17", - "is_secret": false, "is_verified": false, "line_number": 887, "type": "Base64 High Entropy String" }, { "hashed_secret": "7da94b235f996b5c65b66c3e70b5eeaf97bab5d4", - "is_secret": false, "is_verified": false, 
"line_number": 892, "type": "Base64 High Entropy String" }, { "hashed_secret": "f8363d7113ba35fd06b33afe20c8ad21a3202197", - "is_secret": false, "is_verified": false, "line_number": 900, "type": "Base64 High Entropy String" }, { "hashed_secret": "6902b24068ea12c3a3e31596614aa6fa0fba3c39", - "is_secret": false, "is_verified": false, "line_number": 908, "type": "Base64 High Entropy String" }, { "hashed_secret": "2c732c0a0dccfc1588888172188ce9a1abb7166e", - "is_secret": false, "is_verified": false, "line_number": 916, "type": "Base64 High Entropy String" }, { "hashed_secret": "c59aac9ab2704f627d29c762e716ba84b15be3f1", - "is_secret": false, "is_verified": false, "line_number": 921, "type": "Base64 High Entropy String" }, { "hashed_secret": "20249a3c96028e5ad19143d86ec5d2ee233935ed", - "is_secret": false, "is_verified": false, "line_number": 937, "type": "Base64 High Entropy String" }, { "hashed_secret": "2a57a9814486d6f83257ec94e65d1024819611b8", - "is_secret": false, "is_verified": false, "line_number": 942, "type": "Base64 High Entropy String" }, { "hashed_secret": "d5e822897b1f37e6ce1a864e2ba9af8f9bfc5539", - "is_secret": false, "is_verified": false, "line_number": 950, "type": "Base64 High Entropy String" }, { "hashed_secret": "dbee1beb29275ad50ef0a68067ca144985beca2c", - "is_secret": false, "is_verified": false, "line_number": 957, "type": "Base64 High Entropy String" }, { "hashed_secret": "b0cb4b5554183f2c7bc1ca25d902db5769798a7a", - "is_secret": false, "is_verified": false, "line_number": 962, "type": "Base64 High Entropy String" }, { "hashed_secret": "29f79b77802802c5ae2d3c2acb9179280de37914", - "is_secret": false, "is_verified": false, "line_number": 967, "type": "Base64 High Entropy String" }, { "hashed_secret": "18469023a89dd192b5275d8b955c9fd2202e0c03", - "is_secret": false, "is_verified": false, "line_number": 983, "type": "Base64 High Entropy String" }, { "hashed_secret": "0d3ce7468071b4e48ba9cd014ade7037dc57ef41", - "is_secret": false, "is_verified": 
false, "line_number": 991, "type": "Base64 High Entropy String" }, { "hashed_secret": "955d2d24c472b4eb0b4488f935a0f65e38001df8", - "is_secret": false, "is_verified": false, "line_number": 996, "type": "Base64 High Entropy String" }, { "hashed_secret": "42e05c82cd06a9ed1d15e0f472c2efc4b3254cae", - "is_secret": false, "is_verified": false, "line_number": 1010, "type": "Base64 High Entropy String" }, { "hashed_secret": "7a87fb248397359e9c6ca6e46f39805789059102", - "is_secret": false, "is_verified": false, "line_number": 1018, "type": "Base64 High Entropy String" }, { "hashed_secret": "7fbf450bf4ee54f013454f70af3a9743c0909f54", - "is_secret": false, "is_verified": false, "line_number": 1034, "type": "Base64 High Entropy String" }, { "hashed_secret": "df8e0babfad52a541f6e470cf3a143402c2c2a1e", - "is_secret": false, "is_verified": false, "line_number": 1039, "type": "Base64 High Entropy String" }, { "hashed_secret": "6f9bfb49cb818d2fe07592515e4c3f7a0bbd7e0e", - "is_secret": false, "is_verified": false, "line_number": 1044, "type": "Base64 High Entropy String" }, { "hashed_secret": "9e897caf5658aea914e1034f46663cadb5a76348", - "is_secret": false, "is_verified": false, "line_number": 1054, "type": "Base64 High Entropy String" }, { "hashed_secret": "3aec99f39b829f94874ccd0a0d90315c6690cb94", - "is_secret": false, "is_verified": false, "line_number": 1064, "type": "Base64 High Entropy String" }, { "hashed_secret": "eca5fc6e4f5f895143d3fcedefc42dfe6e79f918", - "is_secret": false, "is_verified": false, "line_number": 1069, "type": "Base64 High Entropy String" }, { "hashed_secret": "307a947aa422c67fdefb07178198a004fb2c0d94", - "is_secret": false, "is_verified": false, "line_number": 1074, "type": "Base64 High Entropy String" }, { "hashed_secret": "0ba2fc9a137313ae1fdda2b5476dedf0595bda3a", - "is_secret": false, "is_verified": false, "line_number": 1083, "type": "Base64 High Entropy String" @@ -2108,7 +1839,6 @@ "tf_files/aws/cognito/README.md": [ { "hashed_secret": 
"f6920f370a30262b7dd70e97293c73ec89739b70", - "is_secret": false, "is_verified": false, "line_number": 106, "type": "Secret Keyword" @@ -2117,14 +1847,12 @@ "tf_files/aws/commons/README.md": [ { "hashed_secret": "d02e53411e8cb4cd709778f173f7bc9a3455f8ed", - "is_secret": false, "is_verified": false, "line_number": 60, "type": "Secret Keyword" }, { "hashed_secret": "9dc0da3613af850c5a018b0a88a5626fb8888e4e", - "is_secret": false, "is_verified": false, "line_number": 78, "type": "Secret Keyword" @@ -2133,7 +1861,6 @@ "tf_files/aws/eks/variables.tf": [ { "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884", - "is_secret": false, "is_verified": false, "line_number": 135, "type": "Hex High Entropy String" @@ -2142,14 +1869,12 @@ "tf_files/aws/modules/common-logging/README.md": [ { "hashed_secret": "83442aa5a16cb1992731c32367ef464564388017", - "is_secret": false, "is_verified": false, "line_number": 57, "type": "Base64 High Entropy String" }, { "hashed_secret": "fd4a4637ac99de2c1d89155d66d1f3de15d231a2", - "is_secret": false, "is_verified": false, "line_number": 59, "type": "Hex High Entropy String" @@ -2158,28 +1883,24 @@ "tf_files/aws/modules/common-logging/lambda_function.py": [ { "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", - "is_secret": false, "is_verified": false, "line_number": 18, "type": "Hex High Entropy String" }, { "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", - "is_secret": false, "is_verified": false, "line_number": 18, "type": "Base64 High Entropy String" }, { "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", - "is_secret": false, "is_verified": false, "line_number": 18, "type": "Hex High Entropy String" }, { "hashed_secret": "4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff", - "is_secret": false, "is_verified": false, "line_number": 30, "type": "Hex High Entropy String" @@ -2188,21 +1909,18 @@ "tf_files/aws/modules/common-logging/testLambda.py": [ { "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", - 
"is_secret": false, "is_verified": false, "line_number": 5, "type": "Hex High Entropy String" }, { "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", - "is_secret": false, "is_verified": false, "line_number": 5, "type": "Base64 High Entropy String" }, { "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", - "is_secret": false, "is_verified": false, "line_number": 5, "type": "Hex High Entropy String" @@ -2211,7 +1929,6 @@ "tf_files/aws/modules/eks/variables.tf": [ { "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884", - "is_secret": false, "is_verified": false, "line_number": 113, "type": "Hex High Entropy String" @@ -2220,14 +1937,12 @@ "tf_files/aws/modules/management-logs/README.md": [ { "hashed_secret": "83442aa5a16cb1992731c32367ef464564388017", - "is_secret": false, "is_verified": false, "line_number": 54, "type": "Base64 High Entropy String" }, { "hashed_secret": "fd4a4637ac99de2c1d89155d66d1f3de15d231a2", - "is_secret": false, "is_verified": false, "line_number": 56, "type": "Hex High Entropy String" @@ -2236,28 +1951,24 @@ "tf_files/aws/modules/management-logs/lambda_function.py": [ { "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", - "is_secret": false, "is_verified": false, "line_number": 18, "type": "Hex High Entropy String" }, { "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", - "is_secret": false, "is_verified": false, "line_number": 18, "type": "Base64 High Entropy String" }, { "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", - "is_secret": false, "is_verified": false, "line_number": 18, "type": "Hex High Entropy String" }, { "hashed_secret": "4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff", - "is_secret": false, "is_verified": false, "line_number": 30, "type": "Hex High Entropy String" @@ -2266,42 +1977,36 @@ "tf_files/aws/modules/management-logs/testLambda.py": [ { "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", - "is_secret": false, "is_verified": false, "line_number": 5, "type": 
"Hex High Entropy String" }, { "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", - "is_secret": false, "is_verified": false, "line_number": 5, "type": "Base64 High Entropy String" }, { "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", - "is_secret": false, "is_verified": false, "line_number": 5, "type": "Hex High Entropy String" }, { "hashed_secret": "3cf8eb4e9254e1d6cc523da01f8b798b9a83101a", - "is_secret": false, "is_verified": false, "line_number": 6, "type": "Base64 High Entropy String" }, { "hashed_secret": "51118900cd675df1b44f254057398f3e52902a5d", - "is_secret": false, "is_verified": false, "line_number": 6, "type": "Hex High Entropy String" }, { "hashed_secret": "60a6dfc8d43cd2f5c6292899fc2f94f2d4fc32c4", - "is_secret": false, "is_verified": false, "line_number": 6, "type": "Hex High Entropy String" @@ -2310,7 +2015,6 @@ "tf_files/aws/rds/sample.tfvars": [ { "hashed_secret": "76c3c4836dee37d8d0642949f84092a9a24bbf46", - "is_secret": false, "is_verified": false, "line_number": 7, "type": "Secret Keyword" @@ -2319,7 +2023,6 @@ "tf_files/aws/slurm/README.md": [ { "hashed_secret": "fd85d792fa56981cf6a8d2a5c0857c74af86e99d", - "is_secret": false, "is_verified": false, "line_number": 83, "type": "Secret Keyword" @@ -2328,7 +2031,6 @@ "tf_files/azure/cloud.tf": [ { "hashed_secret": "7c1a4b52b64e4106041971c345a1f3eab58fb2a4", - "is_secret": false, "is_verified": false, "line_number": 424, "type": "Secret Keyword" @@ -2337,7 +2039,6 @@ "tf_files/gcp-bwg/roots/commons_setup/variables/answerfile-commons_setup-001.template.tfvars": [ { "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", - "is_secret": false, "is_verified": false, "line_number": 231, "type": "Secret Keyword" @@ -2346,7 +2047,6 @@ "tf_files/gcp-bwg/roots/templates/answerfile-commons_setup-001.template.tfvars": [ { "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", - "is_secret": false, "is_verified": false, "line_number": 231, "type": "Secret Keyword" @@ -2355,7 
+2055,6 @@ "tf_files/gcp-bwg/roots/templates/answerfile-env-tenant.user.tfvars_NO_APP_SETUP": [ { "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", - "is_secret": false, "is_verified": false, "line_number": 262, "type": "Secret Keyword" @@ -2364,21 +2063,18 @@ "tf_files/gcp/commons/sample.tfvars": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 11, "type": "Secret Keyword" }, { "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d", - "is_secret": false, "is_verified": false, "line_number": 26, "type": "Secret Keyword" }, { "hashed_secret": "253c7b5e7c83a86346fc4501495b130813f08105", - "is_secret": false, "is_verified": false, "line_number": 37, "type": "Secret Keyword" @@ -2387,7 +2083,6 @@ "tf_files/shared/modules/k8s_configs/creds.tpl": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, "is_verified": false, "line_number": 8, "type": "Secret Keyword" diff --git a/gen3/bin/kube-roll-all.sh b/gen3/bin/kube-roll-all.sh index 2b5975854..0a464b46a 100644 --- a/gen3/bin/kube-roll-all.sh +++ b/gen3/bin/kube-roll-all.sh @@ -290,6 +290,18 @@ else gen3_log_info "not deploying frontend-framework - no manifest entry for '.versions[\"frontend-framework\"]'" fi +if g3k_manifest_lookup '.versions["dicom-server"]' 2> /dev/null; then + gen3 kube-setup-dicom-server & +else + gen3_log_info "not deploying dicom-server - no manifest entry for '.versions[\"dicom-server\"]'" +fi + +if g3k_manifest_lookup '.versions["dicom-viewer"]' 2> /dev/null; then + gen3 kube-setup-dicom-viewer & +else + gen3_log_info "not deploying dicom-viewer - no manifest entry for '.versions[\"dicom-viewer\"]'" +fi + gen3_log_info "enable network policy" gen3 kube-setup-networkpolicy "enable" || true & diff --git a/gen3/bin/kube-setup-dicom-server.sh b/gen3/bin/kube-setup-dicom-server.sh new file mode 100644 index 000000000..1de617b84 --- /dev/null +++ 
b/gen3/bin/kube-setup-dicom-server.sh @@ -0,0 +1,61 @@ +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/gen3setup" + +setup_database_and_config() { + gen3_log_info "setting up dicom-server DB and config" + + if g3kubectl describe secret dicom-server-g3auto > /dev/null 2>&1; then + gen3_log_info "dicom-server-g3auto secret already configured" + return 0 + fi + if [[ -n "$JENKINS_HOME" || ! -f "$(gen3_secrets_folder)/creds.json" ]]; then + gen3_log_err "skipping db setup in non-adminvm environment" + return 0 + fi + + # Setup config file that dicom-server consumes + local secretsFolder="$(gen3_secrets_folder)/g3auto/dicom-server" + if [[ ! -f "$secretsFolder/orthanc_config_overwrites.json" ]]; then + if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then + if ! gen3 db setup orthanc; then + gen3_log_err "Failed setting up orthanc database for dicom-server" + return 1 + fi + fi + if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then + gen3_log_err "dbcreds not present in Gen3Secrets/" + return 1 + fi + + # TODO: generate and mount a cert + # "SslEnabled": true, + # "SslCertificate": "" + cat - > "$secretsFolder/orthanc_config_overwrites.json" < Date: Mon, 11 Apr 2022 18:29:50 -0700 Subject: [PATCH 005/106] Add DD_AGENT_HOST env var (#1899) --- kube/services/jenkins-worker/jenkins-worker-deployment.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kube/services/jenkins-worker/jenkins-worker-deployment.yaml b/kube/services/jenkins-worker/jenkins-worker-deployment.yaml index fb3f89882..4e13eea69 100644 --- a/kube/services/jenkins-worker/jenkins-worker-deployment.yaml +++ b/kube/services/jenkins-worker/jenkins-worker-deployment.yaml @@ -101,6 +101,10 @@ spec: secretKeyRef: name: jenkins-g3auto key: google_app_creds.json + - name: DD_AGENT_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP resources: limits: cpu: 0.6 From 4f51331c61021bbaad0f6ba5b65f7160aeb01a79 Mon Sep 17 00:00:00 2001 From: jawadqur <55899496+jawadqur@users.noreply.github.com> Date: Mon, 
11 Apr 2022 21:25:38 -0500 Subject: [PATCH 006/106] Update values.yaml (#1898) --- kube/services/datadog/values.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/kube/services/datadog/values.yaml b/kube/services/datadog/values.yaml index 03cd70037..5ad681b5d 100644 --- a/kube/services/datadog/values.yaml +++ b/kube/services/datadog/values.yaml @@ -2,6 +2,14 @@ # https://github.com/DataDog/helm-charts/tree/main/charts/datadog datadog: + ## dogstatsd configuration + ## ref: https://docs.datadoghq.com/agent/kubernetes/dogstatsd/ + ## To emit custom metrics from your Kubernetes application, use DogStatsD. + dogstatsd: + port: 8125 + useHostPort: true + nonLocalTraffic: true + # datadog.apiKeyExistingSecret -- Use existing Secret which stores API key instead of creating a new one. The value should be set with the `api-key` key inside the secret. ## If set, this parameter takes precedence over "apiKey". apiKeyExistingSecret: "datadog-agent" From f7f1a948cda7c34b7f704ea58e5ff2e474040b9d Mon Sep 17 00:00:00 2001 From: emalinowski Date: Tue, 12 Apr 2022 13:55:10 -0500 Subject: [PATCH 007/106] Chore/argo connectivity (#1900) * chore(argo-connectivity): Added rules to allow argo to connect to wts and cohort-middleware * chore(argo-connectivity): Added rules to allow argo to connect to wts and cohort-middleware Co-authored-by: Edward Malinowski --- .../gen3/services/argo_netpolicy.yaml | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 kube/services/netpolicy/gen3/services/argo_netpolicy.yaml diff --git a/kube/services/netpolicy/gen3/services/argo_netpolicy.yaml b/kube/services/netpolicy/gen3/services/argo_netpolicy.yaml new file mode 100644 index 000000000..0f4440611 --- /dev/null +++ b/kube/services/netpolicy/gen3/services/argo_netpolicy.yaml @@ -0,0 +1,34 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: netpolicy-argo +spec: + podSelector: + matchExpressions: + - key: app + operator: In + values: + - 
revproxy + - cohort-middleware + - wts + - indexd + ingress: + - from: + - ipBlock: + cidr: 0.0.0.0/0 + ports: + - port: 80 + - port: 4000 + - port: 8080 + - port: 81 + - port: 82 + - port: 443 + egress: + - to: + - namespaceSelector: + matchLabels: + app: argo + policyTypes: + - Ingress + - Egress From 41953557c24b6f3b45d82e88f59317b7b5319b1a Mon Sep 17 00:00:00 2001 From: rnerella92 <82972483+rnerella92@users.noreply.github.com> Date: Wed, 13 Apr 2022 13:29:47 -0500 Subject: [PATCH 008/106] updated indexConnectionsCount (#1901) Co-authored-by: Ramu Nerella --- gen3/bin/kube-setup-dicom-server.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/gen3/bin/kube-setup-dicom-server.sh b/gen3/bin/kube-setup-dicom-server.sh index 1de617b84..d0f498ff1 100644 --- a/gen3/bin/kube-setup-dicom-server.sh +++ b/gen3/bin/kube-setup-dicom-server.sh @@ -40,7 +40,8 @@ setup_database_and_config() { "Host": "$(jq -r .db_host < $secretsFolder/dbcreds.json)", "Database": "$(jq -r .db_database < $secretsFolder/dbcreds.json)", "Username": "$(jq -r .db_username < $secretsFolder/dbcreds.json)", - "Password": "$(jq -r .db_password < $secretsFolder/dbcreds.json)" + "Password": "$(jq -r .db_password < $secretsFolder/dbcreds.json)", + "IndexConnectionsCount": 5 } } EOM From 168dc5b96676038b9d4c04e048f4794436c7d443 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Thu, 14 Apr 2022 09:27:26 -0500 Subject: [PATCH 009/106] feat: VA's networkpolicies (#1902) --- kube/services/cohort-middleware/cohort-middleware-deploy.yaml | 2 ++ kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml | 1 + 2 files changed, 3 insertions(+) diff --git a/kube/services/cohort-middleware/cohort-middleware-deploy.yaml b/kube/services/cohort-middleware/cohort-middleware-deploy.yaml index c7395ad1c..a21d97900 100644 --- a/kube/services/cohort-middleware/cohort-middleware-deploy.yaml +++ b/kube/services/cohort-middleware/cohort-middleware-deploy.yaml @@ -19,6 +19,8 @@ spec: metadata: labels: app: 
cohort-middleware + dbatlas: "yes" + dbomop-data: "yes" public: "yes" tags.datadoghq.com/service: "cohort-middleware" GEN3_ENV_LABEL diff --git a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml index 8fc342411..08f58efb4 100644 --- a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml +++ b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml @@ -19,6 +19,7 @@ spec: metadata: labels: app: ohdsi-webapi + dbatlas: "yes" public: "yes" GEN3_DATE_LABEL spec: From 22d9ec7daa07f8ffbb363ed356c3498b18598f85 Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Thu, 14 Apr 2022 11:47:32 -0500 Subject: [PATCH 010/106] dicom-server client_max_body_size 0 (#1905) --- .../revproxy/gen3.nginx.conf/dicom-server-service.conf | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kube/services/revproxy/gen3.nginx.conf/dicom-server-service.conf b/kube/services/revproxy/gen3.nginx.conf/dicom-server-service.conf index 13a79a280..914b1d167 100644 --- a/kube/services/revproxy/gen3.nginx.conf/dicom-server-service.conf +++ b/kube/services/revproxy/gen3.nginx.conf/dicom-server-service.conf @@ -7,4 +7,7 @@ location /dicom-server/ { set $upstream http://dicom-server-service.$namespace.svc.cluster.local; rewrite ^/dicom-server/(.*) /$1 break; proxy_pass $upstream; + + # no limit to payload size so we can upload large DICOM files + client_max_body_size 0; } From bb2bbc5349b883f1913e0743c0383a1708f1b861 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Thu, 14 Apr 2022 12:29:23 -0500 Subject: [PATCH 011/106] feat: allows apache.github.io in squid proxy (#1903) --- files/squid_whitelist/web_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index a77ebf61e..d5a843c31 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -4,6 +4,7 @@ 192.170.230.164 
accounts.google.com achecker.ca +apache.github.io api.monqcle.com biodata-integration-tests.net biorender.com From 578ff59e8fffe1e8211635c84369e8c9b68d4aa3 Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Mon, 18 Apr 2022 11:04:41 -0500 Subject: [PATCH 012/106] roll-all: move DICOM services to the right spot (#1907) --- gen3/bin/kube-roll-all.sh | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/gen3/bin/kube-roll-all.sh b/gen3/bin/kube-roll-all.sh index 0a464b46a..ab22ce07b 100644 --- a/gen3/bin/kube-roll-all.sh +++ b/gen3/bin/kube-roll-all.sh @@ -231,6 +231,18 @@ else gen3_log_info "not deploying audit-service - no manifest entry for .versions.audit-service" fi +if g3k_manifest_lookup '.versions["dicom-server"]' 2> /dev/null; then + gen3 kube-setup-dicom-server & +else + gen3_log_info "not deploying dicom-server - no manifest entry for '.versions[\"dicom-server\"]'" +fi + +if g3k_manifest_lookup '.versions["dicom-viewer"]' 2> /dev/null; then + gen3 kube-setup-dicom-viewer & +else + gen3_log_info "not deploying dicom-viewer - no manifest entry for '.versions[\"dicom-viewer\"]'" +fi + gen3 kube-setup-revproxy & if [[ "$GEN3_ROLL_FAST" != "true" ]]; then @@ -290,18 +302,6 @@ else gen3_log_info "not deploying frontend-framework - no manifest entry for '.versions[\"frontend-framework\"]'" fi -if g3k_manifest_lookup '.versions["dicom-server"]' 2> /dev/null; then - gen3 kube-setup-dicom-server & -else - gen3_log_info "not deploying dicom-server - no manifest entry for '.versions[\"dicom-server\"]'" -fi - -if g3k_manifest_lookup '.versions["dicom-viewer"]' 2> /dev/null; then - gen3 kube-setup-dicom-viewer & -else - gen3_log_info "not deploying dicom-viewer - no manifest entry for '.versions[\"dicom-viewer\"]'" -fi - gen3_log_info "enable network policy" gen3 kube-setup-networkpolicy "enable" || true & From fbab123292a107638c76b2f3c2ec0dac2e47f763 Mon Sep 17 00:00:00 2001 From: 
UchicagoZchen138 <86243966+UchicagoZchen138@users.noreply.github.com> Date: Tue, 19 Apr 2022 10:28:53 -0500 Subject: [PATCH 013/106] fix spacing issues in argo-wrapper deploy (#1904) --- kube/services/argo-wrapper/argo-wrapper-deploy.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml index bb68daa64..f00bd2cc2 100644 --- a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml +++ b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml @@ -19,7 +19,7 @@ spec: labels: app: argo-wrapper tags.datadoghq.com/service: "argo-wrapper" - netnolimit: "yes" + netnolimit: "yes" public: "yes" GEN3_ARGO-WRAPPER_VERSION spec: From fbcc6e007bf93e93801d1024d0093ba4564e1851 Mon Sep 17 00:00:00 2001 From: UchicagoZchen138 <86243966+UchicagoZchen138@users.noreply.github.com> Date: Thu, 21 Apr 2022 09:44:58 -0500 Subject: [PATCH 014/106] fix add missing configs to argo archieve db (#1908) --- kube/services/argo/values.yaml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/kube/services/argo/values.yaml b/kube/services/argo/values.yaml index fe1f32b78..bf407d651 100644 --- a/kube/services/argo/values.yaml +++ b/kube/services/argo/values.yaml @@ -5,7 +5,11 @@ controller: enabled: false # -- enable persistence using postgres - persistence: + persistence: + archive: true + archiveLabelSelector: + matchLabels: + workflows.argoproj.io/archive-strategy: "true" postgresql: host: GEN3_ARGO_DB_HOST port: 5432 From 0e3441108e32950cec01e3d073bba4e52f838cd5 Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Thu, 21 Apr 2022 10:33:50 -0700 Subject: [PATCH 015/106] install all tools on jenkins2 (#1909) * install all tools on jenkins2 * Update Dockerfile * add python install script --- Docker/Jenkins2/Dockerfile | 117 +++++++++++++++++++++++++-- Docker/Jenkins2/install-python3.8.sh | 7 ++ 2 files changed, 119 insertions(+), 5 deletions(-) create mode 100755 
Docker/Jenkins2/install-python3.8.sh diff --git a/Docker/Jenkins2/Dockerfile b/Docker/Jenkins2/Dockerfile index e9a29b207..26f81c143 100644 --- a/Docker/Jenkins2/Dockerfile +++ b/Docker/Jenkins2/Dockerfile @@ -4,11 +4,115 @@ USER root ENV DEBIAN_FRONTEND=noninteractive -# install python and pip and aws cli -RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip build-essential zip unzip jq less vim gettext-base -RUN set -xe && python -m pip install awscli --upgrade && python -m pip install pytest --upgrade && python -m pip install PyYAML --upgrade -RUN set -xe && python3 -m pip install pytest --upgrade && python3 -m pip install PyYAML --upgrade -RUN set -xe && python -m pip install yq --upgrade && python3 -m pip install yq --upgrade +# install python +RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base + +RUN set -xe && apt-get update \ + && apt-get install -y lsb-release \ + apt-transport-https \ + ca-certificates \ + curl \ + gnupg2 \ + libffi-dev \ + libssl-dev \ + libcurl4-openssl-dev \ + libncurses5-dev \ + libncursesw5-dev \ + libreadline-dev \ + libsqlite3-dev \ + libgdbm-dev \ + libdb5.3-dev \ + libbz2-dev \ + libexpat1-dev \ + liblzma-dev \ + python-virtualenv \ + lua5.3 \ + r-base \ + software-properties-common \ + sudo \ + tk-dev \ + zlib1g-dev \ + zsh \ + && ln -s /usr/bin/lua5.3 /usr/local/bin/lua + +# install google tools +RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" \ + && echo "deb https://packages.cloud.google.com/apt $CLOUD_SDK_REPO main" > /etc/apt/sources.list.d/google-cloud-sdk.list \ + && curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - \ + && apt-get update \ + && apt-get install -y google-cloud-sdk \ + google-cloud-sdk-cbt \ + kubectl + +# +# install docker tools: +# * 
https://docs.docker.com/install/linux/docker-ce/debian/#install-docker-ce-1 +# * https://docs.docker.com/compose/install/#install-compose +# +RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \ + && add-apt-repository \ + "deb [arch=amd64] https://download.docker.com/linux/debian \ + $(lsb_release -cs) \ + stable" \ + && apt-get update \ + && apt-get install -y docker-ce \ + && curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose \ + && chmod a+rx /usr/local/bin/docker-compose + +# install nodejs +RUN curl -sL https://deb.nodesource.com/setup_12.x | bash - +RUN apt-get update && apt-get install -y nodejs + +# add psql: https://www.postgresql.org/download/linux/debian/ +RUN DISTRO="$(lsb_release -c -s)" \ + && echo "deb http://apt.postgresql.org/pub/repos/apt/ ${DISTRO}-pgdg main" > /etc/apt/sources.list.d/pgdg.list \ + && wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ + && apt-get update \ + && apt-get install -y postgresql-client-13 libpq-dev \ + && rm -rf /var/lib/apt/lists/* + +# Copy sh script responsible for installing Python +COPY install-python3.8.sh /root/tmp/install-python3.8.sh + +# Run the script responsible for installing Python 3.8.0 and link it to /usr/bin/python +RUN chmod +x /root/tmp/install-python3.8.sh; sync && \ + ./root/tmp/install-python3.8.sh && \ + rm -rf /root/tmp/install-python3.8.sh && \ + unlink /usr/bin/python3 && \ + ln -s /Python-3.8.0/python /usr/bin/python3 + +# Fix shebang for lsb_release +RUN sed -i 's/python3/python3.5/' /usr/bin/lsb_release && \ + sed -i 's/python3/python3.5/' /usr/bin/add-apt-repository + +# install aws cli, poetry, pytest, etc. 
+RUN set -xe && python3 -m pip install awscli --upgrade && python3 -m pip install pytest --upgrade && python3 -m pip install poetry && python3 -m pip install PyYAML --upgrade && python3 -m pip install lxml --upgrade && python3 -m pip install yq --upgrade + +RUN curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python3 - + +# install chrome (supports headless mode) +RUN set -xe \ + && curl -fsSL https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - \ + && echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" > /etc/apt/sources.list.d/google-chrome.list \ + && apt-get update \ + && apt-get install -y google-chrome-stable + +# install terraform +RUN curl -o /tmp/terraform.zip https://releases.hashicorp.com/terraform/0.11.15/terraform_0.11.15_linux_amd64.zip \ + && unzip /tmp/terraform.zip -d /usr/local/bin && /bin/rm /tmp/terraform.zip + +RUN curl -o /tmp/terraform.zip https://releases.hashicorp.com/terraform/0.12.31/terraform_0.12.31_linux_amd64.zip \ + && unzip /tmp/terraform.zip -d /tmp && mv /tmp/terraform /usr/local/bin/terraform12 && /bin/rm /tmp/terraform.zip + +# install packer +RUN curl -o /tmp/packer.zip https://releases.hashicorp.com/packer/1.5.1/packer_1.5.1_linux_amd64.zip +RUN unzip /tmp/packer.zip -d /usr/local/bin; /bin/rm /tmp/packer.zip + +# update /etc/sudoers +RUN sed 's/^%sudo/#%sudo/' /etc/sudoers > /etc/sudoers.bak \ + && /bin/echo -e "\n%sudo ALL=(ALL:ALL) NOPASSWD:ALL\n" >> /etc/sudoers.bak \ + && cp /etc/sudoers.bak /etc/sudoers \ + && usermod -G sudo jenkins # add our custom start script COPY jenkins2.sh /opt/cdis/bin/jenkins2.sh @@ -16,3 +120,6 @@ RUN chmod -R a+rx /opt/cdis ENTRYPOINT ["/sbin/tini", "--", "/opt/cdis/bin/jenkins2.sh"] USER jenkins + +RUN git config --global user.email jenkins \ + && git config --global user.name jenkins diff --git a/Docker/Jenkins2/install-python3.8.sh b/Docker/Jenkins2/install-python3.8.sh new file mode 100755 index 
000000000..df21c66e5 --- /dev/null +++ b/Docker/Jenkins2/install-python3.8.sh @@ -0,0 +1,7 @@ +wget https://www.python.org/ftp/python/3.8.0/Python-3.8.0.tar.xz +tar xf Python-3.8.0.tar.xz +rm Python-3.8.0.tar.xz +cd Python-3.8.0 +./configure +make +make altinstall From a1cff25d7d0d04b32903078361a5cd17d68dabaf Mon Sep 17 00:00:00 2001 From: cmlsn <100160785+cmlsn@users.noreply.github.com> Date: Mon, 25 Apr 2022 07:33:33 -0700 Subject: [PATCH 016/106] fix: Docker/Jenkins-CI-Worker/Dockerfile to reduce vulnerabilities (#1913) The following vulnerabilities are fixed with an upgrade: - https://snyk.io/vuln/SNYK-DEBIAN10-OPENSSL-1569403 - https://snyk.io/vuln/SNYK-DEBIAN10-PERL-570792 - https://snyk.io/vuln/SNYK-DEBIAN10-PERL-570797 - https://snyk.io/vuln/SNYK-DEBIAN10-PERL-570802 - https://snyk.io/vuln/SNYK-DEBIAN10-SUBVERSION-1071814 Co-authored-by: snyk-bot --- Docker/Jenkins-CI-Worker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docker/Jenkins-CI-Worker/Dockerfile b/Docker/Jenkins-CI-Worker/Dockerfile index c9e5fe763..3ed282c80 100644 --- a/Docker/Jenkins-CI-Worker/Dockerfile +++ b/Docker/Jenkins-CI-Worker/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/jnlp-slave:4.3-1 +FROM jenkins/jnlp-slave:4.9-1 USER root From 7c52fa144a85576fdaf81f1307075326ec35b637 Mon Sep 17 00:00:00 2001 From: John McCann Date: Mon, 25 Apr 2022 14:27:09 -0700 Subject: [PATCH 017/106] (PXP-9849): Fix mutate-guppy-config-for-pfb-export-test.sh so that index and type match (#1915) * fix(mutate-guppy-config-for-pfb-export-test): yq * fix(mutate-etl-mapping-config): use yq -yi ... 
* fix(mutate-guppy-config-for-pfb...): use gitops * fix(mutate-guppy-config...): make from etl-mapping * fix(mutate-etl-mapping): check for prior run * fix(mutate-etl-mapping): invert grep check * docs(gitops): add configmaps-from-json --- doc/gitops.md | 8 ++++ gen3/bin/gitops.sh | 3 ++ gen3/bin/mutate-etl-mapping-config.sh | 13 +++--- ...mutate-guppy-config-for-pfb-export-test.sh | 43 +++---------------- 4 files changed, 25 insertions(+), 42 deletions(-) diff --git a/doc/gitops.md b/doc/gitops.md index cc180485b..64c30597b 100644 --- a/doc/gitops.md +++ b/doc/gitops.md @@ -45,6 +45,14 @@ Note: if a key exists both under the `manifests/` folder and in `manifest.json`, Note: also as a random help updates the `etl-mapping` configmap from `etlMapping.yaml` +### configmaps-from-json + +Create a `manifest-` configmap from the given json blob. + +``` +gen3 gitops configmaps-from-json manifest-guppy "$guppyConfig" +``` + ### enforce Force the local `cdis-manifest/` and `cloud-automation/` folders to sync with github. diff --git a/gen3/bin/gitops.sh b/gen3/bin/gitops.sh index 49aa8d1c9..a3b7824dc 100644 --- a/gen3/bin/gitops.sh +++ b/gen3/bin/gitops.sh @@ -1053,6 +1053,9 @@ if [[ -z "$GEN3_SOURCE_ONLY" ]]; then "configmaps") gen3_gitops_configmaps "$@" ;; + "configmaps-from-json") + gen3_gitops_json_to_configmap "$@" + ;; "configmaps-list") gen3_gitops_configmaps_list "$@" ;; diff --git a/gen3/bin/mutate-etl-mapping-config.sh b/gen3/bin/mutate-etl-mapping-config.sh index 929463b43..d3969ffee 100644 --- a/gen3/bin/mutate-etl-mapping-config.sh +++ b/gen3/bin/mutate-etl-mapping-config.sh @@ -11,8 +11,6 @@ set -xe # how it is executed? # gen3 mutate-etl-mapping-config {PR} {repoName} -echo "hello world" - prNumber=$1 shift repoName=$1 @@ -23,8 +21,13 @@ if ! 
shift; then fi g3kubectl get cm etl-mapping -o jsonpath='{.data.etlMapping\.yaml}' > etlMapping.yaml -sed -i 's/^[[:space:]][[:space:]]- name: \(.*\)_subject$/ - name: '"${prNumber}"'.'"${repoName}"'.\1_subject/' etlMapping.yaml -sed -i 's/^[[:space:]][[:space:]]- name: \(.*\)_etl$/ - name: '"${prNumber}"'.'"${repoName}"'.\1_etl/' etlMapping.yaml -sed -i 's/^[[:space:]][[:space:]]- name: \(.*\)_file$/ - name: '"${prNumber}"'.'"${repoName}"'.\1_file/' etlMapping.yaml + +prefix="${prNumber}.${repoName}." +if grep "$prefix" etlMapping.yaml; then + gen3_log_info "ETL Mapping has already been mutated by a previous run of this script" + exit 0 +fi + +yq -yi '.mappings[].name |= "'"${prefix}"'" + .' etlMapping.yaml g3kubectl delete configmap etl-mapping g3kubectl create configmap etl-mapping --from-file=etlMapping.yaml=etlMapping.yaml diff --git a/gen3/bin/mutate-guppy-config-for-pfb-export-test.sh b/gen3/bin/mutate-guppy-config-for-pfb-export-test.sh index 621e9ba24..a80e197f7 100644 --- a/gen3/bin/mutate-guppy-config-for-pfb-export-test.sh +++ b/gen3/bin/mutate-guppy-config-for-pfb-export-test.sh @@ -9,44 +9,13 @@ set -xe # the incoming PR's guppy configuration is mutated to Jenkins environment # how to run: -# gen3 mutate-guppy-config-for-pfb-export-test {PR} {repoName} +# gen3 mutate-guppy-config-for-pfb-export-test -prNumber=$1 -shift -repoName=$1 - -if ! 
shift; then - gen3_log_err "use: mutate-guppy-config prNumber repoName" - exit 1 -fi - -# capture the names from the ETL mapping -echo "debugging" -etl_mapping_subject=$(g3kubectl get cm etl-mapping -o jsonpath='{.data.etlMapping\.yaml}' | yq .mappings[0].name) -echo "### ## etl_mapping_subject: ${etl_mapping_subject}" -etl_mapping_file=$(g3kubectl get cm etl-mapping -o jsonpath='{.data.etlMapping\.yaml}' | yq .mappings[1].name) -echo "echo "### ## etl_mapping_file: ${etl_mapping_file}" -etl_config=$(echo $etl_mapping_subject | tr -d '"' | sed 's/\(.*\)_\(.*\)$/\1_array-config/') -echo "### ## etl_config: ${etl_config}" - -g3kubectl get configmap manifest-guppy -o yaml > original_guppy_config.yaml -# mutating permanent jenkins config -sed -i 's/\(.*\)"index": "\(.*\)_subject",$/\1"index": '"${etl_mapping_subject}"',/' original_guppy_config.yaml -sed -i 's/\(.*\)"index": "\(.*\)_etl",$/\1"index": '"${etl_mapping_subject}"',/' original_guppy_config.yaml -# exclusive for bloodpac-like envs -sed -i 's/\(.*\)"index": "\(.*\)_study",$/\1"index": '"${etl_mapping_subject}"',/' original_guppy_config.yaml -# the pre-defined Canine index works with subject ONLY (never case) -sed -i 's/\(.*\)"type": "case"$/\1"type": "subject"/' original_guppy_config.yaml -sed -i 's/\(.*\)"index": "\(.*\)_file",$/\1"index": '"${etl_mapping_file}"',/' original_guppy_config.yaml -# note: including double-quotes around etl_config here -sed -i 's/\(.*\)"config_index": "\(.*\)_array-config",$/\1"config_index": "'"${etl_config}"'",/' original_guppy_config.yaml - -# mutating after guppy test (pre-defined canine config) and some qa-* env guppy configs -sed -i 's/\(.*\)"index": "\(.*\)_subject_alias",$/\1"index": '"${etl_mapping_subject}"',/' original_guppy_config.yaml -sed -i 's/\(.*\)"index": "\(.*\)_file_alias",$/\1"index": '"${etl_mapping_file}"',/' original_guppy_config.yaml -# note: including double-quotes around etl_config here -sed -i 's/\(.*\)"config_index": 
"\(.*\)_configs_alias",$/\1"config_index": "'"${etl_config}"'",/' original_guppy_config.yaml +etlMapping="$(g3kubectl get cm etl-mapping -o jsonpath='{.data.etlMapping\.yaml}')" +guppyConfig="$(yq '{indices:[.mappings[]|{index:.name,type:.doc_type}],auth_filter_field:"auth_resource_path"}' <<< "$etlMapping")" +configIndex="$(jq -r '.indices[0].index' <<< "$guppyConfig" | rev | cut -d_ -f2- | rev)_array-config" +guppyConfig="$(jq --arg configIndex "$configIndex" '. += {config_index:$configIndex}' <<< "$guppyConfig")" g3kubectl delete configmap manifest-guppy -g3kubectl apply -f original_guppy_config.yaml +gen3 gitops configmaps-from-json manifest-guppy "$guppyConfig" gen3 roll guppy From d689d31660814a7f40b8415358508b1ff13c2a3f Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Wed, 27 Apr 2022 09:40:47 -0700 Subject: [PATCH 018/106] fix jenkins and jenkins2 deployments (#1917) --- gen3/bin/kube-setup-jenkins.sh | 2 + ...ent.yaml => jenkins-ci-worker-deploy.yaml} | 0 ...ci-pvc.yaml => jenkins-ci-worker-pvc.yaml} | 0 ...oyment.yaml => jenkins-worker-deploy.yaml} | 0 kube/services/jenkins2/jenkins2-deploy.yaml | 145 ++++++++++++++++++ 5 files changed, 147 insertions(+) rename kube/services/jenkins-ci-worker/{jenkins-worker-ci-deployment.yaml => jenkins-ci-worker-deploy.yaml} (100%) rename kube/services/jenkins-ci-worker/{jenkins-worker-ci-pvc.yaml => jenkins-ci-worker-pvc.yaml} (100%) rename kube/services/jenkins-worker/{jenkins-worker-deployment.yaml => jenkins-worker-deploy.yaml} (100%) create mode 100644 kube/services/jenkins2/jenkins2-deploy.yaml diff --git a/gen3/bin/kube-setup-jenkins.sh b/gen3/bin/kube-setup-jenkins.sh index 5dae872ee..91ea1747c 100644 --- a/gen3/bin/kube-setup-jenkins.sh +++ b/gen3/bin/kube-setup-jenkins.sh @@ -55,6 +55,8 @@ g3kubectl apply -f "${GEN3_HOME}/kube/services/jenkins/clusterrolebinding-devops # Note: requires Jenkins entry in cdis-manifest gen3 roll jenkins +gen3 roll jenkins-worker +gen3 roll jenkins-ci-worker # # Get the ARN of the 
SSL certificate for the commons - diff --git a/kube/services/jenkins-ci-worker/jenkins-worker-ci-deployment.yaml b/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml similarity index 100% rename from kube/services/jenkins-ci-worker/jenkins-worker-ci-deployment.yaml rename to kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml diff --git a/kube/services/jenkins-ci-worker/jenkins-worker-ci-pvc.yaml b/kube/services/jenkins-ci-worker/jenkins-ci-worker-pvc.yaml similarity index 100% rename from kube/services/jenkins-ci-worker/jenkins-worker-ci-pvc.yaml rename to kube/services/jenkins-ci-worker/jenkins-ci-worker-pvc.yaml diff --git a/kube/services/jenkins-worker/jenkins-worker-deployment.yaml b/kube/services/jenkins-worker/jenkins-worker-deploy.yaml similarity index 100% rename from kube/services/jenkins-worker/jenkins-worker-deployment.yaml rename to kube/services/jenkins-worker/jenkins-worker-deploy.yaml diff --git a/kube/services/jenkins2/jenkins2-deploy.yaml b/kube/services/jenkins2/jenkins2-deploy.yaml new file mode 100644 index 000000000..673686d17 --- /dev/null +++ b/kube/services/jenkins2/jenkins2-deploy.yaml @@ -0,0 +1,145 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: jenkins-deployment +spec: + selector: + # Only select pods based on the 'app' label + matchLabels: + app: jenkins + revisionHistoryLimit: 2 + strategy: + # + # rolling update does not work, since + # persistent volume claim cannot be shared + # + type: Recreate + template: + metadata: + labels: + app: jenkins + # for network policy + netnolimit: "yes" + GEN3_DATE_LABEL + annotations: + "cluster-autoscaler.kubernetes.io/safe-to-evict": "false" + spec: + serviceAccountName: jenkins-service + securityContext: + runAsUser: 1000 + fsGroup: 1000 + containers: + # + # See for details on running docker in a pod: + # https://estl.tech/accessing-docker-from-a-kubernetes-pod-68996709c04b + # + - name: jenkins + GEN3_JENKINS_IMAGE + ports: + - containerPort: 8080 + name: 
http + protocol: TCP + - containerPort: 50000 + name: slavelistener + protocol: TCP + env: + - name: HOSTNAME + valueFrom: + configMapKeyRef: + name: global + key: hostname + - name: AWS_DEFAULT_REGION + value: us-east-1 + - name: JAVA_OPTS + value: "-Xmx3072m" + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: jenkins-secret + key: aws_access_key_id + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: jenkins-secret + key: aws_secret_access_key + - name: JENKINS_S3_PATH + # Just hard code for now + # Bucket in cdistest account used for backups + value: "s3://cdis-terraform-state/Jenkins3Backup" + - name: GOOGLE_EMAIL_AUX1 + valueFrom: + secretKeyRef: + name: google-acct1 + key: email + - name: GOOGLE_PASSWORD_AUX1 + valueFrom: + secretKeyRef: + name: google-acct1 + key: password + - name: GOOGLE_EMAIL_AUX2 + valueFrom: + secretKeyRef: + name: google-acct2 + key: email + - name: GOOGLE_PASSWORD_AUX2 + valueFrom: + secretKeyRef: + name: google-acct2 + key: password + - name: GOOGLE_APP_CREDS_JSON + valueFrom: + secretKeyRef: + name: jenkins-g3auto + key: google_app_creds.json + readinessProbe: + httpGet: + path: /login + port: 8080 + resources: + limits: + cpu: 0.9 + memory: 8192Mi + imagePullPolicy: Always + volumeMounts: + - name: datadir + mountPath: /var/jenkins_home + - name: "cert-volume" + readOnly: true + mountPath: "/mnt/ssl/service.crt" + subPath: "service.crt" + - name: "cert-volume" + readOnly: true + mountPath: "/mnt/ssl/service.key" + subPath: "service.key" + - name: "ca-volume" + readOnly: true + mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt" + subPath: "ca.pem" + - name: dockersock + mountPath: "/var/run/docker.sock" + - name: selenium + image: selenium/standalone-chrome:3.14 + ports: + - containerPort: 4444 + readinessProbe: + httpGet: + path: /wd/hub/sessions + port: 4444 + readinessProbe: + httpGet: + path: /wd/hub/sessions + port: 4444 + imagePullPolicy: Always + volumes: + - name: datadir + 
persistentVolumeClaim: + claimName: k8sjenkins + - name: cert-volume + secret: + secretName: "cert-jenkins-service" + - name: ca-volume + secret: + secretName: "service-ca" + - name: dockersock + hostPath: + path: /var/run/docker.sock \ No newline at end of file From fbcffc4892d6b2a22d660e4c9c8be8b9a6c26f5f Mon Sep 17 00:00:00 2001 From: jawadqur <55899496+jawadqur@users.noreply.github.com> Date: Mon, 2 May 2022 17:11:40 -0500 Subject: [PATCH 019/106] Update kube-setup-system-services.sh (#1922) --- gen3/bin/kube-setup-system-services.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/gen3/bin/kube-setup-system-services.sh b/gen3/bin/kube-setup-system-services.sh index f0f6cc21d..4321c6cf3 100644 --- a/gen3/bin/kube-setup-system-services.sh +++ b/gen3/bin/kube-setup-system-services.sh @@ -18,8 +18,8 @@ gen3_load "gen3/gen3setup" kubeproxy=${kubeproxy:-1.16.13} coredns=${coredns:-1.6.6} -cni=${cni:-1.7.5} -calico=${calico:-1.7.5} +cni=${cni:-1.11.0} +calico=${calico:-1.7.8} while [ $# -gt 0 ]; do @@ -32,8 +32,8 @@ done kube_proxy_image="602401143452.dkr.ecr.us-east-1.amazonaws.com/eks/kube-proxy:v${kubeproxy}-eksbuild.1" coredns_image="602401143452.dkr.ecr.us-east-1.amazonaws.com/eks/coredns:v${coredns}" -cni_image="https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v${cni}/config/v1.7/aws-k8s-cni.yaml" -calico_yaml="https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v${calico}/config/v$(echo ${calico} | sed -e 's/\.[0-9]\+$//')/calico.yaml" +cni_image="https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v${cni}/config/master/aws-k8s-cni.yaml" +calico_yaml="https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v${calico}/config/master/calico.yaml" g3kubectl set image daemonset.apps/kube-proxy -n kube-system kube-proxy=${kube_proxy_image} g3kubectl set image --namespace kube-system deployment.apps/coredns coredns=${coredns_image} From d494ad2478a9c897251407ccc135eaf509978e28 Mon Sep 17 00:00:00 2001 From: emalinowski 
Date: Wed, 4 May 2022 13:24:37 -0500 Subject: [PATCH 020/106] feat(whitelist-kubecost): Whitelisted kubecost repo (#1924) Co-authored-by: Edward Malinowski --- files/squid_whitelist/web_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index d5a843c31..88c9aaa4f 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -78,6 +78,7 @@ ifconfig.io internet2.edu k8s.gcr.io ks.osdc.io +kubecost.github.io kubernetes.github.io lib.stat.cmu.edu login.mathworks.com From 890867834f33a87cb58772f7203d9c8b95165b19 Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Mon, 9 May 2022 17:07:50 -0500 Subject: [PATCH 021/106] Fix dicom-server orthanc secret (#1925) --- gen3/bin/kube-setup-audit-service.sh | 2 +- gen3/bin/kube-setup-dicom-server.sh | 10 +++++----- gen3/bin/kube-setup-requestor.sh | 2 +- kube/services/dicom-server/dicom-server-deploy.yaml | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/gen3/bin/kube-setup-audit-service.sh b/gen3/bin/kube-setup-audit-service.sh index aa3fa5f9e..2eebe0f97 100644 --- a/gen3/bin/kube-setup-audit-service.sh +++ b/gen3/bin/kube-setup-audit-service.sh @@ -20,8 +20,8 @@ setup_database_and_config() { fi # Setup config file that audit-service consumes + local secretsFolder="$(gen3_secrets_folder)/g3auto/audit" if [[ ! -f "$secretsFolder/audit-service-config.yaml" || ! -f "$secretsFolder/base64Authz.txt" ]]; then - local secretsFolder="$(gen3_secrets_folder)/g3auto/audit" if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then if ! 
gen3 db setup audit; then gen3_log_err "Failed setting up database for audit-service" diff --git a/gen3/bin/kube-setup-dicom-server.sh b/gen3/bin/kube-setup-dicom-server.sh index d0f498ff1..fae273e2e 100644 --- a/gen3/bin/kube-setup-dicom-server.sh +++ b/gen3/bin/kube-setup-dicom-server.sh @@ -4,8 +4,8 @@ gen3_load "gen3/gen3setup" setup_database_and_config() { gen3_log_info "setting up dicom-server DB and config" - if g3kubectl describe secret dicom-server-g3auto > /dev/null 2>&1; then - gen3_log_info "dicom-server-g3auto secret already configured" + if g3kubectl describe secret orthanc-g3auto > /dev/null 2>&1; then + gen3_log_info "orthanc-g3auto secret already configured" return 0 fi if [[ -n "$JENKINS_HOME" || ! -f "$(gen3_secrets_folder)/creds.json" ]]; then @@ -13,8 +13,8 @@ setup_database_and_config() { return 0 fi - # Setup config file that dicom-server consumes - local secretsFolder="$(gen3_secrets_folder)/g3auto/dicom-server" + # Setup config files that dicom-server consumes + local secretsFolder="$(gen3_secrets_folder)/g3auto/orthanc" if [[ ! -f "$secretsFolder/orthanc_config_overwrites.json" ]]; then if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then if ! gen3 db setup orthanc; then @@ -46,7 +46,7 @@ setup_database_and_config() { } EOM fi - gen3 secrets sync 'setup dicom-server-g3auto secrets' + gen3 secrets sync 'setup orthanc-g3auto secrets' } if ! setup_database_and_config; then diff --git a/gen3/bin/kube-setup-requestor.sh b/gen3/bin/kube-setup-requestor.sh index 8cd38df7d..7bcc1e644 100644 --- a/gen3/bin/kube-setup-requestor.sh +++ b/gen3/bin/kube-setup-requestor.sh @@ -19,8 +19,8 @@ setup_database() { return 0 fi # Setup config file that requestor consumes + local secretsFolder="$(gen3_secrets_folder)/g3auto/requestor" if [[ ! -f "$secretsFolder/requestor-config.yaml" || ! -f "$secretsFolder/base64Authz.txt" ]]; then - local secretsFolder="$(gen3_secrets_folder)/g3auto/requestor" if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then if ! 
gen3 db setup requestor; then gen3_log_err "Failed setting up database for requestor service" diff --git a/kube/services/dicom-server/dicom-server-deploy.yaml b/kube/services/dicom-server/dicom-server-deploy.yaml index 7925c2974..96a9ac96d 100644 --- a/kube/services/dicom-server/dicom-server-deploy.yaml +++ b/kube/services/dicom-server/dicom-server-deploy.yaml @@ -20,7 +20,7 @@ spec: volumes: - name: config-volume-g3auto secret: - secretName: dicom-server-g3auto + secretName: orthanc-g3auto containers: - name: dicom-server GEN3_DICOM-SERVER_IMAGE From 7271f704cc23eb87e8c90e877f9a962b68d4425a Mon Sep 17 00:00:00 2001 From: John McCann Date: Thu, 12 May 2022 09:05:27 -0500 Subject: [PATCH 022/106] Fix kube-setup-fence.sh to create GA4GH jobs if they do not already exist (#1892) * fix(kube-setup-fence): invert cronjob check * chore(fence cronjobs): use batch/v1 apiVersion * Revert "chore(fence cronjobs): use batch/v1 apiVersion" This reverts commit f9ea96b0e9c508d3aca87bb1aa1e93a3afed3e6d. --- gen3/bin/kube-setup-fence.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gen3/bin/kube-setup-fence.sh b/gen3/bin/kube-setup-fence.sh index 1506182f7..64c4e14fb 100644 --- a/gen3/bin/kube-setup-fence.sh +++ b/gen3/bin/kube-setup-fence.sh @@ -80,13 +80,13 @@ gen3 kube-setup-google # TODO: WILL UNCOMMENT THIS ONCE FEATURE IN FENCE IS RELEASED # if isServiceVersionGreaterOrEqual "fence" "6.0.0" "2022.04"; then # # Setup db cleanup cronjob -# if g3kubectl get cronjob fence-cleanup-expired-ga4gh-info >/dev/null 2>&1; then +# if ! g3kubectl get cronjob fence-cleanup-expired-ga4gh-info >/dev/null 2>&1; then # echo "fence-cleanup-expired-ga4gh-info being added as a cronjob b/c fence >= 6.0.0 or 2022.04" # gen3 job cron fence-cleanup-expired-ga4gh-info "*/5 * * * *" # fi # # # Setup visa update cronjob -# if g3kubectl get cronjob fence-visa-update >/dev/null 2>&1; then +# if ! 
g3kubectl get cronjob fence-visa-update >/dev/null 2>&1; then # echo "fence-visa-update being added as a cronjob b/c fence >= 6.0.0 or 2022.04" # gen3 job cron fence-visa-update "30 * * * *" # fi From 21ecd0b08f6f0a37400901703baca1612ba6e989 Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Fri, 13 May 2022 14:00:24 -0500 Subject: [PATCH 023/106] Add data.cityofchicago.org to squid whitelist (#1931) --- files/squid_whitelist/web_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index 88c9aaa4f..72c0ae7c8 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -10,6 +10,7 @@ biodata-integration-tests.net biorender.com clinicaltrials.gov ctds-planx.atlassian.net +data.cityofchicago.org dataguids.org api.login.yahoo.com api.snapcraft.io From 7efabaf6ced01b0767bb7dec5758ffd06885b4c9 Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Mon, 16 May 2022 09:00:44 -0700 Subject: [PATCH 024/106] install xk6-browser and add jenkins2 deployment (#1932) * jenkins2 worker deploy and xk6-browser * add xk6-browser --- .secrets.baseline | 4 +- Docker/Jenkins-Worker/Dockerfile | 12 +- .../jenkins2-agent-service.yaml | 17 +++ .../jenkins2-worker-deploy.yaml | 138 ++++++++++++++++++ 4 files changed, 166 insertions(+), 5 deletions(-) create mode 100644 kube/services/jenkins2-worker/jenkins2-agent-service.yaml create mode 100644 kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml diff --git a/.secrets.baseline b/.secrets.baseline index 5bb288384..54230ef67 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2022-04-07T20:39:12Z", + "generated_at": "2022-05-15T00:10:09Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -84,7 +84,7 @@ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", "is_verified": false, - 
"line_number": 132, + "line_number": 138, "type": "Secret Keyword" } ], diff --git a/Docker/Jenkins-Worker/Dockerfile b/Docker/Jenkins-Worker/Dockerfile index 4b6939707..5fd7db839 100644 --- a/Docker/Jenkins-Worker/Dockerfile +++ b/Docker/Jenkins-Worker/Dockerfile @@ -8,7 +8,8 @@ ENV DEBIAN_FRONTEND=noninteractive RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip build-essential libgit2-dev zip unzip less vim gettext-base RUN set -xe && python -m pip install awscli --upgrade && python -m pip install pytest --upgrade && python -m pip install PyYAML --upgrade && python -m pip install lxml --upgrade RUN set -xe && python3 -m pip install pytest --upgrade && python3 -m pip install PyYAML --upgrade -RUN set -xe && python -m pip install yq --upgrade && python3 -m pip install yq --upgrade && python3 -m pip install pandas --upgrade +RUN set -xe && python -m pip install yq --upgrade && python3 -m pip install yq --upgrade +RUN set -xe && python3 -m pip install pandas --upgrade RUN apt-get update \ && apt-get install -y lsb-release \ @@ -50,6 +51,11 @@ RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys C5AD17C747 && apt-get update \ && apt-get install k6 +# install xk6-browser +RUN cd /opt && wget --quiet https://github.com/grafana/xk6-browser/releases/download/v0.3.0/xk6-browser-v0.3.0-linux-amd64.tar.gz \ + && tar -xvzf /opt/xk6-browser-v0.3.0-linux-amd64.tar.gz +ENV PATH="/opt/xk6-browser-v0.3.0-linux-amd64:${PATH}" + # install google tools RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" \ && echo "deb https://packages.cloud.google.com/apt $CLOUD_SDK_REPO main" > /etc/apt/sources.list.d/google-cloud-sdk.list \ @@ -99,9 +105,9 @@ RUN unzip /tmp/packer.zip -d /usr/local/bin; /bin/rm /tmp/packer.zip # add psql: https://www.postgresql.org/download/linux/debian/ RUN DISTRO="$(lsb_release -c -s)" \ && echo "deb http://apt.postgresql.org/pub/repos/apt/ 
${DISTRO}-pgdg main" > /etc/apt/sources.list.d/pgdg.list \ - && wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ + && wget --quiet --no-check-certificate -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ && apt-get update \ - && apt-get install -y postgresql-client-13 \ + && apt-get install -y postgresql-client-13 libpq-dev \ && rm -rf /var/lib/apt/lists/* # Copy sh script responsible for installing Python diff --git a/kube/services/jenkins2-worker/jenkins2-agent-service.yaml b/kube/services/jenkins2-worker/jenkins2-agent-service.yaml new file mode 100644 index 000000000..7f4e58109 --- /dev/null +++ b/kube/services/jenkins2-worker/jenkins2-agent-service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + name: jenkins-agent-service + name: jenkins-agent + namespace: default +spec: + ports: + - name: slavelistener + port: 50000 + protocol: TCP + targetPort: 50000 + selector: + app: jenkins + sessionAffinity: None + type: ClusterIP diff --git a/kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml b/kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml new file mode 100644 index 000000000..ad29eb47e --- /dev/null +++ b/kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml @@ -0,0 +1,138 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: jenkins-worker-deployment +spec: + selector: + # Only select pods based on the 'app' label + matchLabels: + app: jenkins-worker + template: + metadata: + labels: + app: jenkins-worker + # for network policy + netnolimit: "yes" + annotations: + "cluster-autoscaler.kubernetes.io/safe-to-evict": "false" + spec: + serviceAccountName: jenkins-service + securityContext: + runAsUser: 1000 + fsGroup: 1000 + initContainers: + - args: + - -c + - | + # fix permissions for /var/run/docker.sock + chmod 666 /var/run/docker.sock + echo "done" + command: + - /bin/bash + image: quay.io/cdis/awshelper:master + imagePullPolicy: Always + name: 
awshelper + resources: {} + securityContext: + allowPrivilegeEscalation: false + runAsUser: 0 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/run/docker.sock + name: dockersock + containers: + # + # See for details on running docker in a pod: + # https://estl.tech/accessing-docker-from-a-kubernetes-pod-68996709c04b + # + - name: jenkins-worker + image: "quay.io/cdis/gen3-qa-worker:master" + ports: + - containerPort: 8080 + env: + - name: JENKINS_URL + value: "https://jenkins2.planx-pla.net" + - name: JENKINS_SECRET + valueFrom: + secretKeyRef: + name: jenkins-worker-g3auto + key: jenkins-jnlp-agent-secret + - name: JENKINS_AGENT_NAME + value: "gen3-qa-worker" + - name: JENKINS_TUNNEL + value: "jenkins-agent:50000" + - name: AWS_DEFAULT_REGION + value: us-east-1 + - name: JAVA_OPTS + value: "-Xmx3072m" + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: jenkins-secret + key: aws_access_key_id + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: jenkins-secret + key: aws_secret_access_key + - name: GOOGLE_EMAIL_AUX1 + valueFrom: + secretKeyRef: + name: google-acct1 + key: email + - name: GOOGLE_PASSWORD_AUX1 + valueFrom: + secretKeyRef: + name: google-acct1 + key: password + - name: GOOGLE_EMAIL_AUX2 + valueFrom: + secretKeyRef: + name: google-acct2 + key: email + - name: GOOGLE_PASSWORD_AUX2 + valueFrom: + secretKeyRef: + name: google-acct2 + key: password + - name: GOOGLE_APP_CREDS_JSON + valueFrom: + secretKeyRef: + name: jenkins-g3auto + key: google_app_creds.json + - name: DD_AGENT_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + resources: + limits: + cpu: 0.6 + memory: 2048Mi + imagePullPolicy: Always + volumeMounts: + - name: "cert-volume" + readOnly: true + mountPath: "/mnt/ssl/service.crt" + subPath: "service.crt" + - name: "cert-volume" + readOnly: true + mountPath: "/mnt/ssl/service.key" + subPath: "service.key" + - name: "ca-volume" + readOnly: true 
+ mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt" + subPath: "ca.pem" + - name: dockersock + mountPath: "/var/run/docker.sock" + imagePullPolicy: Always + volumes: + - name: cert-volume + secret: + secretName: "cert-jenkins-service" + - name: ca-volume + secret: + secretName: "service-ca" + - name: dockersock + hostPath: + path: /var/run/docker.sock From edd1f0a79c4610b6b485725146b57063ae40b902 Mon Sep 17 00:00:00 2001 From: Ajo Augustine Date: Mon, 16 May 2022 12:02:07 -0700 Subject: [PATCH 025/106] fix: gen3 reset - making sure all pods are turned off (#1929) Co-authored-by: Hara Prasad --- gen3/bin/reset.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gen3/bin/reset.sh b/gen3/bin/reset.sh index 085440eec..2cc8d6b09 100644 --- a/gen3/bin/reset.sh +++ b/gen3/bin/reset.sh @@ -20,7 +20,7 @@ wait_for_pods_down() { podsDownFlag=1 while [[ podsDownFlag -ne 0 ]]; do g3kubectl get pods - if [[ 0 == "$(g3kubectl get pods -o json | jq -r '[.items[] | { name: .metadata.labels.app } ] | map(select(.name=="fence" or .name=="sheepdog" or .name=="peregrine" or .name=="indexd")) | length')" ]]; then + if [[ 0 == "$(g3kubectl get pods -o json | jq -r '[.items[] | { name: .metadata.labels.app } ] | length')" ]]; then gen3_log_info "pods are down, ready to drop databases" podsDownFlag=0 else From 91f009502d0bf736a3536f53fde08504a808e75f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 May 2022 16:16:02 -0500 Subject: [PATCH 026/106] chore(deps): bump async from 2.6.3 to 2.6.4 (#1919) Bumps [async](https://github.com/caolan/async) from 2.6.3 to 2.6.4. - [Release notes](https://github.com/caolan/async/releases) - [Changelog](https://github.com/caolan/async/blob/v2.6.4/CHANGELOG.md) - [Commits](https://github.com/caolan/async/compare/v2.6.3...v2.6.4) --- updated-dependencies: - dependency-name: async dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: jawadqur <55899496+jawadqur@users.noreply.github.com> --- package-lock.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index ad47acc00..f0521a3d6 100644 --- a/package-lock.json +++ b/package-lock.json @@ -52,9 +52,9 @@ "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=" }, "async": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/async/-/async-2.6.3.tgz", - "integrity": "sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg==", + "version": "2.6.4", + "resolved": "https://registry.npmjs.org/async/-/async-2.6.4.tgz", + "integrity": "sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA==", "requires": { "lodash": "^4.17.14" } From 7353f2fd1e1b40fd5f944fafc766ffd6be6effc4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 May 2022 16:17:16 -0500 Subject: [PATCH 027/106] chore(deps): bump ajv from 6.12.0 to 6.12.6 (#1838) Bumps [ajv](https://github.com/ajv-validator/ajv) from 6.12.0 to 6.12.6. - [Release notes](https://github.com/ajv-validator/ajv/releases) - [Commits](https://github.com/ajv-validator/ajv/compare/v6.12.0...v6.12.6) --- updated-dependencies: - dependency-name: ajv dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: jawadqur <55899496+jawadqur@users.noreply.github.com> --- package-lock.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index f0521a3d6..5d9d6116e 100644 --- a/package-lock.json +++ b/package-lock.json @@ -23,9 +23,9 @@ } }, "ajv": { - "version": "6.12.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.0.tgz", - "integrity": "sha512-D6gFiFA0RRLyUbvijN74DWAjXSFxWKaWP7mldxkVhyhAV3+SWA9HEJPHQ2c9soIeTFJqcSdFDGFgdqs1iUU2Hw==", + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "requires": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", From 00414b0ff8331db398afb4117cc4b38527adf153 Mon Sep 17 00:00:00 2001 From: jawadqur <55899496+jawadqur@users.noreply.github.com> Date: Mon, 16 May 2022 16:19:28 -0500 Subject: [PATCH 028/106] Add ALB controller and ingress support (#1914) * Add kube-setup-ingress script --- doc/kube-setup-ingress.md | 35 +++ gen3/bin/awsrole.sh | 12 +- gen3/bin/kube-setup-ingress.sh | 282 +++++++++++++++++++ kube/services/ingress/ingress.yaml | 25 ++ kube/services/revproxy/revproxy-service.yaml | 2 +- 5 files changed, 352 insertions(+), 4 deletions(-) create mode 100644 doc/kube-setup-ingress.md create mode 100644 gen3/bin/kube-setup-ingress.sh create mode 100644 kube/services/ingress/ingress.yaml diff --git a/doc/kube-setup-ingress.md b/doc/kube-setup-ingress.md new file mode 100644 index 000000000..15b2bd39e --- /dev/null +++ b/doc/kube-setup-ingress.md @@ -0,0 +1,35 @@ +# TL;DR + +Setup the aws-load-balancer-controller and an ALB. 
+ +This is a replacement for the revproxy-service-elb + +## Overview + +The script deploys the `aws-load-balancer-controller` when run in the `default` namespace. + +## Use + +### deploy + +Deploy the aws-load-balancer-controller from https://docs.aws.amazon.com/eks/latest/userguide/aws-load-balancer-controller.html. +Only works in the `default` namespace. + +If ran from a non-default namespace it will only deploy the k8s ingress resource. + +``` +gen3 kube-setup-ingress +``` + +### check + +Check if the ingress has been deployed by running + +``` +helm status aws-load-balancer-controller -n kube-system +``` + +Update your DNS records to the ADDRESS field from the output of +``` +kubectl get ingress revproxy-ingress +``` diff --git a/gen3/bin/awsrole.sh b/gen3/bin/awsrole.sh index b0b4f0cac..068003f95 100644 --- a/gen3/bin/awsrole.sh +++ b/gen3/bin/awsrole.sh @@ -24,6 +24,9 @@ gen3_awsrole_help() { function gen3_awsrole_ar_policy() { local serviceAccount="$1" shift || return 1 + local namespace="$1" + shift + [ -z "$namespace" ] && namespace=$(gen3 db namespace) local issuer_url local account_id local vpc_name @@ -56,7 +59,7 @@ function gen3_awsrole_ar_policy() { "Condition": { "StringEquals": { "${issuer_url}:aud": "sts.amazonaws.com", - "${issuer_url}:sub": "system:serviceaccount:$(gen3 db namespace):${serviceAccount}" + "${issuer_url}:sub": "system:serviceaccount:${namespace}:${serviceAccount}" } } } @@ -76,15 +79,18 @@ gen3_awsrole_sa_annotate() { shift || return 1 local roleName="$1" shift || return 1 + local namespace="$1" + shift + # If namespace is supplied set KUBECTL_NAMESPACE + [[ ! -z "$namespace" ]] && KUBECTL_NAMESPACE=$namespace + local roleArn local roleInfo roleInfo="$(aws iam get-role --role-name "$roleName")" || return 1 roleArn="$(jq -e -r .Role.Arn <<< "$roleInfo")" - if ! 
g3kubectl get sa "$saName" > /dev/null; then g3kubectl create sa "$saName" || return 1 fi - g3kubectl annotate --overwrite sa "$saName" "eks.amazonaws.com/role-arn=$roleArn" } diff --git a/gen3/bin/kube-setup-ingress.sh b/gen3/bin/kube-setup-ingress.sh new file mode 100644 index 000000000..5c45b30ab --- /dev/null +++ b/gen3/bin/kube-setup-ingress.sh @@ -0,0 +1,282 @@ +#!/bin/bash +# + + + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/gen3setup" +gen3_load "gen3/lib/kube-setup-init" + + +ctx="$(g3kubectl config current-context)" +ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | .context.namespace")" + +scriptDir="${GEN3_HOME}/kube/services/ingress" + +# https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4/deploy/installation/ +# https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.4.1/docs/install/iam_policy.json +# only do this if we are running in the default namespace +if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then + saName="aws-load-balancer-controller" + roleName=$(gen3 api safe-name ingress) + policyName=$(gen3 api safe-name ingress-policy) + ingressPolicy="$(mktemp "$XDG_RUNTIME_DIR/ingressPolicy.json_XXXXXX")" + arPolicyFile="$(mktemp "$XDG_RUNTIME_DIR/arPolicy.json_XXXXXX")" + + + # Create an inline policy for the ingress-controller + cat - > "$ingressPolicy" < /dev/null; then # setup role + gen3_log_info "creating IAM role for ingress: $roleName, linking to sa $saName" + # This doesn't work creating SA + role in kube-system namespace :( + # gen3 awsrole create "$roleName" "$saName" || return 1 + + gen3 awsrole ar-policy $saName kube-system > $arPolicyFile + role=$(aws iam create-role --role-name $roleName --assume-role-policy-document file://"${arPolicyFile}" 1>&2) + aws iam put-role-policy --role-name "$roleName" --policy-document file://${ingressPolicy} --policy-name "$policyName" 1>&2 + gen3 awsrole sa-annotate $saName 
$roleName kube-system + else + # update the annotation - just to be thorough + gen3 awsrole sa-annotate "$saName" "$roleName" kube-system + fi + + kubectl apply -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller//crds?ref=master" + + if (! helm status aws-load-balancer-controller -n kube-system > /dev/null 2>&1 ) || [[ "$1" == "--force" ]]; then + helm repo add eks https://aws.github.io/eks-charts 2> >(grep -v 'This is insecure' >&2) + helm repo update 2> >(grep -v 'This is insecure' >&2) + + # # TODO: Move to values.yaml file + helm upgrade --install aws-load-balancer-controller eks/aws-load-balancer-controller -n kube-system --set clusterName=$(gen3 api environment) --set serviceAccount.create=false --set serviceAccount.name=aws-load-balancer-controller 2> >(grep -v 'This is insecure' >&2) + else + gen3_log_info "kube-setup-ingress exiting - ingress already deployed, use --force to redeploy" + fi +fi + + +gen3_log_info "Applying ingress resource" +export ARN=$(g3kubectl get configmap global --output=jsonpath='{.data.revproxy_arn}') +g3kubectl apply -f "${GEN3_HOME}/kube/services/revproxy/revproxy-service.yaml" +envsubst <$scriptDir/ingress.yaml | g3kubectl apply -f - + + diff --git a/kube/services/ingress/ingress.yaml b/kube/services/ingress/ingress.yaml new file mode 100644 index 000000000..6c9de7f56 --- /dev/null +++ b/kube/services/ingress/ingress.yaml @@ -0,0 +1,25 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: gen3-ingress + annotations: + # TODO: Make this configurable + alb.ingress.kubernetes.io/scheme: internet-facing + alb.ingress.kubernetes.io/tags: Environment=$vpc_name + alb.ingress.kubernetes.io/certificate-arn: $ARN + alb.ingress.kubernetes.io/group.name: "$vpc_name" + alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]' + alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}' +spec: + 
ingressClassName: alb + rules: + - host: $GEN3_CACHE_HOSTNAME + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: revproxy-service + port: + number: 80 \ No newline at end of file diff --git a/kube/services/revproxy/revproxy-service.yaml b/kube/services/revproxy/revproxy-service.yaml index 3605d2a3a..9648a3fe0 100644 --- a/kube/services/revproxy/revproxy-service.yaml +++ b/kube/services/revproxy/revproxy-service.yaml @@ -10,4 +10,4 @@ spec: port: 80 targetPort: 80 name: http - type: ClusterIP + type: NodePort From 87e8f689d0fce355d7402c364ac173f21603620f Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Tue, 17 May 2022 09:55:02 -0700 Subject: [PATCH 029/106] add jenkins-perf to CI pool (#1934) --- files/scripts/ci-env-pool-reset.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/files/scripts/ci-env-pool-reset.sh b/files/scripts/ci-env-pool-reset.sh index 3f1d951d2..aefdb446b 100644 --- a/files/scripts/ci-env-pool-reset.sh +++ b/files/scripts/ci-env-pool-reset.sh @@ -33,6 +33,7 @@ jenkins-blood jenkins-brain jenkins-dcp jenkins-new +jenkins-perf EOF cat - > jenkins-envs-releases.txt < Date: Mon, 23 May 2022 09:49:11 -0500 Subject: [PATCH 030/106] DICOM server: add python plugin (#1930) --- gen3/bin/kube-setup-dicom-server.sh | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/gen3/bin/kube-setup-dicom-server.sh b/gen3/bin/kube-setup-dicom-server.sh index fae273e2e..59bcb8f39 100644 --- a/gen3/bin/kube-setup-dicom-server.sh +++ b/gen3/bin/kube-setup-dicom-server.sh @@ -32,7 +32,7 @@ setup_database_and_config() { # "SslCertificate": "" cat - > "$secretsFolder/orthanc_config_overwrites.json" < Date: Mon, 23 May 2022 18:23:41 +0200 Subject: [PATCH 031/106] Drop connections to DB before dropping it in reset step (#1937) * Drop connections to DB before dropping it in reset step --- gen3/bin/db.sh | 8 ++++++++ gen3/bin/reset.sh | 6 ++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/gen3/bin/db.sh 
b/gen3/bin/db.sh index 5e1c2c30a..63995fc0e 100644 --- a/gen3/bin/db.sh +++ b/gen3/bin/db.sh @@ -33,6 +33,7 @@ gen3_db_farm_json() { # gen3_db_reset() { local serviceName + local force if [[ $# -lt 1 || -z "$1" ]]; then gen3_log_err "gen3_db_reset" "must specify serviceName" return 1 @@ -43,6 +44,8 @@ gen3_db_reset() { gen3_log_err "gen3_db_reset" "may not reset peregrine - only sheepdog" return 1 fi + shift + force=$1 # connect as the admin user for the db server associated with the service local credsTemp="$(mktemp "$XDG_RUNTIME_DIR/credsTemp.json_XXXXXX")" @@ -81,6 +84,11 @@ gen3_db_reset() { fi local result + if [[ $force == "--force" ]]; then + gen3_log_warn "--force flag applied - Dropping all connections to the db before dropping" + echo "SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname='${dbname}' AND pid <> pg_backend_pid();" | gen3 psql "$serverName" + result=$? + fi echo "DROP DATABASE \"${dbname}\"; CREATE DATABASE \"${dbname}\"; GRANT ALL ON DATABASE \"$dbname\" TO \"$username\" WITH GRANT OPTION;" | gen3 psql "$serverName" result=$? 
if [[ "$serviceName" == "sheepdog" ]]; then diff --git a/gen3/bin/reset.sh b/gen3/bin/reset.sh index 2cc8d6b09..6dac0ea16 100644 --- a/gen3/bin/reset.sh +++ b/gen3/bin/reset.sh @@ -130,13 +130,15 @@ gen3 shutdown namespace # also clean out network policies g3kubectl delete networkpolicies --all wait_for_pods_down - +# Give it 30 seconds to ensure connections gets drained +sleep 30 # # Reset our databases # for serviceName in $(gen3 db services); do if [[ "$serviceName" != "peregrine" ]]; then # sheepdog and peregrine share the same db - gen3 db reset "$serviceName" + # --force will also drop connections to the database to ensure database gets dropped + gen3 db reset "$serviceName" --force fi done From 1322fd9c2e82532f57d05c8049d332bb369c1cce Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Mon, 23 May 2022 15:09:56 -0500 Subject: [PATCH 032/106] dicom-server readinessProbe and livenessProbe (#1939) --- .../services/dicom-server/dicom-server-deploy.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/kube/services/dicom-server/dicom-server-deploy.yaml b/kube/services/dicom-server/dicom-server-deploy.yaml index 96a9ac96d..59f1b5763 100644 --- a/kube/services/dicom-server/dicom-server-deploy.yaml +++ b/kube/services/dicom-server/dicom-server-deploy.yaml @@ -25,6 +25,20 @@ spec: - name: dicom-server GEN3_DICOM-SERVER_IMAGE imagePullPolicy: Always + readinessProbe: + httpGet: + path: / + port: 8042 + initialDelaySeconds: 5 + periodSeconds: 20 + timeoutSeconds: 30 + livenessProbe: + httpGet: + path: / + port: 8042 + initialDelaySeconds: 5 + periodSeconds: 60 + timeoutSeconds: 30 ports: - containerPort: 8042 volumeMounts: From 8b4bc258914802f1d2d6a92c4741962a4bab4ba4 Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Mon, 23 May 2022 16:22:22 -0500 Subject: [PATCH 033/106] Fix DICOM server probes (#1940) --- 
kube/services/dicom-server/dicom-server-deploy.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kube/services/dicom-server/dicom-server-deploy.yaml b/kube/services/dicom-server/dicom-server-deploy.yaml index 59f1b5763..a2c1a2c03 100644 --- a/kube/services/dicom-server/dicom-server-deploy.yaml +++ b/kube/services/dicom-server/dicom-server-deploy.yaml @@ -27,14 +27,14 @@ spec: imagePullPolicy: Always readinessProbe: httpGet: - path: / + path: /system port: 8042 initialDelaySeconds: 5 periodSeconds: 20 timeoutSeconds: 30 livenessProbe: httpGet: - path: / + path: /system port: 8042 initialDelaySeconds: 5 periodSeconds: 60 From 183ea5bf7bf0e7e5831c34f083c92c997f28cfca Mon Sep 17 00:00:00 2001 From: Michael Lukowski Date: Wed, 25 May 2022 09:05:59 -0500 Subject: [PATCH 034/106] removing jenkins-perf from jenkins test pool (#1938) --- files/scripts/ci-env-pool-reset.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/files/scripts/ci-env-pool-reset.sh b/files/scripts/ci-env-pool-reset.sh index aefdb446b..3f1d951d2 100644 --- a/files/scripts/ci-env-pool-reset.sh +++ b/files/scripts/ci-env-pool-reset.sh @@ -33,7 +33,6 @@ jenkins-blood jenkins-brain jenkins-dcp jenkins-new -jenkins-perf EOF cat - > jenkins-envs-releases.txt < Date: Wed, 25 May 2022 15:02:07 -0500 Subject: [PATCH 035/106] HP-741 Feat/portal paths (#1918) * feat: option to choose root frontend * fix: make sure revproxy rolls * add portal path to gen3ff * remove debug header * fix * fix * fix env var --- gen3/bin/gitops.sh | 6 + gen3/bin/kube-roll-all.sh | 4 +- gen3/bin/kube-setup-revproxy.sh | 12 + gen3/bin/roll.sh | 2 +- .../frontend-framework-root-deploy.yaml | 99 ++++++++ kube/services/portal/portal-deploy.yaml | 6 +- kube/services/portal/portal-root-deploy.yaml | 224 ++++++++++++++++++ .../frontend-framework-service.conf | 8 + .../gen3ff-as-root/portal-service.conf | 25 ++ .../frontend-framework-service.conf | 0 .../{ => portal-as-root}/portal-service.conf | 0 
kube/services/revproxy/nginx.conf | 10 + kube/services/revproxy/revproxy-deploy.yaml | 6 + 13 files changed, 397 insertions(+), 5 deletions(-) create mode 100644 kube/services/frontend-framework/frontend-framework-root-deploy.yaml create mode 100644 kube/services/portal/portal-root-deploy.yaml create mode 100644 kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/frontend-framework-service.conf create mode 100644 kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/portal-service.conf rename kube/services/revproxy/gen3.nginx.conf/{ => portal-as-root}/frontend-framework-service.conf (100%) rename kube/services/revproxy/gen3.nginx.conf/{ => portal-as-root}/portal-service.conf (100%) diff --git a/gen3/bin/gitops.sh b/gen3/bin/gitops.sh index a3b7824dc..48ba6512c 100644 --- a/gen3/bin/gitops.sh +++ b/gen3/bin/gitops.sh @@ -1020,6 +1020,12 @@ gen3_roll_path() { local templatePath cleanName="${depName%[-_]deploy*}" serviceName="${cleanName/-canary/}" + # roll the correct root frontend service + frontend_root="$(g3k_config_lookup ".global.frontend_root" "$manifestPath")" + if [[ ($serviceName == "frontend-framework" && $frontend_root == "gen3ff") || ($serviceName == "portal" && $frontend_root != "gen3ff") ]]; then + cleanName="$cleanName-root" + fi + templatePath="${GEN3_HOME}/kube/services/${serviceName}/${cleanName}-deploy.yaml" if [[ -n "$deployVersion" && "$deployVersion" != null ]]; then templatePath="${GEN3_HOME}/kube/services/${serviceName}/${cleanName}-deploy-${deployVersion}.yaml" diff --git a/gen3/bin/kube-roll-all.sh b/gen3/bin/kube-roll-all.sh index ab22ce07b..14d8762e8 100644 --- a/gen3/bin/kube-roll-all.sh +++ b/gen3/bin/kube-roll-all.sh @@ -213,7 +213,7 @@ else gen3_log_info "not deploying requestor - no manifest entry for .versions.requestor" fi -gen3 kube-setup-metadata & +gen3 kube-setup-metadata if g3k_manifest_lookup .versions.ssjdispatcher 2>&1 /dev/null; then gen3 kube-setup-ssjdispatcher & @@ -243,7 +243,7 @@ else gen3_log_info "not deploying 
dicom-viewer - no manifest entry for '.versions[\"dicom-viewer\"]'" fi -gen3 kube-setup-revproxy & +gen3 kube-setup-revproxy if [[ "$GEN3_ROLL_FAST" != "true" ]]; then # Internal k8s systems diff --git a/gen3/bin/kube-setup-revproxy.sh b/gen3/bin/kube-setup-revproxy.sh index 6b1bfa2fd..72cc21bf5 100644 --- a/gen3/bin/kube-setup-revproxy.sh +++ b/gen3/bin/kube-setup-revproxy.sh @@ -85,6 +85,18 @@ fi for name in $(g3kubectl get services -o json | jq -r '.items[] | .metadata.name'); do filePath="$scriptDir/gen3.nginx.conf/${name}.conf" + + if [[ $name == "portal-service" || $name == "frontend-framework-service" ]]; then + FRONTEND_ROOT=$(g3kubectl get configmap manifest-global --output=jsonpath='{.data.frontend_root}') + if [[ $FRONTEND_ROOT == "gen3ff" ]]; then + #echo "setup gen3ff as root frontend service" + filePath="$scriptDir/gen3.nginx.conf/gen3ff-as-root/${name}.conf" + else + #echo "setup windmill as root frontend service" + filePath="$scriptDir/gen3.nginx.conf/portal-as-root/${name}.conf" + fi + fi + #echo "$filePath" if [[ -f "$filePath" ]]; then #echo "$filePath exists in $BASHPID!" diff --git a/gen3/bin/roll.sh b/gen3/bin/roll.sh index 1859504d5..baed75aa4 100644 --- a/gen3/bin/roll.sh +++ b/gen3/bin/roll.sh @@ -69,7 +69,7 @@ gen3_roll() { # Get the service name, so we can verify it's in the manifest local serviceName - serviceName="$(basename "$templatePath" | sed 's/-deploy.*yaml$//')" + serviceName="$(basename "$templatePath" | sed 's/\(-root\)*-deploy.*yaml$//')" if g3k_config_lookup ".versions[\"$serviceName\"]" < "$manifestPath" > /dev/null 2>&1; then if ! 
(g3k_manifest_filter "$templatePath" "" "$@" | g3kubectl apply -f -); then diff --git a/kube/services/frontend-framework/frontend-framework-root-deploy.yaml b/kube/services/frontend-framework/frontend-framework-root-deploy.yaml new file mode 100644 index 000000000..df66b97ad --- /dev/null +++ b/kube/services/frontend-framework/frontend-framework-root-deploy.yaml @@ -0,0 +1,99 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: frontend-framework-deployment +spec: + selector: + matchLabels: + app: frontend-framework + revisionHistoryLimit: 2 + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 2 + maxUnavailable: 25% + template: + metadata: + labels: + app: frontend-framework + public: "yes" + GEN3_DATE_LABEL + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - frontend-framework + topologyKey: "kubernetes.io/hostname" + automountServiceAccountToken: false + volumes: + - name: ca-volume + secret: + secretName: "service-ca" + - name: config-volume + secret: + secretName: "frontend-framework-config" + - name: sponsor-img-volume + secret: + secretName: "frontend-framework-sponsor-config" + - name: privacy-policy + configMap: + name: "privacy-policy" + - name: cert-volume + secret: + secretName: "cert-portal-service" + containers: + - name: frontend-framework + GEN3_FRONTEND-FRAMEWORK_IMAGE + readinessProbe: + httpGet: + path: / + port: 3000 + initialDelaySeconds: 30 + periodSeconds: 60 + timeoutSeconds: 30 + livenessProbe: + httpGet: + path: / + port: 3000 + initialDelaySeconds: 60 + periodSeconds: 60 + timeoutSeconds: 30 + failureThreshold: 6 + resources: + requests: + cpu: 0.6 + memory: 512Mi + limits: + cpu: 2 + memory: 4096Mi + ports: + - containerPort: 3000 + command: + - /bin/bash + - ./start.sh + env: + - name: HOSTNAME + value: revproxy-service + - name: NEXT_PUBLIC_PORTAL_BASENAME + value: 
/portal + volumeMounts: + - name: "cert-volume" + readOnly: true + mountPath: "/mnt/ssl/service.crt" + subPath: "service.crt" + - name: "cert-volume" + readOnly: true + mountPath: "/mnt/ssl/service.key" + subPath: "service.key" + - name: "ca-volume" + readOnly: true + mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt" + subPath: "ca.pem" + imagePullPolicy: Always diff --git a/kube/services/portal/portal-deploy.yaml b/kube/services/portal/portal-deploy.yaml index e65e12ea1..25ab33878 100644 --- a/kube/services/portal/portal-deploy.yaml +++ b/kube/services/portal/portal-deploy.yaml @@ -54,14 +54,14 @@ spec: GEN3_PORTAL_IMAGE readinessProbe: httpGet: - path: / + path: /portal/ port: 80 initialDelaySeconds: 30 periodSeconds: 60 timeoutSeconds: 30 livenessProbe: httpGet: - path: / + path: /portal/ port: 80 initialDelaySeconds: 60 periodSeconds: 60 @@ -187,6 +187,8 @@ spec: - name: DATA_UPLOAD_BUCKET # S3 bucket name for data upload, for setting up CSP GEN3_DATA_UPLOAD_BUCKET|-value: ""-| + - name: BASENAME + value: /portal volumeMounts: - name: "cert-volume" readOnly: true diff --git a/kube/services/portal/portal-root-deploy.yaml b/kube/services/portal/portal-root-deploy.yaml new file mode 100644 index 000000000..e65e12ea1 --- /dev/null +++ b/kube/services/portal/portal-root-deploy.yaml @@ -0,0 +1,224 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: portal-deployment +spec: + selector: + # Only select pods based on the 'app' label + matchLabels: + app: portal + revisionHistoryLimit: 2 + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 2 + maxUnavailable: 25% + template: + metadata: + labels: + app: portal + public: "yes" + GEN3_DATE_LABEL + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - portal + topologyKey: "kubernetes.io/hostname" + automountServiceAccountToken: false + volumes: + 
- name: ca-volume + secret: + secretName: "service-ca" + - name: config-volume + secret: + secretName: "portal-config" + - name: sponsor-img-volume + secret: + secretName: "portal-sponsor-config" + - name: privacy-policy + configMap: + name: "privacy-policy" + - name: cert-volume + secret: + secretName: "cert-portal-service" + containers: + - name: portal + GEN3_PORTAL_IMAGE + readinessProbe: + httpGet: + path: / + port: 80 + initialDelaySeconds: 30 + periodSeconds: 60 + timeoutSeconds: 30 + livenessProbe: + httpGet: + path: / + port: 80 + initialDelaySeconds: 60 + periodSeconds: 60 + timeoutSeconds: 30 + # portal sometimes takes a long time to come up ... - + # has to fetch the dictionary, relay compile, etc + failureThreshold: 10 + resources: + requests: + cpu: 0.6 + memory: 512Mi + limits: + # portal pigs out on resources at startup, then settles down + cpu: 2 + memory: 4096Mi + ports: + - containerPort: 80 + - containerPort: 443 + command: + - /bin/bash + - ./dockerStart.sh + env: + - name: HOSTNAME + value: revproxy-service + # disable npm 7's brand new update notifier to prevent Portal from stuck at starting up + # see https://github.com/npm/cli/issues/3163 + - name: NPM_CONFIG_UPDATE_NOTIFIER + value: "false" + - name: APP + valueFrom: + configMapKeyRef: + name: manifest-global + key: portal_app + - name: GEN3_BUNDLE + valueFrom: + configMapKeyRef: + name: manifest-portal + key: GEN3_BUNDLE + optional: true + - name: LOGOUT_INACTIVE_USERS + valueFrom: + configMapKeyRef: + name: manifest-global + key: logout_inactive_users + optional: true + - name: WORKSPACE_TIMEOUT_IN_MINUTES + valueFrom: + configMapKeyRef: + name: manifest-global + key: workspace_timeout_in_minutes + optional: true + - name: TIER_ACCESS_LEVEL + valueFrom: + configMapKeyRef: + name: manifest-global + # acceptable values for `tier_access_level` are: `libre`, `regular` and `private`. 
If omitted, by default common will be treated as `private` + key: tier_access_level + # for now making it optional so won't break anything + optional: true + - name: TIER_ACCESS_LIMIT + valueFrom: + configMapKeyRef: + name: manifest-global + key: tier_access_limit + optional: true + - name: FENCE_URL + valueFrom: + configMapKeyRef: + name: manifest-global + key: fence_url + optional: true + - name: INDEXD_URL + valueFrom: + configMapKeyRef: + name: manifest-global + key: indexd_url + optional: true + - name: WORKSPACE_URL + valueFrom: + configMapKeyRef: + name: manifest-global + key: workspace_url + optional: true + - name: MANIFEST_SERVICE_URL + valueFrom: + configMapKeyRef: + name: manifest-global + key: manifest_service_url + optional: true + - name: WTS_URL + valueFrom: + configMapKeyRef: + name: manifest-global + key: wts_url + optional: true + - name: PRIVACY_POLICY_URL + valueFrom: + configMapKeyRef: + name: manifest-global + key: privacy_policy_url + optional: true + - name: MAPBOX_API_TOKEN + # Optional token for mapbox api + valueFrom: + configMapKeyRef: + name: global + key: mapbox_token + optional: true + - name: DATADOG_APPLICATION_ID + # Optional application ID for Datadog + valueFrom: + secretKeyRef: + name: portal-datadog-config + key: datadog_application_id + optional: true + - name: DATADOG_CLIENT_TOKEN + # Optional client token for Datadog + valueFrom: + secretKeyRef: + name: portal-datadog-config + key: datadog_client_token + optional: true + - name: DATA_UPLOAD_BUCKET + # S3 bucket name for data upload, for setting up CSP + GEN3_DATA_UPLOAD_BUCKET|-value: ""-| + volumeMounts: + - name: "cert-volume" + readOnly: true + mountPath: "/mnt/ssl/service.crt" + subPath: "service.crt" + - name: "cert-volume" + readOnly: true + mountPath: "/mnt/ssl/service.key" + subPath: "service.key" + - name: "ca-volume" + readOnly: true + mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt" + subPath: "ca.pem" + - name: "config-volume" + mountPath: 
"/data-portal/data/config/gitops.json" + subPath: "gitops.json" + - name: "config-volume" + mountPath: "/data-portal/custom/logo/gitops-logo.png" + subPath: "gitops-logo.png" + - name: "config-volume" + mountPath: "/data-portal/custom/createdby/gitops.png" + subPath: "gitops-createdby.png" + - name: "config-volume" + mountPath: "/data-portal/custom/favicon/gitops-favicon.ico" + subPath: "gitops-favicon.ico" + - name: "config-volume" + mountPath: "/data-portal/custom/css/gitops.css" + subPath: "gitops.css" + - name: "sponsor-img-volume" + mountPath: "/data-portal/custom/sponsors/gitops-sponsors" + - name: "privacy-policy" + readOnly: true + mountPath: "/data-portal/custom/privacy_policy.md" + subPath: "privacy_policy.md" + imagePullPolicy: Always diff --git a/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/frontend-framework-service.conf b/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/frontend-framework-service.conf new file mode 100644 index 000000000..ac2cb75f6 --- /dev/null +++ b/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/frontend-framework-service.conf @@ -0,0 +1,8 @@ + location / { + if ($csrf_check !~ ^ok-\S.+$) { + return 403 "failed csrf check"; + } + set $proxy_service "frontend-framework"; + set $upstream http://frontend-framework-service.$namespace.svc.cluster.local; + proxy_pass $upstream; + } diff --git a/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/portal-service.conf b/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/portal-service.conf new file mode 100644 index 000000000..58f0851d6 --- /dev/null +++ b/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/portal-service.conf @@ -0,0 +1,25 @@ + location /portal { + if ($csrf_check !~ ^ok-\S.+$) { + return 403 "failed csrf check"; + } + + # + # Go into maintenance mode when both are true: + # - MAINTENANCE_MODE environment variable is set + # - devmode cookies is not set + # + set $maintenance_mode "$maintenance_mode_env"; + if ($cookie_devmode) { + set 
$maintenance_mode "off"; + } + + set $proxy_service "portal"; + # $upstream is written to the logs + set $upstream http://portal-service.$namespace.svc.cluster.local; + rewrite ^/portal/(.*) /$1 break; + if ($maintenance_mode = 'on') { + rewrite ^/(.*)$ /dashboard/Public/maintenance-page/index.html redirect; + } + + proxy_pass $upstream; + } diff --git a/kube/services/revproxy/gen3.nginx.conf/frontend-framework-service.conf b/kube/services/revproxy/gen3.nginx.conf/portal-as-root/frontend-framework-service.conf similarity index 100% rename from kube/services/revproxy/gen3.nginx.conf/frontend-framework-service.conf rename to kube/services/revproxy/gen3.nginx.conf/portal-as-root/frontend-framework-service.conf diff --git a/kube/services/revproxy/gen3.nginx.conf/portal-service.conf b/kube/services/revproxy/gen3.nginx.conf/portal-as-root/portal-service.conf similarity index 100% rename from kube/services/revproxy/gen3.nginx.conf/portal-service.conf rename to kube/services/revproxy/gen3.nginx.conf/portal-as-root/portal-service.conf diff --git a/kube/services/revproxy/nginx.conf b/kube/services/revproxy/nginx.conf index a716ea05c..f38dcddef 100644 --- a/kube/services/revproxy/nginx.conf +++ b/kube/services/revproxy/nginx.conf @@ -23,6 +23,7 @@ env DES_NAMESPACE; env MAINTENANCE_MODE; env INDEXD_AUTHZ; env MDS_AUTHZ; +env FRONTEND_ROOT; events { worker_connections 768; @@ -79,6 +80,9 @@ include /etc/nginx/gen3_server*.conf; # see portal-conf perl_set $maintenance_mode_env 'sub { return $ENV{"MAINTENANCE_MODE"} || "undefined"; }'; +# Setup root path frontend service +perl_set $frontend_root_service 'sub { return $ENV{"FRONTEND_ROOT"} eq "gen3ff" ? 
"gen3ff" : "portal"; }'; + ## # Get canary weight environment vars into block # This allows us to use the var in njs scripts @@ -407,6 +411,12 @@ server { #} include /etc/nginx/gen3.conf/*.conf; + if ($frontend_root_service = "portal") { + include /etc/nginx/gen3.conf/portal-as-root/*.conf; + } + if ($frontend_root_service = "gen3ff") { + include /etc/nginx/gen3.conf/gen3ff-as-root/*.conf; + } location @errorworkspace { return 302 https://$host/no-workspace-access; diff --git a/kube/services/revproxy/revproxy-deploy.yaml b/kube/services/revproxy/revproxy-deploy.yaml index 3f4006cb8..1cba90bc0 100644 --- a/kube/services/revproxy/revproxy-deploy.yaml +++ b/kube/services/revproxy/revproxy-deploy.yaml @@ -95,6 +95,12 @@ spec: name: manifest-global key: maintenance_mode optional: true + - name: FRONTEND_ROOT + valueFrom: + configMapKeyRef: + name: manifest-global + key: frontend_root + optional: true - name: ORIGINS_ALLOW_CREDENTIALS valueFrom: configMapKeyRef: From b57e3085ba38760eae5708b15b12a1bb24d7f293 Mon Sep 17 00:00:00 2001 From: Uwe Winter Date: Thu, 26 May 2022 11:48:52 +1000 Subject: [PATCH 036/106] add region locaiton constraint to state bucket (#1942) Co-authored-by: Uwe Winter --- gen3/bin/workon.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gen3/bin/workon.sh b/gen3/bin/workon.sh index b6ba562a2..e7b951d1c 100644 --- a/gen3/bin/workon.sh +++ b/gen3/bin/workon.sh @@ -113,7 +113,7 @@ if [[ ! 
-f "$bucketCheckFlag" && "$GEN3_FLAVOR" == "AWS" ]]; then } EOM ) - gen3_aws_run aws s3api create-bucket --acl private --bucket "$GEN3_S3_BUCKET" + gen3_aws_run aws s3api create-bucket --acl private --bucket "$GEN3_S3_BUCKET" --create-bucket-configuration '{"LocationConstraint":"'$(aws configure get $GEN3_PROFILE.region)'"}' sleep 5 # Avoid race conditions if gen3_aws_run aws s3api put-bucket-encryption --bucket "$GEN3_S3_BUCKET" --server-side-encryption-configuration "$S3_POLICY"; then touch "$bucketCheckFlag" From 91b98bc9ca2419d293a7df9a1767602611a2181a Mon Sep 17 00:00:00 2001 From: jawadqur <55899496+jawadqur@users.noreply.github.com> Date: Thu, 26 May 2022 04:20:41 +0200 Subject: [PATCH 037/106] Revert "fix: install kubectl (#1874)" (#1943) This reverts commit 811579abdb1fa4050402a6c0d1fdee38320652b2. --- gen3/bin/kube-setup-workvm.sh | 31 ++++++++++--------------------- 1 file changed, 10 insertions(+), 21 deletions(-) diff --git a/gen3/bin/kube-setup-workvm.sh b/gen3/bin/kube-setup-workvm.sh index dfc884a7f..b740b388b 100644 --- a/gen3/bin/kube-setup-workvm.sh +++ b/gen3/bin/kube-setup-workvm.sh @@ -102,30 +102,19 @@ if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then ) fi # gen3sdk currently requires this - sudo -E apt-get install -y libpq-dev apt-transport-https ca-certificates curl - sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg - ##kubernetes-xenial packages are supported in Bionic and Focal. - echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/google-cloud-sdk.list - sudo -E apt-get update - #client_version=$(kubectl version --short --client=true | awk -F[v.] '{print $2"."$3}') - server_version=$(kubectl version --short | awk -F[v.] '/Server/ {print $3"."$4}') - - if [[ ! 
-z "${server_version// }" ]]; then - ( - install_version=$(apt-cache madison kubectl | awk '$3 ~ /'$server_version'/ {print $3}'| head -n 1) - sudo -E apt-get install -y kubectl=$install_version --allow-downgrades - ) - else - sudo -E apt-get install -y kubectl - fi - - if [[ -f /usr/local/bin/kubectl && -f /usr/bin/kubectl ]]; then # pref dpkg managed kubectl - sudo -E /bin/rm /usr/local/bin/kubectl - fi + sudo -E apt-get install -y libpq-dev if ! which gcloud > /dev/null 2>&1; then ( + export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" + sudo -E bash -c "echo 'deb https://packages.cloud.google.com/apt $CLOUD_SDK_REPO main' > /etc/apt/sources.list.d/google-cloud-sdk.list" + curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo -E apt-key add - + sudo -E apt-get update sudo -E apt-get install -y google-cloud-sdk \ - google-cloud-sdk-cbt + google-cloud-sdk-cbt \ + kubectl + if [[ -f /usr/local/bin/kubectl && -f /usr/bin/kubectl ]]; then # pref dpkg managed kubectl + sudo -E /bin/rm /usr/local/bin/kubectl + fi ) fi From 023fae12c467af722e41b9c703bf9d9f1f98e7e3 Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Tue, 31 May 2022 15:48:48 -0500 Subject: [PATCH 038/106] dicom-viewer readinessProbe and livenessProbe (#1946) --- .../services/dicom-viewer/dicom-viewer-deploy.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/kube/services/dicom-viewer/dicom-viewer-deploy.yaml b/kube/services/dicom-viewer/dicom-viewer-deploy.yaml index d37addca2..e7d05903b 100644 --- a/kube/services/dicom-viewer/dicom-viewer-deploy.yaml +++ b/kube/services/dicom-viewer/dicom-viewer-deploy.yaml @@ -21,6 +21,20 @@ spec: - name: dicom-viewer GEN3_DICOM-VIEWER_IMAGE imagePullPolicy: Always + readinessProbe: + httpGet: + path: / + port: 80 + initialDelaySeconds: 5 + periodSeconds: 20 + timeoutSeconds: 30 + livenessProbe: + httpGet: + path: / + port: 80 + initialDelaySeconds: 5 + periodSeconds: 60 + 
timeoutSeconds: 30 ports: - containerPort: 80 From 182cf4566362c61d28f641026b3ad00a0c751be5 Mon Sep 17 00:00:00 2001 From: Uwe Winter Date: Thu, 2 Jun 2022 00:43:41 +1000 Subject: [PATCH 039/106] Added a variable to allow configuration of the logs bucket (#1944) Co-authored-by: Uwe Winter --- tf_files/aws/csoc_management-logs/root.tf | 1 + tf_files/aws/csoc_management-logs/variables.tf | 4 ++++ tf_files/aws/modules/management-logs/logging.tf | 2 +- tf_files/aws/modules/management-logs/variables.tf | 4 ++++ 4 files changed, 10 insertions(+), 1 deletion(-) diff --git a/tf_files/aws/csoc_management-logs/root.tf b/tf_files/aws/csoc_management-logs/root.tf index bbfa91bff..4eea69fd4 100644 --- a/tf_files/aws/csoc_management-logs/root.tf +++ b/tf_files/aws/csoc_management-logs/root.tf @@ -10,4 +10,5 @@ module "logging" { source = "../modules/management-logs" accounts_id = "${var.accounts_id}" elasticsearch_domain = "${var.elasticsearch_domain}" + log_bucket_name = "${var.log_bucket_name}" } diff --git a/tf_files/aws/csoc_management-logs/variables.tf b/tf_files/aws/csoc_management-logs/variables.tf index 7f04daf31..382240b57 100644 --- a/tf_files/aws/csoc_management-logs/variables.tf +++ b/tf_files/aws/csoc_management-logs/variables.tf @@ -7,3 +7,7 @@ variable "accounts_id" { variable "elasticsearch_domain" { default = "commons-logs" } + +variable "log_bucket_name" { + default = "management-logs-remote-accounts" +} diff --git a/tf_files/aws/modules/management-logs/logging.tf b/tf_files/aws/modules/management-logs/logging.tf index 4d802f2a4..7f1509713 100644 --- a/tf_files/aws/modules/management-logs/logging.tf +++ b/tf_files/aws/modules/management-logs/logging.tf @@ -2,7 +2,7 @@ # Kinesis stream logs resource "aws_s3_bucket" "management-logs_bucket" { - bucket = "management-logs-remote-accounts" + bucket = "${var.log_bucket_name}" acl = "private" tags = { diff --git a/tf_files/aws/modules/management-logs/variables.tf b/tf_files/aws/modules/management-logs/variables.tf 
index 7f04daf31..382240b57 100644 --- a/tf_files/aws/modules/management-logs/variables.tf +++ b/tf_files/aws/modules/management-logs/variables.tf @@ -7,3 +7,7 @@ variable "accounts_id" { variable "elasticsearch_domain" { default = "commons-logs" } + +variable "log_bucket_name" { + default = "management-logs-remote-accounts" +} From b8a277aa70287e309caa1841f6e31f5ac792f3d9 Mon Sep 17 00:00:00 2001 From: jawadqur <55899496+jawadqur@users.noreply.github.com> Date: Wed, 1 Jun 2022 14:35:54 -0500 Subject: [PATCH 040/106] [GPE-377] Update datadog agent (#1945) * Update kube-setup-datadog.sh * Update values.yaml --- gen3/bin/kube-setup-datadog.sh | 2 +- kube/services/datadog/values.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/gen3/bin/kube-setup-datadog.sh b/gen3/bin/kube-setup-datadog.sh index 76c5ee685..89f007b09 100644 --- a/gen3/bin/kube-setup-datadog.sh +++ b/gen3/bin/kube-setup-datadog.sh @@ -44,7 +44,7 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then fi helm repo add datadog https://helm.datadoghq.com --force-update 2> >(grep -v 'This is insecure' >&2) helm repo update 2> >(grep -v 'This is insecure' >&2) - helm upgrade --install datadog -f "$GEN3_HOME/kube/services/datadog/values.yaml" datadog/datadog -n datadog --version 2.28.13 2> >(grep -v 'This is insecure' >&2) + helm upgrade --install datadog -f "$GEN3_HOME/kube/services/datadog/values.yaml" datadog/datadog -n datadog --version 2.33.8 2> >(grep -v 'This is insecure' >&2) ) else gen3_log_info "kube-setup-datadog exiting - datadog already deployed, use --force to redeploy" diff --git a/kube/services/datadog/values.yaml b/kube/services/datadog/values.yaml index 5ad681b5d..fea2c20af 100644 --- a/kube/services/datadog/values.yaml +++ b/kube/services/datadog/values.yaml @@ -221,7 +221,7 @@ clusterAgent: name: cluster-agent # clusterAgent.image.tag -- Cluster Agent image tag to use - tag: 1.16.0 + # tag: 1.16.0 # clusterAgent.image.repository -- Override 
default registry + image.name for Cluster Agent repository: @@ -278,7 +278,7 @@ agents: name: agent # agents.image.tag -- Define the Agent version to use - tag: 7.32.4 + # tag: 7.32.4 # agents.image.tagSuffix -- Suffix to append to Agent tag ## Ex: From 009e970ff9b71abfb266b982e55b4779cdb94a07 Mon Sep 17 00:00:00 2001 From: jawadqur <55899496+jawadqur@users.noreply.github.com> Date: Mon, 6 Jun 2022 10:52:13 -0500 Subject: [PATCH 041/106] Move pip install ddtrace to Dockerfile (#1865) * Move pip install ddtrace to Dockerfile --- Docker/python-nginx/python3.10-buster/Dockerfile | 2 +- Docker/python-nginx/python3.10-buster/dockerrun.sh | 1 - Docker/python-nginx/python3.6-alpine3.7/Dockerfile | 3 +++ Docker/python-nginx/python3.6-alpine3.7/dockerrun.sh | 1 - Docker/python-nginx/python3.6-buster/Dockerfile | 2 +- Docker/python-nginx/python3.6-buster/dockerrun.sh | 1 - Docker/python-nginx/python3.9-buster/Dockerfile | 2 +- Docker/python-nginx/python3.9-buster/dockerrun.sh | 1 - 8 files changed, 6 insertions(+), 7 deletions(-) diff --git a/Docker/python-nginx/python3.10-buster/Dockerfile b/Docker/python-nginx/python3.10-buster/Dockerfile index f41a7d226..9da445160 100644 --- a/Docker/python-nginx/python3.10-buster/Dockerfile +++ b/Docker/python-nginx/python3.10-buster/Dockerfile @@ -108,7 +108,7 @@ EXPOSE 443 # install uwsgi # https://uwsgi-docs.readthedocs.io/en/latest/Install.html RUN python -m pip install --upgrade pip -RUN pip install uwsgi +RUN pip install uwsgi ddtrace # Remove default configuration from Nginx RUN rm /etc/nginx/conf.d/default.conf diff --git a/Docker/python-nginx/python3.10-buster/dockerrun.sh b/Docker/python-nginx/python3.10-buster/dockerrun.sh index ba0e39b3d..583590e36 100644 --- a/Docker/python-nginx/python3.10-buster/dockerrun.sh +++ b/Docker/python-nginx/python3.10-buster/dockerrun.sh @@ -91,7 +91,6 @@ if [ -z $DD_ENABLED ]; then run uwsgi --ini /etc/uwsgi/uwsgi.ini ) & else -pip install ddtrace echo "import=ddtrace.bootstrap.sitecustomize" >> 
/etc/uwsgi/uwsgi.ini ( ddtrace-run uwsgi --enable-threads --ini /etc/uwsgi/uwsgi.ini diff --git a/Docker/python-nginx/python3.6-alpine3.7/Dockerfile b/Docker/python-nginx/python3.6-alpine3.7/Dockerfile index 06eface8f..ad371dcbd 100755 --- a/Docker/python-nginx/python3.6-alpine3.7/Dockerfile +++ b/Docker/python-nginx/python3.6-alpine3.7/Dockerfile @@ -145,6 +145,9 @@ RUN GPG_KEYS=B0F4253373F8F6F510D42178520A9993A1C052F8 \ COPY nginx.conf /etc/nginx/nginx.conf COPY uwsgi.conf /etc/nginx/sites-available/ +# Install ddtrace +RUN pip install ddtrace + # Standard set up Nginx finished EXPOSE 80 diff --git a/Docker/python-nginx/python3.6-alpine3.7/dockerrun.sh b/Docker/python-nginx/python3.6-alpine3.7/dockerrun.sh index e73de6697..4f4f6a6f6 100644 --- a/Docker/python-nginx/python3.6-alpine3.7/dockerrun.sh +++ b/Docker/python-nginx/python3.6-alpine3.7/dockerrun.sh @@ -91,7 +91,6 @@ if [[ -z $DD_ENABLED ]]; then run uwsgi --ini /etc/uwsgi/uwsgi.ini ) & else -pip install ddtrace echo "import=ddtrace.bootstrap.sitecustomize" >> /etc/uwsgi/uwsgi.ini ( ddtrace-run uwsgi --enable-threads --ini /etc/uwsgi/uwsgi.ini diff --git a/Docker/python-nginx/python3.6-buster/Dockerfile b/Docker/python-nginx/python3.6-buster/Dockerfile index e2d658038..55862b1d7 100644 --- a/Docker/python-nginx/python3.6-buster/Dockerfile +++ b/Docker/python-nginx/python3.6-buster/Dockerfile @@ -108,7 +108,7 @@ EXPOSE 443 # install uwsgi # https://uwsgi-docs.readthedocs.io/en/latest/Install.html RUN python -m pip install --upgrade pip -RUN pip install uwsgi +RUN pip install uwsgi ddtrace # Remove default configuration from Nginx RUN rm /etc/nginx/conf.d/default.conf diff --git a/Docker/python-nginx/python3.6-buster/dockerrun.sh b/Docker/python-nginx/python3.6-buster/dockerrun.sh index ba0e39b3d..583590e36 100644 --- a/Docker/python-nginx/python3.6-buster/dockerrun.sh +++ b/Docker/python-nginx/python3.6-buster/dockerrun.sh @@ -91,7 +91,6 @@ if [ -z $DD_ENABLED ]; then run uwsgi --ini /etc/uwsgi/uwsgi.ini ) & 
else -pip install ddtrace echo "import=ddtrace.bootstrap.sitecustomize" >> /etc/uwsgi/uwsgi.ini ( ddtrace-run uwsgi --enable-threads --ini /etc/uwsgi/uwsgi.ini diff --git a/Docker/python-nginx/python3.9-buster/Dockerfile b/Docker/python-nginx/python3.9-buster/Dockerfile index 9f81c1aac..f847b3864 100644 --- a/Docker/python-nginx/python3.9-buster/Dockerfile +++ b/Docker/python-nginx/python3.9-buster/Dockerfile @@ -108,7 +108,7 @@ EXPOSE 443 # install uwsgi # https://uwsgi-docs.readthedocs.io/en/latest/Install.html RUN python -m pip install --upgrade pip -RUN pip install uwsgi +RUN pip install uwsgi ddtrace # Remove default configuration from Nginx RUN rm /etc/nginx/conf.d/default.conf diff --git a/Docker/python-nginx/python3.9-buster/dockerrun.sh b/Docker/python-nginx/python3.9-buster/dockerrun.sh index ba0e39b3d..583590e36 100644 --- a/Docker/python-nginx/python3.9-buster/dockerrun.sh +++ b/Docker/python-nginx/python3.9-buster/dockerrun.sh @@ -91,7 +91,6 @@ if [ -z $DD_ENABLED ]; then run uwsgi --ini /etc/uwsgi/uwsgi.ini ) & else -pip install ddtrace echo "import=ddtrace.bootstrap.sitecustomize" >> /etc/uwsgi/uwsgi.ini ( ddtrace-run uwsgi --enable-threads --ini /etc/uwsgi/uwsgi.ini From eebd73492c843f2c86e97eee0a59735dbc5c1988 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Tue, 7 Jun 2022 12:42:35 -0500 Subject: [PATCH 042/106] feat(kubecost-setup): Added kubecost setup (#1923) * feat(kubecost-setup): Added kubecost setup * feat(kubecost-setup): Added kubecost setup * feat(kubecost-setup): Added kubecost setup * feat(kubecost-setup): Added kubecost setup * feat(kubecost-setup): Added kubecost setup * feat(kubecost-setup): Added kubecost setup * feat(kubecost-setup): Added kubecost setup * feat(kubecost-setup): Added kubecost setup * feat(kubecost-setup): Added kubecost setup * feat(kubecost-setup): Added kubecost setup * feat(kubecost-setup): Added kubecost setup * feat(kubecost-setup): Added kubecost setup * feat(kubecost-setup): Added kubecost setup script * 
feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): 
Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script * feat(kubecost-setup): Added kubecost setup script Co-authored-by: Edward Malinowski --- doc/kubecost.md | 82 ++++ gen3/bin/awsrole.sh | 43 +- gen3/bin/kube-setup-ingress.sh | 10 +- gen3/bin/kube-setup-kubecost.sh | 302 ++++++++++++ .../kubecost-master/kubecost-alb.yaml | 19 + .../kubecost-master/object-store.yaml | 16 + kube/services/kubecost-master/values.yaml | 40 ++ .../services/kubecost-slave/object-store.yaml | 16 + kube/services/kubecost-slave/values.yaml | 50 ++ .../kubecost-standalone/kubecost-alb.yaml | 19 + .../kubecost-standalone/object-store.yaml | 16 + kube/services/kubecost-standalone/values.yaml | 35 ++ tf_files/aws/kubecost/AWSCURInitializer.zip | Bin 0 -> 741 bytes 
.../aws/kubecost/AWSS3CURNotification.zip | Bin 0 -> 603 bytes tf_files/aws/kubecost/data.tf | 3 + tf_files/aws/kubecost/manifest.json | 5 + tf_files/aws/kubecost/output.tf | 20 + tf_files/aws/kubecost/root.tf | 448 ++++++++++++++++++ tf_files/aws/kubecost/sample.tfvars | 2 + tf_files/aws/kubecost/variables.tf | 15 + 20 files changed, 1118 insertions(+), 23 deletions(-) create mode 100644 doc/kubecost.md create mode 100644 gen3/bin/kube-setup-kubecost.sh create mode 100644 kube/services/kubecost-master/kubecost-alb.yaml create mode 100644 kube/services/kubecost-master/object-store.yaml create mode 100644 kube/services/kubecost-master/values.yaml create mode 100644 kube/services/kubecost-slave/object-store.yaml create mode 100644 kube/services/kubecost-slave/values.yaml create mode 100644 kube/services/kubecost-standalone/kubecost-alb.yaml create mode 100644 kube/services/kubecost-standalone/object-store.yaml create mode 100644 kube/services/kubecost-standalone/values.yaml create mode 100644 tf_files/aws/kubecost/AWSCURInitializer.zip create mode 100644 tf_files/aws/kubecost/AWSS3CURNotification.zip create mode 100644 tf_files/aws/kubecost/data.tf create mode 100644 tf_files/aws/kubecost/manifest.json create mode 100644 tf_files/aws/kubecost/output.tf create mode 100644 tf_files/aws/kubecost/root.tf create mode 100644 tf_files/aws/kubecost/sample.tfvars create mode 100644 tf_files/aws/kubecost/variables.tf diff --git a/doc/kubecost.md b/doc/kubecost.md new file mode 100644 index 000000000..a230e6378 --- /dev/null +++ b/doc/kubecost.md @@ -0,0 +1,82 @@ +# TL;DR + +Setup a kubecost cluster + + +## Use + +### `gen3 kube-setup-kubecost master create` + +Creates a master kubecost cluster + +Requires the following `key value` arguments + +* `--slave-account-id` - the account id of the slave kubecost cluster +* `--kubecost-token` - The token for the kubecost cluster + +Optional `key value` arguments + +* `--force` - defaults to false, set --force true to force helm upgrade 
+* `--disable-prometheus` - defaults to false, set --disable-prometheus true to disbale prometheus and use current setup +* `--prometheus-namespace` - The namespace of the current prometheus, required if kubecost prometheus is disabled +* `--prometheus-service` - The service name of the current prometheus, required if kubecost prometheus is disabled + +Ex: + +``` bash +gen3 kube-setup-kubecost master create --slave-account-id 1234567890 --kubecost-token abcdefghijklmnop12345 --force true +``` + +### `gen3 kube-setup-kubecost slave create` + +Creates a slave kubecost cluster + + +Requires the following `key value` arguments + +* `--s3-bucket` - the centralized s3 bucket of the master kubecost cluster +* `--kubecost-token` - The token for the kubecost cluster + +Optional `key value` arguments + +* `--force` - defaults to false, set --force true to force helm upgrade +* `--disable-prometheus` - defaults to false, set --disable-prometheus true to disbale prometheus and use current setup +* `--prometheus-namespace` - The namespace of the current prometheus, required if kubecost prometheus is disabled +* `--prometheus-service` - The service name of the current prometheus, required if kubecost prometheus is disabled + +Ex: + +``` bash +gen3 kube-setup-kubecost slave create --s3-bucket test-kubecost-bucket --kubecost-token abcdefghijklmnop12345 --force true +``` + +### `gen3 kube-setup-kubecost standalone create` + +Creates a standalone kubecost cluster + +Requires the following `key value` arguments + +* `--kubecost-token` - The token for the kubecost cluster + +Optional `key value` arguments + +* `--force` - defaults to false, set --force true to force helm upgrade +* `--disable-prometheus` - defaults to false, set --disable-prometheus true to disbale prometheus and use current setup +* `--prometheus-namespace` - The namespace of the current prometheus, required if kubecost prometheus is disabled +* `--prometheus-service` - The service name of the current prometheus, 
required if kubecost prometheus is disabled + +Ex: + +``` bash +gen3 kube-setup-kubecost standalone create --kubecost-token abcdefghijklmnop12345 --force true +``` + +### `gen3 kube-setup-kubecost delete` + +Deletes a running kubecost deployment and destroys the associated infra + +Ex: + +``` bash +gen3 kube-setup-kubecost delete +``` diff --git a/gen3/bin/awsrole.sh b/gen3/bin/awsrole.sh index 068003f95..476e7d003 100644 --- a/gen3/bin/awsrole.sh +++ b/gen3/bin/awsrole.sh @@ -24,9 +24,11 @@ gen3_awsrole_help() { function gen3_awsrole_ar_policy() { local serviceAccount="$1" shift || return 1 - local namespace="$1" - shift - [ -z "$namespace" ] && namespace=$(gen3 db namespace) + if [[ ! -z $1 ]]; then + local namespace=$1 + else + local namespace=$(gen3 db namespace) + fi local issuer_url local account_id local vpc_name @@ -73,25 +75,28 @@ EOF # # @param saName # @param roleName +# @param Optional - namespace # gen3_awsrole_sa_annotate() { local saName="$1" shift || return 1 local roleName="$1" shift || return 1 - local namespace="$1" - shift - # If namespace is supplied set KUBECTL_NAMESPACE - [[ ! -z "$namespace" ]] && KUBECTL_NAMESPACE=$namespace - + if [[ ! -z $1 ]]; then + local namespace=$1 + else + local namespace=$(gen3 db namespace) + fi local roleArn local roleInfo roleInfo="$(aws iam get-role --role-name "$roleName")" || return 1 roleArn="$(jq -e -r .Role.Arn <<< "$roleInfo")" - if ! g3kubectl get sa "$saName" > /dev/null; then - g3kubectl create sa "$saName" || return 1 + + if ! 
g3kubectl get sa "$saName" --namespace=$namespace > /dev/null; then + g3kubectl create sa "$saName" --namespace=$namespace || return 1 fi - g3kubectl annotate --overwrite sa "$saName" "eks.amazonaws.com/role-arn=$roleArn" + + g3kubectl annotate --overwrite sa "$saName" "eks.amazonaws.com/role-arn=$roleArn" --namespace=$namespace } # @@ -122,8 +127,9 @@ _tfplan_role() { shift || return 1 local saName="$1" shift || return 1 + local namespace="$1" local arDoc - arDoc="$(gen3_awsrole_ar_policy "$saName")" || return 1 + arDoc="$(gen3_awsrole_ar_policy "$saName" "$namespace")" || return 1 gen3 workon default "${rolename}_role" gen3 cd cat << EOF > config.tfvars @@ -138,7 +144,7 @@ EOF } # -# Util for applying tfplan +# Util for applying tfplan # _tfapply_role() { local rolename=$1 @@ -176,6 +182,11 @@ gen3_awsrole_create() { gen3_log_err "use: gen3 awsrole create roleName saName" return 1 fi + if [[ ! -z $1 ]]; then + local namespace=$1 + else + local namespace=$(gen3 db namespace) + fi # do simple validation of name local regexp="^[a-z][a-z0-9\-]*$" if [[ ! $rolename =~ $regexp ]];then @@ -196,7 +207,7 @@ EOF # That name is already used. if [[ "$entity_type" =~ role ]]; then gen3_log_info "A role with that name already exists" - gen3_awsrole_sa_annotate "$saName" "$rolename" + gen3_awsrole_sa_annotate "$saName" "$rolename" "$namespace" return $? else gen3_log_err "A $entity_type with that name already exists" @@ -205,14 +216,14 @@ EOF fi TF_IN_AUTOMATION="true" - if ! _tfplan_role $rolename $saName; then + if ! _tfplan_role $rolename $saName $namespace; then return 1 fi if ! 
_tfapply_role $rolename; then return 1 fi - gen3_awsrole_sa_annotate "$saName" "$rolename" + gen3_awsrole_sa_annotate "$saName" "$rolename" "$namespace" } # diff --git a/gen3/bin/kube-setup-ingress.sh b/gen3/bin/kube-setup-ingress.sh index 5c45b30ab..26cad3bd6 100644 --- a/gen3/bin/kube-setup-ingress.sh +++ b/gen3/bin/kube-setup-ingress.sh @@ -248,13 +248,9 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then EOM if ! gen3 awsrole info "$roleName" > /dev/null; then # setup role gen3_log_info "creating IAM role for ingress: $roleName, linking to sa $saName" - # This doesn't work creating SA + role in kube-system namespace :( - # gen3 awsrole create "$roleName" "$saName" || return 1 - - gen3 awsrole ar-policy $saName kube-system > $arPolicyFile - role=$(aws iam create-role --role-name $roleName --assume-role-policy-document file://"${arPolicyFile}" 1>&2) - aws iam put-role-policy --role-name "$roleName" --policy-document file://${ingressPolicy} --policy-name "$policyName" 1>&2 - gen3 awsrole sa-annotate $saName $roleName kube-system + gen3 awsrole create "$roleName" "$saName" || return 1 + aws iam put-role-policy --role-name "$roleName" --policy-document file://${ingressPolicy} --policy-name "$policyName" 1>&2 + gen3 awsrole sa-annotate $saName $roleName kube-system else # update the annotation - just to be thorough gen3 awsrole sa-annotate "$saName" "$roleName" kube-system diff --git a/gen3/bin/kube-setup-kubecost.sh b/gen3/bin/kube-setup-kubecost.sh new file mode 100644 index 000000000..bcdf8854d --- /dev/null +++ b/gen3/bin/kube-setup-kubecost.sh @@ -0,0 +1,302 @@ +#!/bin/bash +# + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/gen3setup" +gen3_load "gen3/lib/kube-setup-init" + +accountID=$(aws sts get-caller-identity --output text --query 'Account') +awsRegion=$(aws configure get region) + +gen3_setup_kubecost_infrastructure() { + gen3 workon default "${vpc_name}__kubecost" + gen3 cd + echo "vpc_name=\"$vpc_name\"" > 
config.tfvars + if [[ $deployment == "slave" ]]; then + echo "cur_s3_bucket=\"$s3Bucket\"" >> config.tfvars + elif [[ $deployment == "master" ]]; then + echo "slave_account_id=\"$slaveAccountId\"" >> config.tfvars + fi + gen3 tfplan 2>&1 + gen3 tfapply 2>&1 +} + +gen3_destroy_kubecost_infrastructure() { + gen3 workon default "${vpc_name}__kubecost" + gen3 tfplan --destroy 2>&1 + gen3 tfapply 2>&1 + gen3 cd + cd .. + rm -rf "${vpc_name}__kubecost" +} + +gen3_setup_kubecost_service_account() { + # Kubecost SA + roleName="$vpc_name-kubecost-user" + saName="kubecost-cost-analyzer" + gen3 awsrole create "$roleName" "$saName" "kubecost" || return 1 + aws iam attach-role-policy --role-name "$roleName" --policy-arn "arn:aws:iam::$accountID:policy/$vpc_name-Kubecost-CUR-policy" 1>&2 + #gen3 awsrole sa-annotate "$saName" "$roleName" "kubecost" + kubectl delete sa -n kubecost $saName + thanosRoleName="$vpc_name-thanos-user" + thanosSaName="thanos-service-account" + gen3 awsrole create "$thanosRoleName" "$thanosSaName" "kubecost" || return 1 + aws iam attach-role-policy --role-name "$thanosRoleName" --policy-arn "arn:aws:iam::$accountID:policy/$vpc_name-Kubecost-Thanos-policy" 1>&2 + gen3 awsrole sa-annotate "$thanosSaName" "$thanosRoleName" "kubecost" +} + +gen3_delete_kubecost_service_account() { + aws iam detach-role-policy --role-name "${vpc_name}-kubecost-user" --policy-arn "arn:aws:iam::$accountID:policy/$vpc_name-Kubecost-CUR-policy" 1>&2 + aws iam detach-role-policy --role-name "${vpc_name}-thanos-user" --policy-arn "arn:aws:iam::$accountID:policy/$vpc_name-Kubecost-Thanos-policy" 1>&2 + gen3 workon default "${vpc_name}-kubecost-user_role" + gen3 tfplan --destroy 2>&1 + gen3 tfapply 2>&1 + gen3 workon default "${vpc_name}-thanos-user_role" + gen3 tfplan --destroy 2>&1 + gen3 tfapply 2>&1 +} + +gen3_delete_kubecost() { + gen3_delete_kubecost_service_account + gen3_destroy_kubecost_infrastructure + helm delete kubecost -n kubecost +} + +gen3_kubecost_create_alb() { + 
kubectl apply -f "${GEN3_HOME}/kube/services/kubecost-${deployment}/kubecost-alb.yaml" -n kubecost +} + +gen3_setup_kubecost() { + kubectl create namespace kubecost || true + gen3_setup_kubecost_infrastructure + # Change the SA permissions based on slave/master/standalone + if [[ -z $(kubectl get sa -n kubecost | grep $vpc_name-kubecost-user) ]]; then + gen3_setup_kubecost_service_account + fi + # If master setup and s3 bucket not supplied, set terraform master s3 bucket name for thanos secret + if [[ -z $s3Bucket ]]; then + s3Bucket="$vpc_name-kubecost-bucket" + fi + if (! helm status kubecost -n kubecost > /dev/null 2>&1 ) || [[ ! -z "$FORCE" ]]; then + if [[ $deployment == "slave" ]]; then + valuesFile="$XDG_RUNTIME_DIR/values_$$.yaml" + valuesTemplate="${GEN3_HOME}/kube/services/kubecost-slave/values.yaml" + thanosValuesFile="$XDG_RUNTIME_DIR/object-store.yaml" + thanosValuesTemplate="${GEN3_HOME}/kube/services/kubecost-slave/object-store.yaml" + thanosValues="${GEN3_HOME}/kube/services/kubecost-slave/values-thanos.yaml" + g3k_kv_filter $valuesTemplate KUBECOST_TOKEN "${kubecostToken}" KUBECOST_SA "eks.amazonaws.com/role-arn: arn:aws:iam::$accountID:role/gen3_service/$roleName" THANOS_SA "$thanosSaName" ATHENA_BUCKET "s3://$s3Bucket" ATHENA_DATABASE "athenacurcfn_$vpc_name" ATHENA_TABLE "${vpc_name}_cur" AWS_ACCOUNT_ID "$accountID" AWS_REGION "$awsRegion" > $valuesFile + elif [[ $deployment == "master" ]]; then + valuesFile="$XDG_RUNTIME_DIR/values_$$.yaml" + valuesTemplate="${GEN3_HOME}/kube/services/kubecost-master/values.yaml" + thanosValuesFile="$XDG_RUNTIME_DIR/object-store.yaml" + thanosValuesTemplate="${GEN3_HOME}/kube/services/kubecost-master/object-store.yaml" + g3k_kv_filter $valuesTemplate KUBECOST_TOKEN "${kubecostToken}" KUBECOST_SA "eks.amazonaws.com/role-arn: arn:aws:iam::$accountID:role/gen3_service/$roleName" THANOS_SA "$thanosSaName" ATHENA_BUCKET "s3://$s3Bucket" ATHENA_DATABASE "athenacurcfn_$vpc_name" ATHENA_TABLE "${vpc_name}_cur" 
AWS_ACCOUNT_ID "$accountID" AWS_REGION "$awsRegion" > $valuesFile + gen3_kubecost_create_alb + else + valuesFile="$XDG_RUNTIME_DIR/values_$$.yaml" + valuesTemplate="${GEN3_HOME}/kube/services/kubecost-standalone/values.yaml" + thanosValuesFile="$XDG_RUNTIME_DIR/object-store.yaml" + thanosValuesTemplate="${GEN3_HOME}/kube/services/kubecost-standalone/object-store.yaml" + g3k_kv_filter $valuesTemplate KUBECOST_TOKEN "${kubecostToken}" KUBECOST_SA "eks.amazonaws.com/role-arn: arn:aws:iam::$accountID:role/gen3_service/$roleName" THANOS_SA "$thanosSaName" ATHENA_BUCKET "s3://$s3Bucket" ATHENA_DATABASE "athenacurcfn_$vpc_name" ATHENA_TABLE "${vpc_name}_cur" AWS_ACCOUNT_ID "$accountID" AWS_REGION "$awsRegion" > $valuesFile + gen3_kubecost_create_alb + fi + kubectl delete secret -n kubecost kubecost-thanos || true + kubectl delete secret -n kubecost thanos || true + g3k_kv_filter $thanosValuesTemplate AWS_REGION $awsRegion KUBECOST_S3_BUCKET $s3Bucket > $thanosValuesFile + kubectl create secret generic kubecost-thanos -n kubecost --from-file=$thanosValuesFile + kubectl create secret generic thanos -n kubecost --from-file=$thanosValuesFile + # Need to setup thanos config + helm repo add kubecost https://kubecost.github.io/cost-analyzer/ --force-update 2> >(grep -v 'This is insecure' >&2) + helm repo update 2> >(grep -v 'This is insecure' >&2) + if [[ -z $disablePrometheus ]]; then + helm upgrade --install kubecost kubecost/cost-analyzer -n kubecost -f ${valuesFile} -f https://raw.githubusercontent.com/kubecost/cost-analyzer-helm-chart/develop/cost-analyzer/values-thanos.yaml + else + helm upgrade --install kubecost kubecost/cost-analyzer -n kubecost -f ${valuesFile} -f https://raw.githubusercontent.com/kubecost/cost-analyzer-helm-chart/develop/cost-analyzer/values-thanos.yaml --set prometheus.fqdn=http://$prometheusService.$prometheusNamespace.svc --set prometheus.enabled=false + fi + else + gen3_log_info "kube-setup-kubecost exiting - kubecost already deployed, use --force 
true to redeploy" + fi +} + +if [[ -z "$GEN3_SOURCE_ONLY" ]]; then + if [[ -z "$1" || "$1" =~ ^-*help$ ]]; then + gen3_logs_help + exit 0 + fi + command="$1" + shift + case "$command" in + "master") + deployment="master" + subcommand="" + if [[ $# -gt 0 ]]; then + subcommand="$1" + shift + fi + case "$subcommand" in + "create") + for flag in $@; do + if [[ $# -gt 0 ]]; then + flag="$1" + shift + fi + case "$flag" in + "--slave-account-id") + slaveAccountId="$1" + ;; + "--kubecost-token") + kubecostToken="$1" + ;; + "--force") + if [[ $(echo $1 | tr '[:upper:]' '[:lower:]') == "true" ]]; then + FORCE=true + fi + ;; + "--disable-prometheus") + if [[ $(echo $1 | tr '[:upper:]' '[:lower:]') == "true" ]]; then + disablePrometheus=true + fi + ;; + "--prometheus-namespace") + prometheusNamespace="$1" + ;; + "--prometheus-service") + prometheusService="$1" + ;; + esac + done + if [[ -z $slaveAccountId || -z $kubecostToken ]]; then + gen3_log_err "Please ensure you set the required flags." + exit 1 + fi + if [[ $disablePrometheus == true && -z $prometheusNamespace && -z $prometheusService ]]; then + gen3_log_err "If you disable prometheus, set the flags for the local prometheus namespace and service name." 
+ exit 1 + fi + gen3_setup_kubecost "$@" + ;; + "alb") + gen3_kubecost_create_alb + ;; + *) + gen3_log_err "gen3_logs" "invalid history subcommand $subcommand - try: gen3 help kube-setup-kubecost" + ;; + esac + ;; + "slave") + deployment="slave" + subcommand="" + if [[ $# -gt 0 ]]; then + subcommand="$1" + shift + fi + case "$subcommand" in + "create") + for flag in $@; do + if [[ $# -gt 0 ]]; then + flag="$1" + shift + fi + case "$flag" in + "--s3-bucket") + s3Bucket="$1" + ;; + "--kubecost-token") + kubecostToken="$1" + ;; + "--force") + if [[ $(echo $1 | tr '[:upper:]' '[:lower:]') == "true" ]]; then + FORCE=true + fi + ;; + "--disable-prometheus") + if [[ $(echo $1 | tr '[:upper:]' '[:lower:]') == "true" ]]; then + disablePrometheus=true + fi + ;; + "--prometheus-namespace") + prometheusNamespace="$1" + ;; + "--prometheus-service") + prometheusService="$1" + ;; + esac + done + if [[ -z $s3Bucket || -z $kubecostToken ]]; then + gen3_log_err "Please ensure you set the required flags." + exit 1 + fi + if [[ $disablePrometheus == true && -z $prometheusNamespace && -z $prometheusService ]]; then + gen3_log_err "If you disable prometheus, set the flags for the local prometheus namespace and service name." 
+ exit 1 + fi + gen3_setup_kubecost "$@" + ;; + *) + gen3_log_err "gen3_logs" "invalid history subcommand $subcommand - try: gen3 help kube-setup-kubecost" + ;; + esac + ;; + "standalone") + deployment="standalone" + subcommand="" + if [[ $# -gt 0 ]]; then + subcommand="$1" + shift + fi + case "$subcommand" in + "create") + for flag in $@; do + if [[ $# -gt 0 ]]; then + flag="$1" + shift + fi + case "$flag" in + "--kubecost-token") + kubecostToken="$1" + ;; + "--force") + if [[ $(echo $1 | tr '[:upper:]' '[:lower:]') == "true" ]]; then + FORCE=true + fi + ;; + "--disable-prometheus") + if [[ $(echo $1 | tr '[:upper:]' '[:lower:]') == "true" ]]; then + disablePrometheus=true + fi + ;; + "--prometheus-namespace") + prometheusNamespace="$1" + ;; + "--prometheus-service") + prometheusService="$1" + ;; + esac + done + if [[ -z $kubecostToken ]]; then + gen3_log_err "Please ensure you set the required flags." + exit 1 + fi + if [[ $disablePrometheus == true && -z $prometheusNamespace && -z $prometheusService ]]; then + gen3_log_err "If you disable prometheus, set the flags for the local prometheus namespace and service name." 
+ exit 1 + fi + gen3_setup_kubecost "$@" + ;; + "alb") + gen3_kubecost_create_alb + ;; + *) + gen3_log_err "gen3_logs" "invalid history subcommand $subcommand - try: gen3 help kube-setup-kubecost" + ;; + esac + ;; + "delete") + gen3_delete_kubecost + ;; + *) + gen3_log_err "gen3_logs" "invalid command $command" + gen3_kubecost_help + ;; + esac +fi diff --git a/kube/services/kubecost-master/kubecost-alb.yaml b/kube/services/kubecost-master/kubecost-alb.yaml new file mode 100644 index 000000000..9a0fc4ef7 --- /dev/null +++ b/kube/services/kubecost-master/kubecost-alb.yaml @@ -0,0 +1,19 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: kubecost-alb-ingress + annotations: + kubernetes.io/ingress.class: alb + alb.ingress.kubernetes.io/target-type: ip + alb.ingress.kubernetes.io/scheme: internal +spec: + rules: + - http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: kubecost-cost-analyzer + port: + number: 9090 \ No newline at end of file diff --git a/kube/services/kubecost-master/object-store.yaml b/kube/services/kubecost-master/object-store.yaml new file mode 100644 index 000000000..bcfadc752 --- /dev/null +++ b/kube/services/kubecost-master/object-store.yaml @@ -0,0 +1,16 @@ +type: S3 +config: + bucket: KUBECOST_S3_BUCKET + endpoint: "s3.amazonaws.com" + region: AWS_REGION + insecure: false + signature_version2: false + put_user_metadata: + "X-Amz-Acl": "bucket-owner-full-control" + http_config: + idle_conn_timeout: 90s + response_header_timeout: 2m + insecure_skip_verify: false + trace: + enable: true + part_size: 134217728 \ No newline at end of file diff --git a/kube/services/kubecost-master/values.yaml b/kube/services/kubecost-master/values.yaml new file mode 100644 index 000000000..0b4269778 --- /dev/null +++ b/kube/services/kubecost-master/values.yaml @@ -0,0 +1,40 @@ +## Full values listed here, https://github.com/kubecost/cost-analyzer-helm-chart/blob/master/cost-analyzer/values.yaml + +kubecostToken: KUBECOST_TOKEN 
+ +serviceAccount: + create: true # Set this to false if you're bringing your own service account. + annotations: + KUBECOST_SA + +kubecostProductConfigs: + athenaBucketName: ATHENA_BUCKET + athenaRegion: AWS_REGION + athenaDatabase: ATHENA_DATABASE + athenaTable: ATHENA_TABLE + athenaProjectID: AWS_ACCOUNT_ID + clusterName: master-cluster + #serviceKeySecretName: aws-service-key , might work with SA attached instead + projectID: AWS_ACCOUNT_ID + # awsSpotDataRegion: AWS_kubecostProductConfigs_awsSpotDataRegion + # awsSpotDataBucket: AWS_kubecostProductConfigs_awsSpotDataBucket + +prometheus: + serviceAccounts: + server: + create: false + name: "THANOS_SA" + server: + global: + external_labels: + # Slave cluster name + cluster_id: "master-cluster" + +networkCosts: + enabled: true + +thanos: + store: + serviceAccount: "THANOS_SA" + compact: + serviceAccount: "THANOS_SA" \ No newline at end of file diff --git a/kube/services/kubecost-slave/object-store.yaml b/kube/services/kubecost-slave/object-store.yaml new file mode 100644 index 000000000..bcfadc752 --- /dev/null +++ b/kube/services/kubecost-slave/object-store.yaml @@ -0,0 +1,16 @@ +type: S3 +config: + bucket: KUBECOST_S3_BUCKET + endpoint: "s3.amazonaws.com" + region: AWS_REGION + insecure: false + signature_version2: false + put_user_metadata: + "X-Amz-Acl": "bucket-owner-full-control" + http_config: + idle_conn_timeout: 90s + response_header_timeout: 2m + insecure_skip_verify: false + trace: + enable: true + part_size: 134217728 \ No newline at end of file diff --git a/kube/services/kubecost-slave/values.yaml b/kube/services/kubecost-slave/values.yaml new file mode 100644 index 000000000..8b3786054 --- /dev/null +++ b/kube/services/kubecost-slave/values.yaml @@ -0,0 +1,50 @@ +## Full values listed here, https://github.com/kubecost/cost-analyzer-helm-chart/blob/master/cost-analyzer/values.yaml + +kubecostToken: KUBECOST_TOKEN + +serviceAccount: + create: true # Set this to false if you're bringing your own 
service account. + annotations: + KUBECOST_SA + +kubecostProductConfigs: + clusterName: slave-cluster + athenaBucketName: ATHENA_BUCKET + athenaRegion: AWS_REGION + athenaDatabase: ATHENA_DATABASE + athenaTable: ATHENA_TABLE + athenaProjectID: AWS_ACCOUNT_ID + #serviceKeySecretName: aws-service-key , might work with SA attached instead + projectID: AWS_ACCOUNT_ID + +kubecostModel: + warmCache: false + warmSavingsCache: false + etl: false + +global: + grafana: + enabled: false + proxy: false + alertmanager: + enabled: false + +prometheus: + serviceAccounts: + server: + create: false + name: THANOS_SA + server: + global: + external_labels: + # Slave cluster name + cluster_id: "slave-cluster" + +networkCosts: + enabled: true + +thanos: + store: + serviceAccount: THANOS_SA + compact: + serviceAccount: THANOS_SA \ No newline at end of file diff --git a/kube/services/kubecost-standalone/kubecost-alb.yaml b/kube/services/kubecost-standalone/kubecost-alb.yaml new file mode 100644 index 000000000..9a0fc4ef7 --- /dev/null +++ b/kube/services/kubecost-standalone/kubecost-alb.yaml @@ -0,0 +1,19 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: kubecost-alb-ingress + annotations: + kubernetes.io/ingress.class: alb + alb.ingress.kubernetes.io/target-type: ip + alb.ingress.kubernetes.io/scheme: internal +spec: + rules: + - http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: kubecost-cost-analyzer + port: + number: 9090 \ No newline at end of file diff --git a/kube/services/kubecost-standalone/object-store.yaml b/kube/services/kubecost-standalone/object-store.yaml new file mode 100644 index 000000000..bcfadc752 --- /dev/null +++ b/kube/services/kubecost-standalone/object-store.yaml @@ -0,0 +1,16 @@ +type: S3 +config: + bucket: KUBECOST_S3_BUCKET + endpoint: "s3.amazonaws.com" + region: AWS_REGION + insecure: false + signature_version2: false + put_user_metadata: + "X-Amz-Acl": "bucket-owner-full-control" + http_config: + 
idle_conn_timeout: 90s + response_header_timeout: 2m + insecure_skip_verify: false + trace: + enable: true + part_size: 134217728 \ No newline at end of file diff --git a/kube/services/kubecost-standalone/values.yaml b/kube/services/kubecost-standalone/values.yaml new file mode 100644 index 000000000..cd9117f85 --- /dev/null +++ b/kube/services/kubecost-standalone/values.yaml @@ -0,0 +1,35 @@ +## Full values listed here, https://github.com/kubecost/cost-analyzer-helm-chart/blob/master/cost-analyzer/values.yaml + +kubecostToken: KUBECOST_TOKEN + +serviceAccount: + create: true # Set this to false if you're bringing your own service account. + annotations: + KUBECOST_SA + +kubecostProductConfigs: + athenaBucketName: ATHENA_BUCKET + athenaRegion: AWS_REGION + athenaDatabase: ATHENA_DATABASE + athenaTable: ATHENA_TABLE + athenaProjectID: AWS_ACCOUNT_ID + clusterName: master-cluster + #serviceKeySecretName: aws-service-key , might work with SA attached instead + projectID: AWS_ACCOUNT_ID + # awsSpotDataRegion: AWS_kubecostProductConfigs_awsSpotDataRegion + # awsSpotDataBucket: AWS_kubecostProductConfigs_awsSpotDataBucket + +prometheus: + serviceAccounts: + server: + create: false + name: "THANOS_SA" + +networkCosts: + enabled: true + +thanos: + store: + serviceAccount: "THANOS_SA" + compact: + serviceAccount: "THANOS_SA" \ No newline at end of file diff --git a/tf_files/aws/kubecost/AWSCURInitializer.zip b/tf_files/aws/kubecost/AWSCURInitializer.zip new file mode 100644 index 0000000000000000000000000000000000000000..2a785d03cb8b6f20fc2adf2b4b88f200bf89be9c GIT binary patch literal 741 zcmWIWW@Zs#U|`^2cxSsLq&xXQsV)-(12ZcFgCK(pgJXEGb7+ufUS>&VVoqjNYEfti zCj;|?(xRkCKwMhE&A`a=m63q~Y{b-H-~8JKJbS-~7pR|Zel1&6JkfcY+JOy8(vR}2 zMJzSU=4J${_)ogcvhsg*=FM}*Z;8xctnYkM{X}_YW~yvY-Esq#=6dbK39Nrk#@()w zZ7~m@xm|jTYVCuS!GAQhuaxFz))Yo(Jsp6{i^|xm#H@hTZCPO4;)>bFS9o`ly@* zJ+Y}%-2Wwhaf?W4Pwo=JfNoeqT&ms0w(d*ubN)JaEVD)I20Q(fVB zZtCWBg&&=s@4oT!isZs+Iz0`Gws<~hnDs;b%0BtGt&`FgUhb=WcJEsC*7v_|I8FLr 
zy7S8Mz286HEW7lR@BVwQ7XHJEEZa3MUaTqIy0=sK{>QUQ&oBE;G(M;^(dNr8A1S`s z2bfvPCe*~6T|R>uJ}tSGJu!Bq(*oVEOL6rSq#_zWi67 zdt>&a){W5%t!{UE?RxyJq&O+?>H2R!5`63Xj!!oEs_t82&H8tiaj0iy+UmnIicW;R zDe2n!)BoDb7pLz&*(QB+-ejGqB%QC5=Fa`TQ-*)G;@bQ;h5&CyCOKwYDNh2JA{iKf k35#J#BZ!419kN2wAzB&?@MdKLX1M{!OqNLwITw1}+z{v6ys0D1w+)4ia%?<)b-`8~QY~)<`eU@BS_p)V2SysvQ zWQIv-Ow`-4`-{KSzTMnYrmdYJ;ji;%_RRa$&Jzq36TiGVEMQS5%ISW3Npzv^A^*>> zu5rAo?o$e9f75lsS777oZ_n27THIZBB6iohCzj`#M4A+C3mmVVS>^xkPNDee=@ZRm z@1B)mF0VYcR6+ZcQ|L0UMNOxP4X>D=R z`?hnIg-ny>L6N0St4a$NsWV0XSp53swR*XJ+56#c3|2`jTN?$&I-wU@-Z_bt+v48)g&Sb8o@Nb4} zL4Li7Ub#8_6aB=hR>oz-%)9m6^{~Cf@8j!#|Cajo{YLnjLi3GN+$w+b26!_v$uZ-K qYzbf@U|;}7B*T(M5DR Date: Wed, 8 Jun 2022 10:13:35 -0500 Subject: [PATCH 043/106] (PXP-9967): Create RAS cronjobs if Fence >= 6.0.0 || Fence >= 2022.07 (#1935) * chore(kube-setup-fence.sh): uncomment RAS cronjobs * chore(utils.sh): add isRepoCommitGreaterOrEqual * feat(utils.sh): add convertImageTagToGitBranch * fix(convertImageTagToGitBranch): echo result * fix(convertImageTagToGitBranch): quiet grep * chore(utils): add isServiceVersionGreaterOrEqual * chore(utils): isServiceVersionGreaterOrEqual * chore(isServiceVersionGreaterOrEqual): use var * fix(isServiceVersionGreaterOrEqual): expand args * chore(utils.sh): doule quote variables * chore(ga4gh): use isServiceVersionGreaterOrEqual * docs(utils.sh): isServiceVersionGreaterOrEqual * chore(isServiceVersionGreaterOrEqual): restore * chore(kube-setup-google.sh): uncomment DRS job --- gen3/bin/kube-setup-fence.sh | 27 +++++++++++++-------------- gen3/bin/kube-setup-google.sh | 32 +++++++++++++++----------------- 2 files changed, 28 insertions(+), 31 deletions(-) diff --git a/gen3/bin/kube-setup-fence.sh b/gen3/bin/kube-setup-fence.sh index 64c4e14fb..192000b8f 100644 --- a/gen3/bin/kube-setup-fence.sh +++ b/gen3/bin/kube-setup-fence.sh @@ -77,17 +77,16 @@ gen3_log_info "The fence service has been deployed onto the k8s cluster." 
gen3 kube-setup-google # add cronjob for removing expired ga4gh info for required fence versions -# TODO: WILL UNCOMMENT THIS ONCE FEATURE IN FENCE IS RELEASED -# if isServiceVersionGreaterOrEqual "fence" "6.0.0" "2022.04"; then -# # Setup db cleanup cronjob -# if ! g3kubectl get cronjob fence-cleanup-expired-ga4gh-info >/dev/null 2>&1; then -# echo "fence-cleanup-expired-ga4gh-info being added as a cronjob b/c fence >= 6.0.0 or 2022.04" -# gen3 job cron fence-cleanup-expired-ga4gh-info "*/5 * * * *" -# fi -# -# # Setup visa update cronjob -# if ! g3kubectl get cronjob fence-visa-update >/dev/null 2>&1; then -# echo "fence-visa-update being added as a cronjob b/c fence >= 6.0.0 or 2022.04" -# gen3 job cron fence-visa-update "30 * * * *" -# fi -# fi +if isServiceVersionGreaterOrEqual "fence" "6.0.0" "2022.07"; then + # Setup db cleanup cronjob + if ! g3kubectl get cronjob fence-cleanup-expired-ga4gh-info >/dev/null 2>&1; then + echo "fence-cleanup-expired-ga4gh-info being added as a cronjob b/c fence >= 6.0.0 or 2022.07" + gen3 job cron fence-cleanup-expired-ga4gh-info "*/5 * * * *" + fi + + # Setup visa update cronjob + if ! 
g3kubectl get cronjob fence-visa-update >/dev/null 2>&1; then + echo "fence-visa-update being added as a cronjob b/c fence >= 6.0.0 or 2022.07" + gen3 job cron fence-visa-update "30 * * * *" + fi +fi diff --git a/gen3/bin/kube-setup-google.sh b/gen3/bin/kube-setup-google.sh index 31d487b85..d8bd54166 100644 --- a/gen3/bin/kube-setup-google.sh +++ b/gen3/bin/kube-setup-google.sh @@ -19,15 +19,14 @@ goog_launch() { local path # add cronjob for removing cached google access for fence versions - # supporting Passports to DRS( - # TODO: WILL UNCOMMENT THIS ONCE FEATURE IN FENCE IS RELEASED -# if isServiceVersionGreaterOrEqual "fence" "6.0.0" "2022.02"; then -# filePath="${GEN3_HOME}/kube/services/jobs/google-delete-expired-access-cronjob.yaml" -# if [[ -f "$filePath" ]]; then -# echo "$filePath being added as a cronjob b/c fence >= 6.0.0 or 2022.02" -# cronList+=("--from-file" "$filePath") -# fi -# fi + # supporting Passports to DRS + if isServiceVersionGreaterOrEqual "fence" "6.0.0" "2022.07"; then + filePath="${GEN3_HOME}/kube/services/jobs/google-delete-expired-access-cronjob.yaml" + if [[ -f "$filePath" ]]; then + echo "$filePath being added as a cronjob b/c fence >= 6.0.0 or 2022.07" + cronList+=("--from-file" "$filePath") + fi + fi for path in "${cronList[@]}"; do gen3 job run "$path" @@ -42,14 +41,13 @@ goog_stop() { # add cronjob for removing cached google access for fence versions # supporting Passports -> DRS - # TODO: WILL UNCOMMENT THIS ONCE FEATURE IN FENCE IS RELEASED -# if isServiceVersionGreaterOrEqual "fence" "6.0.0" "2022.02"; then -# filePath="${GEN3_HOME}/kube/services/jobs/google-delete-expired-access-cronjob.yaml" -# if [[ -f "$filePath" ]]; then -# echo "$filePath being added as a cronjob b/c fence >= 6.0.0 or 2022.02" -# cronList+=("--from-file" "$filePath") -# fi -# fi + if isServiceVersionGreaterOrEqual "fence" "6.0.0" "2022.07"; then + filePath="${GEN3_HOME}/kube/services/jobs/google-delete-expired-access-cronjob.yaml" + if [[ -f "$filePath" 
]]; then + echo "$filePath being added as a cronjob b/c fence >= 6.0.0 or 2022.07" + cronList+=("--from-file" "$filePath") + fi + fi for path in "${cronList[@]}"; do if jobName="$(gen3 gitops filter "$path" | yq -r .metadata.name)" && [[ -n "$jobName" ]]; then From b48a776d26f079f2b8dd59fc30aa4262017089ee Mon Sep 17 00:00:00 2001 From: pieterlukasse Date: Wed, 8 Jun 2022 19:53:57 +0200 Subject: [PATCH 044/106] Create config-local.js including iframe "keep alive" code (#1927) * Create config-local.js move file contents out of README file clean code * describe steps for changing and redeploying Atlas config --- .secrets.baseline | 4 +-- kube/services/ohdsi-atlas/README.md | 39 +++++++++++------------ kube/services/ohdsi-atlas/config-local.js | 31 ++++++++++++++++++ 3 files changed, 51 insertions(+), 23 deletions(-) create mode 100644 kube/services/ohdsi-atlas/config-local.js diff --git a/.secrets.baseline b/.secrets.baseline index 54230ef67..3bc358cfb 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -1016,13 +1016,13 @@ { "hashed_secret": "6e71f9f2b1e96de5a712f899ed26477ebc260a73", "is_verified": false, - "line_number": 105, + "line_number": 87, "type": "Secret Keyword" }, { "hashed_secret": "317b889ca9fa8789dc1b85714568b1bdf2c7baf3", "is_verified": false, - "line_number": 108, + "line_number": 90, "type": "Secret Keyword" } ], diff --git a/kube/services/ohdsi-atlas/README.md b/kube/services/ohdsi-atlas/README.md index 23a756962..23d060ab9 100644 --- a/kube/services/ohdsi-atlas/README.md +++ b/kube/services/ohdsi-atlas/README.md @@ -23,32 +23,14 @@ kubectl create configmap ohdsi-atlas-nginx-webapi --from-file=webapi.conf } ``` -### `config-local.js` file as `ohdsi-atlas-config-local` +### store `config-local.js` file as `ohdsi-atlas-config-local` configmap + +See `./config-local.js` in this folder. 
Add it as a configmap using: ``` kubectl create configmap ohdsi-atlas-config-local --from-file=config-local.js ``` -``` -define([], function () { - var configLocal = {}; - // WebAPI - configLocal.api = { - name: 'Gen3', - url: 'https://atlas-qa-mickey.planx-pla.net/WebAPI/' - }; - configLocal.authProviders = [{ - "name": "Fence", - "url": "user/login/openid", - "ajax": false, - "icon": "fa fa-openid" - }]; - configLocal.cohortComparisonResultsEnabled = false; - configLocal.userAuthenticationEnabled = true; - configLocal.plpResultsEnabled = false; - return configLocal; -}); -``` ### `ohdsi-webapi-config.yaml` @@ -118,3 +100,18 @@ stringData: logging_level_org_ohdsi: info logging_level_org_apache_shiro: info ``` + +## Making changes and redeploying to QA + +Example: we have some changes in `kube/services/ohdsi-atlas/config-local.js`. + +To redeploy to QA, follow these steps: +- delete old configmap `kubectl delete configmap ohdsi-atlas-config-local` +- get a copy of `config-local.js` into the current folder +- run the `kubectl create configmap ohdsi-atlas-config-local --from-file=config-local.js` on QA server +- assess results with `kubectl describe configmap ohdsi-atlas-config-local` +- and then restart Atlas with `gen3 roll ohdsi-atlas` +- watch pod status with `kubectl get pods -l app=ohdsi-atlas` + +To clear the browser cache (when making .js changes): +- go to https://atlas-qa-mickey.planx-pla.net/atlas/js/config-local.js and force-reload it to clear old code from browser cache diff --git a/kube/services/ohdsi-atlas/config-local.js b/kube/services/ohdsi-atlas/config-local.js new file mode 100644 index 000000000..73f9aa76d --- /dev/null +++ b/kube/services/ohdsi-atlas/config-local.js @@ -0,0 +1,31 @@ +define([], function () { + var configLocal = {}; + // WebAPI + configLocal.api = { + name: 'Gen3', + url: 'https://atlas-qa-mickey.planx-pla.net/WebAPI/' + }; + configLocal.authProviders = [{ + "name": "Fence", + "url": "user/login/openid", + "ajax": false, + 
"icon": "fa fa-openid" + }]; + configLocal.cohortComparisonResultsEnabled = false; + configLocal.userAuthenticationEnabled = true; + configLocal.plpResultsEnabled = false; + return configLocal; +}); + +var parentOfThisIframe = window.parent; +var mouseoverCount = 0; + +console.log("Adding activity event listener..."); +window.addEventListener("mouseover", function(event) { + mouseoverCount++; + if (mouseoverCount % 20 == 0 && parentOfThisIframe) { + console.log("Activity detected. Atlas running in an iframe. Posting 'I'm alive' message..."); + parentOfThisIframe.postMessage("refresh token!", "*"); + mouseoverCount = 0; + } +}); From dac3cc6f05c2c94a21a61d9f8d560b6bb1c460c8 Mon Sep 17 00:00:00 2001 From: jawadqur <55899496+jawadqur@users.noreply.github.com> Date: Wed, 8 Jun 2022 15:34:44 -0500 Subject: [PATCH 045/106] Add an IAM role to every hatchery service account in cluster (#1906) * Update hatchery SA --- gen3/bin/kube-setup-hatchery.sh | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/gen3/bin/kube-setup-hatchery.sh b/gen3/bin/kube-setup-hatchery.sh index e4d537f2f..b3eb659b8 100644 --- a/gen3/bin/kube-setup-hatchery.sh +++ b/gen3/bin/kube-setup-hatchery.sh @@ -40,6 +40,24 @@ policy=$( cat < /dev/null 2>&1; then - role_name="${vpc_name}-${saName}-role" - gen3 awsrole create $role_name $saName - policyName="hatchery-role-sts" + roleName="$(gen3 api safe-name hatchery-sa)" + gen3 awsrole create $roleName $saName + policyName="$(gen3 api safe-name hatchery-policy)" policyInfo=$(gen3_aws_run aws iam create-policy --policy-name "$policyName" --policy-document "$policy" --description "Allow hathcery to assume csoc_adminvm role in other accounts, for multi-account workspaces") if [ -n "$policyInfo" ]; then policyArn="$(jq -e -r '.["Policy"].Arn' <<< "$policyInfo")" || { echo "Cannot get 'Policy.Arn' from output: $policyInfo"; return 1; } @@ -59,9 +77,9 @@ if ! 
g3kubectl get sa "$saName" -o json | jq -e '.metadata.annotations | ."eks.a policyArn=$(gen3_aws_run aws iam list-policies --query "Policies[?PolicyName=='$policyName'].Arn" --output text) fi - gen3_log_info "Attaching policy '${policyName}' to role '${role_name}'" - gen3 awsrole attach-policy ${policyArn} --role-name ${role_name} --force-aws-cli || exit 1 - gen3 awsrole attach-policy "arn:aws:iam::aws:policy/AWSResourceAccessManagerFullAccess" --role-name ${role_name} --force-aws-cli || exit 1 + gen3_log_info "Attaching policy '${policyName}' to role '${roleName}'" + gen3 awsrole attach-policy ${policyArn} --role-name ${roleName} --force-aws-cli || exit 1 + gen3 awsrole attach-policy "arn:aws:iam::aws:policy/AWSResourceAccessManagerFullAccess" --role-name ${roleName} --force-aws-cli || exit 1 fi From 84bcf39ddbf1ac82198f99bd9db2b4d0eb980e3f Mon Sep 17 00:00:00 2001 From: Matthew Cannalte Date: Fri, 10 Jun 2022 13:23:12 -0500 Subject: [PATCH 046/106] Feat/cedar wrapper service (#1933) * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * feat(cedar-wrapper-service): * 
feat(cedar-wrapper-service): * Update gen3/bin/kube-setup-cedar-wrapper.sh Co-authored-by: Mingfei Shao <2475897+mfshao@users.noreply.github.com> * delete secret before creating * fix for cedare wwrapper Co-authored-by: Mingfei Shao <2475897+mfshao@users.noreply.github.com> Co-authored-by: Mingfei Shao --- files/squid_whitelist/web_whitelist | 1 + gen3/bin/kube-roll-all.sh | 6 ++ gen3/bin/kube-setup-cedar-wrapper.sh | 20 ++++ .../cedar-wrapper/cedar-wrapper-deploy.yaml | 91 +++++++++++++++++++ .../cedar-wrapper/cedar-wrapper-service.yaml | 19 ++++ .../cedar-wrapper-service.conf | 31 +++++++ kube/services/revproxy/nginx.conf | 3 + 7 files changed, 171 insertions(+) create mode 100644 gen3/bin/kube-setup-cedar-wrapper.sh create mode 100644 kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml create mode 100644 kube/services/cedar-wrapper/cedar-wrapper-service.yaml create mode 100644 kube/services/revproxy/gen3.nginx.conf/cedar-wrapper-service.conf diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index 72c0ae7c8..219f6b41e 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -133,6 +133,7 @@ repos.mia.quadranet.com repos.redrockhost.com repos.sensuapp.org repository.cloudera.com +resource.metadatacenter.org rules.emergingthreats.net rweb.quant.ku.edu sa-update.dnswl.org diff --git a/gen3/bin/kube-roll-all.sh b/gen3/bin/kube-roll-all.sh index 14d8762e8..06f381a3d 100644 --- a/gen3/bin/kube-roll-all.sh +++ b/gen3/bin/kube-roll-all.sh @@ -302,6 +302,12 @@ else gen3_log_info "not deploying frontend-framework - no manifest entry for '.versions[\"frontend-framework\"]'" fi +if g3k_manifest_lookup '.versions["cedar-wrapper"]' 2> /dev/null; then + gen3 kube-setup-cedar-wrapper & +else + gen3_log_info "not deploying cedar-wrapper - no manifest entry for '.versions[\"cedar-wrapper\"]'" +fi + gen3_log_info "enable network policy" gen3 kube-setup-networkpolicy "enable" || true & diff --git 
a/gen3/bin/kube-setup-cedar-wrapper.sh b/gen3/bin/kube-setup-cedar-wrapper.sh new file mode 100644 index 000000000..129e4de64 --- /dev/null +++ b/gen3/bin/kube-setup-cedar-wrapper.sh @@ -0,0 +1,20 @@ +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/lib/kube-setup-init" + +[[ -z "$GEN3_ROLL_ALL" ]] && gen3 kube-setup-secrets + + +cedar_api_key_file="$(gen3_secrets_folder)/cedar_api_key.txt" + +if [[ ! -f "$cedar_api_key_file" ]]; then + gen3_log_err "No CEDAR api key present in ${cedar_api_key_file}" +else + if g3kubectl get secret cedar-service-api-key > /dev/null 2>&1; then + g3kubectl delete secret cedar-service-api-key + fi + g3kubectl create secret generic "cedar-service-api-key" --from-file=cedar_api_key.txt=${cedar_api_key_file} +fi + +g3kubectl apply -f "${GEN3_HOME}/kube/services/cedar-wrapper/cedar-wrapper-service.yaml" +gen3 roll cedar-wrapper + diff --git a/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml b/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml new file mode 100644 index 000000000..dd56a617e --- /dev/null +++ b/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml @@ -0,0 +1,91 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cedar-wrapper-deployment +spec: + selector: + matchLabels: + app: cedar-wrapper + revisionHistoryLimit: 2 + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 2 + maxUnavailable: 25% + template: + metadata: + labels: + app: cedar-wrapper + public: "yes" + netnolimit: "yes" + userhelper: "yes" + GEN3_DATE_LABEL + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - cedar-wrapper + topologyKey: "kubernetes.io/hostname" + automountServiceAccountToken: false + volumes: + - name: ca-volume + secret: + secretName: "service-ca" + - name: config-volume + secret: + secretName: "cedar-wrapper-config" + - name: privacy-policy + configMap: + name: 
"privacy-policy" + - name: cedar-service-api-key + secret: + secretName: "cedar-service-api-key" + containers: + - name: cedar-wrapper + GEN3_CEDAR-WRAPPER_IMAGE + readinessProbe: + httpGet: + path: /_status/ + port: 8000 + initialDelaySeconds: 30 + periodSeconds: 60 + timeoutSeconds: 30 + livenessProbe: + httpGet: + path: /_status/ + port: 8000 + initialDelaySeconds: 60 + periodSeconds: 60 + timeoutSeconds: 30 + failureThreshold: 6 + resources: + requests: + cpu: 0.6 + memory: 512Mi + limits: + cpu: 2 + memory: 4096Mi + ports: + - containerPort: 8000 + command: + - /bin/bash + - /src/start.sh + env: + - name: HOSTNAME + value: revproxy-service + volumeMounts: + - name: "ca-volume" + readOnly: true + mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt" + subPath: "ca.pem" + - name: "cedar-service-api-key" + readOnly: true + mountPath: "/mnt/secrets/" + imagePullPolicy: Always diff --git a/kube/services/cedar-wrapper/cedar-wrapper-service.yaml b/kube/services/cedar-wrapper/cedar-wrapper-service.yaml new file mode 100644 index 000000000..c22585213 --- /dev/null +++ b/kube/services/cedar-wrapper/cedar-wrapper-service.yaml @@ -0,0 +1,19 @@ +kind: Service +apiVersion: v1 +metadata: + name: cedar-wrapper-service +spec: + selector: + app: cedar-wrapper + ports: + - protocol: TCP + port: 80 + targetPort: 8000 + name: http + nodePort: null + - protocol: TCP + port: 443 + targetPort: 8000 + name: https + nodePort: null + type: ClusterIP diff --git a/kube/services/revproxy/gen3.nginx.conf/cedar-wrapper-service.conf b/kube/services/revproxy/gen3.nginx.conf/cedar-wrapper-service.conf new file mode 100644 index 000000000..b58c7ae16 --- /dev/null +++ b/kube/services/revproxy/gen3.nginx.conf/cedar-wrapper-service.conf @@ -0,0 +1,31 @@ + + location /cedar/ { + + if ($csrf_check !~ ^ok-\S.+$) { + return 403 "failed csrf check"; + } + + set $authz_resource "/cedar"; + set $authz_method "access"; + set $authz_service "cedar"; + # # be careful - sub-request runs in same 
context as this request + auth_request_set $remoteUser $upstream_http_REMOTE_USER; + auth_request_set $saved_set_cookie $upstream_http_set_cookie; + auth_request /gen3-authz; + + proxy_set_header REMOTE_USER $remoteUser; + set $proxy_service "cedar-wrapper-service"; + set $upstream http://cedar-wrapper-service$des_domain; + rewrite ^/cedar/(.*) /$1 break; + proxy_pass $upstream; + proxy_redirect http://$host/ https://$host/cedar/; + + + proxy_set_header Authorization "$access_token"; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + client_max_body_size 0; + } diff --git a/kube/services/revproxy/nginx.conf b/kube/services/revproxy/nginx.conf index f38dcddef..bb5ff652c 100644 --- a/kube/services/revproxy/nginx.conf +++ b/kube/services/revproxy/nginx.conf @@ -419,6 +419,9 @@ server { } location @errorworkspace { + if ($frontend_root_service = "gen3ff") { + return 302 https://$host/portal/no-workspace-access; + } return 302 https://$host/no-workspace-access; } } From 4835639d2158b5c0b985fb7b6978a34c5a11066b Mon Sep 17 00:00:00 2001 From: Mingfei Shao <2475897+mfshao@users.noreply.github.com> Date: Mon, 13 Jun 2022 21:55:16 -0500 Subject: [PATCH 047/106] use blue 3 (#1952) --- kube/services/portal/defaults/gitops-logo.png | Bin 51731 -> 30338 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/kube/services/portal/defaults/gitops-logo.png b/kube/services/portal/defaults/gitops-logo.png index 8ea938d4ea4ec7316efe274d34d68694efb9ed44..4b3bb8f51d7455cb0beac8596c639c9b93629977 100644 GIT binary patch literal 30338 zcmY&=2RM~)`2TB{eT-Dds)$gLJ#&guB6}-@?0M`}IEavx%*fuGBcqT}RyZ<_y-)Vu z|L67n{r=bg@4C9WF1_PcB1}q{4XzJc}KAh?LiGPyC1T@YdMgK3(6fL6sJt z3)E^ZSlPh`U*$M&sc&U_9QMdRF2G;eHm^FG-8JZq<72r3Ee=b5?5l7(ha>Vgl{yuS zewrKsSF?zG*SBuDCLVvm(4-#TGn3`nQA;g?>Y$m`z2Xyy1#*NEP)DO8B$ecA 
zU*yI#-B+Ov0i+0ezV7^jwIf-PZ`AIjb>2#?eL{;Y9|D0;!!SGHH+@Fm$sB}<-Jy%3 zXZI9lAcFo;$>krSE#Jw|nQ-}|825<$uZy|onm=gYl=hLIT>Dqlo1V4hw%Z{&wwcI5 z{f%C_?K~k1iOH>Jr)0~_kday&1?QNv>i%or*wc{U-~?GEM>6S@J4oj4W)<_qxS;(~LN16UVtHgL^_ko$dD~^Q z%rwWA%jE6jQ-)Ba6#f<`_~BEq#!}I*`|>pAVEK`;bov^%Wyd$|bXhOem+-MQPSzxJ zQ9qUKp=#;&pK^hWNpH1b+R0XLtWj!Qah;GZwRwm$E_jUwKhK^wFI#(7_*|8J(~)s* zhwu$LX4){o;IDz^H}9tnQB9jy(CFAF`4N4wT#VdV2HoR%t?21ShJ{HrSo@Nvy zYeZnBgU5*0_|P)KmPM804pG-Rmy#XaJ&Q!Drp4CH>>*VpDTc7q<;nY~BOE5;&tUxI zuN_oU;%nCJ1HP-&+9Ea-vq#&#K~**-cFIgacb^2&<`WmExKbcUe$v~n7nlnoRgN7j z{N6Wha(i2E%o_jO8eJv$bhB|1EB&3zp5oglo)ulp*A;R6WzIDA%Xf1*1yO?Y8e*Oz z{zayTPq=VNM}In1r3!iuqy)!k!*Mlzr>gV>Blb$XK3);@1CCHVp-D>8(H)Yl`v?z) zsnG_T-sSF0F(F%qPOF=$PqhdZEO7Ub0h@=u>jhawVTlJd+)U=jYb+5DlC}ly_C8lT zMf3h7Jf|y~5qg1Or7IML-K41hjXLnq-fp+Hm+eG&Gd4Ox)C4`ujYgH`W@hxM`otUL z{ucLVsuUtG6-$iRONp6x#3ojW&Ksx75jPUxj@D>M_IeNZ6YQr#7`n`@{NV$HuCZ!8IDbA8CQ;Z8WCj5R4qAp)~ zXqrHRp$)6~)~hooE-7^^H@Ke5A#q907=MMl+z+}_<@0`b`U4++IN$VHfz6+C`up1) zHhxq02)7s=N7q^0&Or~J=JU7`?-Dtg5em|WypTV9nUoySAS#RBTL`bISs{CRUeyNJ zM?QB8mHn@dK} zzD(WBE5=KMss*tvn_@hj79YFG z^V4*GFge;U`C0Vm;DefX`XQ>jA3i*lmYWcP^vrPg7L@~PcUP;HHy$^%PyBt@SD%qf z^W*fiu=##n*+>5PQ*?KcvX#kWk6CkQuIV((>&S32!cs@-bX=yvPETy{XN6*|qZEy0 zGVs@o^SzRKFXL2>SB+opt$mxmZX!gyC*UsaoLjKdoM)sadwL#EyY$(So-f#)ldty} ze>YS{k^O0T&Dh@$H2t=fb`OJZ&wk*7j;K#;No5s8I94KbuH<>$7B6}#FYNiJzes#Z zD(tFHCubujvHEa8=U-((yZbRaU#@MgcIqhRHTut^5T?QOLB&VLM0fV&*UV?tX?XaG z3}|;pKay0RV@c$H>(IZC`?S%a_^Tt!Dg2Uy`TI<&4dMo3GK-DSdkt|Tp*mtLUDc?= zH%)zB=hvLC+B%Rz%QV`)yqv!FY&TNtT~%7-BPD1!LygOLZsm7Ij3?y}(&VNM4=D1` zR<5w8mA{{^zOyXr7wy>OEXLBwpP-|A!{Mad)UvC@j?-IxBA-<<=Z=Vk)G{sp;=#8Y z>X|cg+wyYMm3Ch!xtu&}CHJo5DH|$klQc2M2#t1;7h7*~Ow}EzN#raH3dxG?OS=&h ziKXZJ>SO*n6W+(I`hQ$TsC9eWTW$^21yC|4w?57ltL?L~Lr-iLY<_Sjf z7k%@cAz`}esIP$Ap3D4ZMd&?%d3^`_!JM8~QBQHt{2z(-KPn~j+%9^>IkTyqWSN4S zQp*@3NDjFE*UE2+P)Q`>cX66_z@@`4V&{*!9CZTp4DJ?UjIyyeJvqvo2Ng)h7QjMFyy%GrI-@v|$9YZ4R^ zzGQ03t@-o8Q_P)3d$n{!Yv;`@g1@a`Lq_Z1XdST#-!+ZyC)@F9dY#M8b72qCsfjJJ 
z4h$5pnkU#Pe^C*v9ZOR|wy!-QxM~|R51jFx@#%a-9-|JFTI5Qz{g(HqrbiCVkPXyZ~EN{pl$qWj>G0{~p-LqH2LiGY$+i_vhN8d*3#E$t91h zumfHon(CnP>%>Y_Djd!@`CG)D(4%~>HW~~Y((iVq_i)qX+ExJe?$FG?_792#V#Ao% zrEgxCVNS1%&owttrlV{f4-;!xN-*RD{eB9l}BOf1^T5y_{B?Ds#wj@$f(Ti5pLFA!OK_tTtXy2 ztunOBkw0un8bSG1a<%FtCEfGVC!}gx` z;!C8y?$jYoE1x#3L~}fqqXRA2p0SIm8XRKhjPg1P^A%ENS6AQGX*McA+bd_i(A) zL|tX^Ti$;s)=MYDuanaTy~m2WGpHS&UI^9hTo&5De-}#Wdzh7wPl4t+&Ph)ol$84T zL7Q@W823^pkhk{y8j0iD-%lIlG9h9&-iWfL#nScKVyPD2jaa0usD5bB6$N&o2t6E> zqwY5C;H&448i1%P?7qmIBXH{b6#7ASwlatZ*ItVyxfB3y5ulG|Dqu$QEPE{1)D)50 zxeI{UKi~Em+9=THF(Dpbm35!ynp|U;wx$lv&O)xoxGKDy*on{1>mSIeF5U;?xMLzQ#(yDHxVd)7bMwVdj zqbnIArM#E zkp0@vb}S>vTK?Q?zUvS!T{xh#0w^gdr2c6(bt`g}TS zb?X;rJl^=0sK|KnrK2yz<1Y$*BmJJI`i&zHB{hK3G=H6rA%UbEMQRE?{V~I#z9W7r z2j~Tv$hP|2y+Fc4ikVw-G(@*9`u{yPjbFrPZ~nSH%XnJr?Z~>C{g6R8%wqFf07}=w zte9(O%&$ot^0i~V$6!`4(9+F_6~+;K7sg-Ud_*(#uqC%Ght02#fA&9X&hNCJYF?n~ z8iDfZ<@2%*zD>6|i?gnTm;ANWKbEoV&kp;Rcp`Rzllw;>eRDJ+PZ+`0`e8n_MAcRV z48QlY^uDWgfhl^ybk#FiRNz8*xqgV*ogj?}mPB(~d=Qb__G@TlKa5CPopGGhA=UQFDrwk~srlq$mTFN}LJ9(pe z`UA})_N){kJ(jw2VMSb>_EMRRYDTrS-XK$Zn-2Kw+Lo5aM_&hwLEg`&-J9=o1l72oC1;{)!ocTb8)%$VC3OA40VY&0@I*x#&NN- zv}wKB^kMT-&;;7-n{Q0Pj>eK%kjirh4o+8uTumA>3~{I^s865%6c zqE~mafk&+CIfNSTIWsLl9V=GYABkQL7<{IQDbL8Aafv`(p`56NR>;hx{olRaMDv_y z(BXUSl#AK0eqZY0yVK2VA%BbaQc)7}^x1&5mXuh7k!GK**yB*+pLT~#{+mBzpVrd% zcmN)ffKfTDy%Lw~JEocOPT-q)xGNPxEynTiacnBKt^JhLVLXxz@<&t^EmoM9t$YX> zE&rAn(I%KwDEcgQR|mCc;>@!WS5%i>oX*$qcX!g`Uvrp!u2RK#Q35hID#fLGtb&g% zd*to4c)uskX>o$dBMcD_6!ZNS>%zZya{2pWpBTD$`zjtjh1On5AY<3vY*N@JD!P#; zx;`yy1%G`N25?494`bWI()bU_w@C5S#%Lpq$g{1g!V@Rs@k)@+J{;z@bsziU%n%di zat*`9?kgGKjnmF27!JBnci_?|e_gl5NdC;NUfbK?p@O4^KSg+RZhlQCzOo@Sbmx6BJ-DvX)E6TMGA%Ck!(a4|IJ%p?>!b@?+Tm^$0wIkKeV8cRo(3l zZrSV#?%0Q9%2yW25%XQWlQ|)}C|bm|gnM}&%R)di$Vhct7KYLbBs(~Ec7}B(fw5> zHv-KAfjq~_X$+U(kVCqw;?&p9^>1r^+9^l!DA_+BCu?p>9V?O8ZBAFj*wi)N(5rVJ zyMvE<+)tZu`0Vhy^rfB@y{dol$)kQfKdtlm(CCW!>&w69nh;+Nn4 z>o!kDD$9wI3oyPgijJAzy&P)O+OSisyEJn{i&1UX 
zSKrwXZZU%@y`V~<{pD%ZLS@73PuzO~VPMM7V_OBzyws(I?3|ByNs7SXqzUk0YOj$M zZbHDP`OmA@tHM3(B@iRU|+7~S^PUr1k09#J5B9I$ZSmv~QEBuC=3*5x728GCKZ1Lh=~Q$#}kXt;dcY!q0eM$B# z!@0z)TVh7^3luB|Uz8}-+6^7pp7XWGmC`E=o?3A=WM55t`xB0pkeIdda}D#c-rAIM!pN)u_HCM<%Hk8Li|&& zvm}rGl6Z~g38h|i5cO@92t=a;vwCblpZ`##kJ_7MrM))JeEJj(WDl?-m8g>?Pd7X4lmBfp=(?nY12~vy35uvo~r8eu|4htl+4*|5FR;Ay0b!gfWh_s0PU|9Ci zl?0(i2LKg`WMjVU<7V$_cHW-_ABVj0Ef1?nW1RAKaAjl7+s*wTfb(*8L?U6ytn$(; z@T~bbf%x6PEKeDz!fyYKPlmRB&)9Q$ z)n@^cTi-EFWT$k3F3Hy&ZwzKkdTf2=+a+1Uncd%Ne7PL!c*fE~qGX+>?YjuqKDoAX zQ^ttdCYF6HPq9$DRIJRL^|;@DMM9M!UTR%S*d?_s)S?J)e;@q44~6 zEYDnd*X5mBvy`&dNe@7gr3eC@3(BS@fq+G$V-a=wq6CAQXh+c|;!|imoz;#00vF)P zVGvRTRI;p#oinZfps}-os4)~A;xiL|(jYsf7F{ovWak6Pd4j*PB>aY{MaqI9_8FZf zzt|EFc^Dqnwl&luXUMQrNM!ppviU70*yg25^0s|dj2qH;YPN4hj!8Y|K{oXhQ;_4Ah z6HRWlna|g)3XPuD~*g-Y8cwJga^lQ)jPY%FX1{1?-fuam#R zil=aN3ZXEFp zaIZSKubi7rAxlM(>$|C}O=XIbnl*cE$suEHc&j7fga0jb z72_%ET71sXTf;FbbZHB_G&O&-Yizqg>OQ-O?bIS;p4NSd0?D=qBZm?+jm8C%&{6l_ z_76c=1URY|5V2~R2)TQaUV{GLVH`ffWMF0l@}^M|Qhr_=TTP5Sk6eFSoSz9bS6;eO zw!$t6XSAGG%@`s`!#vQH2%%OAhia@Y`OBg@@<1Wr%4A+>+Fz)_| zgD!^aq`%Nf4YvBv>z}K)aZ%B99oM;%4TU2)uh703-*Q?TqR))e(FRAHwDdcg$so!; z2?VG#%6qH5KGHoLn$fGessd`Au$1D$y-`@zomHf`)yz5f-aww+j^Y0Io?^3;Cg{Wr z$Kmk1p#+JHB_!Lj$9G!;@BO1wn9u+Ch-qkYRCi(&kiUQqfqr(c0?1rv@QTJ10ZOw=e5V}%#cE!2w=SdjU0TtolhL)C zQ9?KG$+WB2v|s`PvyOf0*czFCUJ8jwaNqC!2|_66Ry8id88++7V5|u1wVn}RRt`@` z{BML`RoC9Tr&QhM=7aw-derk@xb4l)RgvBlvP*^ZYDx>AK${l6dDCx8?SN^c<6)S| zc$YpW8#7OM8}ibvy84I%Rq^|#PY-7Lz06%HNuS{c>S>A&lD$hQ_{X(V)t)r|)#k4~ z_l$pShN7RLVHXqrW53xr!XVa;;d&m%lvM4N;ri3U=aswG(}IvdcPWA0%c83O(noSQ z*)Q(>!uMwK(iNXB+71IJl{kBSz>nby3wg&-z#J-Xm_2j;y2gc%>OR=I+Z~zw>iv32 zt9J9;NmF$|po+sgM|qipxwy+9Xfrw{L|Uhv!9ft(D3vd=%lT2u%{@158LYb@$T)bfS%?3_|S{?A#2h z3b;)Cmy1@oXdZ%-OHQ+#_(7phcKINZq7&ing*>PzN4iHu2yl)%g9le*sXt84`DlgH-tqPPC|{9 zJ*nxCj*RF05TNI$w(c?+96?eqJnFNZHXkF0g5k4=cs}7Yasuzb2%beJD=OmaJu2~) zj|HObu3#l>-m9sLjT5#zv?F0mwDgccM3ca{Z^Jl{FHR7~Tp)SG9<^H8#j=jVM-~pt 
zNJ&aGm)~29UEGh2(`ue;AYZ_1&4<50qrV=PZJ~J#|H`DT(vw!zCNb$L(jv^$IjUsF z6(^ga8U3ogiLSZasaY%nS8^QuJb97yiL>PBgQm?JNug4vD3Kk31hHfGvdvJQJf5wO zzoS??9Ta;Za^tP^R$p#h^nYgNC6YrsFS_dGQRDd=ctGM?-=&t_{GUl zi2um4)aWzK19kk{WGAP4$%}Kp-|soc&8vHQxlHow6Yu2>?33Fihw@Yemit^FfXViw zJ3h73FQIENlu37Q{y9h}RACY8zr)V3xU)8@|sSkNn`C^4fliG({L?vB6gf6QE9 zeA0efDg|At0SuPwL;J~R-cwVuK0S@)@O4Oy99pAXz`I$fXR3C) zY`#6Nk8MqurB&-VOJVWBokr?6dCNeorY27W&M8gG<|`acL9cczZCfO@EY#;5HYCR? zq-0y2#iZz?&%>^M=U2}{nU|Nt72>F!RU3_#qirGO%lVQ=Gmu2jKbL5#mlox*a)eQ8Xv61Vt&|Dvj^)7-Ns#f z_EM*U_s{tWpS;aF7H%_F5ri7pqrVZ5=Rx0!VL-%)&?F5;Rp?w%{fj*DgPOEw*13Hj6)nho>HLea2AANg?)$d<0Gy}rk*4+KCnTr?%M$!=U<^=py5 zvK!Oo`mBjrpamS-rqTGH_hvIcc#!fN)sjaRwX@rcm^jtT3-f!4i@r&nTB8*bGVz&8 zD)za??|MWZ^}8k^L_GQ59gnE3u7)#4X=p>folG-8WLFxoK&%aeU=`rWwmqoK_8kWqGb!1fJ_Q!4@)cs*2~75a8qAuTO52mk zf7>b1?)C!@0GvLs(#P#^2h$Am*S0W=BFT%5gm`M%bGNLIe^2HWNXB%QP_u6YS)iJV zy+01F(5u}dwmQtu9SjC)JCKMt$aY`*a$0l1|s6;~D9eW9B zR3S0Es9M%)2A4I!B8P5ooZRPaIn$E%8kY?z7S7=#o7*KXuYO`+mX+ZfQ~Ny`f9L-D z-WoOn<1tGY5k&7i(Cd*5HchXXpK30A0O6+^w0jK8M3vMJ8Nn+MPSaiXkW1!~Nz3~Z z9XIc+2=pI?p33fJJ2YzSI_L0nC6{gAD_^fsFg77!MUtp8!e`a{#j6Vm6_KkF5pd}jwnE7e1QVnij z&wWK6?82FrOz~$&7g;z5u(VVydE}6r7j*o%o(DEOR9U!(r&IC2sY{x{VX0iDTOSt1yOUg5 z+jM1gSCrl#2dOaO*xQgZ&5V3_rzZd3=em4Jcsd3iY2o~2jhiG3(yGv6Dk@ni{t2G& zr{jaGEni%J64}5xNN`;{b}lN91m>weQ$_ay0|JO9=~D^g%ca%q{dY4pa+Gqzeod+Y z-0!63wn60==-(t{ep7P*6tQ-+iXlY9eXFs@%z+fn)29Nn`W2x(h9tmc!}34~r#&9D z?`vec;Sd89tr^zV^H0Je*x0HB&JzMxT94z;)d%es+_~+wie>xRIBCPYNdg_dz_0@G z8b!gP6x7q37pB=RB4aDh&>$&I+`60CAqU_G!<0{v$Ad{`EQdUzl$!m~hhVb#0>WZm z7V6rOM4}LMdMlx4arTzBr6dD0i@RT_qz7KBfBc}(RS*%K0_VEPgoI5CxRL!Ua(UB` zib~Ya1k7~IWO4LnzP=~mOK?Wq8f^=Nl;6A3kCg+BXi5{fx+H-I@KoWxa2fYAtqd8ohP;kWKc*w2UgKGm)8aelG_XQ)}@VyM}x zjAfMviva>$I2@=mMfCQbp8l3czEGj7^3+wVp5eD13}^O#Pl|4|P4TKQdA=)u2M4;r!Hlr|fGlhHK%!*Kxq<#nY~>pnF$M>^8Iy(x7{O zM%j!A9_`Y2wq^>DY2V6>2*8rU5-(~*5p{3v8jQ94DhraA11wl$sQ-IZ4!dp2{dFSn zKps{GtZ*=i6RhHCuyl zmH(b|8(tD7b73WpC8+kCoGb|6^75A5+|o!TQIIsy=u#bkU4LqAb3tb7RjFfaI4>9K 
z8cZYew)W0qP+Q3|mSZs(WC?^?9Ej;lJ&ORW)Or}Oh5N>gbw4moOkv8&b%lF^*h`D( z|1JX^6VSeVG%rj=n0N@Gf!xJ#5FWP_LzXZ3hY?;hC;8}(2k2bJJ3Qe)WQd3USX^6! z{Eq=qzV3caRDcmY>Mi^z1c}{Y;>Zo~!sr^ps&xq{OBc15svsqub^BTAki^v_M|a>< zoDji?3P;_;ZjfGmft#eM$*jSYw_`YQ|8q&ehz!jPSXVw}giLJ}*}Oa?j^z)1 zKVGiI*j-WBMto+81=E0G86xe-x{0tlQ2X;7irI}A!s^rH>ndEeL8`xAh#81l;4fPx za<(ZXUS1w4Yx2g-}g8Vk*QegvggpTt#l2Y7jDPXWg3YWOw+Z_em= zco!c@-^&K}Av{y1NIEGOb~$Rjt5-nPv>f<28b4HftMN%t}K7x5S65$ z{_nNou<jy1;W?6`a9iGhFFURnM21o=??R_&rbBv8Jx z7PQa;)H9>Xt~TV^PA#(l&D3gffxYX}SqcsVRM*?d_}A}0_pylhuru>7GXP!|y9mex zWsBy=bGLdh*PaJzd(OIP=~+(%){4l_$x`x8$qvu>ty_=m&<`s5P~x&8%|HPO@Vu25T26r z(bJ%^1Yf947d|3LI?M4vyFQjfgvc*kO3?%5~J@T_o zihgotTD6aWhTM$egE4(YOytT{mNuLk_j8?;m{H2m`gp zl8mN%q!=@S55ItTcR_Z;wjuudqX>BB@-2Ugh~k%Boht7ahb$EW9BEro^E)S@_uf^m ztbNZ^5u;D8%`R?|xH1sFSR`wO>>5pl1BG@l>HoANk=3e zRURMM8t%=_9bF=1vBQ;`9vbZC-Iw+4#sr}Q3yLcj38D9(mPzIOC?#69r-_fWX|A%R zN2qbKKnet-Qx&yE5ny6U=1c_%i{Z8$l$m!I{{W^X7;F@*6Cw$3bpy2Hb$W{bJ=KC` z9Xj)m{fr{QXgAT5*3ylzFWB3$QUKg_Ir`jz8J9cMHrd*p*=Y#>{=&473GQ|+ezIeY z?t~2)Jx)#Ewsd>8@49Y?suK+1i)rI?1XYBe3C4Rqh;%#tI8&do*M`nO;F^L=Dh-$@ zYD43HEx=VqS}Nb?vYp4-1~0)GwS2%^P=~YRgAF(@gPB6w z6SADF28bPm`!f@BYuEK8EbCeNh~XPY=ynvCyqp4_6sVNu04F^YHh_;uzd}R0wEwyP zCW!~E4Jd9RVl%V9izC@jVM-vcF(CrP;ky8|VYv&)zk?EQD$iUvM|FLKL%|6@f~(h{`;z$xG8I{(_>k;jA38qVRVv|ey9JP5=4lz7dpiPIkeQA zkbDJNzXDAF73@(!MVIdvAgmn-!|@+WN6t?Z(qwf?LgixwiVD@)op>IK^XxUO;=2S+ zXh0mt2CG#p>tWA%BNGApcE%gX*wUHIys=DVu`IB{=YcE;`O>G8|9 zJMYgRaa#{k#fINX*1$5SCg}wqI%R%_S)jD`$yC*wMv)i7YV(CGyQ>M5h$h!!Pgw1Iqr+Izdrs2+jAL%}WiMP0 zu(>ySs9acC5Z-dZrqqtJ1G%&?Uck&#bK#~=4?L;H5;SItBTO|8!5b2!4okd2AE~>b zlp}xmKp=a@Avomx|>tw_r&odIe?|W_Cxgrv`G}^rC)Z5K}HKbW}Ghs|Kky zBFedBWiq|>(&d=yQyM%ba}QTWUg8vEZx9~<8TQ(^?{TGox3lvJB>{Dno!UZq@B6~U zsZv5To2>&s8>r|Hgk%7U+XjVSZ+6mp3osG#@fy}|9 zz%bh59t_1hE57mq^G%=Aj!2!nQpNp^Obo zy*jk@`cEPM*9`(JeK>ZeCmIJRcS2U;K_ znO|L!b{xKir%iL2y|GeZm)d_G5Pa#YAAl0x?@SleH#tcF4VC^D(;;Rg^zbel!LtFN zUtxi>3{t#7cdFqCx+q$*;oh8-MF2ASy;@g)u7l>k?b?ZEtoLHwhy`ao?4`_-I-h~f 
z%N<}c5q_E$ai3|4&ZxxeXM7GL7EG{(sTe!0F5QgM!$+D#DQKw9{@l(0!fH35Pf5=% z+fK1WbFw5ySk=~EjN$^#Fnwyob{tyix{T(!2K?s6WlP06A460dTgcSl5;C z{eGXy!Z|s=Ka*1_Bo)?vQZ_1^>)8A8kxgY^ZolX4(wbn#vi8$EEiIf^Z`ISL%?KEu zajAE#a3vl(%dL)o{@o=b{A7tkIC!1kqU#Pn{Zd?+Qye zik1j0Yb+G4|6|g!(3MopUmClzL|ku}0E2L{lhnKu#6nzKsEYLaVvjmWFRolaNK4Q^ z&?hse0}qh6N0v<0<_MPJOvo}eXTxHFlV?DZkJc@A23Qn&r`nadQVao2keMjv)71@}ZvE@-QY%od0z}!k zM)Q2&Q-vyq`}#W-)Q=Q7@cuUC{K5-7inkjvuf2OM^cSKCps@<-hYEfH6Kq&SKM}93 zza8P$T}0NvNX+iv4e(J31l^3cW>J){G;S!M7#~fPRq$^r&<6t{_U~Gj#!Bos(vmhc z-Z8CBUYEgBegClJX7o&>b|-R2^{}(s*sH?}tF{=}R2C35e`PUL&m!vSS7-d##M-DW zr{`elfrN@4QrGPNFU=0hL(C-n@n9O#a-R6bNkkTXhvpjFv+c2-p*AVT-wm#^I!!vCHH6E5-@6N5TA&XgKj3|J9qBaN~- zV*IDnsWlrpmdSMeJLlM>l#u<>fI}ikuvWY(*78sKzN!1}{WikhmT#II@^@d?1|2Bb zC5$&W0YXib*zRyNS`-JABbld=K%&hXq-sfp^j7PavV@L&)4(^tGPVip77!@W+t`xU z#a2%qAp4&HR<9rk_#%%xgZmJM;}`2co;aofVSd4|N|qO_^nm!^m2_XU=(SmPLc}m zUVoGz?G&9}I{s3exyXs9r2wEHg;|T^Cp56f$%jr;HKEa-0l)9E3r}44gU- zctPR`?MVv?aZbXjet(Gv^bK)C$#9O#es*0Yi7g!wj}&J9K@vRaMGztn#!!bT+(g6k^W7?7THtQ z&`ycC^x&KhXBuVs{|5t*fu4xfZ~>S+9WSNk;FRN93rqV_j7jGQt6rXreqQfHB7s*ym0VcKw8!g>eW}E@F^e!{`9vE3N+c(Fvbp?L` zs=L4a2pW~6CqaT%UOMP4-ocz660vE>$Ad2n0w#1?KimZ$r9~JSOWP#5s&MXA+NKe& z2R{e0Jb8ab_efRuYXH&<4MxeH5A?&e@2C)K6IfQQd=VHLN36D|4Of`vE6{zPjOUS0 z`u6-HBDx4>8TRg1!1)rSBiuWMlKG6FNbWuvub^JM0eJ0s@)`ly z4`j|XLjgX%6i`CAaU=%IR5LbH#uK^p%l@wL{y=~f!C zBZ!Mw7J1C(4$!`Aslw+^M?j(EW(&(pP%eQ6(|vu0fhg!95yti1Gv11 z{B!3*C5SlwlJ)pN|0v41tIe1gH5j|>Mr&PPS_KhWmAnN7LoE9w01%!Vgl_Z8-inEA z&|U!0#ay4jJyhta>+bXq8tIvn<(4Lzt;C%smR4zv0m;YgAvYXu`cFe!8`2{BIR-N2 zZUuarEgv(QAXsikqY3#y_-h*IX~}FWf_OHs{JXdA2%Y|2pFZ{3SPf$HZ1dU9Xo}QW zSJ7ypl~aad2^XxRJ|l=dAAbM$v-<0I($7soYJsuua>2~iWtcWAxlv*Tm-p6bi%nBc zHs!g0-d91iwL>aI;|Dlt-j6f|RvTY2W_nR%mSeZx@;tA>5+IS6_?1v&V zrf`D>pBqwSU%;C*{!H`EI@C;e@VeCJ{n|px}~-x~EqE%zra;aaOW*Pn%;2!0f` zVK)Ni@0fr_;I9lbkU~B9bIA&f`*UT@#UZ;<@r#YK6GAzthm5^x!#|ZlClJV1YJq!+ zK=_`4HK{w_huexpK?`N&XfM7$DuLjeEsjx`ye8ejZJHP#*$p3EDhrJ0H^I0+!aT-R z^S0#ufLX?;h$f*^6;)&=mogc7}$sRz~FRt!(G@7 
z6Yj$2uf>>>2F~4m5W^kB;%KJkSibM@zKNguTF1>Z$J2$|7acFjEkXVM<6AX~k{D%w znv;KvedXuJ$ZDS_MH+0Njq~3|qa#2bG90WO1{hu1z0|fA0lt*X%%1lqqI+x4li3)x z!+%G1AjAnigcN`SX51jM30*e-6~0 z*cRY>oCigq(WnSpWNn5LIxSs!D56$p4UoA#!<)!ZR& zB*({H#jjR7`R#oP8P#9`y$BxpA25hL22$t0=Asgy3ctb>x+_>~wqGR3gdz4}n)=DX zAEvx#jM&@Q3e6CUTt6v`T*s8-I|OIOFxr} zAongzi((zCw(rbtxRZP4?d4BcuF%8`5hJz)pqmWC2si|jY_i0<++9%Yq%Dsbo;uQ>?N|Q zg;d?R?sqQcKyC}CR&Set=7}P#^r>Ide=m5N28-`2`H__43oV=Vg^han3&uQ={~A0d zG#ZS$2zurQ!(9`sMb}h=)oQ!1O^#H837Z=xBlDx~qjj*OTRz1qs(2btdt`Ab6b5Lu z{ElHBT$ygQ9-{6JGXnkW%*lB=&<|BfoxU8z`M@W9fdiOmzBozgtuAYw)IEG~#D%osq)uF9OmcO3sF z&P~&MP?7O6ACWJ~NH?z7bTE8Twe;)T+^+=9-bR>Aq{8B~o>jc=H%iU{FsAL)AKtCB zKmy$!ks=c7U$g0;brkfip4}e>E=OVvdc|k@ItJVzrFeg(WJT!>AwAuYjHJWAu}Z3m zTTNv*)X~fHYLT;(NMye`T(w-Ad+p0CK<1Lf^{nxQR|AO4`O7P@8CP}Ahd;6|s2HZ33L_VLa(+T<2>inUhJc!pZuD5M zME1Ut65)zh#tL5;yIH(f_bLtgKFQ0N-@j{6bI0CN!(>KBdS zoSrq{oeQFRgTF4Z?vs;24DKS*@(+jBaTU_PwP|i764&pq{f6QF2RC>n68U97tlsPb z^3)vHdxNuKx$T!|krcjfG3VhOPAD&gK_(+(+LL$YB0lnoHW>9+`|Gd`pbgKU8staq z-(8?D@hHHQ;`$)dwZntXrZQ^@DTQs~zD;!=t9Ns3kVApwn~y-L52DiseY|JsR!5De zVX4EIbRl9-yV4?8h`pPBWK#E*dt20|+b!GF9!GX=T~TQC)>|~s@9G0Ub1zFzGZ&Gq z#kwkn+<5RoV_^HyTn07N{6aLJV5@UW%T}8P6>_x7oSgDQNtm`tNE>9MAK~z}a!RgB z@CfA26Rd$XT4sr8hj4G+%Q|Kv*Nv-BZf5ApiJQ-I5Vu9P+=d9(4StINBE-2wR|uja z<3Uq%j0A+zL%Ad#-HF%sxh+NEVs-F>PW^lF_t$xxaa0Ufcw_o&72+G`^AU(3=Ci%E z(w~w;pdypV{#yKMsEV#`!LO-|E3>H#8vpCjefOJ7LQGoEb1cq(^&fRU@O5k)+4W zJZmhyUsoQsF{~TdU3`FyCHc?1-4DnLn0j-ttgJ3N4L7J1?N&*;;u^rz&7YC?szq3( zH+kLVPq-uNT@<18e96kM27oMekQ=~}m~_OtRRk2Hm4$QTyH%3mzEJndDSp!bo=jxH$$mkX)RFVBO}o~Dnr=$dKoFcm8bS9 zFLK3}w}b=^GF{0?-sXMO;kJ2_2nsCNPehocw(N8%TUK_=s*n&+A zJ;Ujort48TV3loAh!4l9V&5^Q)|`dEK7c5^*#yU8zx%<)v~0kH3Oxg!DQ380{%`YuvL3>L#pBxlof3M!A204_YCX1FwG&!I z)eTMLj(!+R{yNh#kN`dh*m)H4PMn&PTMuPJ0sDI?{eQu=7&bM}@qeni?s%%d|9|i0 z8dn){DVvfNDzZadQL=Yt_R5HBk1M1oqewC{B3)bI5=tV)B_ZQl*;n?SzjMpy`*{5P z?Y__Zyk6(L=JWY_?-rufDo~gFUAac*mjMbg;B<_3Pg)Xr%Pi<~H5ZGw#XY2ApK&3^ z{b48pHp`yx?^ffsJ3IPLe%0%~X12F&6g^{dVX)|aUC-?#b{1N9Q0S4%Kl|l5jb}Yb 
zY#FA`|GOmGVa>|i5Sps5GVlFeLb6IjJ>NEqL#XH<`%$2@F0dJvcW~eZf|66Lev)-H zHU2IQHb64y_D%df0wj%4lr`bb-q3nmYGu|mG`(yt?(yZBPQdvOnqL``6=@ar78XI3 z(^RCy4~`+j%ANZ5_XtV+`@M8g^=+wh-&QWox^fLI8b3U$_G$5nghek54-)2IrfCirYJwV+T%O`I@j@~V*$EY8bhC0KK+&r`kaXhbWeMvwMXbcQNvCVw>0jza6e|eh0o@`! z3P|hFz}%FlOU+AJjUzHlRSGXy-cQU=E#Lwb7)zb?O#VDH2&kEy_W9-DZ&M{K) zrS|W&6&%B9lsT`JVVGwMIUPJlA&xNzWzAo5S9wO zkm+CZUp*PHw4d8Sa7~9rn^f!Ua)fL!*7>-`vM-WFE2VoB&TGX*JemI%h&(zORbS?! zyz-Vc<{C!eG;7I);Oa8mqADaB^zypaW{Yqk^dg}&vrWsvcnfisl(@n|%H?-67H4oZ zZm22oD{BR#4jjrEYn9@{n}$}*yyxQc2VXv1`n&t+&o-AlX4~3bAqQz_ntZj!`3Fne z%Xrv&p8O;SQDd+7C4xZ-i4h=&L3MO2j_&ZR>nhobSe6E5J>eX28PH}r70>_4yh50}9MIW|+`nvE}=5a*A=+sCZDOqZT!Oezyj# zeEDau7{z{Gv?HXj)^%3}a(|O9l`;{oa^1oQHIbsHlayjSB_}5_Dgvl~A{I!mTHub7 zT>Y24eFcj`SI0ih?^1qV;R?6$WTC50E4+1Y__;CSeDA@%Z+m=?oM^o6R%xm0%{*{~ zc%n*BU5j({3k!u?%UujeO0xnTo2jWOcQd;V4#a*Nj_=AUDQRkh7-vW@a3mP|nfN!{5is+9GwL5(a9WspHO6h;^y4;UYw(}u8WIP1DB z6yN7ll>6;RX%gK95{D|;paWH8GuBKmie#%v)+jp7;GCtB(!T8z}O%IZg`HvT7b{3lz%vYUWQwGXFP?h%9=zxiPOqn*UonqG&M9V9W>_L_!cYfrI15Uy`MjX zSQtGdYMsQ#+V+gnkKumu<^Qa{s+}ny(l`rn8taNcEQ>`f{lh2PYWqaq^f`b`9OY26 z%dP-xdn?AZ%mQf^51zL$n0)m~5Uu98U5~o9c^|EGD)7Ar~YFeJ``u!Zv;>AoFzSK2FQW(g>4spQ=NNO+Ei?RR=8ADOw6y? 
z_MYloTa5najd^JB5RQ*95%<>xv$yunjG4{&DzbDn^l|)QZ(Glic`_X#pLhjo39J}O z)eow7QNGniT+twrrY{l9=99J~lEPFof(Zi%C*wg$8EfC-G#joTeAgM-y zQyhVH(z4Br0mzMVJ~-{_X#Co|dV2sO0FUEg2(J`QxDPbtGNaX~!cC*^h4nc#wwH4| zY_PEXHvsi?<2!Mlr4 zaf%9dKULLCS|bO#D#eCtfyNp?c2hzZoWXbkl-(onJn!!t+DigW)O{R76MK3N1SPwh z=a!o*1p=Xh4OPA?!=~{?6T1eS;vF#@8%EEcVC#1$p9$kkTO&0G3S+WdDvusRW=yJ0 zV%A8RN)^0Bfl?!HXKjhMDdiFMWHey!J!dkpB_up_KjeJ6x~I3bK#+$f_O8bT7m2OZNB)?gf%E6GnheE+$3COc7?c>hmNgI=;mPz!xX zYTQt1eyRW~bm7_j?}F|-LaRChh2`C{cu|90JIRqv=w&!V8oRW!YTxibsgOIqEo|L2+#f?ILi z;hbu98vS}r6c7?q(nxn|O<4=JZtnC?9y!$h%Ie3Fh1ta!sK&XY<)825P zt$Zy6A1@l$(Z1thM+&i97mJG^lHcY`_|i%( zk}h{Qwa7fZE0*^-$q-DH+6C)d4rs@LA0BeGNW zL#RDi4Qjm6T=w3=LeKRq{1Os1_5lz5(Id;9xY+@2aN_S9m1q^4rr|OCNmT7TUOvtcqXXV3%lqecfu_0k8qZ@;3&ko>$ zdN_RY^&OovB3+g3*Pn#k$_~4`!$h;x80N6-;jXP`Xqeoy{)1o9Cwm5wuBWsX5w=0j z0RQO@27x6%!Pe9`jC@b+yM%fUydJ$e4gEq^NAHZ~<)k|H*16ZAO-F3-+Zc&bd9tRb zeBIKLK5Vqs_Fz3Iti{?eHEq;vM2JIIrr6ESzWiaN?|1pTpQEBra>#6M30s-36%{IA zWA0E5h0K5PACT-eQiY`IN>=22l8tNwDexabP<-IIG>*4Ss{FatygaQ-lkn=BiHj@1 z63X8D{s!iF26G_p)p_EVCQ2A;@I0>2#6q%SRdnM+5N>Yoo792onOnTPD%T&&FgFS2 zXvv`B0Rkw;?YvY{#-+{PmvWB79u~@WQE&0@$*K027n)|4Z^>vFuBwzV4Iu2SfjRk8 zZ@N^-uSl^R`nogq8YN6ti+$#ikzXktIO*THWGhzFf2_7E$+O;>ZKI*Pz+o8RFwoX+ zc2muu{%pee@LLmO>8v1(@Aj=-_G%TJYMiqDD&Xj~G4SFhm3!sycJV3E*2J+=c~=VK z%%R>duNw_~r(bY=^nMPRefHJcu}c(l!g$uxE4(5xnM>(_->{2HTKnzH)n}cpRwo5Z zhpo(y1axNvysDItWY7i1ZO4Kr6H@hnuR+?HG3)Na8(~jQJZ}6EM~KFeMel|FIqhnVvkWar?fxYS`yt(#FRW}BNiQ+ z(FdQ9=K8p$;LEF3S>W#7U~RjndmDMxd<7SKGrp!Y1XV@H6vD2ysoNU?k)dx2kBhZT z<7_ITI&44>gbU+QH)N`@^)^PDZqsXhJSgVvVMjTkL0;(5S99(GTp=pyMWl`HJLJVd zT%+#@_5tv$S`4kVTy0-x2LDv;Je|vhv>a`+={I?0D~vU%1jiLZy#qs7ty@f&a|9HZ>eseI8K*9c?768Rxxj2RUWI7K z_B%Osru^q7iE0=U87+1(y?eHB?J8sm41{ueBP3dqDs^!0v_jLRi9l#r>0<$Ia=FGW z=g=^KN!9qTf)Qsv!M)vFd2cH9-ixLIaK1r-pX@Xp9#RJ8=&`VGP>_KHn6lXSC*T_$%D-J=?A()ur#M=n%ceVqSicu2iADW zvQ6v>$~@dAl$80g(;YPgSvV=D$hcr5n=x1Cf(C5vCR0a zzN%#Hv_C_9nOxPfe#X?1dl|Y8H_8r2<^&8c)oH3oEbqbfDHlduf8Ryyx8hWg4QLhI 
zP9i;4o^1K-VK{JssncrpWW{Vx8M^DO!9r#g=U7WKqz+&V1{k(8Jr0P}nUE3r*@j=5 zOLq^DtELXI@0pz6$@%pc?mjpg5#Se8c&9acD)FRIfstF_EeiW+lFO4x5bWcCQ8yR4 zrTBRC-rZlIzd*arO{BZ%JtR3fF3mw z5E{xUR?W{pMutSq_`4Q&5)2WJ@6cpZtN|UkY}cP|UG&Y~`hnzkHcV9&%jCn(Ux;T8 z`y0VWGDVy6)7!sPiYs$h|HOatH=-W~<;)D%#NT&cn}~vNR=e}i?7lADCA+!am}r-MgtBaaBw& z2#yNpyMf%!6o5ok`IM!{k!m(bcxuFadxC=2T@GIfmB1^G8rTPKd4)AZe`;BC6xE)e ziW{bjP~?Jv(M&fas^1+J9O7ZR zrE~kr+nkEclGK7PAjZ7484IGqFs`N;3E#WPF`eJmW%QlLi={%bN`=v95DeW>Tce7V zEwZ`7%p;pKbdYDQLJgi<8B&M+WA#Z6;8Jv#Ab zBLvWdF58b)*A(=)Nq(ZL;Y2cKy% z@2JijYW=?Tb(fxU&4YLqHB}164l)KC-l~pMKlycEd>ZNJu%DiEDQMHBmM}5mxZ>sd z8t>e~67~+Qayb}J*DS=YdsD*0Gaf>}J(K3Kb8yxG6cR}F*f*(Cwl5&L;hyq=m{TrM zZCt)fy?Pb?i&z%9palE~(VLB2EdR1w5YwrU+4n>R{Lh^q`Ac`|hF>-510_tMmI^bZ z#D8wdRe#%(97!LyXzt(B8d*4z8GqTWr*lYFu(==vCn@=R{6tpIWA$aRAqof*fPJ~! zPj66~@Sn5&w#4;{jdYx)es2bAgRyY^xF}ieaQ-pp(U-M7xoMJGbM<^0IbPw9wGGW+ z+J0*zwK6I*H{Uu*%*W0GzF@mc)}9x?>*3=To63IW%YLcFuzs=3psRj^o%dQf2WKR? zz0;&)q`S>)?bsl}$y$vvSvQIT)6rzQuc^|1gUfrWpJg!Y%pee|O?fI;-}f{x3ifo^ z*hwPiJfx+7{-9%GN4(LPF)g*C<0f-QMyM2~F>l^~X!+61*t*Gu=U3S7_I}R@h^KM@ zP%9%nFeQgQEz@u`ckP2hPr{>XYE!9rT-fvNM`TQafCju}7#4lPZK5zCAa+{o?TxK~ z`*z93`>YsxbCL6{)Z762p32+l|Y0NXZ>p*u*;)qC%Bh^5pE9#%2|b`u{j zMHe-%0%j>9nTooBN3`En=zLtu@7p|tXk@yNc9!0snGY%e$K)=WHBJ+11fINs$a5w8 zUfsK<{jkN|HD8DDHd#@)i!EK{#sr< z<|*yAh~*fuL+V)DN=xl2FO~C5&Eh{5yJf=#2q-@GuuizP$IcLzbPLku4jzGbjMM*k z!)AHk3dZe!1EsccwF=+Oyk*n@Bv|ZlxIU`nLzP|XJI{R|4KsgtN$luF3qU=Iwmb^2 z0w%J-Y2^E*w5?MBbQrDN!ZK#Web402ekx6|?dAOQl4}7ze!If-D*dH_w#8agf&pXM7;M9Yx<*MO=AMyRGH1);gZ$ieBzsxqYlMCT;kDbL2Yi@o z>f5(uLKvI=2z7oZC0E*EcP=KUq|_BJjudztZf&YWj+qAAO^7Qo%g$@#H(q(PJ!}t+ z%hk^Ypb@KrrbY{|5))~^!3v*uy3!eeU*L5piN;e6MoMMgl#TmvhM9KrA$B)2#A}7a zlPjxr`wpn{g}V_i&(o^+ABvz}BS5q3k&6Jc)EfIj?SA`&m_vB_X^;U))=!XAiH5hu zevwzY?!F(o>?j>4!9bk$m>;w)t-6% zD2Yu=p*F{Ci!d0N+eLon_FiAHG=rqRw64NC;cv z0h2gkOg7pr{c0O4qL@(G>|IC7%EezE+=GYGj5!fA=>upI0U((Je~*|9#T2c0!YUTk9~7|Mj+j1HhJ4GhFgM}7n69i zP&e0RWqEX`IxTW~y|PTlbdnaJyL}(eks_5y~zcv!kxl_+W`4zivK2gj>jo 
zyP#_AabfRrxD_plK#PnL|AgZ)op0;U3`|@T#+!+6FI3iTxzFvKEj$sCeW8_d&H>KJ zkPOF_59JIFy8c*^*9Uw*FmYN)8tzVDg?s?yt5f?0K*}G%aJDxFCKg*vpS<2JIZ}qr zJh8BCeh}h3N=1vOK5^#W2<@U4K|5-~4ju}#rX#-g(H~P`kiXEiw~r4R{uWMx*j1>4 znx6_J)C~~qtuhXU`8R1=Ns zg{8e@)cDwGs)cs9`xH8pXGBj2Zw?UroaZbi--e|t$FW#LFbaJZH%!RdE+Z|3inOtR z8r)k*UgCWmcp&f1>d~ij`e8{RzXQ9C~N`4Wa9U!H!-SilFcyB?k zcP4P3hYzmN03K#y?E~O`VAYFckUF#Kb9V|(iX`5FxPt+?^D*w7j`o;H8&8VmZlOl2 z-+c-n$7Nb1>0Wz4YCOKhA1z>MwpJ$&uN4K-lT*qxaSBziVF+WkQV%zwcKi@I>%Elw z)sSD1*}%f!6CWuNJHNt@%8TkL6Z@eSf3BzZFExY=);aG{g7d?R;Cb>%tgugw{4yvm zpW&5e0Ww@bb(U+XKUe1Jo9)}0nz0}Hc;3*^5C*BIBxzLO3MjnCGj{_x_|x;JyQCZZ zYXOhJkxA3Sw=l4C4e}X0N9Z&`qG5ZpI3!LO|M7LVQ__+-xVVC+>z?QMK0HC0#U>;5nsYvwJxj*17kOYs zL2B_+E}Y()ybK1l+BusIMZp$a2;ETaD_Q~pcY?$K(f{v%-()di@rjJF^jw&<-!t)} z=lfCZ(%Xr74BF5yK9ACoC|dxfpml0{t(Z-=FbG8erq_x)aA-?ay(q0>jk)`7# zG$x>=y^Sd%8VPw~(Q2!Cs`QFEcRn?d#uvYTo$WF=GI8A!R`v|7K`GxW{YrX#3rcF3 zf@gQ?B?k1JsFa9)^Z9 zYa_2`y_U|ceJVSM0E+|FCP}YK5g%wtPrpmqJbD5ITX+8}+7Y^40{ZD+N1ui8{pFAH zwmXl?Pd3uQb|0YC_;1ZA2DF3%LA2--_a06J663V|t5d4SKiXLV8t7>QTH3@eW^e4BNEP8$o`CV$oHQgRSVMn_PTzSlxFfNe)p&)S_Gmt^Uq#WZF^C68*n7Mx_aQMxa#+9$5x0+3Nw0I<1gNif#EJQo8))K6-OYxkA4%Phn;&_f)?u$+ZUj50G4qf zT#MND`_22m%uRqJq`CUL!y5tsyxpdVbW0tJut#W0`>anl(2g)^JT8}JL{)ytBjdp4 z_~p$^PI3IicDZ8ZaMuf%48#;a;_g?djpyN%)z!H)-lZxRNTO(@d^U;4FB8&&8_D|4 zFeXBb6MaZExk&0NfsaURbI1h@DDW0SyP9$NG$V5jxxRtDXO8dH`jyhhGH>89010C8 zlSW}t(RZ~X1gj!YAJ^#XtZkgJhy2x#%NHiUeJ~kCCh0qM`dD1=9pc-p;Qj>J+}KU8 z-ZxjLcMJTgXkRU#G2bw;vvr-!f+&Z=BO1!zZ}Cg{Z3z7LtDN!vsJl8DHiSf2RO0g| zpjk}}ckkn2;?#rBh*!6Yko?#W!<-lE?Ju6)#s#t;OJZobLuIrc4uJR^7O$ntw+iY; zuGGwzKSU$oS&$$7#f=P2si3RL*W`WmU^*;s!sNg8cqH9k@GA%v%jsQ%=M#rH*V;0A^VYHM^gU|2Z zKpP@B90`etoI(pP!^8PBARmDB*(fcH9D?JA)c~qoxuCm+-Pt_f;T*=>~oo zfH>}cBhg6P(^-hF?cXW_;SZ?Etn_CAS4~jDT+zu)8f<5=kH=0MB3M|GM)8raPef3{ zb}DkhST6v~wI^9E)D4|dvn!Y_tel>jmId2f`S!MJ%4U@)j~pqu1{*qrT4T`~h!4U6 ziC^C+2>iP+Ip?sCIrjt&DxI4!0-;sqS1PKGCXsul)jHE0rW?Ps4k=ey;nxAJ<|zzG zs@lBXt;s2Rqi~*di*@^iUK$wZ-q$0>F$JlyA2>HB^J#)RaRzRvvBfO4WT~`woAkpi 
zDNO+dK91Ju_P>{3mr^=ToT5n<{A$Q7#PmlgZqbfNf3q=1@EgtNCqEsB#!gsUpwdV4 zh-1eRl+6weZj?gGj^NIo4BTn@pxV1(tbv?~bTgEGvv_M`$_uh*Qk|FHp1kpU??Y-y z^IFASQYeu_`}A*2r3dCsi8#NiDHSRGgDntKJY*y?-F$NPZ6+S?*RRbo3np4P>-A3} z>3$0A`($921W21>Kqp)um-7sRT1VWy@sdjGYwS$vtemr|B9Unx*I@XKZehA)&ySg| zlkM|HSYqeL|8C4rLE-aEAM%7)%>44=ITs!t+1=S#9ZeB48N1!DGs9%ipRri;Bhu}| zO&F%geAu%E!_pJNY5880q`{wX5Cp%2JTURg8@Sx>jmO3_AGFFwKK1^K6mp9-rvuC! zvmY9qZZErz?BG8XejR)3LYj$8zY^FISaSe^-(aPAo027rF=_4EL>l%V_}iLI&be`= zBmET@Tf#CQfp~`13rCfO zm=n(FT}LCgG7|pTZ(RhFQNJeT#Xj6wm6y=sL+loRf8*upJZ9nxbAAldElUN2kQC$> z=vS#YNN5R^e4nHa7?31MRTt5=n5Y4jcPAwB5TI`s-}&~FfCTaH1Njtw8)-KCuyk2^{yntne zUuA`6vMZ+$w$vQUBB2@|#NRVB{VzaBQV!u4V*^#}(-?W;^Al^(VUdzB!}FYMNpC5P zM26nABqNkW$-BPc_?st+4yiR6DffpaCXdgb9<~s-Xag^)Uc5L#Rx{*n|EWlLx6zH` znMfQq(5)c2dd~xXPX?1w9CU%o^G0*zgyPGLtYb;JP; zJ`#(VFzXY!#@%Fw8J^SF$7}wkJOso9C)(5vrRtHajWan#jTFl&c1M*j85yyij7b+r z(Z?Fov2B^tmp~7^nwQar{c6GSDNX1EH2JMB5rc{YF7{_32_8Qx8aUIflssQU>TYk5 zxA9{*`=wt^!in_94qtZ=dCPL;s&2^XF84kWXccpl%|lVAM_Ylj1GD4Beb-VwNe;MX z;$%d-Cl&3xMe1zHsLWxBSCb6H1m9yVT+X(>M=;H)LDn<*1u5vxlFr3=|*ssJ{qF$hhIAM!UJNwpeL$baW;8io* zpCL)H(b|55xfPwk7`GA~#iY7YJcU%GMih~ipUKLBqg>=zOedJMO6sU%Xs~h>ChSdW zuwZOcE2NKV-8Kr#qAm{EURHDtD<}I(Y1gXBr6v}>n5V=A-Gx-JW!FWdSQPXuyryoM z5vKnIy<$VcqX3D;zMN>WMG&S4O!wl@8qZ@Ozx2be4k>1*ZMpPxtfjh@`=zm0{GW|UJOj@lN~S{E{HIewfu zmKW@G8Sao0Gjh_}k;t6GWLnTb>wznpgc1`v|9gf~tJZC=Na4c}2yK}=&wP$waI1{{ zRPuq5X|xYq>cm;niz2cO8TYQv1Zq)Zu7l@@`hP(doLh(xKMwr`%I0$qMqF1UX-|!1 zf$!sGl$EK4?$_+D9E?=a-hdxF047hEI>@`CVExcG@YRT$O=MNLufjVK*T>CTNyJvt zLOBTnFRB*y=x0Qso8WB*AH>5?_g;LpeTSG}@Neb}iix+?p!W|ES7U}b1Z$a{vNmL! 
ztSUIa9!9|Naroo^x#5TMNRiOD)zvF|yt^#mmBpy0CzodFzW4o4ce+l;{(%k>1Sqkc zy9US^P!h`Y;CSkOEInnc-Aw{#v4T7Em>rS1T_!$oBHoe&T<+sZ{cr1&ht+J==44J% zM9%g=FC~7*A@{-m)Zcz8N`F4?L3|^bK!y7&Z11F8r;KpvLUp_Bxt{;72*aEE`eQ#2 z5cd^fzrNz4E52M;DorM=otN6jm37E|fgQ``g3m4V!L^cEH(dZTi&9?lqFk&c`C>Z` zn@7jS`1vU%kP3uC$7CTPkG%zg6^z|R$Gfqhy zKkD8sfL&rl*;9~5n^o&r*S`^Be0hGeu|xLUze!#o5y}P_q=4PfeK)ES7{K|_nI_U9 z54nPZI>eu&D6g3HlLL1DU#+wMD}SV;8!=;Bkj;w4SC4m0UiHgZf=;s2I0f;=#bc*| zk|qV)-(5$xh2bUSWp!w}{0`ahed>V4pAWpEz$a)v0Y((b6*Gjq$}1`{9&V{cJcfhs z6w>8CfOrVnIb;@8rl`d#RxVPXJ+}VqSM=Zo+!gywhN)osE$CcSIcC?Cr!=lej`R#) zIo_;O9ik{Di<>t^X~h;|Lr|d>#DslJA*PkgAC`NE!FHbs^2aY!5hXz}17fhkMlouo{&?^5ltBVx!$jTTK& z;!2W)F0v|xqqW~NjG>im8s1i4vC!SGM{Adi|8o>5!G4dM&4~?MiX2T}F-$N|Y?^W# zWmjGknNDq;v&~TfHsao~Zg_9LCAHn;xfFF~sdds3)*gUTxp%rLS4VzA<}l>#7drnT z*@u$0}e__m{v78qqCXNJEtG|O` zz@hw31)_*=bcWJos_Xs&6cB0hfq>L%y0I(J=WlG@Twng&E*xVu=KZ2M{8+c}_C!Pc zj9NOL&MP}AZpP|Yvj_>+IxkGs!d(5hVj_-@ZCD-uWL*nQ0)ED*HK-JknD=H**l24jENf% z?gnghc1QFsrUYL0QFq4HF!)4w{Ld8ymnYSs-;tBUYe&%JUbq#4yg?a!8WwUxRSQ+3 IY<>U#0A731h5!Hn literal 51731 zcma&Nbyyrt^e#BK1xs+(;Dft61h?Rx;68%~3+@m?fZ#H?ySoPq9^Bm-+;+a-y?b}} zzn$lqneOW9I#u0O=bZOFry|u=2Jm(>>KSXOu z6-fY~CLZJ290_)g=Axhv0st^R{P%?e{LUbP-KB4N>VK+$?L}$yRMe@Yo!!1t@d9~( z95iC+R8&+VZeOf~G-c%e_wBGVQ5qXC*hPq)-P6+(=*bOqcC%*Z6ciL>=ip-J;$nl< zU;}wOfz7?xoIteytC9b^9ht8nOE+5=u&uKb)qm}pTR6LeMQLdM6ZC&R|Er&1TdV&^ zk`w5E4+}Ox_W!Q1a{@Wo|4-YnTSfjW6;gAv{R)%(U;AR5BL7qK|5NtA<%qEVC;b1_ zng2D?|0#tHRSaE({r@~RF?8i#27LfP0-z`(sqF=K;)fDwq|@>ydbgH#$|Qj&sZNCy zS&)zrk_UXuLJGoL3!@9 z_kQU3*~A#z+xq!ROZva@#ukvZ$7oA+TAHug%S6k*XQx}Inhlc2fCZ=D)9q3Weov0n+Az1S~` zD#g?#%rYgozImBGCeU{&R{yGT)tl$BUZ~8)KYMMBHX84c0cF&=J|Xh>n)w?R*!^Ka z3i*Xoxp<21nSN&TwyhmCh-=x{Il*V<2zIBwdaR-Eax6Cp29Q+k)wc?B#SsYE{E>^K0$qY`2$mMTECHoRN4!@ z8ll)Y)2B8f#lJ=2k-p1ybPMtF8M}~Nu4Zif5V0ZVwxhuRlEqpd^Lojz-{u+5n(#2) z)lbRwr7e1#VqXC+u#km>r103dL4JJIX%8(mEUYpOd8#&7$-qNPbHe$f$cb}W#D{w2EVkT{RHKNPPpe=lQS&hxohx9}98y1t`2Zxl> zGE+tgGTObCFzT^Z1LNg@?zM8A2&SERU4+>pgxROBG+^YRFncGmA2FnqC>E14!N;_J 
zqZkF9zY8?m1YE&4|6};K!?DChDEtmnT+7$U{Kqx_JJj2xjSXH;3}ypI`$XROis!#d zCFW4iO9x$B6X3yEa&LaD$KlumZBiwO-oVwz1DPQSK5}9Mje;>bos2Z*Mdq7#`YxXbNrcm7JwcUE= z&(T(Euy|4KCdLp$Y(Il!ezata)@7b0sEXd0`W4aYzUz4GU=r<2EcPi{K`iHdMd0yF z5+Szi?zGM!-q;iJsk&=#oY>GK|%v&TZsv zE3N(UDfDg`cdw}Hpk{1@dvBt5=v%^$vN1pNyD1tZsQG7o?XMc+mimeblI?IfFV1~a zUDv}e(d)zeMixPYdjwK0Y-`cA2g3!C z>K($dkx-hOw5_X$G6D90zipR;q%5zu?5^w%_=QS}ktld4*=k8!CuRZ_d*AI`Hu*Ui zevI@RR!^n2SnY1T{TbRiU*L3~&lp*}uG~e&xaNxF)A$}L&KKW@$W5XW9Ht|a_ZM9u zW2qBe{q0vaCEDoT)zi0;7{Tng)I{NySu>L)D}cg}kuJoOuelC=-CVI}QPC9o4fn3% zUD?bF*?Osv+!_1yq0D(Z`nL!_Ir_66WIZmymP(UkQ)N5=wvllKX@%n%q>sVDkIs>6 zzmV{dy$@2i_pZ+P(p`CsI8k9%xt9ZT*>ZFavsR?VR+16o(;cVtz8haoRm}C89?g@w0a2b*E8N z&GAR*x^aL=p|Qs{o%}+!xss+`dshEEfsH-9BzBq<<@H5y^y|rt7^UdtO|CFsTq`-* zA|?vfcVR-&I9IXXkRwKA^hJ&zJWB?d)?sQZ9+f*l(2T0Vx{sU=Vp|FKzW-mhdcy7- z;~?G9pU5^@-=tZ3mEYl4<*0P$-~&<&-1U^`p!_0>P=^}Qt$UNZ1C<@wxR-S7U1>{q+h|@=EY>@5(mhg_}Ah+ z+n)hwMX~!X?|U**=k(kpIds12kox!B472=_b%dT_I8*_sE-Uwk$E=Aw;=*dt$!+;( z8ps)b`Qzjy2tmup&kc#64imEurT4&|!o9zFangb;yZr~nHz5Hqi>+LK(Z_e!GvygW z!@bTS{7od4=>hLPJ?pL2R@sS&A=kGozjp&ZCfG|oFLp%FOG=C~Ux~<0#X)m}98aWj`eM3{{~{IDlN0?rGgVEa2=T_Z zJ191qZ}qG${v0dqZP=3R2w19i2wQ04>|}SNndFV?f5dZHo#`{GheTwW@!iyVpL-vh z&DCufh;8c;6#O7Cr8|k?HVBORil@xDOXj4PqnerXZ$w4Qdi({EpBDfoOJt(JZO^y2 z#&2YjHdjR}?}n!_;5%EL0d`$x-WSm3ajaK(a0D68DuRFZ3*Dgi5^izf#ezX zxZ~sR9{**{dWP(vl3UWa*1qUxcLk1i-y}U{i2VssQEB@sts+hOMe`Q`S@aK$&h2y& zz+sGuavnLf_Mm`(oyj8n-~nCUlRs!i7-LA>t22Cuj^D7Ba7|!3(L#l&D#C7YW`+Cf zU`93%Pk$cbZ60n9#+@U*u!7(T(I6$VwcBn5Zd{rne})SYK3=*6e7ut9pKzu9>b_l& zyV-BoCS)CRtRl7WyyN3=1;7w^NsSLR8r+?!5Nq!rD0rG*>p-uugZ8|x9!AsGfr@3q4qqKBRS?qvCyoIW8PXO(30 zNJ;VyNv8e_vwMvOAYF{`J-JDh6|X+(o@RZ_kB^fXAaPdbeXVkN^}dQulI&{gev)N9>ndt7`z0F$u%@4d9B0ECWKS1!GfqK}iQASW$W zEBx4dav4qCb;iJvP)qX_nrKtk7_HXBjESnwhVnn!Ka}{v+uuuc$Jj}M9ZH|@HdM+# zpB>Hp=5&}23)btV2!Hd)6I@_*XF>k$2R;NpMKL= zLG!_!kxRqLA4H@(>Fx^wWE_|Q>D30jRJa@u;gQ&09^3c!gOW2Haoe3p)IAh7yx5y2 zq$JzZiI^D>DnmRXd-w;)vZ4M*k}*svAn*fnkV}W{JE={TBh}`>{Gv?@V+M|bg!t*r*#})T;dVsGDSwv8FlW 
z@1^{*C!3mS9*$^c5Z zg;!u8P_!p;S^i&)O~`0p%R4fAnqtz%Z4th&^X=CQK-2cn451#;3}*B^yg0+$VEy@! z-h#kkR1Ze0*$sXN)~;r!xgXg!Ug9KL=koK(JDq@oX(agZ=AG&6;eme-E1tO__BIzH zE12okH(eeMWTIE8l6z25JtWRMMjcE#eKvEuWXlC3n}?Xb3M@K-M|VP_sV{%Ox_k@- z0`}Z$1{2GDT?d~nnK*q@%_hd-MR?>e1=goommkUf#qnYfj!Djqp+4aneOvzkx~U@e zLSKeG76?Iu1gtjkSSP~UInohO@X65caHsXit9wsMV~<|%>+mE+U!nPGE7i2bFr9;Kkw#niYOw6 zOOeEJi<}=VrNHfV4YuPNux8Is>3fdNKuE1bbtGdH)ulk3_FGU-xJCP(lpti}PoS{u z4)?)7>GG5$diF*Ozg+5Nq*|-!+ay!^%sI(lR(CZ^#KpIR%&@AfI1CK>SIj?{EY|w_ zb$It6#eU9)BWG_@jlM16-h1wj_-vF`$c=4DwIdfKp-L4RJ;+;`|90?qE;{=P0|oV1 zMdi1NX_cUxC7im`Pi(C>KA&uoHNac%0?(!Q5ZK>)-+TR$F_>1$cu*pmdhAn=o zZTAF;cVXs+QI?a+zc-t5XNu4`nyM`Y#mwom0x^39W3ks-;vEzHj(#>r__uP%gn$s9 zm;zTmvvT6=S$~Sxvd-{Qu@8NG!0b2lwK{!K`Yj+wfrHM}vUN_=nxiyy+y9H;yi422 z$2^v-hb#cEV8eLnQL$e(0DZczs%Lzk3E&23%m3%~-8XdZg3A$eo!$g|_2Ul)#8AC2 zH6nCP;M>+OVL|!f+&a}dc6=?Kl|`aDA|)cS#c==#IImeVlk(hAkV`MHBzbA-#+K3N z|5OwH%jwhZWp8Drso78nA}UG_#b`3TIwkmESCt_p{;4YOJG*k5#ae|r!%76=wK;=& zqzbY$i(7aC8;YIOV?qd?S;#Lu7WJJkvm?X_%oOWU=Wb@aj(Sd4*7mw=9c}l&Dcc8R zC*~fF9#L@6E_=iAQz*XC>@YGA*png5_xFbWmhc*D^Ar`n+}!JPOo1O@@BhI`BEJ)E zrqPRRVEm#82V4_i8UM(^Wnx@;t5-Z!8G(K@{`Nlhs7`+q{(0aYi7>E!|Bqqy!NkpV zA8xTt3-s#rt2t$DOLG%mq5?fl6}<^^u;-uFWTjzxdw@Wuh1(~@f;j#D+L9u6Z%B&p zEW>dj-A3Y*r8Cub%I4s$m~h&&So2ZzK^3X1R?CN@WTy^olH3pk(om(sv5f0%dtKm* zKhwbsE>iiAm%)rk3y}7wM!3_yw93xS=N;3hzO84P7O;#|JJPtWkq=xPHQ>7foLoB` zpKL73<%`qn=@M_flsHEh#47?#E&?5wqW;u@I)dw3{;r0rsNv z8efwKb)pUYsD`Y&*nSkJ8?_;PSvZC}v8RZ7LF;1g{4$?G+?v0N)zHnkF|Qcx;C_sJ zQ;jq8&G~4Il)qe`_{lL51>) zNAYX8nQRU-utmkGqoic*{?s`=!@SQVju!U~i!51hbyyh5!-0q!DK5~eu~DP=M+03` z+3hl31Erlur`WgY`J1e@l@AS1k-muF_C-zSEDqIX{FR-MFJ#u^PZa)BRr%Kp?!$P( z=Y7G)^A!fIVQ9m-@QCQ)We8NfaEmR?5Shy|9Jxzk2b;4{rYu;Cfza~y}7rMP>snp5ZSKrjTx2Hqvy#mNIQF!03ByRxjUMsCl z5g%3%@N#QI6yTgHi@W@7`--)wa*ZT;x|?x`YIVjvoFF<{73nDHrwsDSi?^6!<@#l&N>y~wp&sw7QSiyBPK*-lLB~OBaX~XrfmAVBy zTQ&jNPrR2bqiHVddd-I$8t0emTWBjQ>+k-enI&;Rpo9wOdHMK$pn~v68iWu;GvhdFzN_OM2xFmyEc&AK}VF!*Tx>f9(9D z0(SF=d2r5k=Sbs+Yui64iGqfPo-FhP)S36z(6aX??H(8Yi!=_;Y^zc 
z$_jl>u>CL0Og{UwZ!_f*fw-lcb(<4#dx_`=G27VX#tl2Y48Pc-len$Be_&OxUr=Vj z{bbqhtTJwK;EdU++(}GimCQ`1Ly_C!u$~K0unpB!<4Z4sXDF~Uq1NnPfOI(@eFd>g zA>gN&MC2DNY2)Bs+$^;5#lLd=I;-lHWuL6KV(Aepbr!gh?P$R=J^ceSIj80%a)pXW zai0S4a-`d1uGa+e_$=4jdUE=7nBO(k&YN?_Hi%bv(<92Ap& zC%DAor-P}57E0mbKY#;e+5HPiL3g3)AR!tq|En19Ydt;bzN7rXu)8@3V}dlg0&w*P zd>ExPNu8_ncR&g5!sTk^E^d4~4&-pR|E^v^gU^R9h^jsdZmf(&bfFR@)H_MnY4Vm$ zJ_tE}cQ{7siKM*;6A@*!xLqD{Y?<%PgE2I{J`gFroh!S9Jh%Tc6Eu;jir1unig*K zs$Uc7NF)#Zy&~9iJDtX#iPcn;BHi?-9&P}&QpMiG<$h9@4aAIbg?W14fd@3L>a?1h zrj#z~1PnSNZb%pj@vDG&NjX{K22v&y#BK;#-zhw4=2RX@i)%=@9riZp&^7v#0CXHT zHM@6PoQd*J7dalU0oO{uv6rD$>)W<9R2{hxk%_wC^M(C)qQ1+N9X`Hy-hbhy{LWT9 zDg>vzb#tg^Gf_6*lMf1n5J_C?DIsO|FF#6Q=hq8z#q+chF2GUj1_{EIsJa`cUJSF@ApyOf^`GwEg zGVaKOnxT9=1TA3Uye$YBqP?ALNOLe8wZCq;G5Y(u~c;^P5r;GJ7y_?_-Z$}V-_1A;Ef(TU1! zKU|e;XT6E`v=`CZ8EYtfZw!zb1sjMXW7`Z#pZo#1NCL>i*ArC^Onsn#&nffPGFI$$ z@l&=s3Ja0mchi3U%9>{LM9R;9V9oa;|II_?`VX7z?-DV5RwTNVIE9FPl}ui1E(1kl zpB&y?Gv8v8LdGc!q?I|;2-=%H%gMlo2gi#URcc=#U*l)T*olPVH-v@Cb@RGbx6>sh zkmwYJ;FYWzPYn0mY$j5>L#uRX@tH166`7RSa(ZWpn(52_R%y<%R^NLa-QN0mZ2*GawX35RhwRGr9 zaa@w%+D#&M zzG^0~_yB|@T;98&tF+XVX+yEVBeVVbvzI+-Kr_D2^~rV`_&@`_9_gFkKUBXh};9GvLdxmL+e#;K_3f<2w+y-#(4K&lp=jb8tCt*w0 z56`DDS8|8H8A>xE$4CU(V8ckC9L6bbqyixMxyPpztZ;d1!`oY<4xduSgaC9epKCeN z{qZ6>mI1O>kI-RJ2U-CV@tKCQK+tbl+K*29%jxzDYmAaL%{vr-m*V?s&}L8on7Tz@ z`8|l6Y60sOS?hl;uWU&gCVf%Y)*B(sP-XYSS#t{xXdUaz{oRkaMMfl|bT=}%xqTd_ z4gGe~!4_DIAidWu87L}qRGNn#2ha6`7`n&qEb>HGJ?{Cho-rSf%y3zq6oaFxL`q>+ zdZMr*fE*Yfv6Wy`DDXE|5pTP9@#|}(&8ZhfN^f$B%|ej8z-H^MoXh^(ANbKUVe25Wu(dEFiM6|KokY47{@a`j)Zlp*LM;DRTfGmCaM) z7#W*C|GT-kn(_MQIT>~b^$(}(3Q9&z0`Xw-&Xzdr;NlYeiXU9=sWrYfZFXQ{ec3oSd|J5xWxXZM zrX$SO(MKYJ0pniM3~OJ;WT(*ydUUtg%>FJ^bE1RWsdICw-Pd8T(uJYhJlBzmZAsY*!TgKNv@*A$hC{QJRd4WA+Tv49pn?t+R{s9|iZ~ za2|~Ro1VJa^GYml73#BbbUi2Kvpt^d*;8Z(n`Bedq+%gH1&rGu-{}) zX>g0)Gl-D+PKoJqhr_&|vr~dFb4+5ZGW&#Rsf0Cmb=)CmOZ%g2P&eMi1SCCCamH#4 zh9AthN@GG5;3&15#6_=&Y_fDO9Nx#JfB4&Hm2*;x9wH;Ua{`0Os}csRMnn{{P$u0; 
zQbffI;y9H(PRlcFGs|uw)qo!=n*Kw=STX1H*l>1yeZFgw4jCLOTm)@OB-{7f`m7x* zm{`J^dN1e-K`}bwB|cxo#$*Q|D0J}dsU?5YG zftIcDx%qNK;A`{-e_T367Tcq7k~tsd0FFSND^@gr2ieAu=Q+L%-{xS*Hes^vp)lEo zngx7RM?Qqga>)-v4Wg$`UErozdznO+(7Sh2u zlw&9@@q1p-9LV%H+CpWRck#d1-HS-W&Uk(`2~+lDsNcxS0*?R-`x;WfxXhI@#t?iE zPRZ}aC!yP%{WSq{EQ9yUCcB304uzQnereEo( zGjMg&t=Sftz95c6D=I;$Nn^gBiF@By zrO%|CaXlKkCbGLU=g-Z|T&Z*;$lw*3S~Fj>vAJ*$8w?Tbh4i`9H60l)6=#!91r! zN|Qf{hs#%)8}cNu7+nU5f1u47#Qs{gv%lds0BtPckMBLvHy9<@u`Y%psq8??qm)pS~E*x>7{=f}yop>}8nsLFN#zPP69R(y}ba?wj_pn86iv%~v05U%rBRVO#h~8sh zXd}BGei6MQU#Z^-j4O+L`ylX&tcn6CI_p#JDO#_gmakfT_ z>&Nsge=-@Tv_IQ8-7)lao+#NsBZrc4BV6X?x=@3bY^VdlfA(+G*!@2-zU9)AJzU-= z{ScXsQ19;{GN+ctT*QmArvgzcu3r~vGh0{bCiKqD_dHa8iq9si!Vy1$AFg&4B_3Y2 z73QG(ey>=Gv4~JHYjreyBq5D%!W-VTwjJK~3w^i4UlpW@Xi3&9jI{7lr-jW>Rp@7N zjA(csxg=_hOWT&Es3YbMg3hH9jB$1RC8e%|r8yicrRbS=pHGH8^&RB8L=2i;Y-_3@oa1&WGTJP{;gk5 zhxbC%GI^VVLRo+vnf*sfK^ooMa`yF}qTEOCQ{z|q6N*#ca;r+dfbH!uw_VbG`K9vK zw&k7wpa_~ZvgZ-&h7c49Kntp42=?L!2yk}i=4g@ndmJL@99WEM)Z-m>u$9u_+dQ9I z!L)eBLbD!GO6zB}j51VpNz%0bUfn5OvxzBrtr<9Wo$L3EExYh^3 zkN%gk`fAP(Qr@bGzJpXjDuLljvmaziclFCJ4&Q%zL(fa_2$oz(zC{iq8L%bfGz%>T z$_AYxys1%`pRV$FwlEa#F5pmcUQ+zXDbA9M}6u?P@uTvR^M^${Sinu>rHxIF#LtMybT@(JH_reT7hZ$E-C$blv4E{Hc z_MSvEOk?Lbu2Yg`kI;_QfH(o)W4Z5w`Axe?h(Q3+Y)sbB)^7m5kZWOwK!NVrWvI?0 zmi-9oWaV3pqstZjbe}l-&^gS*L;VCdmhS;H_8d^Fy`r!-gi`SPkB&Xe{G)&KH9U6=w`$htn zMdIyOjVV+=yy@Oon3edgwrtwqc=|2HXX`bxs&X=2-MVw*NFD3oLX_dMmH6`?YYUQh zTfpTO8#Bqkj-L$5xHp(H8b{-%e&+9j<5xd$LRjiEnmsBaP_L)VpD2Aa+r(sPKEY!4 zswnVK4{{$|Os8BP{9Ns>pD{ysv{xkYIrP3ZgbdO&Bo#4wBlpfQp2|mfhL3mPXZkor zL?OjjxuE=Js6+cukP<&8=((O4#BRxgmy-2QuXvVP%Pd#dgl6-#pz?EoXQfnL9)YcP z7}8T~?!KdMv?d&fllM$|kro?H&XW)n`hE5;i>W&_34=_!r1y9kZ@Cdmo(4bhC#jy> zF{0h;WgAjHI&MXA%*Xean2L!%EtK6JynfvoQyyvSqG{zIq{NL|@vUvs#RGp1_@b$$ z5bs^tbCo@$;qbo~E$bx!L%KCG;~gbnrI)kZZGv|Bwy zy{5eF`a{@Et(;*nr-46ew|Ba#N$AgG3-(bgHmfgg$o-vd-CpOqDxDbogcaii)~u9A 
z9m>kXXXynBzxy8gY38g9!PlR~9p%=lPQq%yjuN!OuEtJQ-1q==U{tPWJvCn76qbp(UzOHkzl~!H>2(N=wpGO??-8(?1Tvh>x6{F=OG|qOVfIGuqev=_(*SItCn@TzttpJJ zblSprNa%s^Vkw<=bB=F-=deD=c}w;()-g2=;ea|xef z--$x#@L|7Ttv#?iW4jE{p>5rA1ha|~untSz^oThZ6!LAPFl8q| z#cbGwcQNVLRckvID&XjiA)QC#3Y-=GY@BJ%L+L?5EM~s^Lr-~T07(U@FYO5twi1uX zTGhmEN*L9B@K6(#7GQ84msof=%9~q}38KW_@i1*F+H3L`blQ4(vdy;epg-!9N|{Sy z)@xo)9^2bsU-xon*JQ!>)MgfR;$Hgcp@Zrb4e?-fw1a9;l~+oG${NoGJxytW?9R#o z4?F^};1Yi95L;24c&?AZC<2>^C%-*aSDM2$gI-~a zR9bj9zgl?R(C?@vFECGnQ=RJT(* zL9DBk!Ux*6ru#^<-Pjb}hDGukZO6R3Vfi=vjC9Ng_H(@r$3a$4O|U$$7m~x(_5bGS z_5ZZhk*EWkJ&bVZ1~uYz}WTR|L&k>juJ4l@HowMdMk5( z8b2jV@KQT24$npQI1|Axc4ld-GIWHfR7Y6$p`7i#qzoaoRkoPxa#n!cQ*S3vm1oa)L>F6HU zxyXzo8&Wi06Cv6JueB3Y8gEZz!q%tpsc$X|@)0sT)Z!r3F8{wbB7j${rr$1H2{LR0 zigKjH(VlHg6Zc8zw}*3!?W#vnzQ0Kxx6V35_f)h`M<~0V8aSy>Zz?OpdEfAF?N@ot zeqtjgW~}T7G9@@4!FJ5j_$YXN^=|PLRbwAUBOf|0A=`Fo7*r=pavL3`wR;zNirxV~H0*FR(^r7d5@5U#GvAyP$PFozi(ARMFlr-cTocJe&W&c7#?W2=6AkZm ztjWHpke{0ie)=z!n2<_d)DXwmP2+u-k&s`zuaVDkQ78v;8&OYrNAfDSX(vSkb(*?D2jO5-QdPVd5cOvD48C@q^TrV=Hg4#0lc{la6*jMYl>GH#xn=S|*U z;hV2ts5WP&7*wA=FW>|;*`u5~7*X^g%SjB)n-y3r+(C--RJK8ft-0)VKQ7fIsX)rz z3Jx37B}xlUb~bu15-?^TKeEEcTK0_OY=}7ECY1AhaPN|#MM}4Q)*}>rdi0Dxw6SE| z20t5p22(Gx3rcevMp_b{4y1C-k2k+Lsu+%ve?M#A#oBsXU<3^I_yuGMS)MUBX+Gz; z!eG}f;7Baxb@-)*zi=$|I*FqhtRw=XwT!wlW%W9X`0wH(;l|E`a+h+*WHq))9qC-} zF~1%07R9XA26WUHI*-o1QWxdpmTE|<8kVB7d!Yv&yE&4cQ8g@NDqqRXh)A}J!pMZYOpA#3mA<~)t+%ji{Fe1Ut>=SDnY_dZz^E+XP{d^KPHOX$e zf#sw72~^Q=;lmhvU__VRNC}`rPrj04S!It5gib_#2PFA zM2an6Yyg6xwPFa72~Ql2viW*9CoAda+6h}i z-ZAk@!q{k&1q)8{0D6oQ`DW-MY4z<}4Q}K&!L;EcqeowqOsW{eGI)yoVajBfyk@3p{2#Uv|{1Nou2gZfUhJ#oCTKpDv z-&+Y7DmDt>NK1|ITLGic3Z-94X|7;=%HyZg`2ICq_#jb8oPv)*@mwtn>^ zf13GH>pnWxQbl!nBsBAU=y&&W!2!V%>weOowWijPXckC`)2R2v=0kbSWb4adeH_ST z#}n&?kj_G^$03$l;F*(ub?2s?B70qtUnPe+r&{r5E+R+-zh>}vVNq*TiTun@D9*_& zqZKmbI;4OHW;9#&E&in)6mOc#FWDbWTM=S4J3wx*(fJw_vxR=R_#0)9vnbcxA3yLy zESL1nGBpv|m|s=S>rv9i-u>^htpT~qremvl1_HZLbT!jIq0=kxecUm$Bgnl}ib6J- 
z^w34~(z$MVL<3~7lr<3bJX_kbQU1*I8JQ>phjoobuG72vyHRVc7kgq#ew))pDp6*$ z!@7VEUA2ck8MQ_a^qy7I^WXP1LMY$6>Pk`pi8oxn^>j}MvKx4dvH-&LNbJrQcWump zS45Bk2Kc-1i#$?AdDj&ZJhWG^ac}x!{9!Sc|1;gYr-ekD0ek_%7%dsJ=s58MYm%@} znXeaheWI^>bH+mFhlv4@qH1lQwVUe)rss8k*!GtF`pg-Kzc9FdP^|y3euwAznS8~; zUm?gRpo2zqA(}s{&6_(-dCGU`R9S^IE(?@AujO`llE@Le{DWGr-4a0+D4N$klsqq@ zc#hfQ_6VMAu z^t0_tJ%4+6sL6*r!0A$Q@nn*u>b}#qq84TO9p)ihPXyj;r+i6iW;4)u=#*)XU9!0y zfr@cDbP`tIs-3k?pZ_t|EAstzR&<^Hl%1LV@D(C{{;leo`}KCFAcaBnw)zEmk9>mP zFx4w8?i|~gYa7IXGs~3N4RXgvM#2%-265cwq<>gPOnDS(0XcDD5R+;PR^o5? zBI3Wlq@&b1Xks~|^V#ey9{iyN`td|Y6ewzM+)7I(V-dZYjXv;4&=l)>l)+jt;D>P0 zS_X~h#u4U}eQ+N&chUm&Ud9$>|8cdhYO^Yh`3%Q|3{@(2t|;ZS!ODWCBN?d5vZX0r zpy|*Z3q7dgwMiuoFnsNqYb^Wc^r0IWGNd!bA6z-Dr9&NT++E|TB0!Um`3BLdYJYIt znQrLanKlMwV8z>p+5WmUrF+lIx&KZk)tBCOpb9Bm8ye$?G(rEMtVE0G0WTydiLnKLQ1Mefdup&{ARh50h_g1?1h z#r^w6@_=u)+o_c8_p^(hiIxnhN^uh^C>E6=;KZ&v!0|2wcS90 zFZtiK>{y}sq}W(bp1wkI5IZszyK}gN3KLM&K*QCL2FSJ+KmO4~XiRGuvR>2-U70-n zHV<$WUQ78ngnuT}PrYDZFhZ;3B$z#ZPzmhpM>gy;i$;AH+@Slew1j|v`>DvoiR=a> z9lzqUbYHlj2MFp&$g6|ddZ2^RzsK{fUgj13x=w-S0bAmbTd1klm0lgk%96 zXqfrGk>&r=T|7S$M~hZ?7I0V*`ie{021F@28{ztbuEqq#!|=9QBEG6DeQBMz7h79T z!toUkdiE><9p=WUOX6u}+ab~5W;^`u_847yB2;fJz%WCKo#7xfCNy)}UNkgzjOD!B zpg#U>Oez=PTWr3@JjhS4b!dY>cW!a7Y+4T#&U#B>u zO8!zG8T>2t5b{BlQ9t2&PINZq=||po@}DC7H6nQ6O2N;D`0FNJVz9Q(peM( z1r7>~u(>86h}Ckbwxd z$=6{3k}~UHcjcQOExIxNP^PEHg?1t~6ldvtE_+P5+~NwQmEwN`E#4RE7SKP#gNw79 z-J|fLuIy}5fy-F-{cm%Jlar?buJRi1HF*s*sJ_55bI8BSv6L_W7`dl#Xzmd}oQ|eG zi0!KoazE5?2a4_l9TjbPoKw(fkQKTtvrgIv)Bsx5IMGfno9&0TR5?|%IH6AcgPs!R}D?XAclt6ectW!u`6+ytb>j>K!?*4)QFS&MX!TT4v#r;yL z-Ag4osk}I}2=hZ6B$uqQ+WU@=5OoSe68eYxfJedSzDG77Y_)fRHW6JFMHLU}NcPsE zc<&blOd^3TRIuG`(y`Yz(X2%m@aVki2eO`bRBv2w1dC%M2-&9NQy zFl@f@>rkB~P)7Xl(=Wc|AQ!%2h5d>=s@P*TwNWa?a>IuE@R2XewKnzd<6h09BR-&% zh*NH0?A9H4|ay9zcjI&`)N7BDWa|j5H46Cnec6un~T=K>T9M z4fcMYXR{#xosYyh!Eor|#`CkbP5k)^&rv!VanwdfH+u_ZQ64fBd{u7T0>%>YSdHLb z_U)~5FmN<-adZH8tGj0b_=$fwenIeY}1D`jRP~Os$BWv 
zf;@L3IMb`w_1G?^s@v?W%ha!-^GE0As>s3IldLP{)z59j1UY$^rxILK*w7^S#h`c##)yT(3k;!VHbv4}JFBMg z-l?9{cKwD7JfYSwxZ(>n8)Yi7XP9HHy;n;SgPyRhFSVG@4)-9$rafU%$wU|Nu~`|S zeaDP%)^~3hgoLjxc`lVr#-uS%*gh&%#y2Lz=YYgUj5EPkK^$@S9T5STm}Rkh+X2pY z?A(fPCG+D+TZ5K=Qks;r1n>4yPkk`+dTKZaM9dY{VS6y3pWsFyeb5vmq10cKUwnb9 z?mS1+=iV0jtZ}2s{bdWdj4OytFI>C7&MqRCt$K1CoOP04^(*%(9Ir&uW6$3eGZXXu z`{3LUDgWT`H}NTSL8Wn=uDDOmOj)U{&KnkqmXWPE8O46H#Cgzi!iwt!(h`iYA)-2x@vn5NYS9riT@&{rWN1>`kDx3c9C zul1CrbJ)g-In9svJsiny>;X6M2f0H!I!7PPR~yaL;?&UjMq2*P@K1by%G@XPPTz?C zFy@k+DNf7Ng}dsP7x1xrB}#6f2K9Bi-hQT-sc*u`hetPOE!*wj%qC&iZ`0>j_4Eju zr{c@&5GJy)PR>&f>tO|qW`!!lnWmV~^9(B8O+b9@in6#VUbLwmsZ0wdP5o}>l1H~* zY~b;?$-fIq=)qrBqb*Y)G>M5NPKkNt{CnNpwAyzK5%g^1iwH^e2LB>tyngg@aLmG_$nR_OwM!xI`2T zonP{WbNfTVN4W-(vf-#f`Tgu*P7!V7v7b0z5!KXU$S3-n4^ss#+E!NsV@h(}_J3|~H=Hhxj> zoTKs82$JjzHX-ORp6h`>D%A9aZIsU_K2JY~rL<-YJTE*X;NjdP0wzy*5q&E#Qun16 z+Rm$n`WYuxUpp~;{pIe&aSY1A#%k9+Vt?(^(V(Q-o0CWPv_S7;=lSQ6{T9FR+>&sX zkkJ`8d3TRuEWQOb*6xoi^d5A>ZX!5qxKBm+J^|3kKEi1&q~i=OBi5{Mx6R)1i4mz4 z^57r5awT0WIh#cyiGaC$5qKs+y1gQ@BfeVu1g`P)yDvAMf0^Sk<5v#1f=mEaf2+U{ zC}~Y`E5s{3j?`gbE8#rZ&)yN#zO=LZd%3F$GzVbD1I(ws@9BCAiM`baE%lh$%1>W+ zkt!on`#LLADZ`Ruegk5D-95y$X7W(CjbwiAoabWydwy=J^}#%nfEp2E&+S!JNeM`a zfSV%lboBPxfZzX!x}5CKQ4S+DAk*%fHg zR955UP?2UD{^X2R(B{#`VZz2}b|QPmzT#2V8Bw8scUzr|YTS4rFKhZp61-tNkBJ z^zFtMO(YBY;E+AGjV^bwGxQHH|0tOsp(6hBB%X0&s-$4cf^A#f3OwQbW-|Ar+YSaBL|JKU z5+Iz;;iIne4XqDQ+r7SqBd(dRW$=*27=9?K3?Pu-$a<;sU34oF7+UZC)pOd0cwg7c zTmoaHszXIqh-niw{=8@?iGUw+2-x=RxrW&ysju;W5C^(b7?pHA{jX{FzQediw9$pq zjQGC%)5!^>{qPCX$vWGKwkP|(TQ*N!>SmUPM}6=W9&J@#A|$QBWKSypd+pBHg(GZ;Q|p;~4-LT0(~8E-mAuL^kL^R?&K!f&Vx;tr0yc^Hv*reIe=X|(q0I?w)Hl@Hmmtu3)Q_!JP=qAD>QSp_8fAO4H2@Jrl zX#<5h7zvI}c@bxIN>R?LM2mUVnAo^cWBK5!9hSP4uVDT0hjBS>l#1PS#UEK?vJFjb zTh$&At|9$sN7HZ(L+5vuYq|~~nT~n>KDyG_?KaB~A?O@uB^-t}6nLm!q@Qv1s`}Mg zbgk=E!olJINn55|=06vWh9YUb3mz%j3qjmn5OX@&E<4ooIGE&=V^ANQFIO`70$ZQ& z?`NMKLVh)VU+ZrmWauwC?aaHg06@}b`H@APn$CImT-%|<(0At2B zfZ!Kf%BW>&qoAVy$eO0E=5gD&jp|~RpQZi|hg*?b9KlRjxK$$#R 
zMyj>4(&Z#P)wkSi7##j0*Y1~&#g&(j^X7|4DQN4Lg!eP{UB?bGpt4r}*VX0TRsGDnaJy*KXi-6TIzLx*PVXLY!y|5BO6?re{~m>z@OG`3U{O~}IK;yZM4AquAXw5$8ihPe zVEW|Ky>=|oofZkEjQefN<}_KL?lvAXS^adJ{v|@jauER z?Hou!N=oV|)Hq>tU(N;#36d^==cM&49H`{!IT;Z+K&M`DIUXZK9sL%6<2Y3TdvR}pKWI-6 z|B&RL^T~`L-=%=``j&)(Q|p4Hwp`;39eLF>Pdk1y#$ z=@hEy_>u?wD}>+*M8-z=r;9Aci`LW=?C_zNO!&H;f2)Dhoq~L)`yY@V@+6zj(e|C-~jRMd7T(*sVcRtLtHtCD%q$IMlUWCD{c=-8t1{G zy(5QZWcR3x7j4C}5b!|Pt^RKNyiT3m+l1r=DFx^NH);*)E59)mt?O}xN1tB&_yNJk z1y${CQIt(Qa)*lA7g+zOuE8q&NQL*qZ5p+k`U^X(>e?4rw!;77k6Yl0R&aGNWu;J! z4MDRil?IMw3B=I$LP;*0>$tBN_AQ1)?Yow=0gQ2@jtl2--A``T0fq+3Lookv60Mgf zO7d@>1qXkJvg9Wmzm{v3ZwH}PMjg#iTcO6E^wvG zH|CJL^%8{c>qf?f#eB+l_Jx3gFCt!-@8+GTV|@W9F@@%>y1KEzJN{TaE3yi&G|#L<)OP{zTfIykblcSz1bW?5m*i5TG;08|K zz-R>nlX*ejlAS{)3^06+d(?y^-Ags_AlpX5aeLk=A+>&?xOgJ!Y*k0u*r2tpFW32` zl6Rn1sa@pvnvAW#nwyhC08Z+r`(yn^>ACBCxCnkJxDto0zZk~}dzx9uQC2^u=UMyj zL+aaCa?9PdCnu@VoteD%qFoYx8jys2+1wG-66;vksCfEEwDDh^ z$r7D+AD0p3JyEm}0>H98-{sQJ7IgU)UsbJuhLhgyIB9Y$o(^2rA-a*1s-tl>4KHIi zs8-FE)1{t(MPL0?Q4_XJ3B~hEa?o*=-ztLyM6GLTj`a^~Z&qB&);cT8GzId{Lc3AH zn00|KCKjrhUtnKiq57+8V$&03+%~{ywtE9S)k(9T0WUbI_KVu%J7@2UmLYfBDP`KP zw5tK^C3Ts4dPO!dm%4vpx@Yu8#=rLA#ZUU}5DdevO7zO{?j!!__=&53C~xkaL5=$Q zixP_@X3uAz4JXgpsZ>UZTnq`gJ(mck$HjTmdkFuB{E&)S-#q8rg*@#7n4unjhcP>= z0t90Mn_@dMMt@v|vGG{&$m~e~@#zA{XO_?#Cbv^i;SV2j*SgL|V|gQ#;ZnauKnX1MB$-WuPziH7>aFO2CF z8$vhkYKEYJ7F6?+W`YFVol73_iaDu@K#zq&#e&w==vk}pQyHJ3qew!-&6sVq8xf2x zdVc^{+;L91x#Uxg1L&&`Q!%nXvD<;ry*Xto+0^Tfaw9y0tK7wUtRNB&6w1FLGk5v4 z{jrsB8_#aFb-Zx9uG$01a-WgLajKM&DNq->?LLFMBsTK2e53hP5!UXQioM9XhtxJX zwsy|?1DIT%A(Nh)yRw9p8#`-y^TYG`LbyTEx)FrRM;9K^*bLGB9R}wRz`!)j-^6<> zQ@b3$#z}syf)ZYTW%DkgfCXbRseyZArdYVh84IaNx&~@t;0QhpijJuZ^Jq>57o_Q2 zuaEy)!mPFYE1uzV+e0VlH0eRvuk~M-FEox8%-8@+?`-{biAF|b%NvV zYDq8@cGonF523ua7pJ%QY?()F>g{5MsFlTbn;g}Ro?e{lZ6^EQA4MqU0ILhJas9p^ zFFE$HBaUMv$2L9_%#MCNj_0v9>|C900WGq~g1wbW zTHwU5q!*KkgdzWqR0Bw0V4%e!9Nn^RXG>JMOS2S~HXEj8g%*%gzM%B{aoJegk 
z5Dux9ruz#zt(BgY!TVWmyp28mhE!urm#=SaZ&keos_WJ)l`(n2&mGmicMxqdMO8Y|+tjhWGVWH4BR`Gz{mSvKv4Q)2y)KS0^#CA- z=BQv!A#Ov+6AynhUFqTX>A_=?QxPIf@HYHMdtm=>Qvnumk+qj1O{Ow3CHnRoN0U{O zy20=@P%oVIcrV|cjAn)uWrPM}PT$}dr>vf;)Ynx-Vw0CBhAeRR1>fiMKWZ=b_R8o# zi&>atP8cM^tU?A&-@vsEb<96%#Ld=P`YSlnf_!7dWBGg#d=B7Tx_a$1S@s)RZ&ye9 z&?Vl>T_J3(m%|`tEH_w>hzjzQ=|NLFYQ2r!V`*InINwQS2IGb!_)`U3fTTqVmH}tC zH478`ztmFYr?Rph9!jQp%@;ac1$f>4KO`W290UZpvgJ7TF^i)8%A|eUIZnuI)V27e zc-nzeM#0ZBsOBAwW_BRF*B_ox&+=@&WzL*q30=ctF^7(o4`t~3LBm42p^n!1R(~fKK{kDkJ;?8xaRZn%eDj5c?4#QMnI1fw21CgOM)A zY){A-i%w|hZMY@MFM2g>2)Ni^y{O^6!JYq`J4@BNo%pzCncIRf ziQn=9h-H#eK-&wHN7jI&vQUKyK`p{D55D_+NYPJALVlR+XA(^I8<({d!x(;w^JdN3 z_W!LZ1jOiizU4-NmE#Ik#X(Mb-p{pwwLHj|Z%`Z<{(Ljtm8lr#*0U+5F)wZ6<8ti0U^j;UtI*R%lBGGaf}^CyYs(1n>5(uI26!iHt6{zf3nqZI!H&eC~O<5 z1IsJ^?BI%dPnuXi4*56HNS-X@h4d5(80E4dQ{2m?_HGUZU`_{!V1@(XTx@!W3N&F> z!I~4k!e)o-WE91ZatNCV*|6M$phl=L%67Nyr}Y;wI?LB=qnv8XT#v4Ie~KbgP7MF< zL@{)k$%vEFYN=7nnaSVl-3#K7Mk(4qcw??cjKb_jY`JJvlt{q67Hv^ZG}=T{48ufJ zf9qGhpV2Ix!GXK3r)IRSAvT^b6w%ei%_LKext`;07BHcDV0)S zWIaq)qbvHcEt|jkrR#FF?o9}I4Ov0#OGlxO-^XP`utUV!j$6pvg$!DUJ2}yZTn7)skimmfl^5{yrXHZ^FkF3r}|ETed;WuI^#QTdGiGcuO~9e-efRT z%YFGLannU3ADm`O#HVwg3@6ge(GrNq-Im_Vw)UWuqekISze@d zx6dR;`ITGT)x+lS!>$tSgGF;QIJT5o*2W{DN?tpL+0smyf|hu^264-fE$AX{r=Dx~ zKWF*z7-@EOAzA!L`L{(KL1S7elgI;zr^=?N@LXgKRpNsF?`xP|SC2$QCWx+6&=0U_ zKn{`dHL6<0Gce<7*1_(yk~i^j=+T!Ms|+7f#x*SwZj14#3h%i_8V#$GE7m5>XS}z$ zxj6t+XDXgaCy=GXNfuDf$~n{_Tg<9=#E}gu`kZQ9zw7ixeq(#vs?2@V22e(TQVM9$ zyII}Q56e{Xj0Q?ZaKGmvst6sGZ3)~XB0cGPCKI(`>%Le-rpt^Z47e=1+PR_No|x>c zvNNE?APXVYo?4?ie~La5C%wO_D3FBl<*4}Q9JNRVa%&(>4{RA}vzT+YOYuSUT;iOG zdqD#U;F7mrR($F-c1qs(I=`c@#Y7kXg{>TceU^67^EJ^A-^RxE)Pyzsl>Bj?&FiwX z4oV;Gs}L}Z{b~50jBRED?!NKt{x$m`%QEovZ<_~2oI(y@?{(Yv$ebcdrt}md%^C7Y z^4pbtn+$!@u-pj&hSv}?4O;V52p-DDr4eQ5bKOGhY@$+6T&qM`s)XNfsOn9+^LDaRkx+K<2O7;gQ zH4|EQ&a-P|&|5k$@#PL4PuK}8!?_hMN#6tKzgJES*P71MK62BGm>qyRAc*J&Vq_7} z$yYnASgsDgcqIB84cjL;B@&n@iBKYour*Tpx3T0-OQBCPc;daDBbxHx!(TUOZHI(tjcN!^;Cf9>gz 
z*%MY^p6^+HPPEz=9AgRLLBvFy4z4}4UP)ait4qj*Oyg!yR46g^?McN-l^-*TLH$J_ z2Us<8xz3K;9MpZ(#0Bm@sj3f^Pe9T_4Dg`7`qjfJ_lLnUoL$=)q}I~j6BH^F*SiB3 z=W6!e?$->zJ(g8Q@2?_$ZK0S>Bt&$b@pUlw`&ks!T>}bcp z+_G>no=h_Q0|A};kev!$b%*)~buN15`ExVEl}`r+NZ{fTJN}N}ys??Ky-nV{H-1=n ze#m>QB1a-`+2|v)$ghxnj!QF6(4Wxh7}G|q zE<#-LNs%3$(rN#gT|LyuFCXh*0Ss?KNv9F0g-_@_dFayGtU>9nPW1EIP+`n%AsuiC zgJ=RGqnXaS{oc%VCa2-2u3+t8xKn6-TRJok{}%J@x;GQePF?_?1nOrmKF_~ra_X0F z;Vsk|@koScMl{G$GH80BUw9)xMT8)P~leJ%mcqP<*OntvHX+x}29ZW5oBf)CL$#%vTvcDhm z;B9W>_A`cK2XrW~#t8IG-)mS>yWwZlWZ#T>LEhilvK>+i4CaS;Q)TqzLy%op^cI%m z-|xouA+twt#$a6fsfz!qBqjd^3hzu8f@z_qGMb<6u7v2$cw|uxh!YOAfXT$;bh0ph zsNZ<_hpH#;ly1csM^7Q5yRT@VC`jgKXyfMiBDLA@_eG->{;;5L62b;IYmjYMC6?&J zohc?igs9j0;>_~C`^!oax1S3_=)pM#9M2B}sv{kVRJ5{!alb+`@kSM8xe@~M?m3aIz5v0RPFHQL zL1#+s5nssebB~V?hZU(VSjG_^*i$3f=ze;!%UBmTF712da_{yOtS{=BGIgACX8<LsTU9jmU|rh)B4{LVsfkc z>zJHgS7?HpY6Pqs|EVtpC=8mz_RimA5h3~)!Fa7F?K*Y$;ABF#n(+;qJ19hg^rfLe zur%ReU5Hi=Jd*T?G#tfdG#>%hQ`i|F@B^lJ_2`+)`oCY45pa3~5%RWRSE0XhbHmf_ z+xb~(Qw&}HOU7@cq9>zcff*SW`J`Yg{gvV`-#GNtpWAVE9Mi>&k=LLkRC>z~JY{EO z=thY#hKR`$;&s#R<@5Ge2C8FLi{DJ|G*H2H(FRdLDw4|j>QhARaPKcHq)MtwAZ{JWiIn~8=T79g%z)X;?D)a_n*(~n8g8%RkSHvNLfp>0 z9eG-TZUvPQ4&(H0tfumSlh~h3dh5d~4CGoYB2to_Zr1{?Ix+SxA<&uRs6u={N(s!Q z<2z=`8-ssR){ig52lj| zB$&+Y#*<@ITP}8Ilo;VXqFt;h!Y2u#`~k?(XACx4Jhr)YH`2!}1E{d-uctzHspJoh7(iyGp+G>Dg>GKk0B&p^+_HP* z_ku7N1)daE84c9otUJEPbKGtvde4BOHY^&A{Xfl>wc`Hqxcw7=?+^(@z|B!f%j+Ij zJvC>dQ`B&iMx)m6pU=lfmaRQs23WrK{>cA#X=D86JSyv7)Y9F4*Dk5(2Rumd1LK)B z=YDy^rWYo23(-YPbwhVGeeIcko}>I$*=ORy$Hf}$xgc~F4`yD{kX%LSpSUk5Fm=WX zG@4@b6mE&OOkp0MQNw>+QYMWHh}4$W)ErU&Ou!>XbMk%HyuXg;kcWI4r zJV{w78hC`+8ts12>2@=;_iMQfkoZJ@-tSod6G;30 zPhg0dkx!khb~rs-9~EUDZ}8h}6+@_SiRG0mTeD8;FW zeq8TyJHa9I2bq{XNuFuky!hAA{IS8IS9AlGmpr`vu^lKN4bO3zKxWV3=Gr61#83Fb zYlFduryKHK4O@Q17i1=;t}fM|GmHt+g9fhU3_W5_oSao4M)X88&pMw?UB{-H8(m+r zwNxv?qLiGy%wQ9O0oO`+QoXi24SXljwS+8J)#I(uI=s~P?g-#C-kDSigRk;4(b2YiX^ZDBN2v%Q2+Aoj^p7`~@pSb&U(V8IU_AjU@w-fz z4YvJ@BkuK=lzqlst(1Sf{B6qXV*B-r!vhR>b~G($GnpaIrhY~E`p3$WBE2T<#_dnW 
zbsK^2{$|dZ$+Aqux*ik9O$>N7RPaMRIB9Ojcz7O6g|+NY&`8f5T-+zaK`;<={huZz zTwKVt2@039l$Q&X$bi=Mwk_!%pEGqvAHt0e{buxhc!5+qqcsQ=l8-si^Va+Hl-0bS zoBaH9OKx+=>xZ*1TXgQ!=Uwjq2E&i@uLmyobRds0ndV#3mBp&(Lb- z7=DVgSO#MrELhr^jL^&mXR-8&%si&EZz~wHaez!?(5~A3<1G#u0#{))z90Y*;#sgb zJVvj>8W44KJ(}#?cK`->Zs+?S@!_f4!Z8i~J6HxRpv$`B%pf|61okP+!5uB464yHf z?mF*gyrz7xl0}ZiKQMM9uaBw{?|yoDzSd#}`H`lb6bfB8y|+A){J{5!J$Vxr?)vO- zCQPSd^l>|Rd-cuMZ_~F23ZdIqmF< zUUMIqbcNwnSBJLI z_CF=LpQ>Z0OykP0IU8T@?~zdrzHQ7v^s?+N`|G^J=vUTJck=BN^{4-Ydricnsr^iC zPo76-`g?N_GQ^fia~mBQjI4Xfq?R4lmckt)cJXJgL7H z#rKNVB5*y(78u$&hxheUUr30*1+@FG&bq7US8h2KQu=rQEB z&Mz6*Kc^q3)>|ChwjNR;>JvMCZq4llk2dEo+m2FPqf5_YhC%&&##H|%c0V)QzC_pz z{|wI^Z`*p0Vk48vdRVfspmy~qdVIn9=0QX5t};~yCBYH8pg(Ue=;fe~z>%zRg$P@t zgw3={CfxQ2==_oEE)WhV-^-Lan)HyY{cV_80N+8NpJ4*0ah8Jafe?LJU}-Q!-5$X>Eg$JE4yDRwZjEP)WVjqA&GUSm0?+5DsAmeoh${Y__`x#1FF$S>kP1KVTkoOI6$N&%w;(_w z?;WY5wB@qC6hrBu7V7tb5RLfY;*8dPa)9v0TqAH~lYOU|5juM7Yrf3JWDYc@65b)_2G3|icd zUHxjRe`CRo;mAk1Y4>u+#Rgd0Gv6|L-TcjHv$UZ1)SGvd9bVvXr)HTut5ycuXobO0 zd#!#+=q-R-Bj=CLtH%6jj&&M`Sz;#RDr;Ow*Z792Cpi9aelX;^1mj1@;)E*N`{VYb z0|@}C9?obuZVIk9dYL?PI0`biG6gyKdJhFwUlpV*0lbNJ`Rg zwgTd8CaVoEkxYpQ#A~_2Y{B;iKMvSb-fNZ6Y2~?j;;@3`q7F2k-v!p+ zji7uOYksy(f7DqYQYWNUh^nY7#%?$$H(eZmT!xBp!))qc)J8oFNrjOlJOq{?zAQio znIWrILDvSPT#cgWR5oVH@%D9GVgR-K!NO z%$JfnBebsF?b?aFn)7#~L5k%5p9^KWD|rn5ufcTA?ZwUX>L#G!8n&7r=U zn$~#G;;cnAnBa3b1n*e7`axXh3h_rkC^+0lWEH5o~3K+y;sWq$G zZ`9Y6Y<6IdY0ECc&b4%|lhvyoV{?v9cjGHz%jgKXt=3$wN6~c&rcmqP1R#ShLG}nh zvF(c>k*V`;<16%+yBI%*@#fvif`Ypz+tpiTt0RN=mj}Z7=^b>3R9B0T@Rpj9PaRI3 zH2O4IzKfb6%6AcC$iGv@DW2_fI^Q}bNPUd28Q0z%NQ9gw7hNlX|3>o}qOGMirJbjX zRG%c4t33{KTz7}7Q#o<5HcxKkX!usMi~d`-yT%X0ytaUd5(9cxzmFD6-e};WG`(bn!aFKo zj_{Uzwon89vmtmCi6Dqfn}6NI&1_Jgj6vZtg3L?W+;tm4wl$=6koOLD8I`zUgl4m= zA302a`>~4NQ1Wj{GN6_q3TfZeO?tDIaCu~4O`|c9&+}CckF^M3jE z>*nk+t?3V3x1gFoUP)+t>}n{v86#Qxwa##9JfN^x8lv&$!poe<4 z71!;zuFpCfveYZbRV!4BKS1`o$J5fYOE+$qc8aTNe}93rMMYPtjnI-t`$#ia4Xua^ zCY6sOW&z(j#e$-(S;Ky{z{`qv3uy$LQ(v#IpKH3Q|Hq;nH=9{zB> 
z)hR}!h~t$rS`Th9W|Wr8lKs+wJE6K5z#%RC00*^Q6NzqK!VX_hSqxTO01}U#ma>9kEp`+oQ^K4I$g) zxl~V2r=|A6W$@yfHXaN1Iaehy7qmHPsQV-`aA~obg7}Hza>D~UR_{HRWwR%W0aZik zTUh9BJLrEt_Wz8vg5jT*iIGpFjwYB<0ML-IssDO7l)uBLqocI%%?;iN+bd1ucC`t{ zj|YBr0>~Apv8cSHHBDhB1&z16dm}qf;0qp6LO-%du%EBKFB$%2Zg}nfzr%$aCISUg zrgympzdor-u^<9i?CQ(VU>hA-mu)B3zMM0HLa5p`u#}!#`r6G3^F1o(^;OE#>o-Zo zZ@ihF>TXN;+Lf{WPFm;J9}hVSoqK3BDIgRJY?sV+#&7LC9sjwjJwj&?Mek*yG-RZn zl&;&!;oK@wx^BnHo2Fe?F2$kyr~72Ffs|Q#P;rOm<)^J{TAQP><2JS1+pk(^?g#{p z2`c2|nks9Y(|6h4%2L{>M>je#Zr0G-e2l>Xv;x4?3{0>PmyyU50Xg)Dl`!;5siC z6?`+ye4>~RWl3pub0#9*GUV4TPxn3U?_@WWh$skHL!?1^c8C1bJ%*I{RR+BKVS(v} zBOv2ow@HvmvxdSx^m5nx45mVLlB+xk?Db~2p=*-+{)lQ#dl020@7VXAFCB;aiIf_# zt6P_R{Hs09t2H0oF|b?eXtzQ=lh2PAW1TiwAvEj%o*ovIAc~C$7Zq|j(~t^h*JAYg ze{bV2>N&iPG5LjF0w!ER~);ZbAIo^1d%`PB)<2dEjZcAdj^| zhP;|RzIhBauo@w;!iaR`hoy5LtJT(#-`Yo$IP%5o(y*tmN600pmsj3vKG%Tt97f%W z?C_k4A3bE+SZ|I$D94Ny-J(qfE=HGqz8Vv+BJeZB>ANi0ec6JZAlPlJCm~z<#Q*@z zl#&bUfmgDYLqwZhW|#qE_45@-9ofLG)>#U6VYA&LFA%1ijYHRc2`A>&d7gb%=QZEjc#)W6~znMFF6Yz%WJ30gmje(H5<-I9{M1>QQKlb71&^WE6Ga<^_DNhblf$0W#<+S#bO$~@tF9kx6=v6tz0AjKYKyXhP6E$Nf3mw;mMj;69RmoyLO z?GM*Vv3FvpwnuSIuy@`Vsy^teO8Di3j6-*r{MPfya4Ao83 z(wc5BIR6=-6W2X3tX^zO_JtxX7KonnupzS&49n+yNNk}~8yb`)<0#Sw`Gg|CV=31j6=NN~+WPbUlKpX?G47N2`R(yPpr+n^z5YSj}&urt1(7>5tA!`2>9=_QD9Iox4csUs10GU3QDYEV9o_dPjcpgrFJ!IuB^Do8t*@cL(xKnG4zU zX>rmBeR#~ir8AdKQwY={acbJ!wd$qXH4F!lwfcRV&oJe>{as#88l&CSf9Nu{!de8~5XR07Dd#Q9@(O|B61B@UGDOt4ih>GI<>;6+Ds zQ`dbjgcmnsc4v3(?}DwCJ?mD?0zk~#^@dBSswqAtuQ9q@2CzQTWW~l%xBU-q?XJj-<&qbQCrjyiLh}J~D;k?_*54h8#XuP(A z!uqobk>gy$DPu@>@5!k_S-)uh!p;90_w=F99%j!;X?CFt8MhpBqSxg*b08E1ycEc9 zZ*>Cu&Lz~UGo!;p=s|2ZRUX1BApMN*bpO^K-($P9_EZ%gywg-B*h+rbHGA|8wV4UN zX6YNWB(|+`t_%Q0jF(O_H*e8Sy(4}zQZFhCn`NNK$Dwv7($ZbemA%wt@c~|<_q8$= zaFCjM(x&s1jMPZJP=L^r*;R!vm#1=Y@x%wu{oq-%rHr3vv31mnzyP zD|&3K zmnF(C>;1@Ib2ceeifQ&sN;T+vb7ZsEQ^ZvG7TN|AAYWo5EIK8%I<+Anf#7_FFpAF) zc#-N`HWMBGloilgMTzZ?w?uqeL)Qirp0(|TjId&tZ0xnx<*HBSw&hd0!w69j>mB{Y ze|PL$DQnzGmE=Lo)Q-F60!a*D^OMeL9YqRyJqWfmyM|#$4rR-hZ=T$~sl$%>ZtfOQ 
zXyiaZ`i{tF1O%TNpmcjiGO@Ihhgj2-vHKZ=pyA#L#!V1lwb{-4?~0;6ZOZL)8>o9G z9@bT|n4LjJG^)ZqzC$x-nlhi8>Qx(|fXy`(aj$uIU|*Q3CtzBu)o3Wr_cFZPz~3#2 zAq=p}W@6<43n~VNen-sQNf^<-j%4#vA_}!{K*r`{- zlwIyzpAL-pJdAYMzRkp*kKbMcyd9|ZKF}MCimG89WWvWDf zE96B#b|$enOPe`Je14dU`p@ZzY}x}N^_`TJpN~M!6~=2%ai#jUWkDKK4=x2~nZ*uY389Tj!3w-$+_q{Ur{f(@82XtBR~uu! z^v^BRJm7GbXb#QLRDOOb$zD3duj5>;+cv9*A5a}jGtiXo#KiKK#zqL^vg7aBu=Ex2 z38MS={e80Vq=sMm`C{GDLW@^GK1ZNwIt@|f6J2FdtWMmC#as&de|8a40YE73)7HeT*jxt+p5x zJu_p+#w()LdnO}4P1hdI+vl)S^J?oeaNlnRYvah@Y{r*OT52yExBn1 z3@@P4Q^{88u5w8udZk3o^z*eF2%R*HNFx@aX;&54%ZTPPMnop zG{sV3Q&}jNpl^s-ZflCtIri$iWWK0!lj(5(h&!b^mWT`SXHbB6>uE0Xo!_F`|Cx6T zQNX_17!3LAEzYt!7$<_6jN9V{P+kmeHd`5rEco_4$Y5lYMLwQ;1Jbhtut&~bMyTBE z1J4#Z<3ROo-Q)){vdZezq5!Oq5=CF@Jktxp?K8{^A*PRkIe?uz^N9X!a2KFDbQi$y z{WvRLZ;Lch4(-lyJHE}@?*oy=Bt2A*-azaaezju1GoeTitsYzNh<{W4Lnfz^EwrIq zzZKq2VCXDd=C|fX-wFz&v=^(SbO1v=PT}4MC}DJNntZTmQqxt}R9-!YV8+xN`KeR# z()Pz~xYA?;&k2UXJl*Pd=6le1?7+edBq1HIDB)V&NcARxGfAZ_d3sYI6eHs>C4tsk zc9XCF@Kew|L+D2754{$8qM1-*)hl@>F`ve>B?OKJ&lC?0Fj8XgN55aBcgegDR13~j z>(XijHwiI&vnKzMhmKkE0=^uETqk#4UXNk_+nnz?$d+1dsI*e@&YJ=^lHwZ;wk%wb zE}fcjE6Q$-iMEa_+?1f6;prR;oAzhWWiI1Ipps2^QaDZjp2VqXj1}&2ozD?`UGDPv zy06dDv_7&}dy3GzD%rx#+3~oVYi+O zQ`+s0wsc363~&y({WQs947BKkWHe11QmbLXmd1`4!WvMav$$`)W{LpMY)Do)YP!0t*3jz`sG6&eSY;Zhsgvi?bW_7dc|*Jft8hxC!tAwS3xk?FR@RGoM}r=Tw~c&7W1xt=vj+hB&pO z*bXEK&`3j5oS%-}iDrJR@InF7xHBRy;A&Fk=MppGMwEBH%E2 zEgi&J<6TkGBlZt9Dl}QE8`j~eGIQ!A92tY`GV=5ykNS_rU4$(egV3!8)<4d8>A(G9 z{x9SJ*X)wh9&P+I0_wLB9FG#-RRaVv4ME+T?>|hK@QvXM7goM*g_mZ9L=P-JV)5W0 zi~_yPSPge?~6k4+sJoW*N29LB*!n}mLC!Cmf6?pt>OL&w(>1VQ^ zg02XUL!$W`1C1G^y1f`1cj0$%+|q@Wue627CT)a~1zBj81jEH+$6t8-;>ybPRtRa5 zwyQs_zl0>Ci()xa2EAa3%40cw@xobmH>pJQ<5@m)$FX#Red{~Y>g&_>FE~9)zoE31 z`qPQ)if2Nt&2!_IZrh$d8PofR5OU!Kff2epQDkdb)Z5s$9oY<8>hU{kGmo!ZIpjR| zxT=N9B*ue~5nB6NE9G7AF6s+oyxL#4eCSwM+HbHK~EuaO49uj0|iL^EjYrnz_4!ibfGwZvm>S{5f2lc_EIt$`D8_n}mFbyF#&U$g0 z-=7>kY5Fgg&YyKb=o#C#>43btjCOt=CbT!V2NysH^vr1Ix`F7TS(V1pnfeR(@fAy~ z&8y~r%vyfoG=nflSEle=DrF)VWM9lne 
zEEAr!wCehoRgv1<97Yx3+08Cm=qxA6WVG>{U|@4g%X$v1uif#uQyTkB)o*5Q3H$3t zFbV~Uii)rGN(b(ukO{B>2K?y~F0tKTiUXyIe5ATX#P!|us#(qT-I*Gwvhb zBKr@4poqAGy%!BHxnEtj{`cVMr1tE3Q28tX4wW6zTyQt3c);gJwv+OZh={o#^;e7_1^As$aT<-8s=U=ae;p3HHBM>JCUib|MHeSPV| z**~?pTg2lnH~g103u*ryPp;eYP4Ztwep3cxL3rFZn@#<`qN4J|Xq%+^&O~h2C1##s zob8#-I(M%zwEcyY^ThEiU{f2yecb02f0I!t={_ls-fr7_rFEVTyTp9 zc$>v^C;^4WF~4WxF}F9Te=R_B{e%-se0$)r)taXWpfV4)PowNm#k(P~_W4&BFaGMx zKUWR!n+-BoCSuN185GgXHSSyBnMG-w*63sPDFp6#{G0<1e6&`Ah3i(gYrd-mtu~H< zK?OzxRe>!`t}qybh{B=;mCupx(`+gEIhxyr40cg92uFPuiTr%F^gPDk{at${XC{U* zXTGtc^i&pCkBrKrNi<5@3lzpMNw)QzQgP8$RaHNKv9-y;yJifTLSQ8NE0rf`Ce~&X z#w_jZzyJPOm_@<-VeaOCEkTWCysxnA-U~Pgjtq9>`mk6CHMKg<`Q??BH&us(x&=5l zFTbrPB4+eCI~vnq+)I2V60)?u2tG;>gi;G(s1sF=)*VlwkA-1mh`rfsyK&=2ciH?| z4?-|csIRZNkkU+q{YgimQb@BY`wPh?a=U$$* zmIPZy!*}3{!B$nBCiwjb2dJk3+U*R3q_kMf@(7KkAKtjlxsvzavg4*9e}Ow|($0G& ztr`TM#iL)!^qcpp;H|Cajcq+X9glf~&@eSV#LyX@W~l77HC;b|-%kKn!w3o%6g{jE z(bs(te1N{~&)CQjWq6g30y^aJb(=m-iu(lb)`UsY7-2)jL1NF;IO9^=%%%AQjXFv{ z%F7*}v0Cu5c#QQdyBfiKsLbpnlzCYiX)Pswi7|AKGKcfJZ9D=C!S=`FPM`jC7K1H) zWv2mx0eD1n9gzt({fPHpxA0Zj?Yv?V3=m!hq}|k=)bnvZ#y^E`1Aumx(_*rmup4&| zAq;1dWjD$VVpJ<3@y}64#&VTD=4WNDwA`)?f6@G(8JN+oPBU{`tW57B{D&P|eLriw zSN7>0+f`ogwn}}+GfbE)`>%;He(sJP=~H0r-yJw|?XShmwO1a>>L>_Kex`Toz1mwr zd+l2Jh84pmXZ-Y!d-seTh*qw7s86f>vIu=sM7VfOjsNZb!ykW4I2CR?Qf_X7cO+Be zv@W3mtFj`1Xi!GOLR!BF6XraaIOQA4=E@fb@^sY~CVgwqkXV{E)W$_M`9--Q6$+dS zqgObF8fGp@zm{9HT(cJ4D<1dGQJRQ}M#hv@(o&0n6P4r`)1aKc(3!8y)Aq6mTR7#V9h<*Soy=!(^JdT+(V;DHQP;s6QDq(GJyW1C)RDc zMd_mcQGD?(=2U{A+~jI|xJ2{BI9>|Ae2OAyShMVa5o^AWN&H?N_1}7PiPZLOt_PwBXtzDk!VmkLYmq; zI0sYQb2xsOQ7&^B68`>)pzeG%P#RZj4ULFtaO$LsKZ!>BQOYs|36mc-TlWG$yi{v< zm?-cn%G1Wek%cXR!g(}7uk1S$rm3@#qbJhRmeyfalrs8zMCjpjEeBuZw%nL13o>PAG# zYB6YqGvsW>l>t${@C9f3-m-;r{veA$WFBuu7W306uejzrWg*PEZ89$0# zfm#y4{&v`T=T-I~bEC>^VL2H3!F2Gbyezvr>@bjFB)I&}g|n84nRgtIP7aR*g;~U= zt{*kv5x)+0kg%2_UyZ_ev<(cu%`O0L|IVWJQkLLYDTI?@6~t#{GETWy@lFi1O;|N9 zw(u|OVC%-bx55-sOx&#K5~S$F@Y$eNX`9k$>=cWct31&`F(%#nLFpMgw)-z}a+(C@ 
z4rzGG--!z!l)z`dbe0gPCjpK}Sa)8$e#PLA!O&5{subVcc;!cJ%#FiN)YauI5;BY_ zn{gqbgSN31ZP;-LBlCi^zZ(C5&(_uk*%TXSsZ4^=uD&v5WnKc|VZGm{*OaI-XtkM_ z9jd!1gFL@R4N@_Qd*{rl_dMqcjWpXpY3XREXMFB+Q%gcEy@6ucME2WG96Zv)tzKBy zF*(NYxc;YzIc)G|Qyzp4#sLvXzQ5M5IYap^Gqq*C4c|uV{M^*3CFp8jg_nyq2b%P- z)ITj=F!KR^FO-))1d?R~R+&BTBqB|f)7DOJo67HJ1Yb(?&jjdtCBhRDKi8SHWa0Jy z5@XDz%g@}d`_M8&nWk`?MNCG-N*>fydkTHiE8_(%0WXo=(`;qLBG73)sR@1cUhOg1d56UVgd3Oi4EOGD^yuT0WyZZJpyQxM=L@mkibV zzXToOp(}NDG%sK<0cVaFx#GX5#p5^zbtjthuG*j8sCkA_v;NE4dR7ZXXzOFx{PX}! z^1cnrkNJ!=VVRHBMr8~zCo2!1CgZMAu|@fzIxG{{Hy_`cvRNiLaozI4cQRa7VD6p- zHS;ieTc^U^fj@kA+<0A`ztu~`#vXq7!`q`RY6ean1;8$8uw@cM2lTsx?MG;$G1cQ5 z^RgE^BH1@SLJe(6YSu*H5`(|o`a+PTDp!@tQG-*qDF<$(tz| z)nCIvTev`;%D8K8 z^b{P+r+ZrcRp~SFr_h)O4M{07JopqCPpP@ z+5f|9AIDE+3_c2Szg7L6Vi28ZBmNb{1mD3(C1OO6GLHy?{(VCaUiUc6v+|jhL*%!C zr*+8Ol~(K5_vbPAt}uD!jXp*TxBIs(6W8~KRl^Tu+uhe`&to|Du@x;o792V{DGkw4 zBF65*mrrO{XmS9goKyNA^TacPLsuLMcL_t+AkZMzfQGW&2Qkqj4XD5nU(Y{L4_dk` zQ{Yg8uSKT5Gz@|5QK|nx)oxKfnfD2#@Cz{tSvMIzGM64%?`;~V_td}3`lj+*yCOmh zjR`J{s5sHp;vTZ=^2G~g-zOrby{C+v+e8pZEE|?c8@Bs=8(7Ybsy$wAPQVyz_kTWRqW=GiLtOmYA2lPtU5$yG~rnqcUP{d>PtN zy@VCUT2^BX;qIfVrq)DtyC;SWW@Mk z@SilhH7Uk@7@gzivczyf;^}9N@t&pi1t{dy@9fV9mEi6JPs zBw*o7Cq6>I_$PVUWe(PF^So^2%vd};vW#{wPMS5UgZBCcFOGkL8sFw}1zdp!nvPc8 zHuHIW3BLqRz^%%X#vv~hL{xI;E-P@OUR(n_n8v2fiZG|`zWd=O_W%*_ z<|sCSXhBF1>=ISy>g-4^F-tM^0mnW0XYx%-5Vw`f1c&KJIh{=_hRQ!6>+Lv-VvP3Icj>(twF5zsd^Btja!p$Dri8Rl^U*to{`k`7&Dck6yiE*C1?+WU0Ui zAEvKrv)aBJF13%3*=Q5LUB1`lGExL=*6}1+PVOs@}EtVpbff z^;2LY?JiWMT(iZnLrz-ltxxGJu>*HUFfG+{q)}W~AEx06L z;Y+%~;huWeEE(~8^_}x?c!~E#5eO;AcPJ~u(C zWp~WJAC3Q9qw%wGg~9}b9tc>=hd{&K_zr9o0rPb~j$e=?s&y zFAW$l#AwUMd-rl?GFPA28LN3}-HM^NKDT1X1?wIeI%31)NAI)Yp2JGOW+3LKC~1~4 z%V|w(-e%p~tbJF7f`EhpeaIsL;H9;L_k8ZLqlU0Y{!G07Z;BrhTZOIWA3aK)xjlQv zPGS0#sx7rF4*&-%eC$S$gaQjfjziG&^A0=q$tA*-`q0%z7>d!=LJH-#QZ=*eP1ZIA zI0i91MMNI0MR}SMBR#X)C4s!Zg1NA#OoMz<)+i*X1t z)6A^jB2kZ<{Eb%EZ&gNXS9y6T+yImVq$L7z$-M0SHhY<0R9=gp*2?L`J8jMKb?hg3 
z`$^0;7c-52>vdmg9u(zIK7Lt84~KFnJi7a9>4jRN2AKMcN!zpyleS^}5Mh1OyqA@N zv84U-y3Lq_Km6t%<@C=mGFejS-=~E$@|CcBve8Xu@9{UschXlyPgIF)p~5M!BOzdY zcd+41$|VZrI4`L|gm1YWLxhCCl5oA&`OK*kFCHX#t4$^ie;USL+?QEU354T~3E|67*(eoU-qm8j)DmKfp(VuD^K-xg`j`EYBoz!qLut($so z03VlW-0%_71uOzA*2~$oH*@_?&60O!w@Ad)O>bMMqRWqAggPzV8!%$+ z>>WG(9}#^_<3zA*H$1!Ch=u~B(TKTgKX-Nq>(qKhe9=Su^zu&cRfZKR7A)uVh8Kgq zdp?dPeV4+lKDT`6H`lKmcHV|ngDW^3<`fwJXE&@G`sww{hklXw99Ev33L%g_yD3b*PN#qwa`@xq>an(+(1-;&6n%hVYJy| zWsynSz#P9iv~spj_%A74EO2ju5bHKctZjXNvEuZL4--!Ln7XVc?Pjv4BXG(rplEpl z@6yEtPPzBqIrT8WmyLj7#8+W}V_~`MnALrY7yIKwD3XFg87WUI?NSMUj{7GxKmDvn zD~zK#pP?VNO=_dsN~E*_)cH9x{@CL!GkFkC{9b8UPkl<|^WFsa;roCBAz%%xjE{hh z|0IJRC?B|djjy5#gbl)F@Vco(b&2vjZrOQg+^Ubuv?;u8?XPQ>blfEiW`74o;0{^7 zlmo>Ar^4j1B^%w_^;@u_rgoOA98E6+73!IF9X#SQ7y>qEOo01vTwsP4U(nPgAdKI z*`$TIn0;gxj=Kf|hKht!D5?MoQgl4@-hY2n2Y_o*Hk~Zn)~Nnme}jCJwwc=-vq9n| zHFJ>(4a~Xg;DR31GX_i3ne1e}s}8kDqvek}`y0Js0~1jCgdN7Gf{Mg$Q9508b%Q}0 z{8($@aAjjUE;Q!D9A+Dn!4+!r5g{{EIW)s{VC3P2Kh`aVJVOjsH%w%r+G59&?PK7^ zCc;?C7)bSIJ>PZjx$D-SzNk_>KmI({B4DE>0V0mGB!EkAxFkRb*r+P=#ZjV-U}zpT zkG+(irAzk+FOBL~Ve2CtbKATvE!7szdoynm1N)r;O;`y|jp+p|2SRhnXsvN8yCNxW zAMmJ#V+&$qzY?_(Iv{?2wf zvrAxC(TLQu6t=SCPccu>J&3o~0^;%9S&G^sm z>0RSmAO%=z9S?!S!BQ}N;PGoNSO1mD;HYu(PX98P?R4%|GFsx2jk*13a*uo>g}2HM zWo0m{{%E*`0Y;_}2nA2NaH|*Z@ae)^*ng01R@J061eI@dl}F4mR^t8ANWy4bKuBgz z9nxO3wP;&kYv1aN2&>|jLv$;&g-Y0xpaH@b++8*EHEHLXz_uI$@r3Yqog4KGK*$rG zZ48M6IkA0`o{`54bltc2q7E}qn6G$Un_Iyc;w=Y4ci;Aw)A4j+wei~IPVzQbWi-7U z=l%LqOv}JNPlv~#0`K=(swE~!E4ERYQL$BLqbuK}%?u;$n|I-{tSx*>$CF!yFrItb zZpgSjU1L(hmBocuTR)m{c@TsJQj4N%^i!)3xAtw*=cGiv2+w#gv6FE+81;?jH z!bTXk-Ot_<72)`ax{cgpK`_G#XFe?ncmwpl!S5xUlQ-X7S;u38M`2J6r|bjx%1^7G zUAk<&Dm$!0M9$pRRO7_fiNej&k6G5LtV|K#YAk`S{td1yQ};QI=pxMEmjQd9Gfz7_ z$MV~xd9S+enmczVpsdaG zbgz$|Yp0M&8~s)p`DuClHeSde7>*fZF30^kr)vo$>Q7Obxq2vI?2^k)QoM%`hCxw> zF{xM}|4WrxrmuTTEVrb#RKNrWK(I8>ps#WT^v&8i>!KR`GAo*o@@YaW`rS-67`w}0 zMBJEvEq>I1B3{OXNEEqR@V3z^ z<)S7qDx(EfR$gveQ-nH=aA)8cIPx?%MTWZ`yjR0uCx%EiuFnl}CT+vEU1yy#_Ff-q 
z`xx7oqS3vHcO^$*<4~4J9=5sezgqh=_|rGU;w=5q$H3Hc=9%YC7*NEEc~3_IERJWQ zr0$+sFKL)2?X`0t%)(zgc5t3M7`NB=LGbbNcJaH&)C5lQRV9>9K)6lMxx?@!V=|=; zt~*8Vooe&fQCoR`7`x0kjV>NXD0wE~@BLTvmU*CM0q!kBrvTFrGbS2L-zK$bCzSCe z%|TtJPeJfO-B(v%UMxl+ulxvk&LJV(fLK`Pzf>1jGnj>sTmP%M4_%3CR|mc|V-yya zaz*&f!A_{q*Psd<389d+wtn>C{WxLKA4j#zp=knLV{AAeXaKM_sL9n7pKg?DeJ;MrUwtA5c z@8~!uF{RcnoVXZfYTF+Xfk1nDVfxpF%*cd;RphB5>G=Le z`+~7|3Qvgr#L6~iVe&JhLkF;v)t1f(3d+~kWtkJ0{D&c3a>g@3>Vo0gxj;LLvTkS@ z<5TPYBSv|g=_mX(5+Zc66+&V|G(uo)EF&=S!y{%J@~I#&X*FIYK#Adz=1%RZ;3A3? zBhhuw(huS9)Cm_Ki+J*J1TZn3r1td(ew@G*(pnu;2kiJ|%c(C+;& zuSP)D<+?8#L65so5OR;w<7hnUHs>u54%b9W!Wg~G4zOcK;a5P`V&XA4b>c-J1F&7aKJeV`)=UQ)4p*%WPZf;-GwU(Q5-UM z&GS3w&0Ngxg5%k2a6T36z{f{*3x5fix~wb}99(rKzw@0uyU>J|vuaq#Q(PX^14ph| z?E3B}F=cOIO;`O6iB`+myWeyW0*vPj>-%w*1~y_FEkEqIC+-d8%^n_HBT2gGS2)3o zUTkO@56>`nJ|#lH1!Z(%I3WdC(I3(SYksv8cFg!0pJTj_M55IW-jFzZ-a}#pV7z2kx7Pl#D^x;Q z^*$Iq;cExdBqGEa-bl;%@43$@ZTd$X@ErsLO+Xtx z3Fm3fq|VTJSd5R09RWCPCQ8a;+2Z3d6U0U{Mv~MM&K(SNp3lDiB3zcKFP%5*b^^bA z7yOA9(QO^M>cttCOGPIb3|m5hu)%4ylhrq3?3CdyT`=n=HyV_pEs-rnBpqj0N z)ONAz$rq^fdskiB1}4VR|HDsK<0Ltp?OUrk&_#pUghAa^^}c7y)gE)!%=`9<=~TK$ zGpWR}{YO6asA1+s*qYWF91X>3EkS4AVFovnu3I0Rj|8eUK~YfF!xj3}d3ez+5G4jP zgbIq^z}T6=7ex}Zo|`b7m=4d$_}QmXE+2;YJl6CB;2za8-_1Nqkpm*(A+9FtoO0fIJrID;i|Q9#(M*kZ#?_p`^^|FU;G4GQ_W|bd2-shRpTOM^ zn4S?q*RiGnME&GQd+3<-j`tMxyLZmN?j^X}L($$|5fGhv2M_bVAwcNlG<_HIs^Z8< zl}8M+2kFBb5KO_HZ9@!!Ah_gnk+WaKWII*0@`^}vcKJROIxPyGg87@kD}MIn-pTlb zD%fh})@tCniG#6v8k07!bqXN7&3nl(sXuY%$IcH9`=Xjj-W5M%akj zuc&I_81`A`m#}2!2!g8{oMzzZ2d8pcndV-7$nf1iEt)@jgWmD+t$;(LS>reP&5{7= z`$eb1IixdV!nQNz)ZdSg^zQ~d$^p>7)C$TfTMg;VrM$x9*fZJH;y4!@xYBcm41uDu zuVj0cJfLzdUOH2U&13pQeM@hj{Uel$n^_hUEWr(DOemsZoxcnodq_m8I^8E*U-WGm zyIC?>eCtpDfgATk>J;#0f7f<8HDizje&y*3Je9w_m-am04^tm-+*2>4du319w!^=f z{o%woF_L7r3BJ96Y@RN|!l_qQ=Em@3zp4Lm<32j*qpM#Qtc$VFym@tvpA>80=1P@$ zOJ?veFpTrfNYhb0&?~Knhhaw}?a(kP1~FT`E*^pT%WU-`ttOjcKrn0_t*DPqglPHW zIHgn}VrFKDD53}{fm+U2n?yKxXNVQwMY}AKjc+TNz0qg|FQ16H}!?V 
zT73VhEl2eIo5>%U^sVIp-u}GCJC_7Im3UNnWu?Za+LYgK|vbp*nLBj#^%!gI}X z0O`$TnXFspE<;!%2#yyQV6nvP_2hG#zeCYuB7ss{AZ6nEoibW@S;2w5b{lLu@|Yz7 zxQp>|$hDsrg5e+L|AK9&ZdJ4tKy`#=O(ges>Vl%-(Mj+85o3d0Eg_&HAAyAuj0)i~ zghq$|29;5kr3+`B4O4%Rei+4rIYD3`??f29PcVQA(P2^QX`0%T+t$)e9UfwF%zfGNW>F5(NBF6$`yQQix&&b zjse?-TxM>7WzM8+Osni)*X{$820=vM)@%-t7O@1f34JkvhANn0dI-KBF~ZOpj`iv4 zO4AR^sB>n1ApLAK9!&5W3iv?p7dG#2>I$`Eb4&jXbNA{}hw^wo!_bi)^wNtpTMjTx zR!&!N>)%@@t2WWLO6oWg;X^US70BqHHL>^~RgN7C)*+Gj?Fd`$D+mO74MS1HOV2QL zJ+POBA#-y!xYN>&`l$d;Y(|Y=q(e`_`Cj351dmGFfX8W5p%Y&X@hCJBUN%*~jlIa` z%mdp7+orC3Q?DJXftTnZXEg!<#qpxn(9kL$%0mY)I|rmRw3XFbNDwUXFEJ<Vc~(81JsOw3nfWqpxwRcxH~y^z2}kWHDT)oBzgyKZXfIM*`Im z3$+|)M*gJI#MXq+8Z$fpPtzJN7gl!;kI4GZ65DwpWh{ZP-r3d%g0NU=(wX#PX6H6h z2Fk*kZD#9*D%cV>-528$gHbu7CtiFCTGDU`t6>TdRM*dBAIW6u@1RenUF~T<)(kO` zdqrL-R!vtpSi&DW=c^v8Y3aTlSA&^8aK34o3;ATZ$Pz7g4?mUpt9%fEON!@4I~7ip zrb+q6JKx|Lq9~k=ojCndcnDXI;#JPz)c~B0m%}xPHv7KH8?@y#EvAUldi9Ivq=-6q`VyV-Q;rP-qV<-^;A9t z%G1M52S@5beh65rkpb`zAf9^h(b#Bv=0hbjfG!{Ju4(FyD zzWe)zKt`buM^lvE2|e9|e}$_dWRN8*3Vs7la>U2tamnp-{z=ZC7#JJNQ$Qq5vn}(N zV<$|zP&jE19);rw*JccjnK)wxvwe0Bk3{&U_my=>E(Mi=Ri+NyFF-}2-C zvh70_jT+hbDIU-n9CxTDLc5Na?}ElM8CwnOx@Qy!i3Ks1ip-@7*`6pJ#@!Y@+wEU4 zbj20-1I)sl)V7|u;va*lUxJ8JLm(OrCbK5I+Z*-{uaG&Rx#EkFgTigwclc8Wj#&Fi zb`iWNSei4|nOM8C8$wnqm+{TweI*2)zxvZ~(NLFXroo1CjhZxVf?E?@3laPPlT@9B zIhb>>V`zy}moAv|kR}u6CnNPm#7rwn=R+(?wU?Yc1mdSSFMh_-`PZ#(sIL8Yc#Zxn z9!p*<^BGK%c^fxJlWH%!h_@zn`%7y%B_UjCsLZn2sW?3IjZ=xXJNdzzL64HpnzhuH zv$yyL)9znU9OOx@=vE>%lZo2(W4436CZfp4z`Ml)q8I9?>F>hXe?^{x*iy_ z{)=@J%`E-_yl)?ZC|}0@*_*1X-3&M}m++(;F)P)_M+4LU!lVoKj0eeYBY=IKwPgbN z)SUplPpO(e>yP=crEb$kJp3z1Pn`AOVA9@McW}sL8P_3i zdq(M7zPwu*w`cR-2yT3X@FcyKi4*}43qo32 z@O|Ny!75X2PHXf1L1s5C04*<@@iVZxtjZjw7o7!uW<0f7m*$4b7&~F=*@@D! 
z-_+OD){L4k{o-W{XZ#Vl^a0tdzB&aF&UoHccybZzhjDL7oxJMa}s?+oH@ zv@@d-zS?X{F%xKwS6ZbQQUBxC%zWmtVRvt@@vkc@@lH`ab@d3F;W)Ent(euno66FB z^Giy#N?cSv^y> zu{Q>z53mY%GD^sVxECKO;=lmS5rt$)X)KklpS^Vcte-QXJw9e6OC+r(R4W?ZzD%Pt z3H%@`f~h)5!20wb#mvuRkNJA`2k*1bk%qW5Zb8#uhCqs0!@-@KAZlM@x+rX=!okKf zL0W6k)t2Bk&E3)8A@g#xqfei90&M(K5*#SQ93tZfb+?F_D`s@BH%<)u+hzq-&W;`X z#T7b~cF)n)D6+_0t240oNN4NM=J%BlOid;YsCHezLuut@`vVy3fMjXeWRy$Ukcn`{ zAtbJg44(J}MEP7dm^L?&jr|S5;dx6;MW7V|G@S?rQqC!8`Uqdi{Ut2HL{ktXBL6t| zjT=9GL={XX!m;W!4r2WFlbP6fwqLD)860Yv0UDk{`kn`q9|vQ<0h{BJ_x0NOG(^vs zX~=J`DO0ADZmQe!D3s(=vsw27)>`{g2IWiGjF>rF8(Et_T^aJ~H~IN3h8`G`w&U!F z<;QR<+O7AE)~HPe{zOFCK>&OJD+C9Isw(4$pnf(Q|6&%x59;`BjFpe6OfGdOixVnQ z8AYV(p)I{uX_|*rN;T`?AP#p*dxTFDguu% zbH>v@{676ADMM2{hNTqmZ#8sCjCt|A%DeI3nZek+K5CC}+~O6rE29_}xTE|eRbNZm zpeM^ea|u*49ygdDN^!9Br=g&#tc2lot+x>CniV^&yKD}Z`5s8i9D1i<_F=qOfOZp#tqjhquNJ&xXU-=oZIl@$a@)faaOv;d|DeP9+ zo5?K3$C`tWauG&O+q`)*+Qroz|LBN-+$J|98#92}>v zU-sL|?c@_aa!J<41zHmg!9l2^0M=Sc+{E7H&m#2P zjW#{d;2MHJIaoAed*f|zIro>pxxMVkF%zc#o8k5b9hCWa-_3Q-2k^>2DjqK})(YAh z){TpV(abaTPXtcs7ptnSsg;mLosGte6@UWQ{~(NqxaKLdElT^=u}!~_-`@OttA&Bn zdB7!qIRz6bF}wGR{YmK~fhjMmm~Y@N`>nfu^u(z$Ys1hLC`ekcJ1EMdHX0lXMtTOU zkBk^B)&ub32LIG^`ec@+bZ@V4oxjp4b>oX`)2Sb!Y45{s5QEQXSlp_#+D=LT1+Uy? zg$Z0n8@8^Af_rWcKU1qEfmbUC0gI6#2ge*9S-Z#C^|RpM55V$p!r;r=6XMa0?ZrI! 
zK6w0oI<{#I8td!OiZ>J@hQ93Q{KrI?8w?{?R2SwXRs?kvAw+oOaGN#BWb6x7oQ+-x zkGT#M@kUISzIl9RVHG=!;c=0rLoHQf$5SfhOe`(!xldhPjn+K~;+NJgo=>pz;S-qQ zA7EyQtI8`^AYejBDyNYWG5#hQYjv7diALW#ykJwN6J8=T*+(RK&2U819GWBUpNZR;K z$6e-Z31;(v1M0GqG1YxPY>6@7Gf;G)oimxF`7w6RGeJRk3RCg8e{#rSd)}>7R#Vb_ zC$1ZqILan-sm!u%9!)CB)3hFs0K!e5i3q4|W5>@R?zVGkbdZM$^9q4mq_yLSC>dhi zG=FQq#YjXzN%KnxwUQz%W$;8!9_;N*Pt*fxX!>sNrSjC~YP1qOd?!FWE5V}+yeY>u zgj?ErI$fuIs6An@BOuscAd=N+ka-em#faq8meKlqRIi8iMOyw-9H?;o;@hu(woso1 zC-rF==Tu|T8HtnJJ<12cqjL;1%8LX92N`f!JW={Nwh^6$ruk*QKT8{rk_f3ZBsO&e z7f)j>olV_-(B8p^mo2>Jb=6&HyyO^|P8Bo!gTY4u94E>eYUXwIC4Y@N>YHQt@?+$w zqHg(Q;2a*VPcu$1V$7^nzewL~eAu`I(7At-Pf5&<3vLD~M!&5?IMMx>2{R7iOJ^6T zG-Z%Z-pX=7xX!&z)8*k2 zC`Nch(5u>`wBPZH=++V*ZTnmNrU@;+YvqM?6lA$(Y9^JsmvJwgwn&&QOw-x)@9OJo z9$=dweh-XgNxx|wHIjv~$3Ft&zKT6ye}-GHjwO?SW@+O4bXUw=K-c`%7zW?LH!mRAg2v$ihH)8!abg)DXbvcWrcWWI^~@~YO3OJQ(GppsVAuTJ4$w{)iC zm4H#xCtrQ+)iXwb80RI{$uI3&>I@!u{F66^%)CM&Y7Lw^UPbY97=n^tY(!KtP!$Mh zkY)hZF;MGspXfJ)Tw(B~`eMWMOu%BIvoRTh6wYA3i@EW^Fpv2fsHCB|24*P@Ky{`Q z{v;;9wtAs1q76bD7#%l3=!~P3(sc9+iF)Ab>}SgS3&c(Y5}sR!p&>YJ+@wnnHfbVa znqNMMG3G}`rGj`H^GX!&I6qm=%<8+r`Al3WVPNcT(xjQhzi5=WgFg^-43!PaO6CLm zS0I>Tx+#?nYd=p*9TJ8p))4;o7B84u27-`v1(y8J0r#gM0^maZ z0`2%1rZ`oCkeOj5DNw|L;W}ju81eqPttDGWSOKOGLsk1i;Y|$p>a9x`%>Dub;5Wd{ z7L`MB8XrWKc654$>g2=!@S}dKZuK`V0YrHa0^ZEjr@zKB&V)Hc*=vmnSO&(FleI(5 zF>5!2a0&-QG5fD%)7Zu=co^@s8ANg5r`hhU84P$1w)Zg8i(;6q`f zVb0mSmVx<+Bo>%(%O3AiGWRn_U)G~%Z+w{i`|d>ArqD)*$TY2_jo+b{wStob&JCrs2Sk*i zt98$A`MpKiMx2g?EBu=yLLE-Na&c_N0@(U5m_C}3B^f3 zpE)iHPR|GWM)jo$ZZS9eZ$g}Iin}rJJnJ&W!Op{TeszuR#SBCU^-x4CnTQ>QBlne* z_dH_=-?&m%NZ7MS3v9i*=RZ$NIf@Q3IGcSITKOH{O&qo4_Uk1$X??tzNjkq`%fKN2 zgGx&cZhMrK^wNiea@uKPl#y!YSuFwnLmR+dsd_0E7hL#M53H|`hR6816XhNHK zL~wf!!R@;&*9=}f?*YdKIQjY;sHaUl+O`05(*fvhwgfyfHhYR@$?l>w(ffkpE9J=4A{CDCHpc~{f_Ct?dy>;O6Puw;5v*7$cbMe2w+XS8p@Kn9#}+ZOFLN_=jq%pyjZ`8;P64L2(mb^Z0i3( zTbn3N{j&%$_wu67`ras|Q+rs%ec{f9*Z-lVnnz8T@deg*KZMZEV2EK|$lhhj20jo2 zTXsv?TYGNWQ;tJe-@3lzXzHF)p215EA>h 
zhmxruoj@%OIQY$d9hK-=V7S6fq#ue7_|W>U%-q6pNBU|!fTgvoVN*CAY-{}%)jN_p zFpE0}ao>mTYLso|*-MCCYuQw@eCr!~J!K*=EL-Gl6DCRn4q_Gua(YYr*FAwV+>^Jy?Z95@MA6_zl_G-pUr{|t+%cx10oJ=&K-vWiT z#QvS!%6yY>oD(Qb#Jl?1lYB2Y4pFL> zjV6u*suNEpQ2LTC`_8g@_at;~QPvNtju^QnCsd1(mzP&-NwOofZnyU@AE>(TQe!nh zTW92r`d7HVh}bkrS^OV`>Mr!G6E%kfrxO{QW@)WGt-WhnPHVp{LFy1w?jW8E)-Rz= zT54NNt7+zt2%7DJ9T}@F!KnksJWXj47ibjb$`9loAnl@EL=+x4Z_8g#fT#+-esc)?@oMvU~P?XLQsFr$X zw4nT#Xhgfj`NZNP!ht439>WY^{HJ+><1VP*2o>sMG>-D?mMNAx9j`51Ll31pGm zp5G5~T4TXbpQ0CebD?q4Ccg|GBmCv}Q97Zcojo1LZIc?_!9NWHbYOr@)|J;kdejkw zJv@oI^DzK9k}iCJ5UH%C>3s0Oe6mnAEIaC*P8?Q1F#Qnx?MLs-FFgp8>ZcTblTUy2 zn1vr0F@geHa7!K6KpLb)?T-I^MLYfRcOp z4!;p>y02~sbVHyU0^JbkhCtCEpa3-$PKDDn?CGji!xMW1>47lsW08%IgsHMqC>R7I zX4%Op#q{m5jxBCXR7u>-Lv7x0>QL1Vn+UHaE3BEHn9X6_V&0etkOt~-vBu1AZh8L( zWj~D>;8`LrJnaSU>Q`&_T$jI8pc2KzQQn-la}Rmgv#$N^X6RjmUw7_q2y{cB8v@-B zXaxeO%~*cRy$2qs#sb@5mbY2hg_hU2IM#XCvD zz>}0^O!shdj!MM$11mlX`c~Ncf7qk+Uxb8>>`!_PhtyX%cj%R^@x+_QvgOX!Wxs<; o+H_L6S5BMU0&TMC-fiFiAC)cT@AlCzZU6uP07*qoM6N<$f{HbV$p8QV From 827712a949212e58d29400820ae35db12bd29d81 Mon Sep 17 00:00:00 2001 From: Uwe Winter Date: Thu, 16 Jun 2022 09:15:55 +1000 Subject: [PATCH 048/106] derive postgres parameter group version from the postgres version variable. 
this works for postgres 10+ (#1954) --- tf_files/aws/commons/kube.tf | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tf_files/aws/commons/kube.tf b/tf_files/aws/commons/kube.tf index 137c6126c..cc081ac95 100644 --- a/tf_files/aws/commons/kube.tf +++ b/tf_files/aws/commons/kube.tf @@ -117,10 +117,13 @@ resource "aws_db_instance" "db_indexd" { # See https://www.postgresql.org/docs/9.6/static/runtime-config-logging.html # and https://www.postgresql.org/docs/9.6/static/runtime-config-query.html#RUNTIME-CONFIG-QUERY-ENABLE # for detail parameter descriptions +locals { + pg_family_version = "${replace( var.indexd_engine_version ,"/\\.[0-9]/", "" )}" +} resource "aws_db_parameter_group" "rds-cdis-pg" { name = "${var.vpc_name}-rds-cdis-pg" - family = "postgres9.6" + family = "postgres${local.pg_family_version}" # make index searches cheaper per row parameter { From 626440522a5392bf69c41fa5c8ec8e36507cc339 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Fri, 17 Jun 2022 14:52:05 -0500 Subject: [PATCH 049/106] =?UTF-8?q?chore(npm-package-security-updates):=20?= =?UTF-8?q?Updated=20npm=20packages=20to=20fix=20secu=E2=80=A6=20(#1962)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(npm-package-security-updates): Updated npm packages to fix security vulnerabilities * chore(npm-package-security-updates): Updated npm packages to fix security vulnerabilities * chore(npm-package-security-updates): Updated npm packages to fix security vulnerabilities Co-authored-by: Edward Malinowski --- Docker/awshelper/Dockerfile | 1 + package-lock.json | 482 +++++++++++++++++++++++------------- package.json | 11 +- 3 files changed, 320 insertions(+), 174 deletions(-) diff --git a/Docker/awshelper/Dockerfile b/Docker/awshelper/Dockerfile index 8660fa23c..4b7437ed6 100644 --- a/Docker/awshelper/Dockerfile +++ b/Docker/awshelper/Dockerfile @@ -22,6 +22,7 @@ RUN apt-get update && apt-get upgrade -y \ libpq-dev \ lsb-release \ 
netcat-openbsd \ + networkd-dispatcher \ net-tools \ openssh-client \ openssh-server \ diff --git a/package-lock.json b/package-lock.json index 5d9d6116e..f88a979b5 100644 --- a/package-lock.json +++ b/package-lock.json @@ -4,6 +4,38 @@ "lockfileVersion": 1, "requires": true, "dependencies": { + "@fast-csv/format": { + "version": "4.3.5", + "resolved": "https://registry.npmjs.org/@fast-csv/format/-/format-4.3.5.tgz", + "integrity": "sha512-8iRn6QF3I8Ak78lNAa+Gdl5MJJBM5vRHivFtMRUWINdevNo00K7OXxS2PshawLKTejVwieIlPmK5YlLu6w4u8A==", + "requires": { + "@types/node": "^14.0.1", + "lodash.escaperegexp": "^4.1.2", + "lodash.isboolean": "^3.0.3", + "lodash.isequal": "^4.5.0", + "lodash.isfunction": "^3.0.9", + "lodash.isnil": "^4.0.0" + } + }, + "@fast-csv/parse": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/@fast-csv/parse/-/parse-4.3.6.tgz", + "integrity": "sha512-uRsLYksqpbDmWaSmzvJcuApSEe38+6NQZBUsuAyMZKqHxH0g1wcJgsKUvN3WC8tewaqFjBMMGrkHmC+T7k8LvA==", + "requires": { + "@types/node": "^14.0.1", + "lodash.escaperegexp": "^4.1.2", + "lodash.groupby": "^4.6.0", + "lodash.isfunction": "^3.0.9", + "lodash.isnil": "^4.0.0", + "lodash.isundefined": "^3.0.1", + "lodash.uniq": "^4.5.0" + } + }, + "@types/node": { + "version": "14.18.21", + "resolved": "https://registry.npmjs.org/@types/node/-/node-14.18.21.tgz", + "integrity": "sha512-x5W9s+8P4XteaxT/jKF0PSb7XEvo5VmqEWgsMlyeY4ZlLK8I6aH6g5TPPyDlLAep+GYf4kefb7HFyc7PAO3m+Q==" + }, "JSONStream": { "version": "1.3.5", "resolved": "https://registry.npmjs.org/JSONStream/-/JSONStream-1.3.5.tgz", @@ -33,15 +65,20 @@ "uri-js": "^4.2.2" } }, + "ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==" + }, "array-flatten": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", "integrity": 
"sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=" }, "asn1": { - "version": "0.2.4", - "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz", - "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==", + "version": "0.2.6", + "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.6.tgz", + "integrity": "sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==", "requires": { "safer-buffer": "~2.1.0" } @@ -49,60 +86,74 @@ "assert-plus": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", - "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=" + "integrity": "sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw==" }, "async": { - "version": "2.6.4", - "resolved": "https://registry.npmjs.org/async/-/async-2.6.4.tgz", - "integrity": "sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA==", - "requires": { - "lodash": "^4.17.14" - } + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/async/-/async-3.2.4.tgz", + "integrity": "sha512-iAB+JbDEGXhyIUavoDl9WP/Jj106Kz9DEn1DPgYw5ruDn0e3Wgi3sKFm55sASdGBNOQB8F59d9qQ7deqrHA8wQ==" }, "asynckit": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=" + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" }, "aws-sdk": { - "version": "2.656.0", - "resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.656.0.tgz", - "integrity": "sha512-UzqDvvt6i7gpuzEdK0GT/JOfBJcsCPranzZWdQ9HR4+5E0m5kf5gybZ6OX+UseIAE2/WND6Dv0aHgiI21AKenw==", + "version": "2.1154.0", + "resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.1154.0.tgz", + "integrity": "sha512-SIxLcWGsnW9Sl2P+a+uoqebBsfjeAZZOQokzgDj3VoESnFzsjI+2REi9CdvvSvwlfFUP7sFr6A0khrYNDJLebQ==", "requires": { - "buffer": 
"4.9.1", + "buffer": "4.9.2", "events": "1.1.1", "ieee754": "1.1.13", - "jmespath": "0.15.0", + "jmespath": "0.16.0", "querystring": "0.2.0", "sax": "1.2.1", "url": "0.10.3", - "uuid": "3.3.2", + "uuid": "8.0.0", "xml2js": "0.4.19" + }, + "dependencies": { + "uuid": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.0.0.tgz", + "integrity": "sha512-jOXGuXZAWdsTH7eZLtyXMqUb9EcWMGZNbL9YcGBJl4MH4nrxHmZJhEHvyLFrkxo+28uLb/NYRcStH48fnD0Vzw==" + } } }, "aws-sign2": { "version": "0.7.0", "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", - "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=" + "integrity": "sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA==" }, "aws4": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.9.1.tgz", - "integrity": "sha512-wMHVg2EOHaMRxbzgFJ9gtjOOCrI80OHLG14rxi28XwOW8ux6IiEbRCGGGqCtdAIg4FQCbW20k9RsT4y3gJlFug==" + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.11.0.tgz", + "integrity": "sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA==" + }, + "balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" }, "base64-js": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.3.1.tgz", - "integrity": "sha512-mLQ4i2QO1ytvGWFWmcngKO//JXAQueZvwEKtjgQFM4jIK0kU+ytMfplL8j+n5mspOfjHwoAg+9yhb7BwAHm36g==" + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==" }, "bcrypt-pbkdf": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", - "integrity": 
"sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=", + "integrity": "sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w==", "requires": { "tweetnacl": "^0.14.3" } }, + "big.js": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz", + "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==" + }, "body-parser": { "version": "1.19.0", "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.19.0.tgz", @@ -127,10 +178,19 @@ } } }, + "brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "requires": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, "buffer": { - "version": "4.9.1", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.1.tgz", - "integrity": "sha1-bRu2AbB6TvztlwlBMgkwJ8lbwpg=", + "version": "4.9.2", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.2.tgz", + "integrity": "sha512-xq+q3SRMOxGivLhBNaUdC64hDTQwejJ+H0T/NB1XMtTVEwNTrfFF3gAxiyW0Bu/xWEGhjVKgUcMhCrUy2+uCWg==", "requires": { "base64-js": "^1.0.2", "ieee754": "^1.1.4", @@ -140,7 +200,7 @@ "buffer-queue": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/buffer-queue/-/buffer-queue-1.0.0.tgz", - "integrity": "sha1-PSU/4vCrcOhR1yhxLozW+RSowAI=" + "integrity": "sha512-HNAysvwrmORbm5w5rB6yCz2Sab+ATCW6RSAOVWJmaRnPviPfuNO8+f3R0MyCJMUhL8sMx88LcawtIcfjHERhVA==" }, "bytes": { "version": "3.1.0", @@ -150,7 +210,7 @@ "caseless": { "version": "0.12.0", "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", - "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=" + "integrity": "sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw==" }, "combined-stream": { "version": "1.0.8", @@ -160,6 
+220,11 @@ "delayed-stream": "~1.0.0" } }, + "concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" + }, "content-disposition": { "version": "0.5.3", "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.3.tgz", @@ -193,12 +258,12 @@ "core-util-is": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", - "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" + "integrity": "sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ==" }, "dashdash": { "version": "1.14.1", "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", - "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=", + "integrity": "sha512-jRFi8UDGo6j+odZiEpjazZaWqEal3w/basFjQHQEwVtZJGDpxbH1MeYluwCS8Xq5wmLJooDlMgvVarmWfGM44g==", "requires": { "assert-plus": "^1.0.0" } @@ -211,20 +276,15 @@ "ms": "2.0.0" } }, - "decimal.js": { - "version": "10.2.0", - "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.2.0.tgz", - "integrity": "sha512-vDPw+rDgn3bZe1+F/pyEwb1oMG2XTlRVgAa6B4KccTEpYgF8w6eQllVbQcfIJnZyvzFtFpxnpGtx8dd7DJp/Rw==" - }, "delay": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/delay/-/delay-4.3.0.tgz", - "integrity": "sha512-Lwaf3zVFDMBop1yDuFZ19F9WyGcZcGacsbdlZtWjQmM50tOcMntm1njF/Nb/Vjij3KaSvCF+sEYGKrrjObu2NA==" + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/delay/-/delay-5.0.0.tgz", + "integrity": "sha512-ReEBKkIfe4ya47wlPYf/gu5ib6yUG0/Aez0JQZQz94kiWtRQvZIQbTiehsnwHvLSWJnQdhVeqYue7Id1dKr0qw==" }, "delayed-stream": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=" + "integrity": 
"sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==" }, "depd": { "version": "1.1.2", @@ -239,7 +299,7 @@ "ecc-jsbn": { "version": "0.1.2", "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", - "integrity": "sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=", + "integrity": "sha512-eh9O+hwRHNbG4BLTjEl3nw044CkGm5X6LoaCf7LPp7UU8Qrt47JYNi6nPX8xjW97TKGKm1ouctg0QSpZe9qrnw==", "requires": { "jsbn": "~0.1.0", "safer-buffer": "^2.1.0" @@ -251,29 +311,63 @@ "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=" }, "elasticdump": { - "version": "6.26.0", - "resolved": "https://registry.npmjs.org/elasticdump/-/elasticdump-6.26.0.tgz", - "integrity": "sha512-nVi3R+wM5O1uhK3xsMXjU2Vn0mn6XErgFI7zEHipWeOcPLDatc/svcq4lue2zhJMthq2MwHaS+d9N57ypZeHlA==", + "version": "6.84.1", + "resolved": "https://registry.npmjs.org/elasticdump/-/elasticdump-6.84.1.tgz", + "integrity": "sha512-qgHJeGGNMJFwGMpidCOCKZsbq6bUth2cvns1QdrJnCIoojv5x0J4C6Xm5zh8sZCYr7y5nrwfgMUkrbMNLHdGwQ==", "requires": { "JSONStream": "^1.3.5", - "async": "^2.0.1", - "aws-sdk": "^2.652.0", - "aws4": "^1.9.1", - "bytes": "^3.1.0", - "decimal.js": "^10.2.0", - "delay": "^4.3.0", - "ini": "^1.3.5", - "lodash": "^4.17.15", - "lossless-json": "^1.0.3", - "minimist": "^1.2.5", - "p-queue": "^6.3.0", - "pump": "^3.0.0", - "request": "2.x.x", - "requestretry": "^4.1.0", + "async": "^2.6.4", + "aws-sdk": "2.1122.0", + "aws4": "^1.11.0", + "big.js": "^5.2.2", + "bytes": "^3.1.2", + "delay": "^5.0.0", + "extends-classes": "1.0.5", + "fast-csv": "4.3.6", + "http-status": "^1.5.1", + "ini": "^2.0.0", + "lodash": "^4.17.21", + "lossless-json": "^1.0.5", + "minimist": "^1.2.6", + "p-queue": "^6.6.2", + "request": "2.88.2", + "requestretry": "^7.1.0", "s3-stream-upload": "2.0.2", "s3urls": "^1.5.2", + "semver": "5.7.1", "socks5-http-client": "^1.0.4", "socks5-https-client": "^1.2.1" + }, + "dependencies": { + "async": { + "version": "2.6.4", + "resolved": 
"https://registry.npmjs.org/async/-/async-2.6.4.tgz", + "integrity": "sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA==", + "requires": { + "lodash": "^4.17.14" + } + }, + "aws-sdk": { + "version": "2.1122.0", + "resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.1122.0.tgz", + "integrity": "sha512-545VawhsCQ7yEx9jZKV0hTTW3FS/waycISWMvnNwqRfpU9o4FQ4DSu3je7ekn5yFKM+91dxJC+IfJgtIV8WaUw==", + "requires": { + "buffer": "4.9.2", + "events": "1.1.1", + "ieee754": "1.1.13", + "jmespath": "0.16.0", + "querystring": "0.2.0", + "sax": "1.2.1", + "url": "0.10.3", + "uuid": "3.3.2", + "xml2js": "0.4.19" + } + }, + "bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==" + } } }, "encodeurl": { @@ -281,14 +375,6 @@ "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=" }, - "end-of-stream": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", - "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", - "requires": { - "once": "^1.4.0" - } - }, "escape-html": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", @@ -300,14 +386,14 @@ "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc=" }, "eventemitter3": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.0.tgz", - "integrity": "sha512-qerSRB0p+UDEssxTtm6EDKcE7W4OaoisfIMl4CngyEhjpYglocpNg6UEqCvemdGhosAsg4sO2dXJOdyBifPGCg==" + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" }, "events": { 
"version": "1.1.1", "resolved": "https://registry.npmjs.org/events/-/events-1.1.1.tgz", - "integrity": "sha1-nr23Y1rQmccNzEwqH1AEKI6L2SQ=" + "integrity": "sha512-kEcvvCBByWXGnZy6JUlgAp2gBIUjfCAV6P6TgT1/aaQKcmuAEC4OZTV1I4EWQLz2gxZw76atuVyvHhTxvi0Flw==" }, "express": { "version": "4.17.1", @@ -363,15 +449,32 @@ "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" }, + "extends-classes": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/extends-classes/-/extends-classes-1.0.5.tgz", + "integrity": "sha512-ccyBHFN+wFM0dz0hvuQntSH9KST9951ua1hr3yxeFfu+h3H/eHw1RavE8XAEVi9K8dh534Mk3xA+pjk7VHkUcQ==", + "requires": { + "method-missing": "^1.1.2" + } + }, "extsprintf": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", - "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=" + "integrity": "sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g==" + }, + "fast-csv": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/fast-csv/-/fast-csv-4.3.6.tgz", + "integrity": "sha512-2RNSpuwwsJGP0frGsOmTb9oUF+VkFSM4SyLTDgwf2ciHWTarN0lQTC+F2f/t5J9QjW+c65VFIAAu85GsvMIusw==", + "requires": { + "@fast-csv/format": "4.3.5", + "@fast-csv/parse": "4.3.6" + } }, "fast-deep-equal": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.1.tgz", - "integrity": "sha512-8UEa58QDLauDNfpbrX55Q9jrGHThw2ZMdOky5Gl1CDtVeJDPVrG4Jxx1N8jw2gkWaff5UUuX1KJd+9zGe2B+ZA==" + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" }, "fast-json-stable-stringify": { "version": "2.1.0", @@ -395,7 +498,7 @@ "forever-agent": { "version": "0.6.1", "resolved": 
"https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", - "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=" + "integrity": "sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw==" }, "form-data": { "version": "2.3.3", @@ -420,7 +523,7 @@ "getpass": { "version": "0.1.7", "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", - "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=", + "integrity": "sha512-0fzj9JxOLfJ+XGLhR8ze3unN0KZCgZwiSSDz168VERjK8Wl8kVSdcu2kspd4s4wtAa1y/qrVRiAA0WclVsu0ng==", "requires": { "assert-plus": "^1.0.0" } @@ -428,14 +531,14 @@ "har-schema": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", - "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=" + "integrity": "sha512-Oqluz6zhGX8cyRaTQlFMPw80bSJVG2x/cFb8ZPhUILGgHka9SsokCCOQgpveePerqidZOrT14ipqfJb7ILcW5Q==" }, "har-validator": { - "version": "5.1.3", - "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.3.tgz", - "integrity": "sha512-sNvOCzEQNr/qrvJgc3UG/kD4QtlHycrzwS+6mfTrrSq97BvaYcPZZI1ZSqGSPR73Cxn4LKTD4PttRwfU7jWq5g==", + "version": "5.1.5", + "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz", + "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==", "requires": { - "ajv": "^6.5.5", + "ajv": "^6.12.3", "har-schema": "^2.0.0" } }, @@ -461,13 +564,18 @@ "http-signature": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", - "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=", + "integrity": "sha512-CAbnr6Rz4CYQkLYUtSNXxQPUH2gK8f3iWexVlsnMeD+GjlsQ0Xsy1cOX+mN3dtxYomRy21CiOzU8Uhw6OwncEQ==", "requires": { "assert-plus": "^1.0.0", "jsprim": "^1.2.2", "sshpk": "^1.7.0" } }, + "http-status": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/http-status/-/http-status-1.5.2.tgz", + "integrity": 
"sha512-HzxX+/hV/8US1Gq4V6R6PgUmJ5Pt/DGATs4QhdEOpG8LrdS9/3UG2nnOvkqUpRks04yjVtV5p/NODjO+wvf6vg==" + }, "iconv-lite": { "version": "0.4.24", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", @@ -487,9 +595,9 @@ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" }, "ini": { - "version": "1.3.7", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.7.tgz", - "integrity": "sha512-iKpRpXP+CrP2jyrxvg1kMUpXDyRUFDWurxbnVT1vQPx+Wz9uCYsMIqYuSBLV+PAaZG/d7kRLKRFc9oDMsH+mFQ==" + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz", + "integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==" }, "ip-address": { "version": "6.1.0", @@ -504,7 +612,7 @@ "jsbn": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-1.1.0.tgz", - "integrity": "sha1-sBMHyym2GKHtJux56RH4A8TaAEA=" + "integrity": "sha512-4bYVV3aAMtDTTu4+xsDYa6sy9GyJ69/amsu9sYF2zqjiEoZA5xJi3BrfX3uY+/IekIu7MwdObdbDWpoZdBv3/A==" } } }, @@ -516,7 +624,7 @@ "is-typedarray": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", - "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=" + "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==" }, "isarray": { "version": "1.0.0", @@ -526,22 +634,22 @@ "isstream": { "version": "0.1.2", "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", - "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=" + "integrity": "sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g==" }, "jmespath": { - "version": "0.15.0", - "resolved": "https://registry.npmjs.org/jmespath/-/jmespath-0.15.0.tgz", - "integrity": "sha1-o/Iiqarp+Wb10nx5ZRDigJF2Qhc=" + "version": "0.16.0", + "resolved": "https://registry.npmjs.org/jmespath/-/jmespath-0.16.0.tgz", + "integrity": 
"sha512-9FzQjJ7MATs1tSpnco1K6ayiYE3figslrXA72G2HQ/n76RzvYlofyi5QM+iX4YRs/pu3yzxlVQSST23+dMDknw==" }, "jsbn": { "version": "0.1.1", "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", - "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=" + "integrity": "sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg==" }, "json-schema": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", - "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=" + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz", + "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==" }, "json-schema-traverse": { "version": "0.4.1", @@ -551,21 +659,21 @@ "json-stringify-safe": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", - "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=" + "integrity": "sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==" }, "jsonparse": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/jsonparse/-/jsonparse-1.3.1.tgz", - "integrity": "sha1-P02uSpH6wxX3EGL4UhzCOfE2YoA=" + "integrity": "sha512-POQXvpdL69+CluYsillJ7SUhKvytYjW9vG/GKpnf+xP8UWgYEM/RaMzHHofbALDiKbbP1W8UEYmgGl39WkPZsg==" }, "jsprim": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", - "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.2.tgz", + "integrity": "sha512-P2bSOMAc/ciLz6DzgjVlGJP9+BrJWu5UDGK70C2iweC5QBIeFf0ZXRvGjEj2uYgrY2MkAAhsSWHDWlFtEroZWw==", "requires": { "assert-plus": "1.0.0", "extsprintf": "1.3.0", - "json-schema": "0.2.3", + "json-schema": "0.4.0", "verror": "1.10.0" } }, @@ -574,10 +682,50 @@ "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", 
"integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" }, + "lodash.escaperegexp": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.escaperegexp/-/lodash.escaperegexp-4.1.2.tgz", + "integrity": "sha512-TM9YBvyC84ZxE3rgfefxUWiQKLilstD6k7PTGt6wfbtXF8ixIJLOL3VYyV/z+ZiPLsVxAsKAFVwWlWeb2Y8Yyw==" + }, + "lodash.groupby": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/lodash.groupby/-/lodash.groupby-4.6.0.tgz", + "integrity": "sha512-5dcWxm23+VAoz+awKmBaiBvzox8+RqMgFhi7UvX9DHZr2HdxHXM/Wrf8cfKpsW37RNrvtPn6hSwNqurSILbmJw==" + }, + "lodash.isboolean": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/lodash.isboolean/-/lodash.isboolean-3.0.3.tgz", + "integrity": "sha512-Bz5mupy2SVbPHURB98VAcw+aHh4vRV5IPNhILUCsOzRmsTmSQ17jIuqopAentWoehktxGd9e/hbIXq980/1QJg==" + }, + "lodash.isequal": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.isequal/-/lodash.isequal-4.5.0.tgz", + "integrity": "sha512-pDo3lu8Jhfjqls6GkMgpahsF9kCyayhgykjyLMNFTKWrpVdAQtYyB4muAMWozBB4ig/dtWAmsMxLEI8wuz+DYQ==" + }, + "lodash.isfunction": { + "version": "3.0.9", + "resolved": "https://registry.npmjs.org/lodash.isfunction/-/lodash.isfunction-3.0.9.tgz", + "integrity": "sha512-AirXNj15uRIMMPihnkInB4i3NHeb4iBtNg9WRWuK2o31S+ePwwNmDPaTL3o7dTJ+VXNZim7rFs4rxN4YU1oUJw==" + }, + "lodash.isnil": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/lodash.isnil/-/lodash.isnil-4.0.0.tgz", + "integrity": "sha512-up2Mzq3545mwVnMhTDMdfoG1OurpA/s5t88JmQX809eH3C8491iu2sfKhTfhQtKY78oPNhiaHJUpT/dUDAAtng==" + }, + "lodash.isundefined": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/lodash.isundefined/-/lodash.isundefined-3.0.1.tgz", + "integrity": "sha512-MXB1is3s899/cD8jheYYE2V9qTHwKvt+npCwpD+1Sxm3Q3cECXCiYHjeHWXNwr6Q0SOBPrYUDxendrO6goVTEA==" + }, + "lodash.uniq": { + "version": "4.5.0", + "resolved": 
"https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", + "integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==" + }, "lossless-json": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/lossless-json/-/lossless-json-1.0.3.tgz", - "integrity": "sha512-r4w0WrhIHV1lOTVGbTg4Toqwso5x6C8pM7Q/Nto2vy4c7yUSdTYVYlj16uHVX3MT1StpSELDv8yrqGx41MBsDA==" + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/lossless-json/-/lossless-json-1.0.5.tgz", + "integrity": "sha512-RicKUuLwZVNZ6ZdJHgIZnSeA05p8qWc5NW0uR96mpPIjN9WDLUg9+kj1esQU1GkPn9iLZVKatSQK5gyiaFHgJA==" }, "media-typer": { "version": "0.3.0", @@ -589,6 +737,11 @@ "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E=" }, + "method-missing": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/method-missing/-/method-missing-1.2.4.tgz", + "integrity": "sha512-tmj4CKZJVQd/ZuN9hnYD8HBAs/3RdDdqUeJG9RbVYlEZLuPYK4EW+EekMqLsCV4w1HastX+Pk2Ov87OQmeo01A==" + }, "methods": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", @@ -612,10 +765,18 @@ "mime-db": "1.40.0" } }, + "minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "requires": { + "brace-expansion": "^1.1.7" + } + }, "minimist": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", - "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==" + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz", + "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==" }, "ms": { "version": "2.0.0", @@ -640,26 
+801,18 @@ "ee-first": "1.1.1" } }, - "once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", - "requires": { - "wrappy": "1" - } - }, "p-finally": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", - "integrity": "sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4=" + "integrity": "sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==" }, "p-queue": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/p-queue/-/p-queue-6.3.0.tgz", - "integrity": "sha512-fg5dJlFpd5+3CgG3/0ogpVZUeJbjiyXFg0nu53hrOYsybqSiDyxyOpad0Rm6tAiGjgztAwkyvhlYHC53OiAJOA==", + "version": "6.6.2", + "resolved": "https://registry.npmjs.org/p-queue/-/p-queue-6.6.2.tgz", + "integrity": "sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==", "requires": { - "eventemitter3": "^4.0.0", - "p-timeout": "^3.1.0" + "eventemitter3": "^4.0.4", + "p-timeout": "^3.2.0" } }, "p-timeout": { @@ -683,7 +836,7 @@ "performance-now": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", - "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" + "integrity": "sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow==" }, "process-nextick-args": { "version": "2.0.1", @@ -704,29 +857,20 @@ "resolved": "https://registry.npmjs.org/psl/-/psl-1.8.0.tgz", "integrity": "sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ==" }, - "pump": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", - "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", - "requires": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, "punycode": { "version": "1.3.2", "resolved": 
"https://registry.npmjs.org/punycode/-/punycode-1.3.2.tgz", - "integrity": "sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0=" + "integrity": "sha512-RofWgt/7fL5wP1Y7fxE7/EmTLzQVnB0ycyibJ0OOHIlJqTNzglYFxVwETOcIoJqJmpDXJ9xImDv+Fq34F/d4Dw==" }, "qs": { - "version": "6.5.2", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", - "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==" + "version": "6.5.3", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.3.tgz", + "integrity": "sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA==" }, "querystring": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz", - "integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=" + "integrity": "sha512-X/xY82scca2tau62i9mDyU9K+I+djTMUsvwf7xnUX5GLvVzgJybOJf4Y6o9Zx3oJK/LSXg5tTZBjwzqVPaPO2g==" }, "range-parser": { "version": "1.2.1", @@ -793,19 +937,18 @@ } }, "requestretry": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/requestretry/-/requestretry-4.1.0.tgz", - "integrity": "sha512-q3IT2vz5vkcMT6xgwB/BWzsmnu7N/27l9fW86U48gt9Mwrce5rSEyFvpAW7Il1/B78/NBUlYBvcCY1RzWUWy7w==", + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/requestretry/-/requestretry-7.1.0.tgz", + "integrity": "sha512-TqVDgp251BW4b8ddQ2ptaj/57Z3LZHLscAUT7v6qs70buqF2/IoOVjYbpjJ6HiW7j5+waqegGI8xKJ/+uzgDmw==", "requires": { "extend": "^3.0.2", - "lodash": "^4.17.10", - "when": "^3.7.7" + "lodash": "^4.17.15" } }, "s3-stream-upload": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/s3-stream-upload/-/s3-stream-upload-2.0.2.tgz", - "integrity": "sha1-YDQvEtSqBuqPOJ+3YaU5Ou3KAX8=", + "integrity": "sha512-hSfGZ4InIUMH29niWCAkcDvmOGwADSy7j2Ktm6+nKI+ub6nPoLOboo1D+Q3mEIutTHu0J4+Sv92J0GOk5hAonQ==", "requires": { "buffer-queue": "~1.0.0", "readable-stream": "^2.3.0" @@ -814,7 +957,7 @@ "s3signed": { "version": "0.1.0", "resolved": 
"https://registry.npmjs.org/s3signed/-/s3signed-0.1.0.tgz", - "integrity": "sha1-rgO4YllBMhXtQ+mShcjDR1eXNfs=", + "integrity": "sha512-08Jc0+GAaFjXgvl8qQytu6+wVBfcUUyCJDocj5kBUeq9YA+6mAM/6psDNxrg4PVkkLBvAK75mnjlaGckfOtDKA==", "requires": { "aws-sdk": "^2.0.4" } @@ -822,16 +965,16 @@ "s3urls": { "version": "1.5.2", "resolved": "https://registry.npmjs.org/s3urls/-/s3urls-1.5.2.tgz", - "integrity": "sha1-GCqZEgj8GrUhREPrJQ/I9TtLyeo=", + "integrity": "sha512-3f4kprxnwAqoiVdR/XFoc997YEt0b6oY1VKrhl+kuWnHaUQ2cVe73TcQaww8geX5FKPuGBHl90xv70q7SlbBew==", "requires": { "minimist": "^1.1.0", "s3signed": "^0.1.0" } }, "safe-buffer": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.0.tgz", - "integrity": "sha512-fZEwUGbVl7kouZs1jCdMLdt95hdIv0ZeHg6L7qPeciMZhZ+/gdesW4wgTARkrFWEpspjEATAzUGPG8N2jJiwbg==" + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" }, "safer-buffer": { "version": "2.1.2", @@ -841,7 +984,12 @@ "sax": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.1.tgz", - "integrity": "sha1-e45lYZCyKOgaZq6nSEgNgozS03o=" + "integrity": "sha512-8I2a3LovHTOpm7NV5yOyO8IHqgVsfK4+UuySrXU8YXkSRX7k6hCV9b3HrkKCr3nMpgj+0bmocaJJWpvp1oc7ZA==" + }, + "semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" }, "send": { "version": "0.17.1", @@ -916,9 +1064,9 @@ "integrity": "sha512-VE0SOVEHCk7Qc8ulkWw3ntAzXuqf7S2lvwQaDLRnUeIEaKNQJzV6BwmLKhOqT61aGhfUMrXeaBk+oDGCzvhcug==" }, "sshpk": { - "version": "1.16.1", - "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.16.1.tgz", - "integrity": "sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg==", + "version": 
"1.17.0", + "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.17.0.tgz", + "integrity": "sha512-/9HIEs1ZXGhSPE8X6Ccm7Nam1z8KcoCqPdI7ecm1N33EzAetWahvQWVqLZtaZQ+IDKX4IyA2o0gBzqIMkAagHQ==", "requires": { "asn1": "~0.2.3", "assert-plus": "^1.0.0", @@ -954,7 +1102,7 @@ "through": { "version": "2.3.8", "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", - "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=" + "integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==" }, "toidentifier": { "version": "1.0.0", @@ -980,7 +1128,7 @@ "tunnel-agent": { "version": "0.6.0", "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", - "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=", + "integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==", "requires": { "safe-buffer": "^5.0.1" } @@ -988,7 +1136,7 @@ "tweetnacl": { "version": "0.14.5", "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", - "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=" + "integrity": "sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==" }, "type-is": { "version": "1.6.18", @@ -1005,9 +1153,9 @@ "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=" }, "uri-js": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.2.2.tgz", - "integrity": "sha512-KY9Frmirql91X2Qgjry0Wd4Y+YTdrdZheS8TFwvkbLWf/G5KNJDCh6pKL5OZctEW4+0Baa5idK2ZQuELRwPznQ==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", "requires": { "punycode": "^2.1.0" }, @@ -1022,7 +1170,7 @@ "url": { "version": "0.10.3", "resolved": "https://registry.npmjs.org/url/-/url-0.10.3.tgz", - "integrity": "sha1-Ah5NnHcF8hu/N9A861h2dAJ3TGQ=", + "integrity": 
"sha512-hzSUW2q06EqL1gKM/a+obYHLIO6ct2hwPuviqTTOcfFVc61UbfJ2Q32+uGL/HCPxKqrdGB5QUwIe7UqlDgwsOQ==", "requires": { "punycode": "1.3.2", "querystring": "0.2.0" @@ -1031,7 +1179,7 @@ "util-deprecate": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=" + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" }, "utils-merge": { "version": "1.0.1", @@ -1051,23 +1199,13 @@ "verror": { "version": "1.10.0", "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", - "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=", + "integrity": "sha512-ZZKSmDAEFOijERBLkmYfJ+vmk3w+7hOLYDNkRCuRuMJGEmqYNCNLyBBFwWKVMhfwaEF3WOd0Zlw86U/WC/+nYw==", "requires": { "assert-plus": "^1.0.0", "core-util-is": "1.0.2", "extsprintf": "^1.2.0" } }, - "when": { - "version": "3.7.8", - "resolved": "https://registry.npmjs.org/when/-/when-3.7.8.tgz", - "integrity": "sha1-xxMLan6gRpPoQs3J56Hyqjmjn4I=" - }, - "wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" - }, "xml2js": { "version": "0.4.19", "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.19.tgz", @@ -1080,7 +1218,7 @@ "xmlbuilder": { "version": "9.0.7", "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-9.0.7.tgz", - "integrity": "sha1-Ey7mPS7FVlxVfiD0wi35rKaGsQ0=" + "integrity": "sha512-7YXTQc3P2l9+0rjaUbLwMKRhtmwg1M1eDf6nag7urC7pIPYLD9W/jmzQ4ptRSUbodw5S0jfoGTflLemQibSpeQ==" } } } diff --git a/package.json b/package.json index 9cb5d795c..fd2761a59 100644 --- a/package.json +++ b/package.json @@ -7,8 +7,15 @@ "doc": "doc" }, "dependencies": { - "elasticdump": "^6.26.0", - "express": "^4.17.1" + "ansi-regex": "^6.0.1", + "async": "^3.2.2", + "aws-sdk": "^2.814.0", + "elasticdump": "^6.84.1", + "express": "^4.17.1", + "json-schema": "^0.4.0", + "minimatch": 
"^3.0.5", + "minimist": "^1.2.6", + "requestretry": "^7.0.0" }, "devDependencies": {}, "scripts": { From 300a4898474cfbfb3991a3e0ae73e7e15aefaa00 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Tue, 21 Jun 2022 05:44:10 -0500 Subject: [PATCH 050/106] fix(kubecost-setup): Added missing variable (#1951) * fix(kubecost-setup): Added missing variable * fix(kubecost-setup): Added missing variable Co-authored-by: Edward Malinowski --- gen3/bin/kube-setup-ingress.sh | 2 +- tf_files/aws/kubecost/variables.tf | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/gen3/bin/kube-setup-ingress.sh b/gen3/bin/kube-setup-ingress.sh index 26cad3bd6..bf718c29e 100644 --- a/gen3/bin/kube-setup-ingress.sh +++ b/gen3/bin/kube-setup-ingress.sh @@ -248,7 +248,7 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then EOM if ! gen3 awsrole info "$roleName" > /dev/null; then # setup role gen3_log_info "creating IAM role for ingress: $roleName, linking to sa $saName" - gen3 awsrole create "$roleName" "$saName" || return 1 + gen3 awsrole create "$roleName" "$saName" "kube-system" || return 1 aws iam put-role-policy --role-name "$roleName" --policy-document file://${ingressPolicy} --policy-name "$policyName" 1>&2 gen3 awsrole sa-annotate $saName $roleName kube-system else diff --git a/tf_files/aws/kubecost/variables.tf b/tf_files/aws/kubecost/variables.tf index 6666b43e6..786c82083 100644 --- a/tf_files/aws/kubecost/variables.tf +++ b/tf_files/aws/kubecost/variables.tf @@ -8,6 +8,10 @@ variable "parent_account_id" { default = "" } +variable "cur_s3_bucket" { + default = "" +} + # If master setup variable "slave_account_id" { From 1dd76782f575998b66f31ac401783702bfeae2c8 Mon Sep 17 00:00:00 2001 From: Atharva Rane <41084525+atharvar28@users.noreply.github.com> Date: Tue, 21 Jun 2022 16:06:13 -0400 Subject: [PATCH 051/106] patching livenessProbe (#1963) * patching livenessProbe and adding startupProbe * adjusting the time livenessProbe --- 
kube/services/portal/portal-deploy.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kube/services/portal/portal-deploy.yaml b/kube/services/portal/portal-deploy.yaml index 25ab33878..41a31b157 100644 --- a/kube/services/portal/portal-deploy.yaml +++ b/kube/services/portal/portal-deploy.yaml @@ -64,11 +64,11 @@ spec: path: /portal/ port: 80 initialDelaySeconds: 60 - periodSeconds: 60 + periodSeconds: 30 timeoutSeconds: 30 # portal sometimes takes a long time to come up ... - # has to fetch the dictionary, relay compile, etc - failureThreshold: 10 + failureThreshold: 30 resources: requests: cpu: 0.6 From 5d7a8164747350f6907c3d79428d0ce48bec839e Mon Sep 17 00:00:00 2001 From: Jing Huang <71466688+jingh8@users.noreply.github.com> Date: Fri, 24 Jun 2022 14:44:18 -0500 Subject: [PATCH 052/106] Add timeout to jenkins pipeline stages (#1980) * Add timeout to jenkins pipeline stages --- Jenkinsfile | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/Jenkinsfile b/Jenkinsfile index 54b8c6d9d..365f1ca24 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -386,6 +386,9 @@ spec: } stage('WaitForQuayBuild') { + options { + timeout(time: 30, unit: 'MINUTES') // timeout on this stage + } steps { script { try { @@ -449,6 +452,9 @@ spec: } stage('K8sReset') { + options { + timeout(time: 1, unit: 'HOURS') // timeout on this stage + } steps { script { try { @@ -531,6 +537,9 @@ spec: } stage('RunTests') { + options { + timeout(time: 3, unit: 'HOURS') // timeout on this stage + } steps { script { try { From 011f042a845ec1b5172c10bfcccf2b724d52a82f Mon Sep 17 00:00:00 2001 From: jawadqur <55899496+jawadqur@users.noreply.github.com> Date: Mon, 27 Jun 2022 11:12:32 -0500 Subject: [PATCH 053/106] Update web_wildcard_whitelist (#1981) (#1982) The data from https://www.genome.jp/kegg/catalog/org_list.html is used by the BRH notebook https://brh.data-commons.org/dashboard/Public/notebooks/BDCat_Biolincc_Framingham_study_exploration.html Co-authored-by: swatkatsrazor 
<92522789+swatkatsrazor@users.noreply.github.com> --- files/squid_whitelist/web_wildcard_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_wildcard_whitelist b/files/squid_whitelist/web_wildcard_whitelist index 3b04333b1..283a0044a 100644 --- a/files/squid_whitelist/web_wildcard_whitelist +++ b/files/squid_whitelist/web_wildcard_whitelist @@ -102,3 +102,4 @@ .nesi.org.nz .agdr.org.nz .agha.umccr.org +.genome.jp From 549357981329073c83b40799e5831e5efe73980f Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Mon, 27 Jun 2022 15:20:16 -0400 Subject: [PATCH 054/106] Added public keys for Aidan Hilt to the appropriate files. (#1979) Co-authored-by: jawadqur <55899496+jawadqur@users.noreply.github.com> --- files/authorized_keys/ops_team | 3 ++- files/authorized_keys/squid_authorized_keys_admin | 1 + files/authorized_keys/vpn_authorized_keys_admin | 1 + tf_files/aws/commons/cluster.yaml | 1 + tf_files/aws/publicvm/root.tf | 1 + 5 files changed, 6 insertions(+), 1 deletion(-) diff --git a/files/authorized_keys/ops_team b/files/authorized_keys/ops_team index bdf99df54..a55a0717e 100644 --- a/files/authorized_keys/ops_team +++ b/files/authorized_keys/ops_team @@ -1,4 +1,5 @@ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDiVYoa9i91YL17xWF5kXpYh+PPTriZMAwiJWKkEtMJvyFWGv620FmGM+PczcQN47xJJQrvXOGtt/n+tW1DP87w2rTPuvsROc4pgB7ztj1EkFC9VkeaJbW/FmWxrw2z9CTHGBoxpBgfDDLsFzi91U2dfWxRCBt639sLBfJxHFo717Xg7L7PdFmFiowgGnqfwUOJf3Rk8OixnhEA5nhdihg5gJwCVOKty8Qx73fuSOAJwKntcsqtFCaIvoj2nOjqUOrs++HG6+Fe8tGLdS67/tvvgW445Ik5JZGMpa9y0hJxmZj1ypsZv/6cZi2ohLEBCngJO6d/zfDzP48Beddv6HtL rarya_id_rsa ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDBFbx4eZLZEOTUc4d9kP8B2fg3HPA8phqJ7FKpykg87w300H8uTsupBPggxoPMPnpCKpG4aYqgKC5aHzv2TwiHyMnDN7CEtBBBDglWJpBFCheU73dDl66z/vny5tRHWs9utQNzEBPLxSqsGgZmmN8TtIxrMKZ9eX4/1d7o+8msikCYrKr170x0zXtSx5UcWj4yK1al5ZcZieZ4KVWk9/nPkD/k7Sa6JM1QxAVZObK/Y9oA6fjEFuRGdyUMxYx3hyR8ErNCM7kMf8Yn78ycNoKB5CDlLsVpPLcQlqALnBAg1XAowLduCCuOo8HlenM7TQqohB0DO9MCDyZPoiy0kieMBLBcaC7xikBXPDoV9lxgvJf1zbEdQVfWllsb1dNsuYNyMfwYRK+PttC/W37oJT64HJVWJ1O3cl63W69V1gDGUnjfayLjvbyo9llkqJetprfLhu2PfSDJ5jBlnKYnEj2+fZQb8pUrgyVOrhZJ3aKJAC3c665avfEFRDO3EV/cStzoAnHVYVpbR/EXyufYTh7Uvkej8l7g/CeQzxTq+0UovNjRA8UEXGaMWaLq1zZycc6Dx/m7HcZuNFdamM3eGWV+ZFPVBZhXHwZ1Ysq2mpBEYoMcKdoHe3EvFu3eKyrIzaqCLT5LQPfaPJaOistXBJNxDqL6vUhAtETmM5UjKGKZaQ== emalinowski@uchicago.edu ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKJR5N5VIU9qdSfCtlskzuQ7A5kNn8YPeXsoKq0HhYZSd4Aq+7gZ0tY0dFUKtXLpJsQVDTflINc7sLDDXNp3icuSMmxOeNgvBfi8WnzBxcATh3uqidPqE0hcnhVQbpsza1zk8jkOB2o8FfBdDTOSbgPESv/1dnGApfkZj96axERUCMzyyUSEmif2moWJaVv2Iv7O+xjQqIZcMXiAo5BCnTCFFKGVOphy65cOsbcE02tEloiZ3lMAPMamZGV7SMQiD3BusncnVctn/E1vDqeozItgDrTdajKqtW0Mt6JFONVFobzxS8AsqFwaHiikOZhKq2LoqgvbXZvNWH2zRELezP jawadq@Jawads-MacBook-Air.local -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC+iK0ZvY25lgwh4nNUTkD0bq2NES3cPEK+f52HEC2GSVI845ZOqX32kfNpDFT9zvspadOA6KwAgKsRphP/iV8k8WLjAYSYQ3sAE/enuW1+Cr0hhmtahA+uxOavUwsvJ93vIOlIlkD26gIUZTZeYUhi6Aa2FjWFTJ0CtxtUYEdBh+sqW3VoyVvOOA+2DnNYt7/pTrh0DwNxHX7+9TfkmRaVLD4xcdwNLx5N3Yyjgci+oGmw8HATYfSBTaGEXSKJflrN6TDqN87D2pJpMkEvYeZIktoU0kX4HwodrNfwhlruJ2PsePzZ28xlaaZz2fI/LGiqnwf1fRY10R5C/9RpcAcpcYaz305uBCUCI7GGbL9u7WC0W0NZsyaaybaKXyt97p/05os2oe/N5un0whv+NL8z5SLZnaelvttrmVKApvsCD/IqZv5b2PlDilY3L638eKmVOcHaLX/N67MeL9FKnipv2QPzaUKhMoEAtSPqdOWnlndt9dmMBlqT0BKmB85mm0k= ajoa@uchicago.edu \ No newline at end of file +ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQC+iK0ZvY25lgwh4nNUTkD0bq2NES3cPEK+f52HEC2GSVI845ZOqX32kfNpDFT9zvspadOA6KwAgKsRphP/iV8k8WLjAYSYQ3sAE/enuW1+Cr0hhmtahA+uxOavUwsvJ93vIOlIlkD26gIUZTZeYUhi6Aa2FjWFTJ0CtxtUYEdBh+sqW3VoyVvOOA+2DnNYt7/pTrh0DwNxHX7+9TfkmRaVLD4xcdwNLx5N3Yyjgci+oGmw8HATYfSBTaGEXSKJflrN6TDqN87D2pJpMkEvYeZIktoU0kX4HwodrNfwhlruJ2PsePzZ28xlaaZz2fI/LGiqnwf1fRY10R5C/9RpcAcpcYaz305uBCUCI7GGbL9u7WC0W0NZsyaaybaKXyt97p/05os2oe/N5un0whv+NL8z5SLZnaelvttrmVKApvsCD/IqZv5b2PlDilY3L638eKmVOcHaLX/N67MeL9FKnipv2QPzaUKhMoEAtSPqdOWnlndt9dmMBlqT0BKmB85mm0k= ajoa@uchicago.edu +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXzpRFVdZMdW8G4hP1or6O26zHvAj+OLxP0G8kGzCiIdmCwuycurU1MhxatPLfEmWuc31FGdhD5xDb2taHEwdTcMTHaqGa/K03NDFm2Ary7FeVEuBuur1jSWHIq+o4wp9vtsAfBLmV89yxZU40BHauac5uhXcHXfQ7OeZBVZhkCdUcN2l21H44b6V3RAqqxaf4dOiapTd8YbMHMJmyeu5rMFbbW9zS8lXule4pNTREXfh3Zt9MYPZnZ2aV/hQV28KRRjWJnMXuPQxSqEKVDsVbKT9Hu0Re9I8cQLEakNQV5G5c0YDuQjzXL8rEiYKm2AEHlpri/IkOiKu0gKeyZDVTJjW1/n0fCYlcjOJ9AB5wlM6CtsdwBC4spN85E2oJrfvmKIMnRdqSQnLe+w/DyyaZJsMgvXjItB5tysOZI2BkM2Z2cQ3XFK91gwxEUVQHlbvWBI7Nl2VEykQ5O8HdcPnKPcspnOByJMFfdvbh6HXlrstPOuNm2dyw+CUIMlQpa0nEEs/fyB+PoeRYMPs6VNA1syOpH70EXslyfDiN+5eH3asUohvbe4fOjF1jyviQEYXZ2mSbL+D/5sw4x9uWpg/oa+DzWX7ACBUt+ZEwF7xMWIO2O48HWokUrshNB1ksfK+tBXf6mL9SDlxzPYfcR2INRQgllidhPZIqVHoD57HUSw== ahilt@aidans-mbp.lan \ No newline at end of file diff --git a/files/authorized_keys/squid_authorized_keys_admin b/files/authorized_keys/squid_authorized_keys_admin index 3208bd50e..251eedbdb 100644 --- a/files/authorized_keys/squid_authorized_keys_admin +++ b/files/authorized_keys/squid_authorized_keys_admin @@ -4,3 +4,4 @@ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDBFbx4eZLZEOTUc4d9kP8B2fg3HPA8phqJ7FKpykg8 ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDKJR5N5VIU9qdSfCtlskzuQ7A5kNn8YPeXsoKq0HhYZSd4Aq+7gZ0tY0dFUKtXLpJsQVDTflINc7sLDDXNp3icuSMmxOeNgvBfi8WnzBxcATh3uqidPqE0hcnhVQbpsza1zk8jkOB2o8FfBdDTOSbgPESv/1dnGApfkZj96axERUCMzyyUSEmif2moWJaVv2Iv7O+xjQqIZcMXiAo5BCnTCFFKGVOphy65cOsbcE02tEloiZ3lMAPMamZGV7SMQiD3BusncnVctn/E1vDqeozItgDrTdajKqtW0Mt6JFONVFobzxS8AsqFwaHiikOZhKq2LoqgvbXZvNWH2zRELezP jawadq@Jawads-MacBook-Air.local ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC+iK0ZvY25lgwh4nNUTkD0bq2NES3cPEK+f52HEC2GSVI845ZOqX32kfNpDFT9zvspadOA6KwAgKsRphP/iV8k8WLjAYSYQ3sAE/enuW1+Cr0hhmtahA+uxOavUwsvJ93vIOlIlkD26gIUZTZeYUhi6Aa2FjWFTJ0CtxtUYEdBh+sqW3VoyVvOOA+2DnNYt7/pTrh0DwNxHX7+9TfkmRaVLD4xcdwNLx5N3Yyjgci+oGmw8HATYfSBTaGEXSKJflrN6TDqN87D2pJpMkEvYeZIktoU0kX4HwodrNfwhlruJ2PsePzZ28xlaaZz2fI/LGiqnwf1fRY10R5C/9RpcAcpcYaz305uBCUCI7GGbL9u7WC0W0NZsyaaybaKXyt97p/05os2oe/N5un0whv+NL8z5SLZnaelvttrmVKApvsCD/IqZv5b2PlDilY3L638eKmVOcHaLX/N67MeL9FKnipv2QPzaUKhMoEAtSPqdOWnlndt9dmMBlqT0BKmB85mm0k= ajoa@uchicago.edu ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCdIXKLMs14c8U9exX/sOoIcvOCZ4v2pKsjdM1VBA56GyI98E1R+hxTBecHeWri9MeQcZkrlmjqT3ZzCb87+n0W2LEWquLNfeheAEq61ogi0taxWEpnb4rIAr1U9aS3d0mk5NIIivrwaUHTIvUhH8mn4Pek0GgybZAsjN/MpZ9PZwUtXNmjZoY5gWR0QO4ZWu7ARknFoNcTXwpWyl/Khhal0KKhdB38y3MpJc03IIqhem15e78jRlko04CAZX3zlFAQwbxnrpgrJUMYeY8fZqpV6FiWC40yu+n9KwAZkmtrc45mkxahj8c3QtJ/Z3t33yXEN9PEHV6z104STYi2cPVD rpollard@news-MacBook-Pro.local +ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDXzpRFVdZMdW8G4hP1or6O26zHvAj+OLxP0G8kGzCiIdmCwuycurU1MhxatPLfEmWuc31FGdhD5xDb2taHEwdTcMTHaqGa/K03NDFm2Ary7FeVEuBuur1jSWHIq+o4wp9vtsAfBLmV89yxZU40BHauac5uhXcHXfQ7OeZBVZhkCdUcN2l21H44b6V3RAqqxaf4dOiapTd8YbMHMJmyeu5rMFbbW9zS8lXule4pNTREXfh3Zt9MYPZnZ2aV/hQV28KRRjWJnMXuPQxSqEKVDsVbKT9Hu0Re9I8cQLEakNQV5G5c0YDuQjzXL8rEiYKm2AEHlpri/IkOiKu0gKeyZDVTJjW1/n0fCYlcjOJ9AB5wlM6CtsdwBC4spN85E2oJrfvmKIMnRdqSQnLe+w/DyyaZJsMgvXjItB5tysOZI2BkM2Z2cQ3XFK91gwxEUVQHlbvWBI7Nl2VEykQ5O8HdcPnKPcspnOByJMFfdvbh6HXlrstPOuNm2dyw+CUIMlQpa0nEEs/fyB+PoeRYMPs6VNA1syOpH70EXslyfDiN+5eH3asUohvbe4fOjF1jyviQEYXZ2mSbL+D/5sw4x9uWpg/oa+DzWX7ACBUt+ZEwF7xMWIO2O48HWokUrshNB1ksfK+tBXf6mL9SDlxzPYfcR2INRQgllidhPZIqVHoD57HUSw== ahilt@aidans-mbp.lan \ No newline at end of file diff --git a/files/authorized_keys/vpn_authorized_keys_admin b/files/authorized_keys/vpn_authorized_keys_admin index 8583ff198..a55a0717e 100644 --- a/files/authorized_keys/vpn_authorized_keys_admin +++ b/files/authorized_keys/vpn_authorized_keys_admin @@ -2,3 +2,4 @@ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDiVYoa9i91YL17xWF5kXpYh+PPTriZMAwiJWKkEtMJ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDBFbx4eZLZEOTUc4d9kP8B2fg3HPA8phqJ7FKpykg87w300H8uTsupBPggxoPMPnpCKpG4aYqgKC5aHzv2TwiHyMnDN7CEtBBBDglWJpBFCheU73dDl66z/vny5tRHWs9utQNzEBPLxSqsGgZmmN8TtIxrMKZ9eX4/1d7o+8msikCYrKr170x0zXtSx5UcWj4yK1al5ZcZieZ4KVWk9/nPkD/k7Sa6JM1QxAVZObK/Y9oA6fjEFuRGdyUMxYx3hyR8ErNCM7kMf8Yn78ycNoKB5CDlLsVpPLcQlqALnBAg1XAowLduCCuOo8HlenM7TQqohB0DO9MCDyZPoiy0kieMBLBcaC7xikBXPDoV9lxgvJf1zbEdQVfWllsb1dNsuYNyMfwYRK+PttC/W37oJT64HJVWJ1O3cl63W69V1gDGUnjfayLjvbyo9llkqJetprfLhu2PfSDJ5jBlnKYnEj2+fZQb8pUrgyVOrhZJ3aKJAC3c665avfEFRDO3EV/cStzoAnHVYVpbR/EXyufYTh7Uvkej8l7g/CeQzxTq+0UovNjRA8UEXGaMWaLq1zZycc6Dx/m7HcZuNFdamM3eGWV+ZFPVBZhXHwZ1Ysq2mpBEYoMcKdoHe3EvFu3eKyrIzaqCLT5LQPfaPJaOistXBJNxDqL6vUhAtETmM5UjKGKZaQ== emalinowski@uchicago.edu ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDKJR5N5VIU9qdSfCtlskzuQ7A5kNn8YPeXsoKq0HhYZSd4Aq+7gZ0tY0dFUKtXLpJsQVDTflINc7sLDDXNp3icuSMmxOeNgvBfi8WnzBxcATh3uqidPqE0hcnhVQbpsza1zk8jkOB2o8FfBdDTOSbgPESv/1dnGApfkZj96axERUCMzyyUSEmif2moWJaVv2Iv7O+xjQqIZcMXiAo5BCnTCFFKGVOphy65cOsbcE02tEloiZ3lMAPMamZGV7SMQiD3BusncnVctn/E1vDqeozItgDrTdajKqtW0Mt6JFONVFobzxS8AsqFwaHiikOZhKq2LoqgvbXZvNWH2zRELezP jawadq@Jawads-MacBook-Air.local ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC+iK0ZvY25lgwh4nNUTkD0bq2NES3cPEK+f52HEC2GSVI845ZOqX32kfNpDFT9zvspadOA6KwAgKsRphP/iV8k8WLjAYSYQ3sAE/enuW1+Cr0hhmtahA+uxOavUwsvJ93vIOlIlkD26gIUZTZeYUhi6Aa2FjWFTJ0CtxtUYEdBh+sqW3VoyVvOOA+2DnNYt7/pTrh0DwNxHX7+9TfkmRaVLD4xcdwNLx5N3Yyjgci+oGmw8HATYfSBTaGEXSKJflrN6TDqN87D2pJpMkEvYeZIktoU0kX4HwodrNfwhlruJ2PsePzZ28xlaaZz2fI/LGiqnwf1fRY10R5C/9RpcAcpcYaz305uBCUCI7GGbL9u7WC0W0NZsyaaybaKXyt97p/05os2oe/N5un0whv+NL8z5SLZnaelvttrmVKApvsCD/IqZv5b2PlDilY3L638eKmVOcHaLX/N67MeL9FKnipv2QPzaUKhMoEAtSPqdOWnlndt9dmMBlqT0BKmB85mm0k= ajoa@uchicago.edu +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXzpRFVdZMdW8G4hP1or6O26zHvAj+OLxP0G8kGzCiIdmCwuycurU1MhxatPLfEmWuc31FGdhD5xDb2taHEwdTcMTHaqGa/K03NDFm2Ary7FeVEuBuur1jSWHIq+o4wp9vtsAfBLmV89yxZU40BHauac5uhXcHXfQ7OeZBVZhkCdUcN2l21H44b6V3RAqqxaf4dOiapTd8YbMHMJmyeu5rMFbbW9zS8lXule4pNTREXfh3Zt9MYPZnZ2aV/hQV28KRRjWJnMXuPQxSqEKVDsVbKT9Hu0Re9I8cQLEakNQV5G5c0YDuQjzXL8rEiYKm2AEHlpri/IkOiKu0gKeyZDVTJjW1/n0fCYlcjOJ9AB5wlM6CtsdwBC4spN85E2oJrfvmKIMnRdqSQnLe+w/DyyaZJsMgvXjItB5tysOZI2BkM2Z2cQ3XFK91gwxEUVQHlbvWBI7Nl2VEykQ5O8HdcPnKPcspnOByJMFfdvbh6HXlrstPOuNm2dyw+CUIMlQpa0nEEs/fyB+PoeRYMPs6VNA1syOpH70EXslyfDiN+5eH3asUohvbe4fOjF1jyviQEYXZ2mSbL+D/5sw4x9uWpg/oa+DzWX7ACBUt+ZEwF7xMWIO2O48HWokUrshNB1ksfK+tBXf6mL9SDlxzPYfcR2INRQgllidhPZIqVHoD57HUSw== ahilt@aidans-mbp.lan \ No newline at end of file diff --git a/tf_files/aws/commons/cluster.yaml b/tf_files/aws/commons/cluster.yaml index 0f25ccf0c..811bbdc32 100644 --- a/tf_files/aws/commons/cluster.yaml +++ b/tf_files/aws/commons/cluster.yaml @@ -8,6 +8,7 @@ sshAuthorizedKeys: - ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDKJR5N5VIU9qdSfCtlskzuQ7A5kNn8YPeXsoKq0HhYZSd4Aq+7gZ0tY0dFUKtXLpJsQVDTflINc7sLDDXNp3icuSMmxOeNgvBfi8WnzBxcATh3uqidPqE0hcnhVQbpsza1zk8jkOB2o8FfBdDTOSbgPESv/1dnGApfkZj96axERUCMzyyUSEmif2moWJaVv2Iv7O+xjQqIZcMXiAo5BCnTCFFKGVOphy65cOsbcE02tEloiZ3lMAPMamZGV7SMQiD3BusncnVctn/E1vDqeozItgDrTdajKqtW0Mt6JFONVFobzxS8AsqFwaHiikOZhKq2LoqgvbXZvNWH2zRELezP jawadq@Jawads-MacBook-Air.local - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC+iK0ZvY25lgwh4nNUTkD0bq2NES3cPEK+f52HEC2GSVI845ZOqX32kfNpDFT9zvspadOA6KwAgKsRphP/iV8k8WLjAYSYQ3sAE/enuW1+Cr0hhmtahA+uxOavUwsvJ93vIOlIlkD26gIUZTZeYUhi6Aa2FjWFTJ0CtxtUYEdBh+sqW3VoyVvOOA+2DnNYt7/pTrh0DwNxHX7+9TfkmRaVLD4xcdwNLx5N3Yyjgci+oGmw8HATYfSBTaGEXSKJflrN6TDqN87D2pJpMkEvYeZIktoU0kX4HwodrNfwhlruJ2PsePzZ28xlaaZz2fI/LGiqnwf1fRY10R5C/9RpcAcpcYaz305uBCUCI7GGbL9u7WC0W0NZsyaaybaKXyt97p/05os2oe/N5un0whv+NL8z5SLZnaelvttrmVKApvsCD/IqZv5b2PlDilY3L638eKmVOcHaLX/N67MeL9FKnipv2QPzaUKhMoEAtSPqdOWnlndt9dmMBlqT0BKmB85mm0k= ajoa@uchicago.edu - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCdIXKLMs14c8U9exX/sOoIcvOCZ4v2pKsjdM1VBA56GyI98E1R+hxTBecHeWri9MeQcZkrlmjqT3ZzCb87+n0W2LEWquLNfeheAEq61ogi0taxWEpnb4rIAr1U9aS3d0mk5NIIivrwaUHTIvUhH8mn4Pek0GgybZAsjN/MpZ9PZwUtXNmjZoY5gWR0QO4ZWu7ARknFoNcTXwpWyl/Khhal0KKhdB38y3MpJc03IIqhem15e78jRlko04CAZX3zlFAQwbxnrpgrJUMYeY8fZqpV6FiWC40yu+n9KwAZkmtrc45mkxahj8c3QtJ/Z3t33yXEN9PEHV6z104STYi2cPVD rpollard@news-MacBook-Pro.local + - ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDXzpRFVdZMdW8G4hP1or6O26zHvAj+OLxP0G8kGzCiIdmCwuycurU1MhxatPLfEmWuc31FGdhD5xDb2taHEwdTcMTHaqGa/K03NDFm2Ary7FeVEuBuur1jSWHIq+o4wp9vtsAfBLmV89yxZU40BHauac5uhXcHXfQ7OeZBVZhkCdUcN2l21H44b6V3RAqqxaf4dOiapTd8YbMHMJmyeu5rMFbbW9zS8lXule4pNTREXfh3Zt9MYPZnZ2aV/hQV28KRRjWJnMXuPQxSqEKVDsVbKT9Hu0Re9I8cQLEakNQV5G5c0YDuQjzXL8rEiYKm2AEHlpri/IkOiKu0gKeyZDVTJjW1/n0fCYlcjOJ9AB5wlM6CtsdwBC4spN85E2oJrfvmKIMnRdqSQnLe+w/DyyaZJsMgvXjItB5tysOZI2BkM2Z2cQ3XFK91gwxEUVQHlbvWBI7Nl2VEykQ5O8HdcPnKPcspnOByJMFfdvbh6HXlrstPOuNm2dyw+CUIMlQpa0nEEs/fyB+PoeRYMPs6VNA1syOpH70EXslyfDiN+5eH3asUohvbe4fOjF1jyviQEYXZ2mSbL+D/5sw4x9uWpg/oa+DzWX7ACBUt+ZEwF7xMWIO2O48HWokUrshNB1ksfK+tBXf6mL9SDlxzPYfcR2INRQgllidhPZIqVHoD57HUSw== ahilt@aidans-mbp.lan region: ${aws_region} kmsKeyArn: "${kms_key}" apiEndpoints: diff --git a/tf_files/aws/publicvm/root.tf b/tf_files/aws/publicvm/root.tf index 7400722e9..d51f4663f 100644 --- a/tf_files/aws/publicvm/root.tf +++ b/tf_files/aws/publicvm/root.tf @@ -121,6 +121,7 @@ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDBFbx4eZLZEOTUc4d9kP8B2fg3HPA8phqJ7FKpykg8 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKJR5N5VIU9qdSfCtlskzuQ7A5kNn8YPeXsoKq0HhYZSd4Aq+7gZ0tY0dFUKtXLpJsQVDTflINc7sLDDXNp3icuSMmxOeNgvBfi8WnzBxcATh3uqidPqE0hcnhVQbpsza1zk8jkOB2o8FfBdDTOSbgPESv/1dnGApfkZj96axERUCMzyyUSEmif2moWJaVv2Iv7O+xjQqIZcMXiAo5BCnTCFFKGVOphy65cOsbcE02tEloiZ3lMAPMamZGV7SMQiD3BusncnVctn/E1vDqeozItgDrTdajKqtW0Mt6JFONVFobzxS8AsqFwaHiikOZhKq2LoqgvbXZvNWH2zRELezP jawadq@Jawads-MacBook-Air.local ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQC+iK0ZvY25lgwh4nNUTkD0bq2NES3cPEK+f52HEC2GSVI845ZOqX32kfNpDFT9zvspadOA6KwAgKsRphP/iV8k8WLjAYSYQ3sAE/enuW1+Cr0hhmtahA+uxOavUwsvJ93vIOlIlkD26gIUZTZeYUhi6Aa2FjWFTJ0CtxtUYEdBh+sqW3VoyVvOOA+2DnNYt7/pTrh0DwNxHX7+9TfkmRaVLD4xcdwNLx5N3Yyjgci+oGmw8HATYfSBTaGEXSKJflrN6TDqN87D2pJpMkEvYeZIktoU0kX4HwodrNfwhlruJ2PsePzZ28xlaaZz2fI/LGiqnwf1fRY10R5C/9RpcAcpcYaz305uBCUCI7GGbL9u7WC0W0NZsyaaybaKXyt97p/05os2oe/N5un0whv+NL8z5SLZnaelvttrmVKApvsCD/IqZv5b2PlDilY3L638eKmVOcHaLX/N67MeL9FKnipv2QPzaUKhMoEAtSPqdOWnlndt9dmMBlqT0BKmB85mm0k= ajoa@uchicago.edu ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCdIXKLMs14c8U9exX/sOoIcvOCZ4v2pKsjdM1VBA56GyI98E1R+hxTBecHeWri9MeQcZkrlmjqT3ZzCb87+n0W2LEWquLNfeheAEq61ogi0taxWEpnb4rIAr1U9aS3d0mk5NIIivrwaUHTIvUhH8mn4Pek0GgybZAsjN/MpZ9PZwUtXNmjZoY5gWR0QO4ZWu7ARknFoNcTXwpWyl/Khhal0KKhdB38y3MpJc03IIqhem15e78jRlko04CAZX3zlFAQwbxnrpgrJUMYeY8fZqpV6FiWC40yu+n9KwAZkmtrc45mkxahj8c3QtJ/Z3t33yXEN9PEHV6z104STYi2cPVD rpollard@news-MacBook-Pro.local +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXzpRFVdZMdW8G4hP1or6O26zHvAj+OLxP0G8kGzCiIdmCwuycurU1MhxatPLfEmWuc31FGdhD5xDb2taHEwdTcMTHaqGa/K03NDFm2Ary7FeVEuBuur1jSWHIq+o4wp9vtsAfBLmV89yxZU40BHauac5uhXcHXfQ7OeZBVZhkCdUcN2l21H44b6V3RAqqxaf4dOiapTd8YbMHMJmyeu5rMFbbW9zS8lXule4pNTREXfh3Zt9MYPZnZ2aV/hQV28KRRjWJnMXuPQxSqEKVDsVbKT9Hu0Re9I8cQLEakNQV5G5c0YDuQjzXL8rEiYKm2AEHlpri/IkOiKu0gKeyZDVTJjW1/n0fCYlcjOJ9AB5wlM6CtsdwBC4spN85E2oJrfvmKIMnRdqSQnLe+w/DyyaZJsMgvXjItB5tysOZI2BkM2Z2cQ3XFK91gwxEUVQHlbvWBI7Nl2VEykQ5O8HdcPnKPcspnOByJMFfdvbh6HXlrstPOuNm2dyw+CUIMlQpa0nEEs/fyB+PoeRYMPs6VNA1syOpH70EXslyfDiN+5eH3asUohvbe4fOjF1jyviQEYXZ2mSbL+D/5sw4x9uWpg/oa+DzWX7ACBUt+ZEwF7xMWIO2O48HWokUrshNB1ksfK+tBXf6mL9SDlxzPYfcR2INRQgllidhPZIqVHoD57HUSw== ahilt@aidans-mbp.lan EOM ) ( From 62d501b93db9313d61769c85f20b9d55c5772c82 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Mon, 27 Jun 2022 17:24:27 -0500 Subject: [PATCH 055/106] feat: base for Apache Superset deployment (#1926) * feat: base for Apache Superset deployment * Initial 
kube-setup-superset script * Add ingress and deploy files * Fix networkpolicies * remove namespace * Add ingress * feat(superset): Added netnolimit netpolicy * feat(superset): Updated revproxy netpolicy to add superset app/port * feat(superset): Added superset network policies * feat: read previous value of SECRET key for proper migration * fix: no need for indentation * fix: extra superset configuration * feat: add redis netpolicy * fix: reuse SECRET_KEY * fix: no need for extra variable Co-authored-by: Jawad Qureshi Co-authored-by: Edward Malinowski --- .secrets.baseline | 418 +++++++++++++- gen3/bin/kube-setup-superset.sh | 135 +++++ .../gen3/services/revproxy_netpolicy.yaml | 4 + .../services/superset-redis_netpolicy.yaml | 42 ++ .../gen3/services/superset_netpolicy.yaml | 42 ++ kube/services/superset/superset-deploy.yaml | 429 ++++++++++++++ kube/services/superset/superset-ingress.yaml | 25 + kube/services/superset/superset-redis.yaml | 368 ++++++++++++ .../superset/superset-secrets-template.yaml | 42 ++ kube/services/superset/values.yaml | 532 ++++++++++++++++++ 10 files changed, 2033 insertions(+), 4 deletions(-) create mode 100644 gen3/bin/kube-setup-superset.sh create mode 100644 kube/services/netpolicy/gen3/services/superset-redis_netpolicy.yaml create mode 100644 kube/services/netpolicy/gen3/services/superset_netpolicy.yaml create mode 100644 kube/services/superset/superset-deploy.yaml create mode 100644 kube/services/superset/superset-ingress.yaml create mode 100644 kube/services/superset/superset-redis.yaml create mode 100644 kube/services/superset/superset-secrets-template.yaml create mode 100644 kube/services/superset/values.yaml diff --git a/.secrets.baseline b/.secrets.baseline index 3bc358cfb..605b7e518 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2022-05-15T00:10:09Z", + "generated_at": "2022-06-21T21:12:27Z", "plugins_used": [ { "name": "AWSKeyDetector" 
@@ -61,12 +61,14 @@ "Chef/repo/data_bags/README.md": [ { "hashed_secret": "8a9250639e092d90f164792e35073a9395bff366", + "is_secret": false, "is_verified": false, "line_number": 45, "type": "Secret Keyword" }, { "hashed_secret": "6367c48dd193d56ea7b0baad25b19455e529f5ee", + "is_secret": false, "is_verified": false, "line_number": 51, "type": "Secret Keyword" @@ -75,6 +77,7 @@ "Docker/Jenkins-CI-Worker/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", + "is_secret": false, "is_verified": false, "line_number": 124, "type": "Secret Keyword" @@ -83,6 +86,7 @@ "Docker/Jenkins-Worker/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", + "is_secret": false, "is_verified": false, "line_number": 138, "type": "Secret Keyword" @@ -91,6 +95,16 @@ "Docker/Jenkins/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", + "is_secret": false, + "is_verified": false, + "line_number": 113, + "type": "Secret Keyword" + } + ], + "Docker/Jenkins2/Dockerfile": [ + { + "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", + "is_secret": false, "is_verified": false, "line_number": 113, "type": "Secret Keyword" @@ -99,6 +113,7 @@ "Docker/sidecar/service.key": [ { "hashed_secret": "1348b145fa1a555461c1b790a2f66614781091e9", + "is_secret": false, "is_verified": false, "line_number": 1, "type": "Private Key" @@ -107,6 +122,7 @@ "Jenkins/Stacks/Jenkins/jenkins.env.sample": [ { "hashed_secret": "eecee33686ac5861c2a7edc8b46bd0e5432bfddd", + "is_secret": false, "is_verified": false, "line_number": 5, "type": "Secret Keyword" @@ -115,6 +131,7 @@ "ansible/roles/awslogs/defaults/main.yaml": [ { "hashed_secret": "9d4e1e23bd5b727046a9e3b4b7db57bd8d6ee684", + "is_secret": false, "is_verified": false, "line_number": 30, "type": "Basic Auth Credentials" @@ -123,12 +140,14 @@ "ansible/roles/slurm/README.md": [ { "hashed_secret": "4acfde1ff9c353ba2ef0dbe0df73bda2743cba42", + "is_secret": false, "is_verified": false, 
"line_number": 86, "type": "Base64 High Entropy String" }, { "hashed_secret": "579649582303921502d9e6d3f8755f13fdd2b476", + "is_secret": false, "is_verified": false, "line_number": 86, "type": "Secret Keyword" @@ -137,6 +156,7 @@ "apis_configs/config_helper.py": [ { "hashed_secret": "bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f", + "is_secret": false, "is_verified": false, "line_number": 66, "type": "Basic Auth Credentials" @@ -145,6 +165,7 @@ "apis_configs/fence_credentials.json": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", + "is_secret": false, "is_verified": false, "line_number": 23, "type": "Secret Keyword" @@ -153,18 +174,21 @@ "apis_configs/fence_settings.py": [ { "hashed_secret": "3ef0fb8a603abdc0b6caac44a23fdc6792f77ddf", + "is_secret": false, "is_verified": false, "line_number": 6, "type": "Basic Auth Credentials" }, { "hashed_secret": "b60d121b438a380c343d5ec3c2037564b82ffef3", + "is_secret": false, "is_verified": false, "line_number": 58, "type": "Secret Keyword" }, { "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", + "is_secret": false, "is_verified": false, "line_number": 80, "type": "Basic Auth Credentials" @@ -173,6 +197,7 @@ "apis_configs/indexd_settings.py": [ { "hashed_secret": "0a0d18c85e096611b5685b62bc60ec534d19bacc", + "is_secret": false, "is_verified": false, "line_number": 59, "type": "Basic Auth Credentials" @@ -181,6 +206,7 @@ "apis_configs/peregrine_settings.py": [ { "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", + "is_secret": false, "is_verified": false, "line_number": 46, "type": "Basic Auth Credentials" @@ -189,6 +215,7 @@ "apis_configs/sheepdog_settings.py": [ { "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", + "is_secret": false, "is_verified": false, "line_number": 46, "type": "Basic Auth Credentials" @@ -197,6 +224,7 @@ "doc/Gen3-data-upload.md": [ { "hashed_secret": "b8bd20d4a2701dc3aba0efbbf325f1359392d93e", + "is_secret": false, "is_verified": false, "line_number": 26, 
"type": "Secret Keyword" @@ -205,6 +233,7 @@ "doc/api.md": [ { "hashed_secret": "625de83a7517422051911680cc803921ff99db90", + "is_secret": false, "is_verified": false, "line_number": 47, "type": "Hex High Entropy String" @@ -213,24 +242,28 @@ "doc/gen3OnK8s.md": [ { "hashed_secret": "2db6d21d365f544f7ca3bcfb443ac96898a7a069", + "is_secret": false, "is_verified": false, "line_number": 113, "type": "Secret Keyword" }, { "hashed_secret": "ff9ee043d85595eb255c05dfe32ece02a53efbb2", + "is_secret": false, "is_verified": false, "line_number": 143, "type": "Secret Keyword" }, { "hashed_secret": "70374248fd7129088fef42b8f568443f6dce3a48", + "is_secret": false, "is_verified": false, "line_number": 170, "type": "Secret Keyword" }, { "hashed_secret": "bcf22dfc6fb76b7366b1f1675baf2332a0e6a7ce", + "is_secret": false, "is_verified": false, "line_number": 189, "type": "Secret Keyword" @@ -239,6 +272,7 @@ "doc/kube-setup-data-ingestion-job.md": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", + "is_secret": false, "is_verified": false, "line_number": 30, "type": "Secret Keyword" @@ -247,6 +281,7 @@ "doc/logs.md": [ { "hashed_secret": "9addbf544119efa4a64223b649750a510f0d463f", + "is_secret": false, "is_verified": false, "line_number": 6, "type": "Secret Keyword" @@ -255,6 +290,7 @@ "doc/slurm_cluster.md": [ { "hashed_secret": "2ace62c1befa19e3ea37dd52be9f6d508c5163e6", + "is_secret": false, "is_verified": false, "line_number": 184, "type": "Secret Keyword" @@ -263,12 +299,14 @@ "files/dashboard/usage-reports/package-lock.json": [ { "hashed_secret": "65ecd0650541b6caecdb6986f1871c2e6a95bdfe", + "is_secret": false, "is_verified": false, "line_number": 10, "type": "Base64 High Entropy String" }, { "hashed_secret": "e35a49e53bb97044b35cc0e4d963b4ac49e9ac7e", + "is_secret": false, "is_verified": false, "line_number": 15, "type": "Base64 High Entropy String" @@ -277,12 +315,14 @@ "gen3/bin/api.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + 
"is_secret": false, "is_verified": false, "line_number": 407, "type": "Secret Keyword" }, { "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a", + "is_secret": false, "is_verified": false, "line_number": 477, "type": "Secret Keyword" @@ -291,6 +331,7 @@ "gen3/bin/kube-dev-namespace.sh": [ { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "is_secret": false, "is_verified": false, "line_number": 135, "type": "Secret Keyword" @@ -299,6 +340,7 @@ "gen3/bin/kube-setup-argo.sh": [ { "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", + "is_secret": false, "is_verified": false, "line_number": 182, "type": "Secret Keyword" @@ -307,6 +349,7 @@ "gen3/bin/kube-setup-certs.sh": [ { "hashed_secret": "2e9ee120fd25e31048598693aca91d5473898a99", + "is_secret": false, "is_verified": false, "line_number": 50, "type": "Secret Keyword" @@ -315,12 +358,14 @@ "gen3/bin/kube-setup-dashboard.sh": [ { "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", + "is_secret": false, "is_verified": false, "line_number": 40, "type": "Secret Keyword" }, { "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a", + "is_secret": false, "is_verified": false, "line_number": 41, "type": "Secret Keyword" @@ -329,12 +374,14 @@ "gen3/bin/kube-setup-data-ingestion-job.sh": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", + "is_secret": false, "is_verified": false, "line_number": 37, "type": "Secret Keyword" }, { "hashed_secret": "8695a632956b1b0ea7b66993dcc98732da39148c", + "is_secret": false, "is_verified": false, "line_number": 102, "type": "Secret Keyword" @@ -343,6 +390,7 @@ "gen3/bin/kube-setup-dicom-server.sh": [ { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "is_secret": false, "is_verified": false, "line_number": 43, "type": "Secret Keyword" @@ -351,12 +399,14 @@ "gen3/bin/kube-setup-jenkins.sh": [ { "hashed_secret": "05ea760643a5c0a9bacb3544dc844ac79938a51f", + "is_secret": false, "is_verified": false, "line_number": 18, 
"type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "is_secret": false, "is_verified": false, "line_number": 22, "type": "Secret Keyword" @@ -365,6 +415,7 @@ "gen3/bin/kube-setup-metadata.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "is_secret": false, "is_verified": false, "line_number": 35, "type": "Secret Keyword" @@ -373,18 +424,21 @@ "gen3/bin/kube-setup-revproxy.sh": [ { "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", + "is_secret": false, "is_verified": false, "line_number": 32, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "is_secret": false, "is_verified": false, "line_number": 49, "type": "Secret Keyword" }, { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "is_secret": false, "is_verified": false, "line_number": 51, "type": "Secret Keyword" @@ -393,18 +447,21 @@ "gen3/bin/kube-setup-secrets.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "is_secret": false, "is_verified": false, "line_number": 79, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "is_secret": false, "is_verified": false, "line_number": 82, "type": "Secret Keyword" }, { "hashed_secret": "6f7531b95bbc99ac25a5cc82edb825f319c5dee8", + "is_secret": false, "is_verified": false, "line_number": 95, "type": "Secret Keyword" @@ -413,12 +470,14 @@ "gen3/bin/kube-setup-sftp.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "is_secret": false, "is_verified": false, "line_number": 36, "type": "Secret Keyword" }, { "hashed_secret": "83d11e3aec005a3b9a2077c6800683e202a95af4", + "is_secret": false, "is_verified": false, "line_number": 51, "type": "Secret Keyword" @@ -427,6 +486,7 @@ "gen3/bin/kube-setup-sheepdog.sh": [ { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "is_secret": false, "is_verified": false, "line_number": 33, "type": "Secret Keyword" @@ 
-435,24 +495,28 @@ "gen3/bin/kube-setup-sower-jobs.sh": [ { "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", + "is_secret": false, "is_verified": false, "line_number": 25, "type": "Secret Keyword" }, { "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a", + "is_secret": false, "is_verified": false, "line_number": 26, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "is_secret": false, "is_verified": false, "line_number": 120, "type": "Secret Keyword" }, { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "is_secret": false, "is_verified": false, "line_number": 122, "type": "Secret Keyword" @@ -461,18 +525,21 @@ "gen3/bin/kube-setup-ssjdispatcher.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "is_secret": false, "is_verified": false, "line_number": 117, "type": "Secret Keyword" }, { "hashed_secret": "7992309146efaa8da936e34b0bd33242cd0e9f93", + "is_secret": false, "is_verified": false, "line_number": 184, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "is_secret": false, "is_verified": false, "line_number": 197, "type": "Secret Keyword" @@ -481,12 +548,14 @@ "gen3/lib/aws.sh": [ { "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d", + "is_secret": false, "is_verified": false, "line_number": 550, "type": "Secret Keyword" }, { "hashed_secret": "5b4b6c62d3d99d202f095c38c664eded8f640ce8", + "is_secret": false, "is_verified": false, "line_number": 570, "type": "Secret Keyword" @@ -495,12 +564,14 @@ "gen3/lib/bootstrap/templates/Gen3Secrets/apis_configs/fence-config.yaml": [ { "hashed_secret": "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3", + "is_secret": false, "is_verified": false, "line_number": 33, "type": "Basic Auth Credentials" }, { "hashed_secret": "5d07e1b80e448a213b392049888111e1779a52db", + "is_secret": false, "is_verified": false, "line_number": 286, "type": "Secret Keyword" @@ -509,6 +580,7 @@ 
"gen3/lib/bootstrap/templates/Gen3Secrets/creds.json": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", + "is_secret": false, "is_verified": false, "line_number": 26, "type": "Secret Keyword" @@ -517,6 +589,7 @@ "gen3/lib/bootstrap/templates/Gen3Secrets/g3auto/dbfarm/servers.json": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", + "is_secret": false, "is_verified": false, "line_number": 5, "type": "Secret Keyword" @@ -525,6 +598,7 @@ "gen3/lib/logs/utils.sh": [ { "hashed_secret": "76143b4ffc8aa2a53f9700ce229f904e69f1e8b5", + "is_secret": false, "is_verified": false, "line_number": 3, "type": "Secret Keyword" @@ -533,6 +607,7 @@ "gen3/lib/manifestDefaults/hatchery/hatchery.json": [ { "hashed_secret": "0da0e0005ca04acb407af2681d0bede6d9406039", + "is_secret": false, "is_verified": false, "line_number": 78, "type": "Secret Keyword" @@ -541,12 +616,14 @@ "gen3/lib/onprem.sh": [ { "hashed_secret": "29e52a9bac8f274fa41c51fce9c98eba0dd99cb3", + "is_secret": false, "is_verified": false, "line_number": 68, "type": "Secret Keyword" }, { "hashed_secret": "50f013532a9770a2c2cfdc38b7581dd01df69b70", + "is_secret": false, "is_verified": false, "line_number": 84, "type": "Secret Keyword" @@ -555,12 +632,14 @@ "gen3/lib/secrets/rotate-postgres.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "is_secret": false, "is_verified": false, "line_number": 162, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "is_secret": false, "is_verified": false, "line_number": 250, "type": "Secret Keyword" @@ -569,42 +648,49 @@ "gen3/lib/testData/etlconvert/expected2.yaml": [ { "hashed_secret": "fe54e5e937d642307ec155b47ac8a214cb40d474", + "is_secret": false, "is_verified": false, "line_number": 10, "type": "Base64 High Entropy String" }, { "hashed_secret": "cea0e701e53c42bede2212b22f58f9ff8324da55", + "is_secret": false, "is_verified": false, "line_number": 13, "type": "Base64 High Entropy String" }, { 
"hashed_secret": "d98d72830f08c9a8b96ed11d3d96ae9e71b72a26", + "is_secret": false, "is_verified": false, "line_number": 16, "type": "Base64 High Entropy String" }, { "hashed_secret": "667fd45d415f73f4132cf0ed11452beb51117b12", + "is_secret": false, "is_verified": false, "line_number": 18, "type": "Base64 High Entropy String" }, { "hashed_secret": "c2599d515ba3be74ed58821485ba769fc565e424", + "is_secret": false, "is_verified": false, "line_number": 33, "type": "Base64 High Entropy String" }, { "hashed_secret": "6ec5eb29e2884f0c9731493b38902e37c2d672ba", + "is_secret": false, "is_verified": false, "line_number": 35, "type": "Base64 High Entropy String" }, { "hashed_secret": "99126b74731670a59b663d5320712564ec7b5f22", + "is_secret": false, "is_verified": false, "line_number": 36, "type": "Base64 High Entropy String" @@ -613,6 +699,7 @@ "gen3/test/secretsTest.sh": [ { "hashed_secret": "c2c715092ef59cba22520f109f041efca84b8938", + "is_secret": false, "is_verified": false, "line_number": 25, "type": "Secret Keyword" @@ -621,24 +708,28 @@ "gen3/test/terraformTest.sh": [ { "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d", + "is_secret": false, "is_verified": false, "line_number": 156, "type": "Secret Keyword" }, { "hashed_secret": "1cc07dccfdf640eb0e403e490a873a5536759009", + "is_secret": false, "is_verified": false, "line_number": 172, "type": "Base64 High Entropy String" }, { "hashed_secret": "185a71a740ef6b9b21c84e6eaa47b89c7de181ef", + "is_secret": false, "is_verified": false, "line_number": 175, "type": "Base64 High Entropy String" }, { "hashed_secret": "329b7cd8191942bedd337107934d365c43a86e6c", + "is_secret": false, "is_verified": false, "line_number": 175, "type": "Secret Keyword" @@ -647,344 +738,401 @@ "kube/services/datadog/values.yaml": [ { "hashed_secret": "52330dffa4d0795b4199a66428e54eca228e1661", + "is_secret": false, "is_verified": false, - "line_number": 7, + "line_number": 15, "type": "Secret Keyword" } ], 
"kube/services/fenceshib/fenceshib-configmap.yaml": [ { "hashed_secret": "a985e14b9d6744a2d04f29347693b55c116e478c", + "is_secret": false, "is_verified": false, "line_number": 375, "type": "Base64 High Entropy String" }, { "hashed_secret": "adc747bc5eb82ef4b017f5c3759dcee5aa28c36f", + "is_secret": false, "is_verified": false, "line_number": 376, "type": "Base64 High Entropy String" }, { "hashed_secret": "59b1702ff0eaf92c9271cbd12f587de97df7e13b", + "is_secret": false, "is_verified": false, "line_number": 377, "type": "Base64 High Entropy String" }, { "hashed_secret": "b4a748bbfbbca8925d932a47ab3dcb970d34caf5", + "is_secret": false, "is_verified": false, "line_number": 378, "type": "Base64 High Entropy String" }, { "hashed_secret": "af646701a84f7dd9f0e87753f54def881326e78a", + "is_secret": false, "is_verified": false, "line_number": 379, "type": "Base64 High Entropy String" }, { "hashed_secret": "20c15ad9742124dc06e1612282c49bb443ebcbd9", + "is_secret": false, "is_verified": false, "line_number": 380, "type": "Base64 High Entropy String" }, { "hashed_secret": "9caded71b967a11b7a6cd0f20db91f06f3517d12", + "is_secret": false, "is_verified": false, "line_number": 381, "type": "Base64 High Entropy String" }, { "hashed_secret": "8f19501bc9241b71f7b6db929fb35ab12635dcd7", + "is_secret": false, "is_verified": false, "line_number": 382, "type": "Base64 High Entropy String" }, { "hashed_secret": "d6220f6a55df1ed11c4250f42ab07bb9da20541a", + "is_secret": false, "is_verified": false, "line_number": 383, "type": "Base64 High Entropy String" }, { "hashed_secret": "dadd9b96636f9529f2547d05d754dc310ceba0c3", + "is_secret": false, "is_verified": false, "line_number": 384, "type": "Base64 High Entropy String" }, { "hashed_secret": "3074bc66584550e20c3697a28f67a0762394943c", + "is_secret": false, "is_verified": false, "line_number": 385, "type": "Base64 High Entropy String" }, { "hashed_secret": "823131319b4c4b4688f44d3e832bfa9696f16b52", + "is_secret": false, "is_verified": false, 
"line_number": 386, "type": "Base64 High Entropy String" }, { "hashed_secret": "015b780cbfb76988caf52de8ac974a6781e53110", + "is_secret": false, "is_verified": false, "line_number": 387, "type": "Base64 High Entropy String" }, { "hashed_secret": "5c8fac33207d74d667680ade09447ea8f43b76d7", + "is_secret": false, "is_verified": false, "line_number": 388, "type": "Base64 High Entropy String" }, { "hashed_secret": "c0c4bb09d8394e8f001e337bd27ccac355433d9e", + "is_secret": false, "is_verified": false, "line_number": 389, "type": "Base64 High Entropy String" }, { "hashed_secret": "f95631bcbbbc56e18487dcb242cfb1b3e74b16a1", + "is_secret": false, "is_verified": false, "line_number": 390, "type": "Base64 High Entropy String" }, { "hashed_secret": "01a692ab6232e0882a313d148981bab58ab98f53", + "is_secret": false, "is_verified": false, "line_number": 391, "type": "Base64 High Entropy String" }, { "hashed_secret": "658060a680d415ce6690ad2c3b622ddb33ddd50a", + "is_secret": false, "is_verified": false, "line_number": 392, "type": "Base64 High Entropy String" }, { "hashed_secret": "80915b0bd9daa5e1f95cad573892980b1b5a2294", + "is_secret": false, "is_verified": false, "line_number": 393, "type": "Base64 High Entropy String" }, { "hashed_secret": "cc55977b293d8cdca8a2c19dfea6874e70057c41", + "is_secret": false, "is_verified": false, "line_number": 394, "type": "Base64 High Entropy String" }, { "hashed_secret": "e400ed02add75dd5f3a8c212857acf12027437d1", + "is_secret": false, "is_verified": false, "line_number": 395, "type": "Base64 High Entropy String" }, { "hashed_secret": "2e819c8baa3b0508a32b77de258655b3f3a6f7cb", + "is_secret": false, "is_verified": false, "line_number": 396, "type": "Base64 High Entropy String" }, { "hashed_secret": "546ed926d58ea5492ab6adb8be94a67aa44ac433", + "is_secret": false, "is_verified": false, "line_number": 397, "type": "Base64 High Entropy String" }, { "hashed_secret": "f056f2deceed268e7af6dbdaf2577079c76e006a", + "is_secret": false, "is_verified": 
false, "line_number": 398, "type": "Base64 High Entropy String" }, { "hashed_secret": "d75efee28f4798c3a9c6f44b78a8500513ef28b2", + "is_secret": false, "is_verified": false, "line_number": 399, "type": "Base64 High Entropy String" }, { "hashed_secret": "7803ae08cdc22a5e0b025eff3c9ef0628eedc165", + "is_secret": false, "is_verified": false, "line_number": 419, "type": "Base64 High Entropy String" }, { "hashed_secret": "b8b61e87f5b58b0eeb597b2122ea0cea2ccab3d9", + "is_secret": false, "is_verified": false, "line_number": 420, "type": "Base64 High Entropy String" }, { "hashed_secret": "787745fc904c3bd7eddc3d1aab683a376c13890f", + "is_secret": false, "is_verified": false, "line_number": 423, "type": "Base64 High Entropy String" }, { "hashed_secret": "81361d672f238f505a6246ef9b655ee2f48d67e7", + "is_secret": false, "is_verified": false, "line_number": 424, "type": "Base64 High Entropy String" }, { "hashed_secret": "7c98bff76ac3f273d15ed9bc3dd5294d323ab577", + "is_secret": false, "is_verified": false, "line_number": 425, "type": "Base64 High Entropy String" }, { "hashed_secret": "46038fc88daceed8dd46817ca45c72ae0270fdd4", + "is_secret": false, "is_verified": false, "line_number": 426, "type": "Base64 High Entropy String" }, { "hashed_secret": "acad0c57b4f5cbed1b4863ed06d02784180a9f92", + "is_secret": false, "is_verified": false, "line_number": 427, "type": "Base64 High Entropy String" }, { "hashed_secret": "1b57f49a6ee337c16ecd6aabfc0dff3b3821cd09", + "is_secret": false, "is_verified": false, "line_number": 428, "type": "Base64 High Entropy String" }, { "hashed_secret": "5b688158be36e8b3f265a462ed599dcf69290084", + "is_secret": false, "is_verified": false, "line_number": 429, "type": "Base64 High Entropy String" }, { "hashed_secret": "965996e12c8b50b3c325d96003e8984a4ece658a", + "is_secret": false, "is_verified": false, "line_number": 430, "type": "Base64 High Entropy String" }, { "hashed_secret": "584f0c58e764e948af1a35c9e60447aa0f84c6f5", + "is_secret": false, 
"is_verified": false, "line_number": 431, "type": "Base64 High Entropy String" }, { "hashed_secret": "bcaf897786d060a675ee9d654a84ae8baf96e9d0", + "is_secret": false, "is_verified": false, "line_number": 432, "type": "Base64 High Entropy String" }, { "hashed_secret": "0c09277fa183e06d32065f9386a3b4190b445df3", + "is_secret": false, "is_verified": false, "line_number": 433, "type": "Base64 High Entropy String" }, { "hashed_secret": "5a51be06b305d6664e4afd25f21869b0f8b5039b", + "is_secret": false, "is_verified": false, "line_number": 434, "type": "Base64 High Entropy String" }, { "hashed_secret": "b38404f8853d734e3d03577b2c1084b4540c8708", + "is_secret": false, "is_verified": false, "line_number": 435, "type": "Base64 High Entropy String" }, { "hashed_secret": "126ccc602cffcb8292beb57137f7f6719e317b72", + "is_secret": false, "is_verified": false, "line_number": 436, "type": "Base64 High Entropy String" }, { "hashed_secret": "6681c1d7e1d327642a32cb8864ad51e4b8f981e5", + "is_secret": false, "is_verified": false, "line_number": 437, "type": "Base64 High Entropy String" }, { "hashed_secret": "7f7b1f316ece195e5f584fe2faf6f9edc6942c6f", + "is_secret": false, "is_verified": false, "line_number": 439, "type": "Base64 High Entropy String" }, { "hashed_secret": "bb908c7bc655057f2edc42815c5dff82e9dea529", + "is_secret": false, "is_verified": false, "line_number": 440, "type": "Base64 High Entropy String" }, { "hashed_secret": "bc2a0d18e3dd142df7b34e95342d47bf8aadabcb", + "is_secret": false, "is_verified": false, "line_number": 441, "type": "Base64 High Entropy String" }, { "hashed_secret": "d60f0bcea109bb6edb6e45fd387f5f2c86e49e1a", + "is_secret": false, "is_verified": false, "line_number": 442, "type": "Base64 High Entropy String" }, { "hashed_secret": "e549dd40a741557cc1c4e377df0a141354e22688", + "is_secret": false, "is_verified": false, "line_number": 443, "type": "Base64 High Entropy String" }, { "hashed_secret": "2dd2486dae84cad50387c20bf687b6fbc6162b58", + "is_secret": 
false, "is_verified": false, "line_number": 444, "type": "Base64 High Entropy String" }, { "hashed_secret": "71622010fc7eb09d9273f59c548bde6a5da5dc0e", + "is_secret": false, "is_verified": false, "line_number": 445, "type": "Base64 High Entropy String" }, { "hashed_secret": "6f0115cf53bd49ec990c562ac6cbfc452c83cd46", + "is_secret": false, "is_verified": false, "line_number": 446, "type": "Base64 High Entropy String" }, { "hashed_secret": "70dddd534b2f9bb70871fefe0845b79c3b69363f", + "is_secret": false, "is_verified": false, "line_number": 448, "type": "Base64 High Entropy String" }, { "hashed_secret": "acf3536b0416aa99608b0be17e87655370ece829", + "is_secret": false, "is_verified": false, "line_number": 449, "type": "Base64 High Entropy String" }, { "hashed_secret": "1d13ee35c7279c1fae1c6474ed47611994273e41", + "is_secret": false, "is_verified": false, "line_number": 450, "type": "Base64 High Entropy String" }, { "hashed_secret": "d38cf89b25bd7378cdb4e00b4b59293001dd500b", + "is_secret": false, "is_verified": false, "line_number": 451, "type": "Base64 High Entropy String" }, { "hashed_secret": "1648f34ce2f1b563a8ed1c6d5d55b5e76a395903", + "is_secret": false, "is_verified": false, "line_number": 452, "type": "Base64 High Entropy String" }, { "hashed_secret": "9bf63f6f49fb01ff80959bc5a60c8688df92cc02", + "is_secret": false, "is_verified": false, "line_number": 453, "type": "Base64 High Entropy String" @@ -993,6 +1141,7 @@ "kube/services/jobs/indexd-authz-job.yaml": [ { "hashed_secret": "bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f", + "is_secret": false, "is_verified": false, "line_number": 70, "type": "Basic Auth Credentials" @@ -1001,12 +1150,14 @@ "kube/services/monitoring/grafana-values.yaml": [ { "hashed_secret": "2ae868079d293e0a185c671c7bcdac51df36e385", + "is_secret": false, "is_verified": false, "line_number": 162, "type": "Secret Keyword" }, { "hashed_secret": "7a64ff8446b06d38dc271019994f13823a2cbcf4", + "is_secret": false, "is_verified": false, "line_number": 
166, "type": "Secret Keyword" @@ -1015,20 +1166,23 @@ "kube/services/ohdsi-atlas/README.md": [ { "hashed_secret": "6e71f9f2b1e96de5a712f899ed26477ebc260a73", + "is_secret": false, "is_verified": false, - "line_number": 87, + "line_number": 105, "type": "Secret Keyword" }, { "hashed_secret": "317b889ca9fa8789dc1b85714568b1bdf2c7baf3", + "is_secret": false, "is_verified": false, - "line_number": 90, + "line_number": 108, "type": "Secret Keyword" } ], "kube/services/revproxy/helpers.js": [ { "hashed_secret": "1d278d3c888d1a2fa7eed622bfc02927ce4049af", + "is_secret": false, "is_verified": false, "line_number": 10, "type": "Base64 High Entropy String" @@ -1037,800 +1191,1020 @@ "kube/services/revproxy/helpersTest.js": [ { "hashed_secret": "e029d4904cc728879d70030572bf37d4510367cb", + "is_secret": false, "is_verified": false, "line_number": 22, "type": "JSON Web Token" } ], + "kube/services/superset/superset-deploy.yaml": [ + { + "hashed_secret": "96e4aceb7cf284be363aa248a32a7cc89785a9f7", + "is_secret": false, + "is_verified": false, + "line_number": 38, + "type": "Secret Keyword" + } + ], + "kube/services/superset/superset-redis.yaml": [ + { + "hashed_secret": "4af3596275edcb7cd5cc6c3c38bc10479902a08f", + "is_secret": false, + "is_verified": false, + "line_number": 165, + "type": "Secret Keyword" + }, + { + "hashed_secret": "244f421f896bdcdd2784dccf4eaf7c8dfd5189b5", + "is_secret": false, + "is_verified": false, + "line_number": 260, + "type": "Secret Keyword" + } + ], + "kube/services/superset/superset/superset-deploy.yaml": [ + { + "hashed_secret": "96e4aceb7cf284be363aa248a32a7cc89785a9f7", + "is_secret": false, + "is_verified": false, + "line_number": 38, + "type": "Secret Keyword" + } + ], + "kube/services/superset/superset/superset-redis.yaml": [ + { + "hashed_secret": "4af3596275edcb7cd5cc6c3c38bc10479902a08f", + "is_secret": false, + "is_verified": false, + "line_number": 169, + "type": "Secret Keyword" + }, + { + "hashed_secret": 
"244f421f896bdcdd2784dccf4eaf7c8dfd5189b5", + "is_secret": false, + "is_verified": false, + "line_number": 266, + "type": "Secret Keyword" + } + ], + "kube/services/superset/values.yaml": [ + { + "hashed_secret": "6f803b24314c39062efe38d0c1da8c472f47eab3", + "is_secret": false, + "is_verified": false, + "line_number": 54, + "type": "Secret Keyword" + }, + { + "hashed_secret": "6eae3a5b062c6d0d79f070c26e6d62486b40cb46", + "is_secret": false, + "is_verified": false, + "line_number": 86, + "type": "Secret Keyword" + }, + { + "hashed_secret": "3eb416223e9e69e6bb8ee19793911ad1ad2027d8", + "is_secret": false, + "is_verified": false, + "line_number": 212, + "type": "Secret Keyword" + }, + { + "hashed_secret": "ff55435345834a3fe224936776c2aa15f6ed5358", + "is_secret": false, + "is_verified": false, + "line_number": 396, + "type": "Secret Keyword" + }, + { + "hashed_secret": "98a84a63e5633d17e3b27b69695f87aa7189e9dc", + "is_secret": false, + "is_verified": false, + "line_number": 503, + "type": "Secret Keyword" + } + ], "package-lock.json": [ { "hashed_secret": "c95b6bc99445e7ed9177040f5ef94d0cdb38fb21", + "is_secret": false, "is_verified": false, "line_number": 10, "type": "Base64 High Entropy String" }, { "hashed_secret": "a896da46c897d3a0d007843006621f78dbcabf51", + "is_secret": false, "is_verified": false, "line_number": 19, "type": "Base64 High Entropy String" }, { "hashed_secret": "84b662fc9a2a275f90d0afafe6ce08a4d0928ac8", + "is_secret": false, "is_verified": false, "line_number": 28, "type": "Base64 High Entropy String" }, { "hashed_secret": "6ebe9724873357aaea25e329efb726fa61b843e7", + "is_secret": false, "is_verified": false, "line_number": 39, "type": "Base64 High Entropy String" }, { "hashed_secret": "f1dbba169db046906924ccd784068a2306096634", + "is_secret": false, "is_verified": false, "line_number": 44, "type": "Base64 High Entropy String" }, { "hashed_secret": "2c7bd6cdc39b5b8a0f32aa11988a0ec769526cdb", + "is_secret": false, "is_verified": false, 
"line_number": 52, "type": "Base64 High Entropy String" }, { "hashed_secret": "1addd61f68d977408128e530959437821a6d8b66", + "is_secret": false, "is_verified": false, "line_number": 57, "type": "Base64 High Entropy String" }, { "hashed_secret": "9787d966f19a0d8d0021b31d34cfdfcebdb9c28a", + "is_secret": false, "is_verified": false, "line_number": 65, "type": "Base64 High Entropy String" }, { "hashed_secret": "76693e518c3c8123e9a197821b506292322a0a95", + "is_secret": false, "is_verified": false, "line_number": 70, "type": "Base64 High Entropy String" }, { "hashed_secret": "fa83dcbf0f435ee38066d19a2a43815510f96bc4", + "is_secret": false, "is_verified": false, "line_number": 86, "type": "Base64 High Entropy String" }, { "hashed_secret": "017a7eab3d63331ecfe768927c8907a5a31888e5", + "is_secret": false, "is_verified": false, "line_number": 91, "type": "Base64 High Entropy String" }, { "hashed_secret": "92b56edda4f2906f548fe77c015490e6ba2ee4c3", + "is_secret": false, "is_verified": false, "line_number": 96, "type": "Base64 High Entropy String" }, { "hashed_secret": "936b0959aa13f1decc76be1d80acaac0860847b7", + "is_secret": false, "is_verified": false, "line_number": 101, "type": "Base64 High Entropy String" }, { "hashed_secret": "4bad86c43b7cd06efc130272d8e4de2b32636371", + "is_secret": false, "is_verified": false, "line_number": 109, "type": "Base64 High Entropy String" }, { "hashed_secret": "d11716ecfa623706b733654d78f4e7af3c117efa", + "is_secret": false, "is_verified": false, "line_number": 143, "type": "Base64 High Entropy String" }, { "hashed_secret": "0cc93dfdf4ae08bc374b99af985b25d2427f71d8", + "is_secret": false, "is_verified": false, "line_number": 148, "type": "Base64 High Entropy String" }, { "hashed_secret": "80f8d53f3fedde239f695d6a4c44c78b4aff0a44", + "is_secret": false, "is_verified": false, "line_number": 153, "type": "Base64 High Entropy String" }, { "hashed_secret": "83307cb75a4a44ba528f4a0aefcec2a8018dc6d8", + "is_secret": false, "is_verified": false, 
"line_number": 158, "type": "Base64 High Entropy String" }, { "hashed_secret": "c96d81662cc7919208154e7152fa0033391b7bcd", + "is_secret": false, "is_verified": false, "line_number": 166, "type": "Base64 High Entropy String" }, { "hashed_secret": "7156492f40fb2479a45780b3d2959c29b27b6374", + "is_secret": false, "is_verified": false, "line_number": 181, "type": "Base64 High Entropy String" }, { "hashed_secret": "885304335818f51938422166d361cddacfd626d0", + "is_secret": false, "is_verified": false, "line_number": 186, "type": "Base64 High Entropy String" }, { "hashed_secret": "915ca894a8ec19ffcd55555e6c8daac1fe882751", + "is_secret": false, "is_verified": false, "line_number": 191, "type": "Base64 High Entropy String" }, { "hashed_secret": "7ea379a1bf787a21401c8c39f285e4e84b478d72", + "is_secret": false, "is_verified": false, "line_number": 201, "type": "Base64 High Entropy String" }, { "hashed_secret": "8e948a3b773d1a2e4b6f4220216efa734315246d", + "is_secret": false, "is_verified": false, "line_number": 209, "type": "Base64 High Entropy String" }, { "hashed_secret": "1a321d0b0d9b6d75888ce7ae121ac222cec1eddd", + "is_secret": false, "is_verified": false, "line_number": 217, "type": "Base64 High Entropy String" }, { "hashed_secret": "1a6bfe25744ad6c6ce27c3a52dbd98c15be12a5c", + "is_secret": false, "is_verified": false, "line_number": 222, "type": "Base64 High Entropy String" }, { "hashed_secret": "04450eaacfa844f84926d04d6a07534cde99b28e", + "is_secret": false, "is_verified": false, "line_number": 227, "type": "Base64 High Entropy String" }, { "hashed_secret": "b4c295435d09bbdfb91ced9040379166d67ccbd2", + "is_secret": false, "is_verified": false, "line_number": 232, "type": "Base64 High Entropy String" }, { "hashed_secret": "bb2bf296d6e086b471d45a26af9fd57f55289a75", + "is_secret": false, "is_verified": false, "line_number": 237, "type": "Base64 High Entropy String" }, { "hashed_secret": "9579b6a23d94d56f2f163233b716d8752e6b3bde", + "is_secret": false, "is_verified": 
false, "line_number": 256, "type": "Base64 High Entropy String" }, { "hashed_secret": "796925256bc0f4dc43cdfab7fbff852eace18f42", + "is_secret": false, "is_verified": false, "line_number": 287, "type": "Base64 High Entropy String" }, { "hashed_secret": "7e280af4ec2d573144d98e89ed2e1dfd817ca48f", + "is_secret": false, "is_verified": false, "line_number": 295, "type": "Base64 High Entropy String" }, { "hashed_secret": "941b3e7836a6f26d32311893ac5d9ad0a52c45ca", + "is_secret": false, "is_verified": false, "line_number": 300, "type": "Base64 High Entropy String" }, { "hashed_secret": "34743e1f7d9541c4a726b998f20baf828c694213", + "is_secret": false, "is_verified": false, "line_number": 305, "type": "Base64 High Entropy String" }, { "hashed_secret": "c4fea87bd49c4427d7215d57ada9ff3177e0c471", + "is_secret": false, "is_verified": false, "line_number": 310, "type": "Base64 High Entropy String" }, { "hashed_secret": "85324324e21d0dfbfb5248ac92fa0f289d2e25f8", + "is_secret": false, "is_verified": false, "line_number": 315, "type": "Base64 High Entropy String" }, { "hashed_secret": "19eea0e64f6a3311b04e472035df10c23f23dd0a", + "is_secret": false, "is_verified": false, "line_number": 352, "type": "Base64 High Entropy String" }, { "hashed_secret": "acce4ef8d841ffa646256da3af7b79ad5cb78158", + "is_secret": false, "is_verified": false, "line_number": 364, "type": "Base64 High Entropy String" }, { "hashed_secret": "22e7ae9b65ade417baac61e6f0d84a54783ba759", + "is_secret": false, "is_verified": false, "line_number": 369, "type": "Base64 High Entropy String" }, { "hashed_secret": "8e71b7828c7c554f05dbbabddd63301b5fc56771", + "is_secret": false, "is_verified": false, "line_number": 374, "type": "Base64 High Entropy String" }, { "hashed_secret": "fea0d9c5b0c53c41e6a0a961a49cccc170847120", + "is_secret": false, "is_verified": false, "line_number": 379, "type": "Base64 High Entropy String" }, { "hashed_secret": "ebe2160ede628e0faeac9fe70c215cd38d28d8f6", + "is_secret": false, 
"is_verified": false, "line_number": 384, "type": "Base64 High Entropy String" }, { "hashed_secret": "9cb2b0347722893cde39bbe83f9df7c3c6e1b7c3", + "is_secret": false, "is_verified": false, "line_number": 398, "type": "Base64 High Entropy String" }, { "hashed_secret": "344e37e02a35dd31cc7dc945b7fe7b2da88344c0", + "is_secret": false, "is_verified": false, "line_number": 403, "type": "Base64 High Entropy String" }, { "hashed_secret": "31a41817127c8d2b7b304c326b05d7319934e7a6", + "is_secret": false, "is_verified": false, "line_number": 413, "type": "Base64 High Entropy String" }, { "hashed_secret": "150852e9f1e877547306d59618a136fb535b40e3", + "is_secret": false, "is_verified": false, "line_number": 418, "type": "Base64 High Entropy String" }, { "hashed_secret": "277e32c5ba00ef90c6f76c7004fde2ecac6d2e18", + "is_secret": false, "is_verified": false, "line_number": 423, "type": "Base64 High Entropy String" }, { "hashed_secret": "b95e69c7f4328ea641952f875c3b079a1585c9d1", + "is_secret": false, "is_verified": false, "line_number": 431, "type": "Base64 High Entropy String" }, { "hashed_secret": "6b30fe731c8444c0263b57aacbdaedb771ec01a5", + "is_secret": false, "is_verified": false, "line_number": 436, "type": "Base64 High Entropy String" }, { "hashed_secret": "98eafa06e0c7e089c19e79dedf5989c3eb2f0568", + "is_secret": false, "is_verified": false, "line_number": 445, "type": "Base64 High Entropy String" }, { "hashed_secret": "bf47364c2d4ad0308ef016fe4a89f6c7dc21ef86", + "is_secret": false, "is_verified": false, "line_number": 464, "type": "Base64 High Entropy String" }, { "hashed_secret": "3e6c18abd5b90c63da0bd8b4c0d3a142e3d5a83d", + "is_secret": false, "is_verified": false, "line_number": 474, "type": "Base64 High Entropy String" }, { "hashed_secret": "209bf9cfe9000c6851cd4f94165d30ee1cd3dca1", + "is_secret": false, "is_verified": false, "line_number": 482, "type": "Base64 High Entropy String" }, { "hashed_secret": "cf09cb791688fe019284bfdc362abc41918645a5", + "is_secret": 
false, "is_verified": false, "line_number": 487, "type": "Base64 High Entropy String" }, { "hashed_secret": "6c1392daf02b9ba2a21c49c82508048525d5bc4b", + "is_secret": false, "is_verified": false, "line_number": 492, "type": "Base64 High Entropy String" }, { "hashed_secret": "b4e2bf4f3a071b223da2f270d5a2348d65105d3e", + "is_secret": false, "is_verified": false, "line_number": 497, "type": "Base64 High Entropy String" }, { "hashed_secret": "98d583792218c3c06ecbcac66e5bedcdaabd63e7", + "is_secret": false, "is_verified": false, "line_number": 507, "type": "Base64 High Entropy String" }, { "hashed_secret": "575c9b4e0765ae6ab9a4f38eb1186ea361691f73", + "is_secret": false, "is_verified": false, "line_number": 514, "type": "Base64 High Entropy String" }, { "hashed_secret": "16225dde2ec301d038a0bdbda68de4a174fbfdd0", + "is_secret": false, "is_verified": false, "line_number": 519, "type": "Base64 High Entropy String" }, { "hashed_secret": "80d73b6f7e87f07e3ae70ef1e692aa9569574551", + "is_secret": false, "is_verified": false, "line_number": 524, "type": "Base64 High Entropy String" }, { "hashed_secret": "38952752ebde485c02a80bff1d81ebe95664bcca", + "is_secret": false, "is_verified": false, "line_number": 529, "type": "Base64 High Entropy String" }, { "hashed_secret": "150b60d278251f2470dd690016afe038bc1bb7f1", + "is_secret": false, "is_verified": false, "line_number": 534, "type": "Base64 High Entropy String" }, { "hashed_secret": "535582d92da3a4158e592ec29868bfd8467b8bce", + "is_secret": false, "is_verified": false, "line_number": 539, "type": "Base64 High Entropy String" }, { "hashed_secret": "23b096d9b48ed5d9a778d3db5807c5c7a2357c93", + "is_secret": false, "is_verified": false, "line_number": 544, "type": "Base64 High Entropy String" }, { "hashed_secret": "127f92724797904fb4e6de2dfff2c71c07739612", + "is_secret": false, "is_verified": false, "line_number": 549, "type": "Base64 High Entropy String" }, { "hashed_secret": "f74b21c2fc87ad48118b3723372ecfe25aaae730", + 
"is_secret": false, "is_verified": false, "line_number": 559, "type": "Base64 High Entropy String" }, { "hashed_secret": "bc788b9febb8e95114c2e78a9d5297f80bbedb2c", + "is_secret": false, "is_verified": false, "line_number": 564, "type": "Base64 High Entropy String" }, { "hashed_secret": "e9fdc3025cd10bd8aa4508611e6b7b7a9d650a2c", + "is_secret": false, "is_verified": false, "line_number": 575, "type": "Base64 High Entropy String" }, { "hashed_secret": "36a64bd1be32f031420a87c448636720426e0072", + "is_secret": false, "is_verified": false, "line_number": 580, "type": "Base64 High Entropy String" }, { "hashed_secret": "06a3dc8802aa9b4f2f48ad081cbe64482ce9f491", + "is_secret": false, "is_verified": false, "line_number": 585, "type": "Base64 High Entropy String" }, { "hashed_secret": "6c8453f18e4aa0280d847454c9a803c12e2d14d7", + "is_secret": false, "is_verified": false, "line_number": 590, "type": "Base64 High Entropy String" }, { "hashed_secret": "3df46004e168f8d8e3422adfbf0b7c237a41f437", + "is_secret": false, "is_verified": false, "line_number": 595, "type": "Base64 High Entropy String" }, { "hashed_secret": "5c270f653b2fcd5b7c700b53f8543df4147a4aba", + "is_secret": false, "is_verified": false, "line_number": 600, "type": "Base64 High Entropy String" }, { "hashed_secret": "98a159a135963e5e65a546879c332b2c3942aec3", + "is_secret": false, "is_verified": false, "line_number": 605, "type": "Base64 High Entropy String" }, { "hashed_secret": "58d846ede841bbec0d67a42d03426806635fee2f", + "is_secret": false, "is_verified": false, "line_number": 610, "type": "Base64 High Entropy String" }, { "hashed_secret": "23e42656fba130d56c20abddb94b6b7bfcad69a8", + "is_secret": false, "is_verified": false, "line_number": 618, "type": "Base64 High Entropy String" }, { "hashed_secret": "f883f0bd87d8455814f491e2067bd3f62454c7c2", + "is_secret": false, "is_verified": false, "line_number": 623, "type": "Base64 High Entropy String" }, { "hashed_secret": 
"8ece0f01da9189bae69a60da116040400bbc10e5", + "is_secret": false, "is_verified": false, "line_number": 628, "type": "Base64 High Entropy String" }, { "hashed_secret": "75a3c0b9934bd460ff7af9763edb25d749ab7b4e", + "is_secret": false, "is_verified": false, "line_number": 633, "type": "Base64 High Entropy String" }, { "hashed_secret": "baac57cb314beab87420d1da6906a1d2377c7d73", + "is_secret": false, "is_verified": false, "line_number": 638, "type": "Base64 High Entropy String" }, { "hashed_secret": "d0a953de593a0a7b26b925a6476d8382cd31cb0e", + "is_secret": false, "is_verified": false, "line_number": 654, "type": "Base64 High Entropy String" }, { "hashed_secret": "8b15238d25347ab18f4cbbe191de9aed597c8ea4", + "is_secret": false, "is_verified": false, "line_number": 659, "type": "Base64 High Entropy String" }, { "hashed_secret": "1e2ab7a2fd9b6afcbe08afcb9dc652b76cf367d8", + "is_secret": false, "is_verified": false, "line_number": 668, "type": "Base64 High Entropy String" }, { "hashed_secret": "ae745d719f97b3ddb9791348b1f29ff8208c0c5c", + "is_secret": false, "is_verified": false, "line_number": 676, "type": "Base64 High Entropy String" }, { "hashed_secret": "b72a53c8bebd6540eeffeba5b0c28965bbb2a664", + "is_secret": false, "is_verified": false, "line_number": 681, "type": "Base64 High Entropy String" }, { "hashed_secret": "97cbb7fbdfe498c80489e26bcdc78fce5db9b258", + "is_secret": false, "is_verified": false, "line_number": 686, "type": "Base64 High Entropy String" }, { "hashed_secret": "bc98c415b1c6ee93adf8e97a4a536b6342337c19", + "is_secret": false, "is_verified": false, "line_number": 691, "type": "Base64 High Entropy String" }, { "hashed_secret": "5a6baaacb03a030567b857cb8cfe440407e6385e", + "is_secret": false, "is_verified": false, "line_number": 696, "type": "Base64 High Entropy String" }, { "hashed_secret": "e55a8322e5c7485be2f721155d9ed15afc586a4c", + "is_secret": false, "is_verified": false, "line_number": 705, "type": "Base64 High Entropy String" }, { 
"hashed_secret": "47709a15a1b02a87f65dfcd5f3e78e0d2206c95f", + "is_secret": false, "is_verified": false, "line_number": 710, "type": "Base64 High Entropy String" }, { "hashed_secret": "5782d0f39536b22f2c6aa29d3b815a57f43e4800", + "is_secret": false, "is_verified": false, "line_number": 719, "type": "Base64 High Entropy String" }, { "hashed_secret": "401f90e6afa890c5ee44071351e4a149e7c1f5e0", + "is_secret": false, "is_verified": false, "line_number": 724, "type": "Base64 High Entropy String" }, { "hashed_secret": "51f38b23af543da8b637a3bd62f5fb2c460e3b3d", + "is_secret": false, "is_verified": false, "line_number": 729, "type": "Base64 High Entropy String" }, { "hashed_secret": "8287678ab8009ae16b02930c9e260d1f28578fbe", + "is_secret": false, "is_verified": false, "line_number": 734, "type": "Base64 High Entropy String" }, { "hashed_secret": "d4c050e6914eb68a5c657fb8bb09f6ac5eae1e86", + "is_secret": false, "is_verified": false, "line_number": 739, "type": "Base64 High Entropy String" }, { "hashed_secret": "922ac7db4914c20910496a41c474631928d6c2f2", + "is_secret": false, "is_verified": false, "line_number": 750, "type": "Base64 High Entropy String" }, { "hashed_secret": "f7f85d9f7c87f1e576dcaf4cf50f35728f9a3265", + "is_secret": false, "is_verified": false, "line_number": 771, "type": "Base64 High Entropy String" }, { "hashed_secret": "d7966031d8525b080d7234049cbb040ac9a3f908", + "is_secret": false, "is_verified": false, "line_number": 798, "type": "Base64 High Entropy String" }, { "hashed_secret": "ff3d359d573f78d89424e03ec8688eee19305f9f", + "is_secret": false, "is_verified": false, "line_number": 808, "type": "Base64 High Entropy String" }, { "hashed_secret": "949b4ff40f26797f9290fe46eaa8691caef5c5ab", + "is_secret": false, "is_verified": false, "line_number": 817, "type": "Base64 High Entropy String" }, { "hashed_secret": "ce4ea19f66e9140bdb497b19c6ae94c32ee565f0", + "is_secret": false, "is_verified": false, "line_number": 825, "type": "Base64 High Entropy String" 
}, { "hashed_secret": "f6368525e9e22577efc8d8b737794e845958ba92", + "is_secret": false, "is_verified": false, "line_number": 834, "type": "Base64 High Entropy String" }, { "hashed_secret": "1508bbaf29927b5348d4df62823dab122a0d3b48", + "is_secret": false, "is_verified": false, "line_number": 839, "type": "Base64 High Entropy String" }, { "hashed_secret": "12917e7235ce486ca51a296b896afa5e3b4fda54", + "is_secret": false, "is_verified": false, "line_number": 844, "type": "Base64 High Entropy String" }, { "hashed_secret": "49e05eb75fd04d8f44cf235d4e8eddc30a2b93e5", + "is_secret": false, "is_verified": false, "line_number": 849, "type": "Base64 High Entropy String" }, { "hashed_secret": "aa8ea120ddc5aaa27cb02e0b04ac1c53b249a724", + "is_secret": false, "is_verified": false, "line_number": 869, "type": "Base64 High Entropy String" }, { "hashed_secret": "b3e00452fd69737cc747d0661fa3b3949a4a0805", + "is_secret": false, "is_verified": false, "line_number": 876, "type": "Base64 High Entropy String" }, { "hashed_secret": "af2ceb518ddc689b0e2a03ffebb64d4499817c17", + "is_secret": false, "is_verified": false, "line_number": 887, "type": "Base64 High Entropy String" }, { "hashed_secret": "7da94b235f996b5c65b66c3e70b5eeaf97bab5d4", + "is_secret": false, "is_verified": false, "line_number": 892, "type": "Base64 High Entropy String" }, { "hashed_secret": "f8363d7113ba35fd06b33afe20c8ad21a3202197", + "is_secret": false, "is_verified": false, "line_number": 900, "type": "Base64 High Entropy String" }, { "hashed_secret": "6902b24068ea12c3a3e31596614aa6fa0fba3c39", + "is_secret": false, "is_verified": false, "line_number": 908, "type": "Base64 High Entropy String" }, { "hashed_secret": "2c732c0a0dccfc1588888172188ce9a1abb7166e", + "is_secret": false, "is_verified": false, "line_number": 916, "type": "Base64 High Entropy String" }, { "hashed_secret": "c59aac9ab2704f627d29c762e716ba84b15be3f1", + "is_secret": false, "is_verified": false, "line_number": 921, "type": "Base64 High Entropy 
String" }, { "hashed_secret": "20249a3c96028e5ad19143d86ec5d2ee233935ed", + "is_secret": false, "is_verified": false, "line_number": 937, "type": "Base64 High Entropy String" }, { "hashed_secret": "2a57a9814486d6f83257ec94e65d1024819611b8", + "is_secret": false, "is_verified": false, "line_number": 942, "type": "Base64 High Entropy String" }, { "hashed_secret": "d5e822897b1f37e6ce1a864e2ba9af8f9bfc5539", + "is_secret": false, "is_verified": false, "line_number": 950, "type": "Base64 High Entropy String" }, { "hashed_secret": "dbee1beb29275ad50ef0a68067ca144985beca2c", + "is_secret": false, "is_verified": false, "line_number": 957, "type": "Base64 High Entropy String" }, { "hashed_secret": "b0cb4b5554183f2c7bc1ca25d902db5769798a7a", + "is_secret": false, "is_verified": false, "line_number": 962, "type": "Base64 High Entropy String" }, { "hashed_secret": "29f79b77802802c5ae2d3c2acb9179280de37914", + "is_secret": false, "is_verified": false, "line_number": 967, "type": "Base64 High Entropy String" }, { "hashed_secret": "18469023a89dd192b5275d8b955c9fd2202e0c03", + "is_secret": false, "is_verified": false, "line_number": 983, "type": "Base64 High Entropy String" }, { "hashed_secret": "0d3ce7468071b4e48ba9cd014ade7037dc57ef41", + "is_secret": false, "is_verified": false, "line_number": 991, "type": "Base64 High Entropy String" }, { "hashed_secret": "955d2d24c472b4eb0b4488f935a0f65e38001df8", + "is_secret": false, "is_verified": false, "line_number": 996, "type": "Base64 High Entropy String" }, { "hashed_secret": "42e05c82cd06a9ed1d15e0f472c2efc4b3254cae", + "is_secret": false, "is_verified": false, "line_number": 1010, "type": "Base64 High Entropy String" }, { "hashed_secret": "7a87fb248397359e9c6ca6e46f39805789059102", + "is_secret": false, "is_verified": false, "line_number": 1018, "type": "Base64 High Entropy String" }, { "hashed_secret": "7fbf450bf4ee54f013454f70af3a9743c0909f54", + "is_secret": false, "is_verified": false, "line_number": 1034, "type": "Base64 High 
Entropy String" }, { "hashed_secret": "df8e0babfad52a541f6e470cf3a143402c2c2a1e", + "is_secret": false, "is_verified": false, "line_number": 1039, "type": "Base64 High Entropy String" }, { "hashed_secret": "6f9bfb49cb818d2fe07592515e4c3f7a0bbd7e0e", + "is_secret": false, "is_verified": false, "line_number": 1044, "type": "Base64 High Entropy String" }, { "hashed_secret": "9e897caf5658aea914e1034f46663cadb5a76348", + "is_secret": false, "is_verified": false, "line_number": 1054, "type": "Base64 High Entropy String" }, { "hashed_secret": "3aec99f39b829f94874ccd0a0d90315c6690cb94", + "is_secret": false, "is_verified": false, "line_number": 1064, "type": "Base64 High Entropy String" }, { "hashed_secret": "eca5fc6e4f5f895143d3fcedefc42dfe6e79f918", + "is_secret": false, "is_verified": false, "line_number": 1069, "type": "Base64 High Entropy String" }, { "hashed_secret": "307a947aa422c67fdefb07178198a004fb2c0d94", + "is_secret": false, "is_verified": false, "line_number": 1074, "type": "Base64 High Entropy String" }, { "hashed_secret": "0ba2fc9a137313ae1fdda2b5476dedf0595bda3a", + "is_secret": false, "is_verified": false, "line_number": 1083, "type": "Base64 High Entropy String" @@ -1839,6 +2213,7 @@ "tf_files/aws/cognito/README.md": [ { "hashed_secret": "f6920f370a30262b7dd70e97293c73ec89739b70", + "is_secret": false, "is_verified": false, "line_number": 106, "type": "Secret Keyword" @@ -1847,12 +2222,14 @@ "tf_files/aws/commons/README.md": [ { "hashed_secret": "d02e53411e8cb4cd709778f173f7bc9a3455f8ed", + "is_secret": false, "is_verified": false, "line_number": 60, "type": "Secret Keyword" }, { "hashed_secret": "9dc0da3613af850c5a018b0a88a5626fb8888e4e", + "is_secret": false, "is_verified": false, "line_number": 78, "type": "Secret Keyword" @@ -1861,6 +2238,7 @@ "tf_files/aws/eks/variables.tf": [ { "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884", + "is_secret": false, "is_verified": false, "line_number": 135, "type": "Hex High Entropy String" @@ -1869,12 
+2247,14 @@ "tf_files/aws/modules/common-logging/README.md": [ { "hashed_secret": "83442aa5a16cb1992731c32367ef464564388017", + "is_secret": false, "is_verified": false, "line_number": 57, "type": "Base64 High Entropy String" }, { "hashed_secret": "fd4a4637ac99de2c1d89155d66d1f3de15d231a2", + "is_secret": false, "is_verified": false, "line_number": 59, "type": "Hex High Entropy String" @@ -1883,24 +2263,28 @@ "tf_files/aws/modules/common-logging/lambda_function.py": [ { "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", + "is_secret": false, "is_verified": false, "line_number": 18, "type": "Hex High Entropy String" }, { "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", + "is_secret": false, "is_verified": false, "line_number": 18, "type": "Base64 High Entropy String" }, { "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", + "is_secret": false, "is_verified": false, "line_number": 18, "type": "Hex High Entropy String" }, { "hashed_secret": "4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff", + "is_secret": false, "is_verified": false, "line_number": 30, "type": "Hex High Entropy String" @@ -1909,18 +2293,21 @@ "tf_files/aws/modules/common-logging/testLambda.py": [ { "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", + "is_secret": false, "is_verified": false, "line_number": 5, "type": "Hex High Entropy String" }, { "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", + "is_secret": false, "is_verified": false, "line_number": 5, "type": "Base64 High Entropy String" }, { "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", + "is_secret": false, "is_verified": false, "line_number": 5, "type": "Hex High Entropy String" @@ -1929,6 +2316,7 @@ "tf_files/aws/modules/eks/variables.tf": [ { "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884", + "is_secret": false, "is_verified": false, "line_number": 113, "type": "Hex High Entropy String" @@ -1937,12 +2325,14 @@ "tf_files/aws/modules/management-logs/README.md": [ { 
"hashed_secret": "83442aa5a16cb1992731c32367ef464564388017", + "is_secret": false, "is_verified": false, "line_number": 54, "type": "Base64 High Entropy String" }, { "hashed_secret": "fd4a4637ac99de2c1d89155d66d1f3de15d231a2", + "is_secret": false, "is_verified": false, "line_number": 56, "type": "Hex High Entropy String" @@ -1951,24 +2341,28 @@ "tf_files/aws/modules/management-logs/lambda_function.py": [ { "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", + "is_secret": false, "is_verified": false, "line_number": 18, "type": "Hex High Entropy String" }, { "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", + "is_secret": false, "is_verified": false, "line_number": 18, "type": "Base64 High Entropy String" }, { "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", + "is_secret": false, "is_verified": false, "line_number": 18, "type": "Hex High Entropy String" }, { "hashed_secret": "4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff", + "is_secret": false, "is_verified": false, "line_number": 30, "type": "Hex High Entropy String" @@ -1977,36 +2371,42 @@ "tf_files/aws/modules/management-logs/testLambda.py": [ { "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", + "is_secret": false, "is_verified": false, "line_number": 5, "type": "Hex High Entropy String" }, { "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", + "is_secret": false, "is_verified": false, "line_number": 5, "type": "Base64 High Entropy String" }, { "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", + "is_secret": false, "is_verified": false, "line_number": 5, "type": "Hex High Entropy String" }, { "hashed_secret": "3cf8eb4e9254e1d6cc523da01f8b798b9a83101a", + "is_secret": false, "is_verified": false, "line_number": 6, "type": "Base64 High Entropy String" }, { "hashed_secret": "51118900cd675df1b44f254057398f3e52902a5d", + "is_secret": false, "is_verified": false, "line_number": 6, "type": "Hex High Entropy String" }, { "hashed_secret": 
"60a6dfc8d43cd2f5c6292899fc2f94f2d4fc32c4", + "is_secret": false, "is_verified": false, "line_number": 6, "type": "Hex High Entropy String" @@ -2015,6 +2415,7 @@ "tf_files/aws/rds/sample.tfvars": [ { "hashed_secret": "76c3c4836dee37d8d0642949f84092a9a24bbf46", + "is_secret": false, "is_verified": false, "line_number": 7, "type": "Secret Keyword" @@ -2023,6 +2424,7 @@ "tf_files/aws/slurm/README.md": [ { "hashed_secret": "fd85d792fa56981cf6a8d2a5c0857c74af86e99d", + "is_secret": false, "is_verified": false, "line_number": 83, "type": "Secret Keyword" @@ -2031,6 +2433,7 @@ "tf_files/azure/cloud.tf": [ { "hashed_secret": "7c1a4b52b64e4106041971c345a1f3eab58fb2a4", + "is_secret": false, "is_verified": false, "line_number": 424, "type": "Secret Keyword" @@ -2039,6 +2442,7 @@ "tf_files/gcp-bwg/roots/commons_setup/variables/answerfile-commons_setup-001.template.tfvars": [ { "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", + "is_secret": false, "is_verified": false, "line_number": 231, "type": "Secret Keyword" @@ -2047,6 +2451,7 @@ "tf_files/gcp-bwg/roots/templates/answerfile-commons_setup-001.template.tfvars": [ { "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", + "is_secret": false, "is_verified": false, "line_number": 231, "type": "Secret Keyword" @@ -2055,6 +2460,7 @@ "tf_files/gcp-bwg/roots/templates/answerfile-env-tenant.user.tfvars_NO_APP_SETUP": [ { "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", + "is_secret": false, "is_verified": false, "line_number": 262, "type": "Secret Keyword" @@ -2063,18 +2469,21 @@ "tf_files/gcp/commons/sample.tfvars": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "is_secret": false, "is_verified": false, "line_number": 11, "type": "Secret Keyword" }, { "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d", + "is_secret": false, "is_verified": false, "line_number": 26, "type": "Secret Keyword" }, { "hashed_secret": "253c7b5e7c83a86346fc4501495b130813f08105", + "is_secret": 
false, "is_verified": false, "line_number": 37, "type": "Secret Keyword" @@ -2083,6 +2492,7 @@ "tf_files/shared/modules/k8s_configs/creds.tpl": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", + "is_secret": false, "is_verified": false, "line_number": 8, "type": "Secret Keyword" diff --git a/gen3/bin/kube-setup-superset.sh b/gen3/bin/kube-setup-superset.sh new file mode 100644 index 000000000..0f1219695 --- /dev/null +++ b/gen3/bin/kube-setup-superset.sh @@ -0,0 +1,135 @@ +#!/bin/bash +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/lib/kube-setup-init" + +# lib --------------------- + +new_client() { + local hostname=$(gen3 api hostname) + superset_hostname="superset.${hostname}" + gen3_log_info "kube-setup-superset" "creating fence oidc client for $superset_hostname" + local secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client superset --urls https://${superset_hostname}/oauth-authorized/fence --username superset | tail -1) + # secrets looks like ('CLIENT_ID', 'CLIENT_SECRET') + if [[ ! $secrets =~ (\'(.*)\', \'(.*)\') ]]; then + # try delete client + g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-delete --client superset > /dev/null 2>&1 + secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client superset --urls https://${superset_hostname}/oauth-authorized/fence --username superset | tail -1) + if [[ ! 
$secrets =~ (\'(.*)\', \'(.*)\') ]]; then + gen3_log_err "kube-setup-superset" "Failed generating oidc client for superset: $secrets" + return 1 + fi + fi + local FENCE_CLIENT_ID="${BASH_REMATCH[2]}" + local FENCE_CLIENT_SECRET="${BASH_REMATCH[3]}" + gen3_log_info "create superset-secret" + mkdir -m 0700 -p "$(gen3_secrets_folder)/g3auto/superset" + + cat - < /dev/null 2>&1; then + local credsPath="$(gen3_secrets_folder)/g3auto/superset/appcreds.json" + if [ -f "$credsPath" ]; then + gen3 secrets sync + return 0 + fi + mkdir -p "$(dirname "$credsPath")" + if ! new_client > "$credsPath"; then + gen3_log_err "Failed to setup superset fence client" + rm "$credsPath" || true + return 1 + fi + gen3 secrets sync + fi + + if ! g3kubectl describe secret superset-g3auto | grep dbcreds.json > /dev/null 2>&1; then + gen3_log_info "create database" + if ! gen3 db setup superset; then + gen3_log_err "Failed setting up database for superset service" + return 1 + fi + gen3 secrets sync + fi +} + + +setup_secrets() { + # superset_secret.yaml populate and apply. + gen3_log_info "Deploying secrets for superset" + # subshell + + ( + if ! dbcreds="$(gen3 db creds superset)"; then + gen3_log_err "unable to find db creds for superset service" + return 1 + fi + + if ! 
appcreds="$(gen3 secrets decode superset-g3auto appcreds.json)"; then + gen3_log_err "unable to find app creds for superset service" + return 1 + fi + + local hostname=$(gen3 api hostname) + export DB_NAME=$(jq -r ".db_database" <<< "$dbcreds") + export DB_USER=$(jq -r ".db_username" <<< "$dbcreds") + export DB_PASS=$(jq -r ".db_password" <<< "$dbcreds") + export DB_HOST=$(jq -r ".db_host" <<< "$dbcreds") + + export FENCE_URL="https://${hostname}/user/user" + export FENCE_METADATA_URL="https://${hostname}/.well-known/openid-configuration" + export FENCE_CLIENT_ID=$(jq -r ".FENCE_CLIENT_ID" <<< "$appcreds" ) + export FENCE_CLIENT_SECRET=$(jq -r ".FENCE_CLIENT_SECRET" <<< "$appcreds" ) + if secret_key="$(gen3 secrets decode superset-env SECRET_KEY)"; then + export SECRET_KEY="$secret_key" + else + export SECRET_KEY=$(random_alphanumeric 32) + fi + envsubst <"${GEN3_HOME}/kube/services/superset/superset-secrets-template.yaml" | g3kubectl apply -f - + ) +} + +setup_ingress() { + local hostname=$(gen3 api hostname) + certs=$(aws acm list-certificates --certificate-statuses ISSUED | jq --arg hostname $hostname -c '.CertificateSummaryList[] | select(.DomainName | contains("*."+$hostname))') + if [ "$certs" = "" ]; then + gen3_log_info "no certs found for *.${hostname}. exiting" + exit 22 + fi + gen3_log_info "Found ACM certificate for *.$hostname" + export ARN=$(jq -r .CertificateArn <<< $certs) + export superset_hostname="superset.${hostname}" + envsubst <${GEN3_HOME}/kube/services/superset/superset-ingress.yaml | g3kubectl apply -f - +} + +setup_redis() { + g3kubectl apply -f "${GEN3_HOME}/kube/services/superset/superset-redis.yaml" +} + +# main -------------------------------------- +# deploy superset +if [[ $# -gt 0 && "$1" == "new-client" ]]; then + new_client + exit $? +elif [[ $# -gt 0 && "$1" == "ingress" ]]; then + setup_ingress + exit $? 
+fi + +setup_redis +setup_creds + +setup_secrets +setup_ingress + +g3kubectl apply -f "${GEN3_HOME}/kube/services/superset/superset-deploy.yaml" + +gen3_log_info "The superset service has been deployed onto the k8s cluster." diff --git a/kube/services/netpolicy/gen3/services/revproxy_netpolicy.yaml b/kube/services/netpolicy/gen3/services/revproxy_netpolicy.yaml index 3396532b6..3f6eb5603 100644 --- a/kube/services/netpolicy/gen3/services/revproxy_netpolicy.yaml +++ b/kube/services/netpolicy/gen3/services/revproxy_netpolicy.yaml @@ -13,6 +13,9 @@ spec: - ambassador-gen3 - auspice - ohdsi-atlas + - superset + - superset-worker + - superset-redis-master ingress: - from: - ipBlock: @@ -24,6 +27,7 @@ spec: - port: 81 - port: 82 - port: 443 + - port: 8088 egress: - to: - namespaceSelector: diff --git a/kube/services/netpolicy/gen3/services/superset-redis_netpolicy.yaml b/kube/services/netpolicy/gen3/services/superset-redis_netpolicy.yaml new file mode 100644 index 000000000..1b3ea7357 --- /dev/null +++ b/kube/services/netpolicy/gen3/services/superset-redis_netpolicy.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: netpolicy-egress-to-superset-redis-master +spec: + egress: + - to: + - podSelector: + matchLabels: + app: superset-redis-master + + podSelector: + matchExpressions: + - key: app + operator: In + values: + - superset + - superset-worker + policyTypes: + - Egress + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: netpolicy-ingress-to-superset-redis-master +spec: + ingress: + - from: + - podSelector: + matchExpressions: + - key: app + operator: In + values: + - superset + - superset-worker + podSelector: + matchLabels: + app: superset-redis-master + policyTypes: + - Ingress diff --git a/kube/services/netpolicy/gen3/services/superset_netpolicy.yaml b/kube/services/netpolicy/gen3/services/superset_netpolicy.yaml new file mode 100644 index 000000000..1b3ea7357 --- /dev/null +++ 
b/kube/services/netpolicy/gen3/services/superset_netpolicy.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: netpolicy-egress-to-superset-redis-master +spec: + egress: + - to: + - podSelector: + matchLabels: + app: superset-redis-master + + podSelector: + matchExpressions: + - key: app + operator: In + values: + - superset + - superset-worker + policyTypes: + - Egress + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: netpolicy-ingress-to-superset-redis-master +spec: + ingress: + - from: + - podSelector: + matchExpressions: + - key: app + operator: In + values: + - superset + - superset-worker + podSelector: + matchLabels: + app: superset-redis-master + policyTypes: + - Ingress diff --git a/kube/services/superset/superset-deploy.yaml b/kube/services/superset/superset-deploy.yaml new file mode 100644 index 000000000..222c9a660 --- /dev/null +++ b/kube/services/superset/superset-deploy.yaml @@ -0,0 +1,429 @@ +--- +# Source: superset/templates/secret-superset-config.yaml +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +apiVersion: v1 +kind: Secret +metadata: + name: superset-config + labels: + app: superset + chart: superset-0.6.1 + release: "superset" + heritage: "Helm" +type: Opaque +stringData: + superset_config.py: | + + import os + from cachelib.redis import RedisCache + + def env(key, default=None): + return os.getenv(key, default) + + MAPBOX_API_KEY = env('MAPBOX_API_KEY', '') + CACHE_CONFIG = { + 'CACHE_TYPE': 'redis', + 'CACHE_DEFAULT_TIMEOUT': 300, + 'CACHE_KEY_PREFIX': 'superset_', + 'CACHE_REDIS_HOST': env('REDIS_HOST'), + 'CACHE_REDIS_PORT': env('REDIS_PORT'), + 'CACHE_REDIS_PASSWORD': env('REDIS_PASSWORD'), + 'CACHE_REDIS_DB': env('REDIS_DB', 1), + } + DATA_CACHE_CONFIG = CACHE_CONFIG + + SQLALCHEMY_DATABASE_URI = f"postgresql+psycopg2://{env('DB_USER')}:{env('DB_PASS')}@{env('DB_HOST')}:{env('DB_PORT')}/{env('DB_NAME')}" + SQLALCHEMY_TRACK_MODIFICATIONS = True + SECRET_KEY = env('SECRET_KEY', '') + + # Flask-WTF flag for CSRF + WTF_CSRF_ENABLED = True + # Add endpoints that need to be exempt from CSRF protection + WTF_CSRF_EXEMPT_LIST = [] + # A CSRF token that expires in 1 year + WTF_CSRF_TIME_LIMIT = 60 * 60 * 24 * 365 + class CeleryConfig(object): + CELERY_IMPORTS = ('superset.sql_lab', ) + CELERY_ANNOTATIONS = {'tasks.add': {'rate_limit': '10/s'}} + BROKER_URL = f"redis://{env('REDIS_HOST')}:{env('REDIS_PORT')}/0" + CELERY_RESULT_BACKEND = f"redis://{env('REDIS_HOST')}:{env('REDIS_PORT')}/0" + + CELERY_CONFIG = CeleryConfig + RESULTS_BACKEND = RedisCache( + host=env('REDIS_HOST'), + port=env('REDIS_PORT'), + key_prefix='superset_results' + ) + + + # Overrides + # enable_proxy_fix + # This will make sure the redirect_uri is properly computed, even with SSL offloading + ENABLE_PROXY_FIX = True + + # oauth2Override + import logging + from superset.security import SupersetSecurityManager + + class CustomSsoSecurityManager(SupersetSecurityManager): + + def oauth_user_info(self, provider, response=None): + logging.info("Oauth2 provider: {0}.".format(provider)) + 
if provider == 'fence': + # As example, this line request a GET to base_url + '/' + userDetails with Bearer Authentication, + # and expects that authorization server checks the token, and response with user details + me = self.appbuilder.sm.oauth_remotes[provider].get(env('FENCE_URL')).json() + logging.info("user_data: {0}".format(me)) + # logging.info("user_data as dir: {0}".format(dir(me))) + return { 'name' : me['name'], 'email' : me['email'], 'id' : me['email'], 'username' : me['email'], 'first_name':'', 'last_name':''} + + CUSTOM_SECURITY_MANAGER = CustomSsoSecurityManager + + from flask_appbuilder.security.manager import AUTH_OAUTH + AUTH_TYPE = AUTH_OAUTH + OAUTH_PROVIDERS = [ + { 'name':'fence', + 'token_key':'access_token', # Name of the token in the response of access_token_url + 'icon':'fa-address-card', # Icon for the provider + 'remote_app': { + 'client_id': env('FENCE_CLIENT_ID'), # Client Id (Identify Superset application) + 'client_secret': env('FENCE_CLIENT_SECRET'), # Secret for this Client Id (Identify Superset application) + 'client_kwargs':{ + 'scope': 'openid user data' + }, + 'server_metadata_url': env('FENCE_METADATA_URL') + } + } + ] + + # Map Authlib roles to superset roles + AUTH_ROLE_ADMIN = 'Admin' + AUTH_ROLE_PUBLIC = 'Public' + + # Will allow user self registration, allowing to create Flask users from Authorized User + AUTH_USER_REGISTRATION = True + + # The default user self registration role + AUTH_USER_REGISTRATION_ROLE = "Public" + + + superset_init.sh: | + #!/bin/sh + set -eu + echo "Upgrading DB schema..." + superset db upgrade + echo "Initializing roles..." + superset init + + echo "Creating admin user..." + superset fab create-admin \ + --username admin \ + --firstname Superset \ + --lastname Admin \ + --email admin@superset.com \ + --password admin \ + || true + + if [ -f "/app/configs/import_datasources.yaml" ]; then + echo "Importing database connections.... 
" + superset import_datasources -p /app/configs/import_datasources.yaml + fi + superset_bootstrap.sh: | + #!/bin/bash + rm -rf /var/lib/apt/lists/* && \ + pip install \ + sqlalchemy-trino==0.4.1 \ + pymssql==2.2.5 \ + Authlib==1.0.1 \ + psycopg2-binary==2.9.1 \ + redis==3.5.3 && \ + if [ ! -f ~/bootstrap ]; then echo "Running Superset with uid 0" > ~/bootstrap; fi +--- +# Source: superset/templates/service.yaml +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +apiVersion: v1 +kind: Service +metadata: + name: superset + labels: + app: superset + chart: superset-0.6.1 + release: superset + heritage: Helm +spec: + type: NodePort + ports: + - port: 8088 + targetPort: http + protocol: TCP + name: http + selector: + app: superset + release: superset +--- +# Source: superset/templates/deployment-worker.yaml +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +apiVersion: apps/v1 +kind: Deployment +metadata: + name: superset-worker + labels: + app: superset-worker + chart: superset-0.6.1 + release: superset + heritage: Helm +spec: + replicas: 1 + selector: + matchLabels: + app: superset-worker + release: superset + template: + metadata: + annotations: + checksum/superset_config.py: 44092ea9a5a0d886f392d2804bb2af8fb356a891351005bee3b868b114036f01 + checksum/connections: a91716d6d1088e870fbe02159dc0b066dd011885aa08a22fbe60ea1cd4720f82 + checksum/extraConfigs: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + checksum/extraSecrets: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + checksum/extraSecretEnv: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + checksum/configOverrides: fdad02b1a387b3699858d0e2c9bd002705d72ccc3df97e969f1b7ff910b7b352 + checksum/configOverridesFiles: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + + labels: + app: superset-worker + release: superset + netnolimit: "yes" + spec: + securityContext: + runAsUser: 0 + initContainers: + - command: + - /bin/sh + - -c + - until nc -zv $DB_HOST $DB_PORT -w1; do echo 'waiting for db'; sleep 1; done + envFrom: + - secretRef: + name: 'superset-env' + image: 'busybox:latest' + imagePullPolicy: 'IfNotPresent' + name: wait-for-postgres + containers: + - name: superset + image: "apache/superset:latest" + imagePullPolicy: Always + command: ["/bin/sh","-c",". 
/app/pythonpath/superset_bootstrap.sh; celery --app=superset.tasks.celery_app:app worker"] + env: + - name: "SUPERSET_PORT" + value: "8088" + envFrom: + - secretRef: + name: "superset-env" + volumeMounts: + - name: superset-config + mountPath: "/app/pythonpath" + readOnly: true + resources: + {} + volumes: + - name: superset-config + secret: + secretName: superset-config +--- +# Source: superset/templates/deployment.yaml +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +apiVersion: apps/v1 +kind: Deployment +metadata: + name: superset + labels: + app: superset + chart: superset-0.6.1 + release: superset + heritage: Helm +spec: + replicas: 1 + selector: + matchLabels: + app: superset + release: superset + template: + metadata: + annotations: + # Force reload on config changes + checksum/superset_config.py: 44092ea9a5a0d886f392d2804bb2af8fb356a891351005bee3b868b114036f01 + checksum/superset_init.sh: e6b1e8eac1f7a79a07a6c72a0e2ee6e09654eeb439c6bbe61bfd676917c41e02 + checksum/superset_bootstrap.sh: a6edf034118d68cef7203cc3181bb6c72b6244cdedf270ee4accc9ae9ff92b2e + checksum/connections: a91716d6d1088e870fbe02159dc0b066dd011885aa08a22fbe60ea1cd4720f82 + checksum/extraConfigs: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + checksum/extraSecrets: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + checksum/extraSecretEnv: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + checksum/configOverrides: fdad02b1a387b3699858d0e2c9bd002705d72ccc3df97e969f1b7ff910b7b352 + checksum/configOverridesFiles: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + labels: + app: superset + release: superset + netnolimit: "yes" + spec: + securityContext: + runAsUser: 0 + initContainers: + - command: + - /bin/sh + - -c + - until nc -zv $DB_HOST $DB_PORT -w1; do echo 'waiting for db'; sleep 1; done + envFrom: + - secretRef: + name: 'superset-env' + image: 'busybox:latest' + imagePullPolicy: 'IfNotPresent' + name: wait-for-postgres + containers: + - name: superset + image: "apache/superset:latest" + imagePullPolicy: Always + command: ["/bin/sh","-c",". 
/app/pythonpath/superset_bootstrap.sh; /usr/bin/run-server.sh"] + env: + - name: "SUPERSET_PORT" + value: "8088" + envFrom: + - secretRef: + name: "superset-env" + volumeMounts: + - name: superset-config + mountPath: "/app/pythonpath" + readOnly: true + ports: + - name: http + containerPort: 8088 + protocol: TCP + resources: + {} + + volumes: + - name: superset-config + secret: + secretName: superset-config +--- +# Source: superset/templates/init-job.yaml +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +apiVersion: batch/v1 +kind: Job +metadata: + name: superset-init-db + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": "before-hook-creation" +spec: + template: + metadata: + name: superset-init-db + labels: + netnolimit: "yes" + spec: + securityContext: + runAsUser: 0 + initContainers: + - command: + - /bin/sh + - -c + - until nc -zv $DB_HOST $DB_PORT -w1; do echo 'waiting for db'; sleep 1; done + envFrom: + - secretRef: + name: 'superset-env' + image: 'busybox:latest' + imagePullPolicy: 'IfNotPresent' + name: wait-for-postgres + containers: + - name: superset-init-db + image: "apache/superset:latest" + envFrom: + - secretRef: + name: superset-env + imagePullPolicy: Always + volumeMounts: + - name: superset-config + mountPath: "/app/pythonpath" + readOnly: true + command: ["/bin/sh","-c",". /app/pythonpath/superset_bootstrap.sh; . /app/pythonpath/superset_init.sh"] + resources: + {} + volumes: + - name: superset-config + secret: + secretName: superset-config + restartPolicy: Never diff --git a/kube/services/superset/superset-ingress.yaml b/kube/services/superset/superset-ingress.yaml new file mode 100644 index 000000000..c0646de74 --- /dev/null +++ b/kube/services/superset/superset-ingress.yaml @@ -0,0 +1,25 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: superset-ingress + annotations: + # TODO: Make this configurable + alb.ingress.kubernetes.io/scheme: internet-facing + alb.ingress.kubernetes.io/tags: Environment=$vpc_name + alb.ingress.kubernetes.io/certificate-arn: $ARN + alb.ingress.kubernetes.io/group.name: "$vpc_name" + alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]' + alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}' +spec: + ingressClassName: alb + rules: + - host: $superset_hostname + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: 
superset + port: + number: 8088 diff --git a/kube/services/superset/superset-redis.yaml b/kube/services/superset/superset-redis.yaml new file mode 100644 index 000000000..c1c77f391 --- /dev/null +++ b/kube/services/superset/superset-redis.yaml @@ -0,0 +1,368 @@ +# Source: superset/charts/redis/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: superset-redis + labels: + app.kubernetes.io/name: redis + helm.sh/chart: redis-16.3.1 + app.kubernetes.io/instance: superset + app.kubernetes.io/managed-by: Helm +--- +# Source: superset/charts/redis/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: superset-redis-configuration + labels: + app.kubernetes.io/name: redis + helm.sh/chart: redis-16.3.1 + app.kubernetes.io/instance: superset + app.kubernetes.io/managed-by: Helm +data: + redis.conf: |- + # User-supplied common configuration: + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. 
+ save "" + # End of common configuration + master.conf: |- + dir /data + # User-supplied master configuration: + rename-command FLUSHDB "" + rename-command FLUSHALL "" + # End of master configuration + replica.conf: |- + dir /data + slave-read-only yes + # User-supplied replica configuration: + rename-command FLUSHDB "" + rename-command FLUSHALL "" + # End of replica configuration +--- +# Source: superset/charts/redis/templates/health-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: superset-redis-health + labels: + app.kubernetes.io/name: redis + helm.sh/chart: redis-16.3.1 + app.kubernetes.io/instance: superset + app.kubernetes.io/managed-by: Helm +data: + ping_readiness_local.sh: |- + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ $? == 124 ]; then + echo "Timed out" + exit 1 + fi + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ + -p $REDIS_PORT \ + ping + ) + if [ $? 
== 124 ]; then + echo "Timed out" + exit 1 + fi + responseFirstWord=$(echo $response | head -n1 | awk '{print $1;}') + if [ "$response" != "PONG" ] && [ "$responseFirstWord" != "LOADING" ] && [ "$responseFirstWord" != "MASTERDOWN" ]; then + echo "$response" + exit 1 + fi + ping_readiness_master.sh: |- + #!/bin/bash + + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ $? == 124 ]; then + echo "Timed out" + exit 1 + fi + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- + #!/bin/bash + + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ + ping + ) + if [ $? == 124 ]; then + echo "Timed out" + exit 1 + fi + responseFirstWord=$(echo $response | head -n1 | awk '{print $1;}') + if [ "$response" != "PONG" ] && [ "$responseFirstWord" != "LOADING" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? 
+ exit $exit_status +--- +# Source: superset/charts/redis/templates/scripts-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: superset-redis-scripts + labels: + app.kubernetes.io/name: redis + helm.sh/chart: redis-16.3.1 + app.kubernetes.io/instance: superset + app.kubernetes.io/managed-by: Helm +data: + start-master.sh: | + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + ARGS=("--port" "${REDIS_PORT}") + ARGS+=("--protected-mode" "no") + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + exec redis-server "${ARGS[@]}" +--- +# Source: superset/charts/redis/templates/headless-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: superset-redis-headless + labels: + app.kubernetes.io/name: redis + helm.sh/chart: redis-16.3.1 + app.kubernetes.io/instance: superset + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-redis + port: 6379 + targetPort: redis + selector: + app.kubernetes.io/name: redis + app.kubernetes.io/instance: superset +--- +# Source: superset/charts/redis/templates/master/service.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + gen3.io/network-ingress: "superset,superset-worker" + name: superset-redis-master + labels: + app.kubernetes.io/name: redis + helm.sh/chart: redis-16.3.1 + app.kubernetes.io/instance: superset + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: master +spec: + type: ClusterIP + + ports: + - name: tcp-redis + port: 6379 + targetPort: redis + selector: + app.kubernetes.io/name: redis + app.kubernetes.io/instance: superset + 
app.kubernetes.io/component: master +--- +# Source: superset/charts/redis/templates/master/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: + gen3.io/network-ingress: "superset,superset-worker" + name: superset-redis-master + labels: + app.kubernetes.io/name: redis + helm.sh/chart: redis-16.3.1 + app.kubernetes.io/instance: superset + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: master +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: redis + app.kubernetes.io/instance: superset + app.kubernetes.io/component: master + serviceName: superset-redis-headless + updateStrategy: + rollingUpdate: {} + type: RollingUpdate + template: + metadata: + labels: + app: superset-redis-master + dbomop-data: "yes" + app.kubernetes.io/name: redis + helm.sh/chart: redis-16.3.1 + app.kubernetes.io/instance: superset + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: master + annotations: + checksum/configmap: b765300878bf502421423c6a14a4c4ea2fa093089a9649b698e6301f5c5815b2 + checksum/health: f25ecd3c820553a892da51cdc4ae25d85969f58033bdf527cd9a2bb05fcbcb83 + checksum/scripts: 6a772f276bc7b11f7beaf7add93416a2438de881b4085cf2b9c8e09453cf37e5 + checksum/secret: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 + spec: + + securityContext: + fsGroup: 1001 + serviceAccountName: superset-redis + affinity: + podAffinity: + + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/name: redis + app.kubernetes.io/instance: superset + app.kubernetes.io/component: master + namespaces: + - "qa-mickey" + topologyKey: kubernetes.io/hostname + weight: 1 + nodeAffinity: + + terminationGracePeriodSeconds: 30 + containers: + - name: redis + image: docker.io/bitnami/redis:6.2.6-debian-10-r120 + imagePullPolicy: "IfNotPresent" + securityContext: + runAsUser: 1001 + command: + - /bin/bash + args: + - -c + - 
/opt/bitnami/scripts/start-scripts/start-master.sh + env: + - name: BITNAMI_DEBUG + value: "false" + - name: REDIS_REPLICATION_MODE + value: master + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + - name: REDIS_TLS_ENABLED + value: "no" + - name: REDIS_PORT + value: "6379" + ports: + - name: redis + containerPort: 6379 + livenessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + # One second longer than command timeout should prevent generation of zombie processes. + timeoutSeconds: 6 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh 5 + readinessProbe: + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 2 + successThreshold: 1 + failureThreshold: 5 + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh 1 + resources: + limits: {} + requests: {} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + - name: redis-data + mountPath: /data + subPath: + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc/ + - name: tmp + mountPath: /tmp + volumes: + - name: start-scripts + configMap: + name: superset-redis-scripts + defaultMode: 0755 + - name: health + configMap: + name: superset-redis-health + defaultMode: 0755 + - name: config + configMap: + name: superset-redis-configuration + - name: redis-tmp-conf + emptyDir: {} + - name: tmp + emptyDir: {} + - name: redis-data + emptyDir: {} diff --git a/kube/services/superset/superset-secrets-template.yaml b/kube/services/superset/superset-secrets-template.yaml new file mode 100644 index 000000000..5d300d6a6 --- /dev/null +++ b/kube/services/superset/superset-secrets-template.yaml @@ -0,0 +1,42 @@ +--- +# Source: superset/templates/secret-env.yaml +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +apiVersion: v1 +kind: Secret +metadata: + name: superset-env + labels: + app: superset + chart: superset-0.6.1 + release: "superset" + heritage: "Helm" +type: Opaque +stringData: + REDIS_HOST: "superset-redis-headless" + REDIS_PORT: "6379" + DB_HOST: $DB_HOST + DB_PORT: "5432" + DB_USER: $DB_USER + DB_PASS: $DB_PASS + DB_NAME: $DB_NAME + FENCE_URL: $FENCE_URL + FENCE_CLIENT_ID: $FENCE_CLIENT_ID + FENCE_CLIENT_SECRET: $FENCE_CLIENT_SECRET + FENCE_METADATA_URL: $FENCE_METADATA_URL + SECRET_KEY: $SECRET_KEY +--- diff --git a/kube/services/superset/values.yaml b/kube/services/superset/values.yaml new file mode 100644 index 000000000..cd9435978 --- /dev/null +++ b/kube/services/superset/values.yaml @@ -0,0 +1,532 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Default values for superset. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +# User ID directive. This user must have enough permissions to run the bootstrap script +# Running containers as root is not recommended in production. Change this to another UID - e.g. 1000 to be more secure +runAsUser: 0 + +# Create custom service account for Superset. If create: true and name is not provided, superset.fullname will be used. +# serviceAccountName: superset +serviceAccount: + create: false + +# Install additional packages and do any other bootstrap configuration in this script +# For production clusters it's recommended to build own image with this step done in CI +bootstrapScript: | + #!/bin/bash + rm -rf /var/lib/apt/lists/* && \ + pip install \ + sqlalchemy-trino==0.4.1 \ + pymssql==2.2.5 \ + Authlib==1.0.1 \ + psycopg2-binary==2.9.1 \ + redis==3.5.3 && \ + if [ ! -f ~/bootstrap ]; then echo "Running Superset with uid {{ .Values.runAsUser }}" > ~/bootstrap; fi + +## The name of the secret which we will use to generate a superset_config.py file +## Note: this secret must have the key superset_config.py in it and can include other files as well +## +configFromSecret: '{{ template "superset.fullname" . }}-config' + +## The name of the secret which we will use to populate env vars in deployed pods +## This can be useful for secret keys, etc. +## +envFromSecret: '{{ template "superset.fullname" . 
}}-env' +## This can be a list of template strings +envFromSecrets: [] + +## Extra environment variables that will be passed into pods +## +extraEnv: {} + # Extend timeout to allow long running queries. + # GUNICORN_TIMEOUT: 300 + + + # OAUTH_HOME_DOMAIN: .. + # # If a whitelist is not set, any address that can use your OAuth2 endpoint will be able to login. + # # this includes any random Gmail address if your OAuth2 Web App is set to External. + # OAUTH_WHITELIST_REGEX: ... + +## Extra environment variables in RAW format that will be passed into pods +## +extraEnvRaw: [] + # Load DB password from other secret (e.g. for zalando operator) + # - name: DB_PASS + # valueFrom: + # secretKeyRef: + # name: superset.superset-postgres.credentials.postgresql.acid.zalan.do + # key: password + +## Extra environment variables to pass as secrets +## +extraSecretEnv: {} + # MAPBOX_API_KEY: ... + # # Google API Keys: https://console.cloud.google.com/apis/credentials + # GOOGLE_KEY: ... + # GOOGLE_SECRET: ... + +extraConfigs: {} + # import_datasources.yaml: | + # databases: + # - allow_file_upload: true + # allow_ctas: true + # allow_cvas: true + # database_name: example-db + # extra: "{\r\n \"metadata_params\": {},\r\n \"engine_params\": {},\r\n \"\ + # metadata_cache_timeout\": {},\r\n \"schemas_allowed_for_file_upload\": []\r\n\ + # }" + # sqlalchemy_uri: example://example-db.local + # tables: [] + +extraSecrets: {} + +extraVolumes: [] + # - name: customConfig + # configMap: + # name: '{{ template "superset.fullname" . 
}}-custom-config' + # - name: additionalSecret + # secret: + # secretName: my-secret + # defaultMode: 0600 + +extraVolumeMounts: [] + # - name: customConfig + # mountPath: /mnt/config + # readOnly: true + # - name: additionalSecret: + # mountPath: /mnt/secret + +# A dictionary of overrides to append at the end of superset_config.py - the name does not matter +# WARNING: the order is not guaranteed +configOverrides: + enable_proxy_fix: | + # This will make sure the redirect_uri is properly computed, even with SSL offloading + ENABLE_PROXY_FIX = True + oauth2Override: | + import logging + from superset.security import SupersetSecurityManager + + base_url = os.getenv("OAUTH_HOME_DOMAIN").rstrip("/") + + class CustomSsoSecurityManager(SupersetSecurityManager): + + def oauth_user_info(self, provider, response=None): + logging.info("Oauth2 provider: {0}.".format(provider)) + if provider == 'fence': + # As example, this line request a GET to base_url + '/' + userDetails with Bearer Authentication, + # and expects that authorization server checks the token, and response with user details + me = self.appbuilder.sm.oauth_remotes[provider].get(base_url + '/user/user').json() + logging.info("user_data: {0}".format(me)) + # logging.info("user_data as dir: {0}".format(dir(me))) + return { 'name' : me['name'], 'email' : me['email'], 'id' : me['email'], 'username' : me['email'], 'first_name':'', 'last_name':''} + + CUSTOM_SECURITY_MANAGER = CustomSsoSecurityManager + + from flask_appbuilder.security.manager import AUTH_OAUTH + AUTH_TYPE = AUTH_OAUTH + OAUTH_PROVIDERS = [ + { 'name': 'fence', + 'token_key': 'access_token', # Name of the token in the response of access_token_url + 'icon': 'fa fa-openid', # Icon for the provider + 'remote_app': { + 'client_id': os.environ.get("OAUTH_FENCE_KEY"), # Client Id (Identify Superset application) + 'client_secret': os.environ.get("OAUTH_FENCE_SECRET"), # Secret for this Client Id (Identify Superset application) + 'client_kwargs': { + 
'scope': 'openid user data' + }, + 'server_metadata_url': base_url + '/.well-known/openid-configuration' + } + } + ] + + # Map Authlib roles to superset roles + AUTH_ROLE_ADMIN = 'Admin' + AUTH_ROLE_PUBLIC = 'Public' + + # Will allow user self registration, allowing to create Flask users from Authorized User + AUTH_USER_REGISTRATION = True + + # The default user self registration role + AUTH_USER_REGISTRATION_ROLE = 'Public' + + AUTH_USER_REGISTRATION_ROLE_JMESPATH = "contains(['@uchicago.edu'], email) && 'Admin' || 'Public'" + secret: | + # Generate your own secret key for encryption. Use openssl rand -base64 42 to generate a good key + SECRET_KEY = 'PLACEHOLDER_RANDOM' + +#configOverrides: { +# WEBDRIVER_BASEURL : "https://qa-mickey.planx-pla.net/superset/" +#} + # extend_timeout: | + # # Extend timeout to allow long running queries. + # SUPERSET_WEBSERVER_TIMEOUT = ... + # enable_oauth: | + # from flask_appbuilder.security.manager import (AUTH_DB, AUTH_OAUTH) + # AUTH_TYPE = AUTH_OAUTH + + # OAUTH_PROVIDERS = [ + # { + # "name": "google", + # "whitelist": [ os.getenv("OAUTH_WHITELIST_REGEX", "") ], + # "icon": "fa-google", + # "token_key": "access_token", + # "remote_app": { + # "client_id": os.environ.get("GOOGLE_KEY"), + # "client_secret": os.environ.get("GOOGLE_SECRET"), + # "api_base_url": "https://www.googleapis.com/oauth2/v2/", + # "client_kwargs": {"scope": "email profile"}, + # "request_token_url": None, + # "access_token_url": "https://accounts.google.com/o/oauth2/token", + # "authorize_url": "https://accounts.google.com/o/oauth2/auth", + # "authorize_params": {"hd": os.getenv("OAUTH_HOME_DOMAIN", "")} + # } + # } + # ] + # # Map Authlib roles to superset roles + # AUTH_ROLE_ADMIN = 'Admin' + # AUTH_ROLE_PUBLIC = 'Public' + # # Will allow user self registration, allowing to create Flask users from Authorized User + # AUTH_USER_REGISTRATION = True + # # The default user self registration role + # AUTH_USER_REGISTRATION_ROLE = "Admin" + # secret: | + # # 
Generate your own secret key for encryption. Use openssl rand -base64 42 to generate a good key + # SECRET_KEY = 'YOUR_OWN_RANDOM_GENERATED_SECRET_KEY' +# Same as above but the values are files +configOverridesFiles: {} + # extend_timeout: extend_timeout.py + # enable_oauth: enable_oauth.py + + +configMountPath: "/app/pythonpath" + +extraConfigMountPath: "/app/configs" + +image: + repository: apache/superset + tag: latest + pullPolicy: Always + +imagePullSecrets: [] + +initImage: + repository: busybox + tag: latest + pullPolicy: IfNotPresent + +service: + type: NodePort + port: 8088 + annotations: {} + # cloud.google.com/load-balancer-type: "Internal" + loadBalancerIP: null + +ingress: + enabled: false + # ingressClassName: nginx + annotations: {} + # kubernetes.io/tls-acme: "true" + ## Extend timeout to allow long running queries. + # nginx.ingress.kubernetes.io/proxy-connect-timeout: "300" + # nginx.ingress.kubernetes.io/proxy-read-timeout: "300" + # nginx.ingress.kubernetes.io/proxy-send-timeout: "300" + path: / + pathType: ImplementationSpecific + hosts: + - chart-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +## +## Custom hostAliases for all superset pods +## https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/ +hostAliases: [] +# - hostnames: +# - nodns.my.lan +# ip: 18.27.36.45 + + +## +## Superset node configuration +supersetNode: + command: + - "/bin/sh" + - "-c" + - ". 
{{ .Values.configMountPath }}/superset_bootstrap.sh; /usr/bin/run-server.sh" + connections: + # Change in case of bringing your own redis and then also set redis.enabled:false + redis_host: '{{ template "superset.fullname" . }}-redis-headless' + # redis_password: superset + redis_port: "6379" + # You need to change below configuration incase bringing own PostgresSQL instance and also set postgresql.enabled:false + db_host: "PLACEHOLDER" + db_port: "5432" + db_user: PLACEHOLDER + db_pass: PLACEHOLDER + db_name: PLACEHOLDER + env: {} + forceReload: false # If true, forces deployment to reload on each upgrade + initContainers: + - name: wait-for-postgres + image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: "{{ .Values.initImage.pullPolicy }}" + envFrom: + - secretRef: + name: '{{ tpl .Values.envFromSecret . }}' + command: [ "/bin/sh", "-c", "until nc -zv $DB_HOST $DB_PORT -w1; do echo 'waiting for db'; sleep 1; done" ] + ## Annotations to be added to supersetNode deployment + deploymentAnnotations: {} + ## Annotations to be added to supersetNode pods + podAnnotations: {} + ## Labels to be added to supersetNode pods + podLabels: + dbsuperset: "yes" + public: "yes" +## +## Superset worker configuration +supersetWorker: + command: + - "/bin/sh" + - "-c" + - ". {{ .Values.configMountPath }}/superset_bootstrap.sh; celery --app=superset.tasks.celery_app:app worker" + forceReload: false # If true, forces deployment to reload on each upgrade + initContainers: + - name: wait-for-postgres + image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: "{{ .Values.initImage.pullPolicy }}" + envFrom: + - secretRef: + name: '{{ tpl .Values.envFromSecret . 
}}' + command: [ "/bin/sh", "-c", "until nc -zv $DB_HOST $DB_PORT -w1; do echo 'waiting for db'; sleep 1; done" ] + ## Annotations to be added to supersetWorker deployment + deploymentAnnotations: {} + ## Annotations to be added to supersetWorker pods + podAnnotations: {} + ## Labels to be added to supersetWorker pods + podLabels: + dbsuperset: "yes" + public: "yes" +## +## Superset beat configuration (to trigger scheduled jobs like reports) +supersetCeleryBeat: + # This is only required if you intend to use alerts and reports + enabled: false + command: + - "/bin/sh" + - "-c" + - ". {{ .Values.configMountPath }}/superset_bootstrap.sh; celery --app=superset.tasks.celery_app:app beat --pidfile /tmp/celerybeat.pid --schedule /tmp/celerybeat-schedule" + forceReload: false # If true, forces deployment to reload on each upgrade + initContainers: + - name: wait-for-postgres + image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: "{{ .Values.initImage.pullPolicy }}" + envFrom: + - secretRef: + name: '{{ tpl .Values.envFromSecret . }}' + command: [ "/bin/sh", "-c", "until nc -zv $DB_HOST $DB_PORT -w1; do echo 'waiting for db'; sleep 1; done" ] + ## Annotations to be added to supersetCeleryBeat deployment + deploymentAnnotations: {} + ## Annotations to be added to supersetCeleryBeat pods + podAnnotations: {} + ## Labels to be added to supersetCeleryBeat pods + podLabels: + dbsuperset: "yes" + public: "yes" +## +## Init job configuration +init: + # Configure resources + # Warning: fab command consumes a lot of ram and can + # cause the process to be killed due to OOM if it exceeds limit + # Make sure you are giving a strong password for the admin user creation( else make sure you are changing after setup) + # Also change the admin email to your own custom email. + resources: {} + # limits: + # cpu: + # memory: + # requests: + # cpu: + # memory: + command: + - "/bin/sh" + - "-c" + - ". {{ .Values.configMountPath }}/superset_bootstrap.sh; . 
{{ .Values.configMountPath }}/superset_init.sh" + enabled: true + loadExamples: false + createAdmin: false + adminUser: + username: 'PLACEHOLDER' + firstname: 'PLACEHOLDER' + lastname: 'PLACEHOLDER' + email: 'PLACEHOLDER' + password: 'PLACEHOLDER' + initContainers: + - name: wait-for-postgres + image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: "{{ .Values.initImage.pullPolicy }}" + envFrom: + - secretRef: + name: '{{ tpl .Values.envFromSecret . }}' + command: [ "/bin/sh", "-c", "until nc -zv $DB_HOST $DB_PORT -w1; do echo 'waiting for db'; sleep 1; done" ] + initscript: |- + #!/bin/sh + set -eu + echo "Upgrading DB schema..." + superset db upgrade + echo "Initializing roles..." + superset init + {{ if .Values.init.createAdmin }} + echo "Creating admin user..." + superset fab create-admin \ + --username {{ .Values.init.adminUser.username }} \ + --firstname {{ .Values.init.adminUser.firstname }} \ + --lastname {{ .Values.init.adminUser.lastname }} \ + --email {{ .Values.init.adminUser.email }} \ + --password {{ .Values.init.adminUser.password }} \ + || true + {{- end }} + {{ if .Values.init.loadExamples }} + echo "Loading examples..." + superset load_examples + {{- end }} + if [ -f "{{ .Values.extraConfigMountPath }}/import_datasources.yaml" ]; then + echo "Importing database connections.... " + superset import_datasources -p {{ .Values.extraConfigMountPath }}/import_datasources.yaml + fi + ## Annotations to be added to init job pods + podAnnotations: {} + +## +## Configuration values for the postgresql dependency. +## ref: https://github.com/kubernetes/charts/blob/master/stable/postgresql/README.md +postgresql: + ## + ## Use the PostgreSQL chart dependency. + ## Set to false if bringing your own PostgreSQL. + enabled: false + ## + ## The name of an existing secret that contains the postgres password. + existingSecret: + ## Name of the key containing the secret. 
+ existingSecretKey: postgresql-password + ## + ## If you are bringing your own PostgreSQL, you should set postgresHost and + ## also probably service.port, postgresqlUsername, postgresqlPassword, and postgresqlDatabase + ## postgresHost: + ## + ## PostgreSQL port + service: + port: 5432 + ## PostgreSQL User to create. + postgresqlUsername: superset + ## + ## PostgreSQL Password for the new user. + ## If not set, a random 10 characters password will be used. + postgresqlPassword: superset + ## + ## PostgreSQL Database to create. + postgresqlDatabase: superset + ## + ## Persistent Volume Storage configuration. + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes + persistence: + ## + ## Enable PostgreSQL persistence using Persistent Volume Claims. + enabled: true + ## + ## Persistant class + # storageClass: classname + ## + ## Access modes: + accessModes: + - ReadWriteOnce + +## Configuration values for the Redis dependency. +## ref: https://github.com/bitnami/charts/blob/master/bitnami/redis +## More documentation can be found here: https://artifacthub.io/packages/helm/bitnami/redis +redis: + ## + ## Use the redis chart dependency. + ## + ## If you are bringing your own redis, you can set the host in supersetNode.connections.redis_host + ## + ## Set to false if bringing your own redis. + enabled: true + ## + ## Set architecture to standalone/replication + architecture: standalone + ## + ## Auth configuration: + ## + auth: + ## Enable password authentication + enabled: false + ## The name of an existing secret that contains the redis password. + existingSecret: "" + ## Name of the key containing the secret. + existingSecretKey: "" + ## Redis password + password: superset + ## + ## Master configuration + ## + master: + ## + ## Image configuration + # image: + ## + ## docker registry secret names (list) + # pullSecrets: nil + ## + ## Configure persistance + persistence: + ## + ## Use a PVC to persist data. 
+ enabled: false + ## + ## Persistant class + # storageClass: classname + ## + ## Access mode: + accessModes: + - ReadWriteOnce + +nodeSelector: {} + +tolerations: [] + +affinity: {} From 7a68bfafca75ecfa015c8112d391400e8428c8f6 Mon Sep 17 00:00:00 2001 From: jawadqur <55899496+jawadqur@users.noreply.github.com> Date: Tue, 28 Jun 2022 09:15:12 -0500 Subject: [PATCH 056/106] Update web_wildcard_whitelist (#1984) --- files/squid_whitelist/web_wildcard_whitelist | 23 ++++++++++---------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/files/squid_whitelist/web_wildcard_whitelist b/files/squid_whitelist/web_wildcard_whitelist index 283a0044a..1b87923e4 100644 --- a/files/squid_whitelist/web_wildcard_whitelist +++ b/files/squid_whitelist/web_wildcard_whitelist @@ -1,4 +1,6 @@ .adfs.federation.va.gov +.agdr.org.nz +.agha.umccr.org .alpinelinux.org .amazonaws.com .amazoncognito.com @@ -6,7 +8,6 @@ .anaconda.org .apache.org .azureedge.net -.qg3.apps.qualys.com .bioconductor.org .bionimbus.org .bitbucket.org @@ -39,34 +40,40 @@ .erlang-solutions.com .extjs.com .fedoraproject.org +.genome.jp .github.com .githubusercontent.com .gitlab.com .googleapis.com .googleusercontent.com +.gstatic.com .hashicorp.com .healdata.org .idph.illinois.gov .immport.org -.jenkins-ci.org .jenkins.io +.jenkins-ci.org .k8s.io +.kegg.jp .kidsfirstdrc.org .letsencrypt.org .maven.org .metacpan.org .midrc.org +.nesi.org.nz .newrelic.com .niaiddata.org .nih.gov .nodesource.com .novocraft.com .occ-data.org +.occ-pla.net .oicr.on.ca .okta.com .opensciencedatacloud.org .osuosl.org .paloaltonetworks.com +.pandemicresponsecommons.org .perl.org .planx-ci.io .planx-pla.net @@ -75,8 +82,10 @@ .pypi.python.org .pythonhosted.org .pyx4me.com +.qg3.apps.qualys.com .quay.io .rcsb.org +.rstudio.com .rubygems.org .sa-update.pccc.com .sencha.com @@ -91,15 +100,7 @@ .ucsc.edu .veracode.com .virtualbox.org +.visitdata.org .xmission.com .yahooapis.com -.pandemicresponsecommons.org -.occ-pla.net 
-.rstudio.com -.gstatic.com -.visitdata.org .yarnpkg.com -.nesi.org.nz -.agdr.org.nz -.agha.umccr.org -.genome.jp From 6fec800abb8152dafac78cc9934c890e15c75848 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Wed, 29 Jun 2022 14:02:40 -0400 Subject: [PATCH 057/106] Feat/terraform1x install (#1983) * Added the ability to install Terraform 1.2 to the work vms. * Fixed a typo in kube-setup-workvm.sh --- gen3/bin/kube-setup-workvm.sh | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/gen3/bin/kube-setup-workvm.sh b/gen3/bin/kube-setup-workvm.sh index b740b388b..c41ab3224 100644 --- a/gen3/bin/kube-setup-workvm.sh +++ b/gen3/bin/kube-setup-workvm.sh @@ -138,6 +138,15 @@ if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then /bin/rm -rf "${XDG_RUNTIME_DIR}/t12" } + install_terraform1.2() { + mkdir "${XDG_RUNTIME_DIR}/t1.2" + curl -o "${XDG_RUNTIME_DIR}/t1.2/terraform1.2.zip" https://releases.hashicorp.com/terraform/1.2.3/terraform_1.2.3_linux_amd64.zip + sudo /bin/rm -rf /usr/local/bin/terraform1.2 > /dev/null 2>&1 || true + unzip "${XDG_RUNTIME_DIR}/t1.2/terraform1.2.zip" -d "${XDG_RUNTIME_DIR}/t1.2"; + sudo cp "${XDG_RUNTIME_DIR}/t1.2/terraform" "/usr/local/bin/terraform1.2" + /bin/rm -rf "${XDG_RUNTIME_DIR}/t1.2" + } + if ! which terraform > /dev/null 2>&1; then install_terraform else @@ -154,6 +163,14 @@ if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then install_terraform12 fi fi + if ! which terraform1.2 > /dev/null 2>&1; then + install_terraform1.2 + else + T12_VERSION=$(terraform1.2 --version | head -1 | awk '{ print $2 }' | sed 's/^[^0-9]*//') + if ! 
semver_ge "$T12_VERSION" "1.2.3"; then + install_terraform12 + fi + fi ) if [[ -f /etc/systemd/timesyncd.conf ]] \ From 5e28af2a7de911154ab90b32a21899aee22f3479 Mon Sep 17 00:00:00 2001 From: Mingfei Shao <2475897+mfshao@users.noreply.github.com> Date: Wed, 29 Jun 2022 18:54:34 -0500 Subject: [PATCH 058/106] Update kube-setup-cedar-wrapper.sh (#1986) --- gen3/bin/kube-setup-cedar-wrapper.sh | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/gen3/bin/kube-setup-cedar-wrapper.sh b/gen3/bin/kube-setup-cedar-wrapper.sh index 129e4de64..e620aceb4 100644 --- a/gen3/bin/kube-setup-cedar-wrapper.sh +++ b/gen3/bin/kube-setup-cedar-wrapper.sh @@ -7,14 +7,13 @@ gen3_load "gen3/lib/kube-setup-init" cedar_api_key_file="$(gen3_secrets_folder)/cedar_api_key.txt" if [[ ! -f "$cedar_api_key_file" ]]; then - gen3_log_err "No CEDAR api key present in ${cedar_api_key_file}" + gen3_log_err "No CEDAR api key present in ${cedar_api_key_file}, not rolling CEDAR wrapper" else if g3kubectl get secret cedar-service-api-key > /dev/null 2>&1; then g3kubectl delete secret cedar-service-api-key fi g3kubectl create secret generic "cedar-service-api-key" --from-file=cedar_api_key.txt=${cedar_api_key_file} + + g3kubectl apply -f "${GEN3_HOME}/kube/services/cedar-wrapper/cedar-wrapper-service.yaml" + gen3 roll cedar-wrapper fi - -g3kubectl apply -f "${GEN3_HOME}/kube/services/cedar-wrapper/cedar-wrapper-service.yaml" -gen3 roll cedar-wrapper - From d89a583dbc415645f7b181d030acef5483b1330a Mon Sep 17 00:00:00 2001 From: Ajo Augustine Date: Fri, 1 Jul 2022 10:07:08 -0500 Subject: [PATCH 059/106] chore: sync eks config default variables to config.tfvars (#1970) --- .secrets.baseline | 4 +- gen3/lib/aws.sh | 94 ++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 94 insertions(+), 4 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index 605b7e518..8ede85939 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -550,14 +550,14 @@ "hashed_secret": 
"8db3b325254b6389ca194d829d2fc923dc0a945d", "is_secret": false, "is_verified": false, - "line_number": 550, + "line_number": 640, "type": "Secret Keyword" }, { "hashed_secret": "5b4b6c62d3d99d202f095c38c664eded8f640ce8", "is_secret": false, "is_verified": false, - "line_number": 570, + "line_number": 660, "type": "Secret Keyword" } ], diff --git a/gen3/lib/aws.sh b/gen3/lib/aws.sh index 1fa972997..9dd6e4402 100644 --- a/gen3/lib/aws.sh +++ b/gen3/lib/aws.sh @@ -454,10 +454,100 @@ EOM if [[ "$GEN3_WORKSPACE" =~ _eks$ ]]; then commonsName=${GEN3_WORKSPACE//_eks/} cat - < Date: Fri, 1 Jul 2022 08:56:48 -0700 Subject: [PATCH 060/106] fix command (#1987) --- kube/services/jobs/metadata-aggregate-sync-job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/jobs/metadata-aggregate-sync-job.yaml b/kube/services/jobs/metadata-aggregate-sync-job.yaml index 58ce0b332..d7fbc1a46 100644 --- a/kube/services/jobs/metadata-aggregate-sync-job.yaml +++ b/kube/services/jobs/metadata-aggregate-sync-job.yaml @@ -54,7 +54,7 @@ spec: key: AGG_MDS_NAMESPACE optional: true imagePullPolicy: Always - command: ["/bin/ash"] + command: ["/bin/bash"] args: - "-c" - | From 023d2d7ae4c7ad208a935947ad552979384748c6 Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Wed, 6 Jul 2022 08:29:19 -0700 Subject: [PATCH 061/106] switch from bash to sh (#1990) --- kube/services/jobs/metadata-aggregate-sync-job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/jobs/metadata-aggregate-sync-job.yaml b/kube/services/jobs/metadata-aggregate-sync-job.yaml index d7fbc1a46..e4f6761f7 100644 --- a/kube/services/jobs/metadata-aggregate-sync-job.yaml +++ b/kube/services/jobs/metadata-aggregate-sync-job.yaml @@ -54,7 +54,7 @@ spec: key: AGG_MDS_NAMESPACE optional: true imagePullPolicy: Always - command: ["/bin/bash"] + command: ["/bin/sh"] args: - "-c" - | From c0d2b22e70291b7f153fff72b913b3bfc70fe5a6 Mon Sep 17 00:00:00 2001 From: Sai Shanmukha 
Narumanchi Date: Tue, 12 Jul 2022 08:27:41 -0500 Subject: [PATCH 062/106] Add Gen3Env env var (#1978) --- kube/services/jobs/etl-job.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kube/services/jobs/etl-job.yaml b/kube/services/jobs/etl-job.yaml index d9af1df0d..8540f3902 100644 --- a/kube/services/jobs/etl-job.yaml +++ b/kube/services/jobs/etl-job.yaml @@ -46,6 +46,11 @@ spec: value: 6g - name: ETL_FORCED GEN3_ETL_FORCED|-value: "TRUE"-| + - name: gen3Env + valueFrom: + configMapKeyRef: + name: manifest-global + key: hostname - name: slackWebHook valueFrom: configMapKeyRef: From 2bbdf6832b657fe84943ea8b704369abe45c04c9 Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Tue, 12 Jul 2022 15:15:56 -0400 Subject: [PATCH 063/106] PXP-8028 Fence DB migration: cd to where it can find Alembic files (#1991) --- kube/services/fence/fence-deploy.yaml | 1 + kube/services/jobs/fence-db-migrate-job.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/kube/services/fence/fence-deploy.yaml b/kube/services/fence/fence-deploy.yaml index 8804d37b0..95d2b5496 100644 --- a/kube/services/fence/fence-deploy.yaml +++ b/kube/services/fence/fence-deploy.yaml @@ -275,6 +275,7 @@ spec: if fence-create migrate --help > /dev/null 2>&1; then if ! 
grep -E 'ENABLE_DB_MIGRATION"?: *false' /var/www/fence/fence-config.yaml; then echo "Running db migration: fence-create migrate" + cd /fence fence-create migrate else echo "Db migration disabled in fence-config" diff --git a/kube/services/jobs/fence-db-migrate-job.yaml b/kube/services/jobs/fence-db-migrate-job.yaml index 22b9dcbad..f8d2a001c 100644 --- a/kube/services/jobs/fence-db-migrate-job.yaml +++ b/kube/services/jobs/fence-db-migrate-job.yaml @@ -89,6 +89,7 @@ spec: - | echo "${FENCE_PUBLIC_CONFIG:-""}" > "/var/www/fence/fence-config-public.yaml" python /var/www/fence/yaml_merge.py /var/www/fence/fence-config-public.yaml /var/www/fence/fence-config-secret.yaml > /var/www/fence/fence-config.yaml + cd /fence fence-create migrate if [[ $? != 0 ]]; then echo "WARNING: non zero exit code: $?" From 152d9fac516395343da9768822e9fd27955e4ebc Mon Sep 17 00:00:00 2001 From: Michael Lukowski Date: Tue, 12 Jul 2022 14:50:30 -0500 Subject: [PATCH 064/106] Feat/kayako wrapper (#1992) * adding cloud-auto setup for kayako wrapper service * updating secrets * change method to create Co-authored-by: Mingfei Shao <2475897+mfshao@users.noreply.github.com> --- files/squid_whitelist/web_whitelist | 1 + gen3/bin/kube-roll-all.sh | 6 ++ gen3/bin/kube-setup-kayako-wrapper.sh | 32 ++++++ .../kayako-wrapper/kayako-wrapper-deploy.yaml | 98 +++++++++++++++++++ .../kayako-wrapper-service.yaml | 19 ++++ .../kayako-wrapper-service.conf | 31 ++++++ 6 files changed, 187 insertions(+) create mode 100644 gen3/bin/kube-setup-kayako-wrapper.sh create mode 100644 kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml create mode 100644 kube/services/kayako-wrapper/kayako-wrapper-service.yaml create mode 100644 kube/services/revproxy/gen3.nginx.conf/kayako-wrapper-service.conf diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index 219f6b41e..c62367982 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -142,6 +142,7 @@ 
sa-update.space-pro.be security.debian.org services.mathworks.com streaming.stat.iastate.edu +support.datacommons.io us-central1-docker.pkg.dev www.google.com www.icpsr.umich.edu diff --git a/gen3/bin/kube-roll-all.sh b/gen3/bin/kube-roll-all.sh index 06f381a3d..8b1abe88a 100644 --- a/gen3/bin/kube-roll-all.sh +++ b/gen3/bin/kube-roll-all.sh @@ -308,6 +308,12 @@ else gen3_log_info "not deploying cedar-wrapper - no manifest entry for '.versions[\"cedar-wrapper\"]'" fi +if g3k_manifest_lookup '.versions["kayako-wrapper"]' 2> /dev/null; then + gen3 kube-setup-kayako-wrapper & +else + gen3_log_info "not deploying kayako-wrapper - no manifest entry for '.versions[\"kayako-wrapper\"]'" +fi + gen3_log_info "enable network policy" gen3 kube-setup-networkpolicy "enable" || true & diff --git a/gen3/bin/kube-setup-kayako-wrapper.sh b/gen3/bin/kube-setup-kayako-wrapper.sh new file mode 100644 index 000000000..a6274b2ea --- /dev/null +++ b/gen3/bin/kube-setup-kayako-wrapper.sh @@ -0,0 +1,32 @@ +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/lib/kube-setup-init" + +[[ -z "$GEN3_ROLL_ALL" ]] && gen3 kube-setup-secrets + + +kayako_api_key_file="$(gen3_secrets_folder)/kayako_api_key.txt" +kayako_secret_key_file="$(gen3_secrets_folder)/kayako_secret_key.txt" + +if [[ ! -f "$kayako_api_key_file" ]]; then + gen3_log_err "No kayako api key present in ${kayako_api_key_file}, not rolling kayako wrapper" + exit 1 +fi +if [[ ! 
-f "$kayako_secret_key_file" ]]; then + gen3_log_err "No kayako secret key present in ${kayako_secret_key_file}, not rolling kayako wrapper" + exit 1 +fi + +if g3kubectl get secret kayako-service-api-key > /dev/null 2>&1; then + g3kubectl delete secret kayako-service-api-key +fi +if g3kubectl get secret kayako-service-secret-key > /dev/null 2>&1; then + g3kubectl delete secret kayako-service-secret-key +fi + +g3kubectl create secret generic "kayako-service-api-key" --from-file=kayako_api_key.txt=${kayako_api_key_file} +g3kubectl create secret generic "kayako-service-secret-key" --from-file=kayako_secret_key.txt=${kayako_secret_key_file} + +g3kubectl apply -f "${GEN3_HOME}/kube/services/kayako-wrapper/kayako-wrapper-service.yaml" +gen3 roll kayako-wrapper + +gen3_log_info "The kayako wrapper service has been deployed onto the kubernetes cluster" diff --git a/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml b/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml new file mode 100644 index 000000000..0f22a62d0 --- /dev/null +++ b/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml @@ -0,0 +1,98 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kayako-wrapper-deployment +spec: + selector: + matchLabels: + app: kayako-wrapper + revisionHistoryLimit: 2 + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 2 + maxUnavailable: 25% + template: + metadata: + labels: + app: kayako-wrapper + public: "yes" + netnolimit: "yes" + userhelper: "yes" + GEN3_DATE_LABEL + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - kayako-wrapper + topologyKey: "kubernetes.io/hostname" + automountServiceAccountToken: false + volumes: + - name: ca-volume + secret: + secretName: "service-ca" + - name: config-volume + secret: + secretName: "kayako-wrapper-config" + - name: privacy-policy + configMap: + name: 
"privacy-policy" + - name: kayako-service-secrets + projected: + sources: + - secret: + name: kayako-service-api-key + - secret: + name: kayako-service-secret-key + containers: + - name: kayako-wrapper + GEN3_KAYAKO-WRAPPER_IMAGE + readinessProbe: + httpGet: + path: /_status/ + port: 8000 + initialDelaySeconds: 30 + periodSeconds: 60 + timeoutSeconds: 30 + livenessProbe: + httpGet: + path: /_status/ + port: 8000 + initialDelaySeconds: 60 + periodSeconds: 60 + timeoutSeconds: 30 + failureThreshold: 6 + resources: + requests: + cpu: 0.6 + memory: 512Mi + limits: + cpu: 2 + memory: 4096Mi + ports: + - containerPort: 8000 + command: + - /bin/bash + - /src/start.sh + env: + - name: HOSTNAME + value: revproxy-service + volumeMounts: + - name: "ca-volume" + readOnly: true + mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt" + subPath: "ca.pem" + - name: "kayako-service-api-key" + readOnly: true + mountPath: "/mnt/secrets/" + - name: "kayako-service-secrets" + readOnly: true + mountPath: "/mnt/secrets/" + imagePullPolicy: Always diff --git a/kube/services/kayako-wrapper/kayako-wrapper-service.yaml b/kube/services/kayako-wrapper/kayako-wrapper-service.yaml new file mode 100644 index 000000000..23045722d --- /dev/null +++ b/kube/services/kayako-wrapper/kayako-wrapper-service.yaml @@ -0,0 +1,19 @@ +kind: Service +apiVersion: v1 +metadata: + name: kayako-wrapper-service +spec: + selector: + app: kayako-wrapper + ports: + - protocol: TCP + port: 80 + targetPort: 8000 + name: http + nodePort: null + - protocol: TCP + port: 443 + targetPort: 8000 + name: https + nodePort: null + type: ClusterIP diff --git a/kube/services/revproxy/gen3.nginx.conf/kayako-wrapper-service.conf b/kube/services/revproxy/gen3.nginx.conf/kayako-wrapper-service.conf new file mode 100644 index 000000000..7caf721f2 --- /dev/null +++ b/kube/services/revproxy/gen3.nginx.conf/kayako-wrapper-service.conf @@ -0,0 +1,31 @@ + + location /kayako/ { + + if ($csrf_check !~ ^ok-\S.+$) { + return 403 
"failed csrf check"; + } + + set $authz_resource "/kayako"; + set $authz_method "create"; + set $authz_service "kayako"; + # # be careful - sub-request runs in same context as this request + auth_request_set $remoteUser $upstream_http_REMOTE_USER; + auth_request_set $saved_set_cookie $upstream_http_set_cookie; + auth_request /gen3-authz; + + proxy_set_header REMOTE_USER $remoteUser; + set $proxy_service "kayako-wrapper-service"; + set $upstream http://kayako-wrapper-service$des_domain; + rewrite ^/kayako/(.*) /$1 break; + proxy_pass $upstream; + proxy_redirect http://$host/ https://$host/kayako/; + + + proxy_set_header Authorization "$access_token"; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + client_max_body_size 0; + } From 20f9d11a729c9d04704082dba10105453f591638 Mon Sep 17 00:00:00 2001 From: Mingfei Shao <2475897+mfshao@users.noreply.github.com> Date: Tue, 12 Jul 2022 15:54:08 -0500 Subject: [PATCH 065/106] fix: secret mount (#1993) --- kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml | 3 --- 1 file changed, 3 deletions(-) diff --git a/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml b/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml index 0f22a62d0..a0e849494 100644 --- a/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml +++ b/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml @@ -89,9 +89,6 @@ spec: readOnly: true mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt" subPath: "ca.pem" - - name: "kayako-service-api-key" - readOnly: true - mountPath: "/mnt/secrets/" - name: "kayako-service-secrets" readOnly: true mountPath: "/mnt/secrets/" From 3d5f3deedf6fa3d33b1e47d6dda9531fe3937c72 Mon Sep 17 00:00:00 2001 From: Mingfei Shao <2475897+mfshao@users.noreply.github.com> Date: Thu, 14 Jul 2022 22:03:07 -0500 Subject: [PATCH 066/106] use 
g3auto secrets for cedar and kayako wrapper (#1995) * use g3auto secrets for cedar and kayako wrapper * add doc --- doc/kube-setup-cedar-wrapper.md | 5 ++++ doc/kube-setup-kayako-wrapper.md | 5 ++++ gen3/bin/kube-setup-cedar-wrapper.sh | 24 ++++++++--------- gen3/bin/kube-setup-kayako-wrapper.sh | 27 +++++++------------ .../cedar-wrapper/cedar-wrapper-deploy.yaml | 11 ++++---- .../kayako-wrapper/kayako-wrapper-deploy.yaml | 20 +++++++------- 6 files changed, 47 insertions(+), 45 deletions(-) create mode 100644 doc/kube-setup-cedar-wrapper.md create mode 100644 doc/kube-setup-kayako-wrapper.md diff --git a/doc/kube-setup-cedar-wrapper.md b/doc/kube-setup-cedar-wrapper.md new file mode 100644 index 000000000..65ffbfd32 --- /dev/null +++ b/doc/kube-setup-cedar-wrapper.md @@ -0,0 +1,5 @@ +# TL;DR + +Setup CEDAR wrapper service and deployment + +Need to supply a `cedar_api_key.txt` file under `$(gen3_secrets_folder)/g3auto/cedar/` diff --git a/doc/kube-setup-kayako-wrapper.md b/doc/kube-setup-kayako-wrapper.md new file mode 100644 index 000000000..8f0302d44 --- /dev/null +++ b/doc/kube-setup-kayako-wrapper.md @@ -0,0 +1,5 @@ +# TL;DR + +Setup Kayako wrapper service and deployment + +Need to supply a `kayako_api_key.txt` and a `kayako_secret_key.txt` file under `$(gen3_secrets_folder)/g3auto/kayako/` diff --git a/gen3/bin/kube-setup-cedar-wrapper.sh b/gen3/bin/kube-setup-cedar-wrapper.sh index e620aceb4..9a899a770 100644 --- a/gen3/bin/kube-setup-cedar-wrapper.sh +++ b/gen3/bin/kube-setup-cedar-wrapper.sh @@ -3,17 +3,17 @@ gen3_load "gen3/lib/kube-setup-init" [[ -z "$GEN3_ROLL_ALL" ]] && gen3 kube-setup-secrets +if ! g3kubectl get secrets/cedar-g3auto > /dev/null 2>&1; then + gen3_log_err "No cedar-g3auto secret, not rolling CEDAR wrapper" + return 1 +fi -cedar_api_key_file="$(gen3_secrets_folder)/cedar_api_key.txt" - -if [[ ! 
-f "$cedar_api_key_file" ]]; then - gen3_log_err "No CEDAR api key present in ${cedar_api_key_file}, not rolling CEDAR wrapper" -else - if g3kubectl get secret cedar-service-api-key > /dev/null 2>&1; then - g3kubectl delete secret cedar-service-api-key - fi - g3kubectl create secret generic "cedar-service-api-key" --from-file=cedar_api_key.txt=${cedar_api_key_file} - - g3kubectl apply -f "${GEN3_HOME}/kube/services/cedar-wrapper/cedar-wrapper-service.yaml" - gen3 roll cedar-wrapper +if ! gen3 secrets decode cedar-g3auto cedar_api_key.txt > /dev/null 2>&1; then + gen3_log_err "No CEDAR api key present in cedar-g3auto secret, not rolling CEDAR wrapper" + return 1 fi + +g3kubectl apply -f "${GEN3_HOME}/kube/services/cedar-wrapper/cedar-wrapper-service.yaml" +gen3 roll cedar-wrapper + +gen3_log_info "The CEDAR wrapper service has been deployed onto the kubernetes cluster" diff --git a/gen3/bin/kube-setup-kayako-wrapper.sh b/gen3/bin/kube-setup-kayako-wrapper.sh index a6274b2ea..59abc829c 100644 --- a/gen3/bin/kube-setup-kayako-wrapper.sh +++ b/gen3/bin/kube-setup-kayako-wrapper.sh @@ -3,28 +3,21 @@ gen3_load "gen3/lib/kube-setup-init" [[ -z "$GEN3_ROLL_ALL" ]] && gen3 kube-setup-secrets - -kayako_api_key_file="$(gen3_secrets_folder)/kayako_api_key.txt" -kayako_secret_key_file="$(gen3_secrets_folder)/kayako_secret_key.txt" - -if [[ ! -f "$kayako_api_key_file" ]]; then - gen3_log_err "No kayako api key present in ${kayako_api_key_file}, not rolling kayako wrapper" - exit 1 -fi -if [[ ! -f "$kayako_secret_key_file" ]]; then - gen3_log_err "No kayako secret key present in ${kayako_secret_key_file}, not rolling kayako wrapper" - exit 1 +if ! g3kubectl get secrets/kayako-g3auto > /dev/null 2>&1; then + gen3_log_err "No kayako-g3auto secret, not rolling Kayako wrapper" + return 1 fi -if g3kubectl get secret kayako-service-api-key > /dev/null 2>&1; then - g3kubectl delete secret kayako-service-api-key +if ! 
gen3 secrets decode kayako-g3auto kayako_api_key.txt> /dev/null 2>&1; then + gen3_log_err "No Kayako api key present in kayako-g3auto secret, not rolling Kayako wrapper" + return 1 fi -if g3kubectl get secret kayako-service-secret-key > /dev/null 2>&1; then - g3kubectl delete secret kayako-service-secret-key + +if ! gen3 secrets decode kayako-g3auto kayako_secret_key.txt> /dev/null 2>&1; then + gen3_log_err "No Kayako secret key present in kayako-g3auto secret, not rolling Kayako wrapper" + return 1 fi -g3kubectl create secret generic "kayako-service-api-key" --from-file=kayako_api_key.txt=${kayako_api_key_file} -g3kubectl create secret generic "kayako-service-secret-key" --from-file=kayako_secret_key.txt=${kayako_secret_key_file} g3kubectl apply -f "${GEN3_HOME}/kube/services/kayako-wrapper/kayako-wrapper-service.yaml" gen3 roll kayako-wrapper diff --git a/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml b/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml index dd56a617e..954bc5f06 100644 --- a/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml +++ b/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml @@ -44,9 +44,6 @@ spec: - name: privacy-policy configMap: name: "privacy-policy" - - name: cedar-service-api-key - secret: - secretName: "cedar-service-api-key" containers: - name: cedar-wrapper GEN3_CEDAR-WRAPPER_IMAGE @@ -80,12 +77,14 @@ spec: env: - name: HOSTNAME value: revproxy-service + - name: API_KEY + valueFrom: + secretKeyRef: + name: cedar-g3auto + key: "cedar_api_key.txt" volumeMounts: - name: "ca-volume" readOnly: true mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt" subPath: "ca.pem" - - name: "cedar-service-api-key" - readOnly: true - mountPath: "/mnt/secrets/" imagePullPolicy: Always diff --git a/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml b/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml index a0e849494..936f72520 100644 --- a/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml +++ 
b/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml @@ -44,13 +44,6 @@ spec: - name: privacy-policy configMap: name: "privacy-policy" - - name: kayako-service-secrets - projected: - sources: - - secret: - name: kayako-service-api-key - - secret: - name: kayako-service-secret-key containers: - name: kayako-wrapper GEN3_KAYAKO-WRAPPER_IMAGE @@ -84,12 +77,19 @@ spec: env: - name: HOSTNAME value: revproxy-service + - name: API_KEY + valueFrom: + secretKeyRef: + name: kayako-g3auto + key: "kayako_api_key.txt" + - name: SECRET_KEY + valueFrom: + secretKeyRef: + name: kayako-g3auto + key: "kayako_secret_key.txt" volumeMounts: - name: "ca-volume" readOnly: true mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt" subPath: "ca.pem" - - name: "kayako-service-secrets" - readOnly: true - mountPath: "/mnt/secrets/" imagePullPolicy: Always From a803e107d08c3a433bf5dda272490574cc6329a1 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Fri, 15 Jul 2022 14:30:26 -0500 Subject: [PATCH 067/106] feat(split-secondary-subnet): Split secondary subnet into 4 /18 subnets (#1994) Co-authored-by: Edward Malinowski --- tf_files/aws/eks/root.tf | 3 ++- tf_files/aws/eks/variables.tf | 7 +++++++ tf_files/aws/modules/eks/cloud.tf | 20 +++++++++++++++----- tf_files/aws/modules/eks/variables.tf | 6 ++++++ 4 files changed, 30 insertions(+), 6 deletions(-) diff --git a/tf_files/aws/eks/root.tf b/tf_files/aws/eks/root.tf index 3e3223079..0208e090a 100644 --- a/tf_files/aws/eks/root.tf +++ b/tf_files/aws/eks/root.tf @@ -48,5 +48,6 @@ module "eks" { fips = "${var.fips}" fips_ami_kms = "${var.fips_ami_kms}" fips_enabled_ami = "${var.fips_enabled_ami}" - availability_zones = "${var.availability_zones}" + availability_zones = "${var.availability_zones}" + secondary_availability_zones = "${var.secondary_availability_zones}" } diff --git a/tf_files/aws/eks/variables.tf b/tf_files/aws/eks/variables.tf index 643c4babd..efd7e64da 100644 --- a/tf_files/aws/eks/variables.tf +++ 
b/tf_files/aws/eks/variables.tf @@ -169,3 +169,10 @@ variable "availability_zones" { type = "list" default = ["us-east-1a", "us-east-1c", "us-east-1d"] } + +variable "secondary_availability_zones" { + description = "AZ to be used by EKS nodes in the secondary subnet" + type = "list" + default = ["us-east-1a", "us-east-1b", "us-east-1c", "us-east-1d"] +} + diff --git a/tf_files/aws/modules/eks/cloud.tf b/tf_files/aws/modules/eks/cloud.tf index d5e8ad994..c1a582dfe 100644 --- a/tf_files/aws/modules/eks/cloud.tf +++ b/tf_files/aws/modules/eks/cloud.tf @@ -8,6 +8,7 @@ locals{ # if AZs are explicitly defined as a variable, use those. Otherwise use all the AZs of the current region # NOTE: the syntax should improve with Terraform 12 azs = "${split(",", length(var.availability_zones) != 0 ? join(",", var.availability_zones) : join(",", data.aws_availability_zones.available.names))}" + secondary_azs = "${split(",", length(var.secondary_availability_zones) != 0 ? join(",", var.secondary_availability_zones) : join(",", data.aws_availability_zones.available.names))}" ami = "${var.fips ? var.fips_enabled_ami : data.aws_ami.eks_worker.id}" eks_priv_subnets = "${split(",", var.secondary_cidr_block != "" ? join(",", aws_subnet.eks_secondary_subnet.*.id) : join(",", aws_subnet.eks_private.*.id))}" } @@ -116,6 +117,15 @@ resource "random_shuffle" "az" { count = 1 } +resource "random_shuffle" "secondary_az" { + #input = ["${data.aws_autoscaling_group.squid_auto.availability_zones}"] + #input = ["${data.aws_availability_zones.available.names}"] + #input = "${length(var.availability_zones) > 0 ? 
var.availability_zones : data.aws_autoscaling_group.squid_auto.availability_zones }" + #input = "${var.availability_zones}" + input = ["${local.secondary_azs}"] + result_count = "${length(local.secondary_azs)}" + count = 1 +} # The subnet where our cluster will live in resource "aws_subnet" "eks_private" { @@ -144,15 +154,15 @@ resource "aws_subnet" "eks_private" { # The subnet for secondary CIDR block utilization resource "aws_subnet" "eks_secondary_subnet" { - count = "${var.secondary_cidr_block != "" ? 1 : 0}" + count = "${var.secondary_cidr_block != "" ? 4 : 0}" vpc_id = "${data.aws_vpc.the_vpc.id}" - cidr_block = "${var.secondary_cidr_block}" - availability_zone = "${random_shuffle.az.result[count.index]}" + cidr_block = "${cidrsubnet(var.secondary_cidr_block, 2 , count.index)}" + availability_zone = "${random_shuffle.secondary_az.result[count.index]}" map_public_ip_on_launch = false tags = "${ map( - "Name", "eks_secondary_cidr_subnet", + "Name", "eks_secondary_cidr_subnet_${count.index}", "Environment", "${var.vpc_name}", "Organization", "${var.organization_name}", "kubernetes.io/cluster/${var.vpc_name}", "owned", @@ -243,7 +253,7 @@ resource "aws_route_table_association" "private_kube" { resource "aws_route_table_association" "secondary_subnet_kube" { count = "${var.secondary_cidr_block != "" ? 
1 : 0}" - subnet_id = "${aws_subnet.eks_secondary_subnet.id}" + subnet_id = "${aws_subnet.eks_secondary_subnet.*.id[count.index]}" route_table_id = "${aws_route_table.eks_private.id}" depends_on = ["aws_subnet.eks_secondary_subnet"] } diff --git a/tf_files/aws/modules/eks/variables.tf b/tf_files/aws/modules/eks/variables.tf index c72b25ce4..3eefa456c 100644 --- a/tf_files/aws/modules/eks/variables.tf +++ b/tf_files/aws/modules/eks/variables.tf @@ -119,6 +119,12 @@ variable "availability_zones" { default = ["us-east-1a", "us-east-1c", "us-east-1d"] } +variable "secondary_availability_zones" { + description = "AZ to be used by EKS nodes in the secondary subnet" + type = "list" + default = ["us-east-1a", "us-east-1b", "us-east-1c", "us-east-1d"] +} + variable "domain_test" { description = "Domain for the lambda function to check for the proxy" default = "www.google.com" From 8b024f9ab1a145bd7a5a2c061a1e8a2f466762c8 Mon Sep 17 00:00:00 2001 From: jawadqur <55899496+jawadqur@users.noreply.github.com> Date: Mon, 18 Jul 2022 12:32:07 -0500 Subject: [PATCH 068/106] Fix kubectl installation script (#1974) * Fix kubectl installation script --- gen3/bin/kube-setup-workvm.sh | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/gen3/bin/kube-setup-workvm.sh b/gen3/bin/kube-setup-workvm.sh index c41ab3224..206e00cf0 100644 --- a/gen3/bin/kube-setup-workvm.sh +++ b/gen3/bin/kube-setup-workvm.sh @@ -110,14 +110,21 @@ if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo -E apt-key add - sudo -E apt-get update sudo -E apt-get install -y google-cloud-sdk \ - google-cloud-sdk-cbt \ - kubectl - if [[ -f /usr/local/bin/kubectl && -f /usr/bin/kubectl ]]; then # pref dpkg managed kubectl - sudo -E /bin/rm /usr/local/bin/kubectl - fi + google-cloud-sdk-cbt ) fi + k8s_server_version=$(kubectl version --short | awk -F[v.] '/Server/ {print $3"."$4}') + if [[ ! 
-z "${k8s_server_version// }" ]]; then + # install kubectl + install_version=$(apt-cache madison kubectl | awk '$3 ~ /'$k8s_server_version'/ {print $3}'| head -n 1) + gen3_log_info "Installing kubectl version $install_version" + sudo -E apt-get install -y kubectl=$install_version --allow-downgrades + else + # install kubectl + sudo -E apt-get install -y kubectl=1.21.14-00 --allow-downgrades + fi + mkdir -p ~/.config sudo chown -R "${USER}:" ~/.config From 5bc7421d3392c701029342149f0b80718b307301 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Tue, 19 Jul 2022 16:43:28 -0500 Subject: [PATCH 069/106] fix: test_mfilter (#1998) --- gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml index da0661e20..66fb41ca4 100644 --- a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml +++ b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml @@ -275,6 +275,7 @@ spec: if fence-create migrate --help > /dev/null 2>&1; then if ! 
grep -E 'ENABLE_DB_MIGRATION"?: *false' /var/www/fence/fence-config.yaml; then echo "Running db migration: fence-create migrate" + cd /fence fence-create migrate else echo "Db migration disabled in fence-config" From 51ec47150af1e295882d8ae055f92c1a3b3357ef Mon Sep 17 00:00:00 2001 From: jawadqur <55899496+jawadqur@users.noreply.github.com> Date: Tue, 19 Jul 2022 16:43:43 -0500 Subject: [PATCH 070/106] Update web_whitelist (#1996) Removing support.datacommons.io as datacommons.io is in the wildcard whitelist --- files/squid_whitelist/web_whitelist | 1 - 1 file changed, 1 deletion(-) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index c62367982..219f6b41e 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -142,7 +142,6 @@ sa-update.space-pro.be security.debian.org services.mathworks.com streaming.stat.iastate.edu -support.datacommons.io us-central1-docker.pkg.dev www.google.com www.icpsr.umich.edu From 85109ef99bf70f8340fdade6a4a8345674b0e49b Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Tue, 19 Jul 2022 17:27:46 -0500 Subject: [PATCH 071/106] feat: Argo will be available everywhere (#1997) --- gen3/bin/kube-setup-revproxy.sh | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/gen3/bin/kube-setup-revproxy.sh b/gen3/bin/kube-setup-revproxy.sh index 72cc21bf5..97b91b65a 100644 --- a/gen3/bin/kube-setup-revproxy.sh +++ b/gen3/bin/kube-setup-revproxy.sh @@ -105,18 +105,15 @@ for name in $(g3kubectl get services -o json | jq -r '.items[] | .metadata.name' fi done -if [[ $current_namespace == "default" ]]; +if g3kubectl get namespace argo > /dev/null 2>&1; then - if g3kubectl get namespace argo > /dev/null 2>&1; - then - for argo in $(g3kubectl get services -n argo -o jsonpath='{.items[*].metadata.name}'); - do - filePath="$scriptDir/gen3.nginx.conf/${argo}.conf" - if [[ -f "$filePath" ]]; then - confFileList+=("--from-file" "$filePath") - fi - 
done - fi + for argo in $(g3kubectl get services -n argo -o jsonpath='{.items[*].metadata.name}'); + do + filePath="$scriptDir/gen3.nginx.conf/${argo}.conf" + if [[ -f "$filePath" ]]; then + confFileList+=("--from-file" "$filePath") + fi + done fi if [[ $current_namespace == "default" ]]; From 6b1d7c243d9b9b806d78bcf2ba6f17fd2157ee11 Mon Sep 17 00:00:00 2001 From: Mingfei Shao <2475897+mfshao@users.noreply.github.com> Date: Fri, 22 Jul 2022 11:31:54 -0500 Subject: [PATCH 072/106] feat: add doc url (#2000) * feat: add doc url * fix: bug * fix config --- gen3/bin/kube-setup-revproxy.sh | 7 +++++++ .../documentation-site/documentation-site.conf | 9 +++++++++ kube/services/revproxy/nginx.conf | 6 ++++++ kube/services/revproxy/revproxy-deploy.yaml | 6 ++++++ 4 files changed, 28 insertions(+) create mode 100644 kube/services/revproxy/gen3.nginx.conf/documentation-site/documentation-site.conf diff --git a/gen3/bin/kube-setup-revproxy.sh b/gen3/bin/kube-setup-revproxy.sh index 97b91b65a..9e38fb908 100644 --- a/gen3/bin/kube-setup-revproxy.sh +++ b/gen3/bin/kube-setup-revproxy.sh @@ -149,6 +149,13 @@ if [[ $current_namespace == "default" ]]; then fi fi +if g3k_manifest_lookup .global.document_url > /dev/null 2>&1; then + documentUrl="$(g3k_manifest_lookup .global.document_url)" + if [[ "$documentUrl" != null ]]; then + filePath="$scriptDir/gen3.nginx.conf/documentation-site/documentation-site.conf" + confFileList+=("--from-file" "$filePath") + fi +fi # # Funny hook to load the portal-workspace-parent nginx config # diff --git a/kube/services/revproxy/gen3.nginx.conf/documentation-site/documentation-site.conf b/kube/services/revproxy/gen3.nginx.conf/documentation-site/documentation-site.conf new file mode 100644 index 000000000..6ed4f3722 --- /dev/null +++ b/kube/services/revproxy/gen3.nginx.conf/documentation-site/documentation-site.conf @@ -0,0 +1,9 @@ + location /documentation { + if ($csrf_check !~ ^ok-\S.+$) { + return 403 "failed csrf check"; + } + + set $document_url 
"$document_url_env"; + + rewrite ^/documentation/?(.*)$ $document_url/$1 redirect; + } diff --git a/kube/services/revproxy/nginx.conf b/kube/services/revproxy/nginx.conf index bb5ff652c..79c5d2e22 100644 --- a/kube/services/revproxy/nginx.conf +++ b/kube/services/revproxy/nginx.conf @@ -24,6 +24,7 @@ env MAINTENANCE_MODE; env INDEXD_AUTHZ; env MDS_AUTHZ; env FRONTEND_ROOT; +env DOCUMENT_URL; events { worker_connections 768; @@ -77,6 +78,8 @@ js_set $black_list_check checkBlackList; # Modsecurity and blacklist configuration include /etc/nginx/gen3_server*.conf; +perl_set $document_url_env 'sub { return $ENV{"DOCUMENT_URL"} || ""; }'; + # see portal-conf perl_set $maintenance_mode_env 'sub { return $ENV{"MAINTENANCE_MODE"} || "undefined"; }'; @@ -411,6 +414,9 @@ server { #} include /etc/nginx/gen3.conf/*.conf; + if ($document_url_env != "") { + include /etc/nginx/gen3.conf/documentation-site/*.conf; + } if ($frontend_root_service = "portal") { include /etc/nginx/gen3.conf/portal-as-root/*.conf; } diff --git a/kube/services/revproxy/revproxy-deploy.yaml b/kube/services/revproxy/revproxy-deploy.yaml index 1cba90bc0..5f0f90f3a 100644 --- a/kube/services/revproxy/revproxy-deploy.yaml +++ b/kube/services/revproxy/revproxy-deploy.yaml @@ -95,6 +95,12 @@ spec: name: manifest-global key: maintenance_mode optional: true + - name: DOCUMENT_URL + valueFrom: + configMapKeyRef: + name: manifest-global + key: document_url + optional: true - name: FRONTEND_ROOT valueFrom: configMapKeyRef: From 52d0f8c4bbac06fb591fae7fdec123dcf103c819 Mon Sep 17 00:00:00 2001 From: Mingfei Shao <2475897+mfshao@users.noreply.github.com> Date: Tue, 26 Jul 2022 18:05:12 -0500 Subject: [PATCH 073/106] HP-781 Feat/heal cedar injest job (#2004) --- .../healdata/heal-cedar-data-ingest.py | 80 ++++++++ kube/services/jobs/cedar-ingestion-job.yaml | 193 ++++++++++++++++++ 2 files changed, 273 insertions(+) create mode 100644 files/scripts/healdata/heal-cedar-data-ingest.py create mode 100644 
kube/services/jobs/cedar-ingestion-job.yaml diff --git a/files/scripts/healdata/heal-cedar-data-ingest.py b/files/scripts/healdata/heal-cedar-data-ingest.py new file mode 100644 index 000000000..fb2c1f2c1 --- /dev/null +++ b/files/scripts/healdata/heal-cedar-data-ingest.py @@ -0,0 +1,80 @@ +import argparse +import json +import requests +import pydash +import os + + +parser = argparse.ArgumentParser() + +parser.add_argument("--directory", help="CEDAR Directory ID for registering ") +parser.add_argument("--access_token", help="User access token") +parser.add_argument("--hostname", help="Hostname") + + +args = parser.parse_args() + +if not args.directory: + print("Directory ID is required!") + exit(1) +if not args.access_token: + print("User access token is required!") + exit(1) +if not args.hostname: + print("Hostname is required!") + exit(1) + +dir_id = args.directory +access_token = args.access_token +hostname = args.hostname + +token_header = {"Authorization": 'bearer ' + access_token} + +# Get the metadata from cedar to register +print("Querying CEDAR...") +cedar = requests.get(f"https://{hostname}/cedar/get-instance-by-directory/{dir_id}", headers=token_header) + +# If we get metadata back now register with MDS +if cedar.status_code == 200: + metadata_return = cedar.json() + if "metadata" not in metadata_return: + print("Got 200 from CEDAR wrapper but no metadata in body, something is not right!") + exit(1) + + print(f"Successfully got {len(metadata_return['metadata'])} record(s) from CEDAR directory") + for cedar_record in metadata_return["metadata"]: + if "appl_id" not in cedar_record: + print("This record doesn't have appl_id, skipping...") + continue + cedar_record_id = str(cedar_record["appl_id"]) + + # Get the metadata record for the nih_application_id + mds = requests.get(f"https://{hostname}/mds/metadata/{cedar_record_id}", + headers=token_header + ) + if mds.status_code == 200: + mds_res = mds.json() + mds_cedar_register_data_body = {} + 
mds_discovery_data_body = {} + if mds_res["_guid_type"] == "discovery_metadata": + print("Metadata is already registered. Updating MDS record") + elif mds_res["_guid_type"] == "unregistered_discovery_metadata": + print("Metadata is has not been registered. Registering it in MDS record") + pydash.merge(mds_discovery_data_body, mds_res["gen3_discovery"], cedar_record) + mds_cedar_register_data_body["gen3_discovery"] = mds_discovery_data_body + mds_cedar_register_data_body["_guid_type"] = "discovery_metadata" + + print("Metadata is now being registered.") + mds_put = requests.put(f"https://{hostname}/mds/metadata/{cedar_record_id}", + headers=token_header, + json = mds_cedar_register_data_body + ) + if mds_put.status_code == 200: + print(f"Successfully registered: {cedar_record_id}") + else: + print(f"Failed to register: {cedar_record_id}. Might not be MDS admin") + print(f"Status from MDS: {mds_put.status_code}") + else: + print(f"Failed to get information from MDS: {mds.status_code}") +else: + print(f"Failed to get information from CEDAR wrapper service: {cedar.status_code}") diff --git a/kube/services/jobs/cedar-ingestion-job.yaml b/kube/services/jobs/cedar-ingestion-job.yaml new file mode 100644 index 000000000..37f537c53 --- /dev/null +++ b/kube/services/jobs/cedar-ingestion-job.yaml @@ -0,0 +1,193 @@ +# +# run with: +# gen3 job run cedar-ingestion \ +# SUBMISSION_USER $submission_user \ +# CEDAR_DIRECTORY_ID $cedar_directory_id \ +# +# SUBMISSION_USER(optional) +# e-mail of user-account to submit the data to MDS, must have MDS admin and CEDAR polices granted. Default: "cdis.autotest@gmail.com" +# +# CEDAR_DIRECTORY_ID +# ID of CEDAR directory where instances will be pulled from, only needs its UUID part. For example: "123e4567-e89b-12d3-a456-426614174000" +# The deployed CEDAR wrapper services must be able to read from this directory. 
+# +# Example +# gen3 job run cedar-ingestion CEDAR_DIRECTORY_ID 123e4567-e89b-12d3-a456-426614174000 SUBMISSION_USER cdis.autotest@gmail.com +# +apiVersion: batch/v1 +kind: Job +metadata: + name: cedar-ingestion +spec: + template: + metadata: + labels: + app: gen3job + spec: + serviceAccountName: useryaml-job + volumes: + - name: yaml-merge + configMap: + name: "fence-yaml-merge" + - name: shared-data + emptyDir: {} +# ----------------------------------------------------------------------------- +# DEPRECATED! Remove when all commons are no longer using local_settings.py +# for fence. +# ----------------------------------------------------------------------------- + - name: old-config-volume + secret: + secretName: "fence-secret" + - name: creds-volume + secret: + secretName: "fence-creds" + - name: config-helper + configMap: + name: config-helper + - name: json-secret-volume + secret: + secretName: "fence-json-secret" +# ----------------------------------------------------------------------------- + - name: config-volume + secret: + secretName: "fence-config" + - name: fence-jwt-keys + secret: + secretName: "fence-jwt-keys" + containers: + - name: awshelper + image: quay.io/cdis/awshelper:master + imagePullPolicy: Always + ports: + - containerPort: 80 + env: + - name: HOSTNAME + valueFrom: + configMapKeyRef: + name: global + key: hostname + - name: SUBMISSION_USER + GEN3_SUBMISSION_USER|-value: "cdis.autotest@gmail.com"-| + - name: CEDAR_DIRECTORY_ID + GEN3_CEDAR_DIRECTORY_ID|-value: ""-| + volumeMounts: + - name: shared-data + mountPath: /mnt/shared + resources: + limits: + cpu: 1 + memory: 5Gi + command: ["/bin/bash" ] + args: + - "-c" + - | + if [[ -z "$CEDAR_DIRECTORY_ID" ]]; then + echo -e "CEDAR_DIRECTORY_ID is required" 1>&2 + exit 1 + fi + let count=0 + while [[ ! 
-f /mnt/shared/access_token.txt && $count -lt 50 ]]; do + echo "Waiting for /mnt/shared/access_token.txt"; + sleep 2 + let count=$count+1 + done + pip install pydash + export GEN3_HOME="$HOME/cloud-automation" + export ACCESS_TOKEN="$(cat /mnt/shared/access_token.txt)" + python ${GEN3_HOME}/files/scripts/healdata/heal-cedar-data-ingest.py --access_token $ACCESS_TOKEN --directory $CEDAR_DIRECTORY_ID --hostname $HOSTNAME + echo "All done - exit status $?" + - name: fence + GEN3_FENCE_IMAGE + imagePullPolicy: Always + env: + - name: PYTHONPATH + value: /var/www/fence + - name: SUBMISSION_USER + GEN3_SUBMISSION_USER|-value: "cdis.autotest@gmail.com"-| + - name: TOKEN_EXPIRATION + value: "3600" + - name: FENCE_PUBLIC_CONFIG + valueFrom: + configMapKeyRef: + name: manifest-fence + key: fence-config-public.yaml + optional: true + volumeMounts: +# ----------------------------------------------------------------------------- +# DEPRECATED! Remove when all commons are no longer using local_settings.py +# for fence. 
+# ----------------------------------------------------------------------------- + - name: "old-config-volume" + readOnly: true + mountPath: "/var/www/fence/local_settings.py" + subPath: local_settings.py + - name: "creds-volume" + readOnly: true + mountPath: "/var/www/fence/creds.json" + subPath: creds.json + - name: "config-helper" + readOnly: true + mountPath: "/var/www/fence/config_helper.py" + subPath: config_helper.py + - name: "json-secret-volume" + readOnly: true + mountPath: "/var/www/fence/fence_credentials.json" + subPath: fence_credentials.json +# ----------------------------------------------------------------------------- + - name: "config-volume" + readOnly: true + mountPath: "/var/www/fence/fence-config-secret.yaml" + subPath: fence-config.yaml + - name: "yaml-merge" + readOnly: true + mountPath: "/var/www/fence/yaml_merge.py" + subPath: yaml_merge.py + - name: "fence-jwt-keys" + readOnly: true + mountPath: "/fence/jwt-keys.tar" + subPath: "jwt-keys.tar" + - name: shared-data + mountPath: /mnt/shared + command: ["/bin/bash" ] + args: + - "-c" + - | + echo "${FENCE_PUBLIC_CONFIG:-""}" > "/var/www/fence/fence-config-public.yaml" + python /var/www/fence/yaml_merge.py /var/www/fence/fence-config-public.yaml /var/www/fence/fence-config-secret.yaml > /var/www/fence/fence-config.yaml + if [ -f /fence/jwt-keys.tar ]; then + cd /fence + tar xvf jwt-keys.tar + if [ -d jwt-keys ]; then + mkdir -p keys + mv jwt-keys/* keys/ + fi + fi + echo "generate access token" + echo "fence-create --path fence token-create --type access_token --username $SUBMISSION_USER --scopes openid,user,test-client --exp $TOKEN_EXPIRATION" + tempFile="$(mktemp -p /tmp token.txt_XXXXXX)" + success=false + count=0 + sleepTime=10 + # retry loop + while [[ $count -lt 3 && $success == false ]]; do + if fence-create --path fence token-create --type access_token --username $SUBMISSION_USER --scopes openid,user,test-client --exp $TOKEN_EXPIRATION > "$tempFile"; then + echo "fence-create 
success!" + tail -1 "$tempFile" > /mnt/shared/access_token.txt + # base64 --decode complains about invalid characters - don't know why + awk -F . '{ print $2 }' /mnt/shared/access_token.txt | base64 --decode 2> /dev/null + success=true + else + echo "fence-create failed!" + cat "$tempFile" + echo "sleep for $sleepTime, then retry" + sleep "$sleepTime" + let sleepTime=$sleepTime+$sleepTime + fi + let count=$count+1 + done + if [[ $success != true ]]; then + echo "Giving up on fence-create after $count retries - failed to create valid access token" + fi + echo "" + echo "All Done - always succeed to avoid k8s retries" + restartPolicy: Never From 384c37cc3c4a02166134561544df5ad0056194f9 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Mon, 1 Aug 2022 12:49:42 -0600 Subject: [PATCH 074/106] adding my ssh key to various files. (#2006) --- files/authorized_keys/ops_team | 3 ++- files/authorized_keys/squid_authorized_keys_admin | 3 ++- files/authorized_keys/vpn_authorized_keys_admin | 3 ++- tf_files/aws/commons/cluster.yaml | 1 + tf_files/aws/publicvm/root.tf | 1 + 5 files changed, 8 insertions(+), 3 deletions(-) diff --git a/files/authorized_keys/ops_team b/files/authorized_keys/ops_team index a55a0717e..1c41caaa0 100644 --- a/files/authorized_keys/ops_team +++ b/files/authorized_keys/ops_team @@ -2,4 +2,5 @@ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDiVYoa9i91YL17xWF5kXpYh+PPTriZMAwiJWKkEtMJ ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDBFbx4eZLZEOTUc4d9kP8B2fg3HPA8phqJ7FKpykg87w300H8uTsupBPggxoPMPnpCKpG4aYqgKC5aHzv2TwiHyMnDN7CEtBBBDglWJpBFCheU73dDl66z/vny5tRHWs9utQNzEBPLxSqsGgZmmN8TtIxrMKZ9eX4/1d7o+8msikCYrKr170x0zXtSx5UcWj4yK1al5ZcZieZ4KVWk9/nPkD/k7Sa6JM1QxAVZObK/Y9oA6fjEFuRGdyUMxYx3hyR8ErNCM7kMf8Yn78ycNoKB5CDlLsVpPLcQlqALnBAg1XAowLduCCuOo8HlenM7TQqohB0DO9MCDyZPoiy0kieMBLBcaC7xikBXPDoV9lxgvJf1zbEdQVfWllsb1dNsuYNyMfwYRK+PttC/W37oJT64HJVWJ1O3cl63W69V1gDGUnjfayLjvbyo9llkqJetprfLhu2PfSDJ5jBlnKYnEj2+fZQb8pUrgyVOrhZJ3aKJAC3c665avfEFRDO3EV/cStzoAnHVYVpbR/EXyufYTh7Uvkej8l7g/CeQzxTq+0UovNjRA8UEXGaMWaLq1zZycc6Dx/m7HcZuNFdamM3eGWV+ZFPVBZhXHwZ1Ysq2mpBEYoMcKdoHe3EvFu3eKyrIzaqCLT5LQPfaPJaOistXBJNxDqL6vUhAtETmM5UjKGKZaQ== emalinowski@uchicago.edu ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKJR5N5VIU9qdSfCtlskzuQ7A5kNn8YPeXsoKq0HhYZSd4Aq+7gZ0tY0dFUKtXLpJsQVDTflINc7sLDDXNp3icuSMmxOeNgvBfi8WnzBxcATh3uqidPqE0hcnhVQbpsza1zk8jkOB2o8FfBdDTOSbgPESv/1dnGApfkZj96axERUCMzyyUSEmif2moWJaVv2Iv7O+xjQqIZcMXiAo5BCnTCFFKGVOphy65cOsbcE02tEloiZ3lMAPMamZGV7SMQiD3BusncnVctn/E1vDqeozItgDrTdajKqtW0Mt6JFONVFobzxS8AsqFwaHiikOZhKq2LoqgvbXZvNWH2zRELezP jawadq@Jawads-MacBook-Air.local ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC+iK0ZvY25lgwh4nNUTkD0bq2NES3cPEK+f52HEC2GSVI845ZOqX32kfNpDFT9zvspadOA6KwAgKsRphP/iV8k8WLjAYSYQ3sAE/enuW1+Cr0hhmtahA+uxOavUwsvJ93vIOlIlkD26gIUZTZeYUhi6Aa2FjWFTJ0CtxtUYEdBh+sqW3VoyVvOOA+2DnNYt7/pTrh0DwNxHX7+9TfkmRaVLD4xcdwNLx5N3Yyjgci+oGmw8HATYfSBTaGEXSKJflrN6TDqN87D2pJpMkEvYeZIktoU0kX4HwodrNfwhlruJ2PsePzZ28xlaaZz2fI/LGiqnwf1fRY10R5C/9RpcAcpcYaz305uBCUCI7GGbL9u7WC0W0NZsyaaybaKXyt97p/05os2oe/N5un0whv+NL8z5SLZnaelvttrmVKApvsCD/IqZv5b2PlDilY3L638eKmVOcHaLX/N67MeL9FKnipv2QPzaUKhMoEAtSPqdOWnlndt9dmMBlqT0BKmB85mm0k= ajoa@uchicago.edu -ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDXzpRFVdZMdW8G4hP1or6O26zHvAj+OLxP0G8kGzCiIdmCwuycurU1MhxatPLfEmWuc31FGdhD5xDb2taHEwdTcMTHaqGa/K03NDFm2Ary7FeVEuBuur1jSWHIq+o4wp9vtsAfBLmV89yxZU40BHauac5uhXcHXfQ7OeZBVZhkCdUcN2l21H44b6V3RAqqxaf4dOiapTd8YbMHMJmyeu5rMFbbW9zS8lXule4pNTREXfh3Zt9MYPZnZ2aV/hQV28KRRjWJnMXuPQxSqEKVDsVbKT9Hu0Re9I8cQLEakNQV5G5c0YDuQjzXL8rEiYKm2AEHlpri/IkOiKu0gKeyZDVTJjW1/n0fCYlcjOJ9AB5wlM6CtsdwBC4spN85E2oJrfvmKIMnRdqSQnLe+w/DyyaZJsMgvXjItB5tysOZI2BkM2Z2cQ3XFK91gwxEUVQHlbvWBI7Nl2VEykQ5O8HdcPnKPcspnOByJMFfdvbh6HXlrstPOuNm2dyw+CUIMlQpa0nEEs/fyB+PoeRYMPs6VNA1syOpH70EXslyfDiN+5eH3asUohvbe4fOjF1jyviQEYXZ2mSbL+D/5sw4x9uWpg/oa+DzWX7ACBUt+ZEwF7xMWIO2O48HWokUrshNB1ksfK+tBXf6mL9SDlxzPYfcR2INRQgllidhPZIqVHoD57HUSw== ahilt@aidans-mbp.lan \ No newline at end of file +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXzpRFVdZMdW8G4hP1or6O26zHvAj+OLxP0G8kGzCiIdmCwuycurU1MhxatPLfEmWuc31FGdhD5xDb2taHEwdTcMTHaqGa/K03NDFm2Ary7FeVEuBuur1jSWHIq+o4wp9vtsAfBLmV89yxZU40BHauac5uhXcHXfQ7OeZBVZhkCdUcN2l21H44b6V3RAqqxaf4dOiapTd8YbMHMJmyeu5rMFbbW9zS8lXule4pNTREXfh3Zt9MYPZnZ2aV/hQV28KRRjWJnMXuPQxSqEKVDsVbKT9Hu0Re9I8cQLEakNQV5G5c0YDuQjzXL8rEiYKm2AEHlpri/IkOiKu0gKeyZDVTJjW1/n0fCYlcjOJ9AB5wlM6CtsdwBC4spN85E2oJrfvmKIMnRdqSQnLe+w/DyyaZJsMgvXjItB5tysOZI2BkM2Z2cQ3XFK91gwxEUVQHlbvWBI7Nl2VEykQ5O8HdcPnKPcspnOByJMFfdvbh6HXlrstPOuNm2dyw+CUIMlQpa0nEEs/fyB+PoeRYMPs6VNA1syOpH70EXslyfDiN+5eH3asUohvbe4fOjF1jyviQEYXZ2mSbL+D/5sw4x9uWpg/oa+DzWX7ACBUt+ZEwF7xMWIO2O48HWokUrshNB1ksfK+tBXf6mL9SDlxzPYfcR2INRQgllidhPZIqVHoD57HUSw== ahilt@aidans-mbp.lan +ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQCkf6aIs6bmOAZS+Q7yFaRzPnZPa3eExrDDKqGuikGoNDMP1VcPoyb0cYTZTG5X6YzFt5Blv95WWuw6WEBdUxIax/Z9V4H83A+KRvuwiRI9zU3FaKEeYb18hcHSclAWyjl+N7b9V2KzxVBJCkmdC3XBLp/geDRIbGusg40lySYzYhs73hTYs0CQWHcLIj1jX00hbIdbKyc/fq8ODIEOo/XojvjBQyPlT/BJ5fK08LO7kIBoeQ62iT8yG+J/2vch+WsMBeOt+agYKRSn9pv10+5SdP/emX4r5PkyTS8H3ysuequMUMv5w0rXAL53uTYpJELisNTl8pv2Y4VQKCh2Aj5989NFjcqBcv7KKTfvI3WVG5SNsOtu1tAmC05Xf3fdsb3BRVu7I0pCna26NOKRSh8eLy/uUfA4fUKOQyXr5yG3a+Vse57WZiPizOamhkjYTdvyBB8ad7vZST1ir1viSZl6ps+f3bhfx//DPKYpYyZIc6uDdGQMwFoMEhpTdKYopqGmny5LoR9J9LLeGDJd3M0bj/yyd+2/6cU+1KwjLO7fgyjSCjVUKEdG0HufwS/NZc1q3QT6OrXAd8lw5A4BoHDt+Mp8uRVz5508h7XIOC718nLuiJqwqh3dS6hkybGoBCIvh1BDWsEWOUi0Ygt+Ast3Qw4/eMqvmTCN32OIVtOBpQ== elisecastle@Elises-MBP \ No newline at end of file diff --git a/files/authorized_keys/squid_authorized_keys_admin b/files/authorized_keys/squid_authorized_keys_admin index 251eedbdb..20c4d966d 100644 --- a/files/authorized_keys/squid_authorized_keys_admin +++ b/files/authorized_keys/squid_authorized_keys_admin @@ -4,4 +4,5 @@ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDBFbx4eZLZEOTUc4d9kP8B2fg3HPA8phqJ7FKpykg8 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKJR5N5VIU9qdSfCtlskzuQ7A5kNn8YPeXsoKq0HhYZSd4Aq+7gZ0tY0dFUKtXLpJsQVDTflINc7sLDDXNp3icuSMmxOeNgvBfi8WnzBxcATh3uqidPqE0hcnhVQbpsza1zk8jkOB2o8FfBdDTOSbgPESv/1dnGApfkZj96axERUCMzyyUSEmif2moWJaVv2Iv7O+xjQqIZcMXiAo5BCnTCFFKGVOphy65cOsbcE02tEloiZ3lMAPMamZGV7SMQiD3BusncnVctn/E1vDqeozItgDrTdajKqtW0Mt6JFONVFobzxS8AsqFwaHiikOZhKq2LoqgvbXZvNWH2zRELezP jawadq@Jawads-MacBook-Air.local ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQC+iK0ZvY25lgwh4nNUTkD0bq2NES3cPEK+f52HEC2GSVI845ZOqX32kfNpDFT9zvspadOA6KwAgKsRphP/iV8k8WLjAYSYQ3sAE/enuW1+Cr0hhmtahA+uxOavUwsvJ93vIOlIlkD26gIUZTZeYUhi6Aa2FjWFTJ0CtxtUYEdBh+sqW3VoyVvOOA+2DnNYt7/pTrh0DwNxHX7+9TfkmRaVLD4xcdwNLx5N3Yyjgci+oGmw8HATYfSBTaGEXSKJflrN6TDqN87D2pJpMkEvYeZIktoU0kX4HwodrNfwhlruJ2PsePzZ28xlaaZz2fI/LGiqnwf1fRY10R5C/9RpcAcpcYaz305uBCUCI7GGbL9u7WC0W0NZsyaaybaKXyt97p/05os2oe/N5un0whv+NL8z5SLZnaelvttrmVKApvsCD/IqZv5b2PlDilY3L638eKmVOcHaLX/N67MeL9FKnipv2QPzaUKhMoEAtSPqdOWnlndt9dmMBlqT0BKmB85mm0k= ajoa@uchicago.edu ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCdIXKLMs14c8U9exX/sOoIcvOCZ4v2pKsjdM1VBA56GyI98E1R+hxTBecHeWri9MeQcZkrlmjqT3ZzCb87+n0W2LEWquLNfeheAEq61ogi0taxWEpnb4rIAr1U9aS3d0mk5NIIivrwaUHTIvUhH8mn4Pek0GgybZAsjN/MpZ9PZwUtXNmjZoY5gWR0QO4ZWu7ARknFoNcTXwpWyl/Khhal0KKhdB38y3MpJc03IIqhem15e78jRlko04CAZX3zlFAQwbxnrpgrJUMYeY8fZqpV6FiWC40yu+n9KwAZkmtrc45mkxahj8c3QtJ/Z3t33yXEN9PEHV6z104STYi2cPVD rpollard@news-MacBook-Pro.local -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXzpRFVdZMdW8G4hP1or6O26zHvAj+OLxP0G8kGzCiIdmCwuycurU1MhxatPLfEmWuc31FGdhD5xDb2taHEwdTcMTHaqGa/K03NDFm2Ary7FeVEuBuur1jSWHIq+o4wp9vtsAfBLmV89yxZU40BHauac5uhXcHXfQ7OeZBVZhkCdUcN2l21H44b6V3RAqqxaf4dOiapTd8YbMHMJmyeu5rMFbbW9zS8lXule4pNTREXfh3Zt9MYPZnZ2aV/hQV28KRRjWJnMXuPQxSqEKVDsVbKT9Hu0Re9I8cQLEakNQV5G5c0YDuQjzXL8rEiYKm2AEHlpri/IkOiKu0gKeyZDVTJjW1/n0fCYlcjOJ9AB5wlM6CtsdwBC4spN85E2oJrfvmKIMnRdqSQnLe+w/DyyaZJsMgvXjItB5tysOZI2BkM2Z2cQ3XFK91gwxEUVQHlbvWBI7Nl2VEykQ5O8HdcPnKPcspnOByJMFfdvbh6HXlrstPOuNm2dyw+CUIMlQpa0nEEs/fyB+PoeRYMPs6VNA1syOpH70EXslyfDiN+5eH3asUohvbe4fOjF1jyviQEYXZ2mSbL+D/5sw4x9uWpg/oa+DzWX7ACBUt+ZEwF7xMWIO2O48HWokUrshNB1ksfK+tBXf6mL9SDlxzPYfcR2INRQgllidhPZIqVHoD57HUSw== ahilt@aidans-mbp.lan \ No newline at end of file +ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDXzpRFVdZMdW8G4hP1or6O26zHvAj+OLxP0G8kGzCiIdmCwuycurU1MhxatPLfEmWuc31FGdhD5xDb2taHEwdTcMTHaqGa/K03NDFm2Ary7FeVEuBuur1jSWHIq+o4wp9vtsAfBLmV89yxZU40BHauac5uhXcHXfQ7OeZBVZhkCdUcN2l21H44b6V3RAqqxaf4dOiapTd8YbMHMJmyeu5rMFbbW9zS8lXule4pNTREXfh3Zt9MYPZnZ2aV/hQV28KRRjWJnMXuPQxSqEKVDsVbKT9Hu0Re9I8cQLEakNQV5G5c0YDuQjzXL8rEiYKm2AEHlpri/IkOiKu0gKeyZDVTJjW1/n0fCYlcjOJ9AB5wlM6CtsdwBC4spN85E2oJrfvmKIMnRdqSQnLe+w/DyyaZJsMgvXjItB5tysOZI2BkM2Z2cQ3XFK91gwxEUVQHlbvWBI7Nl2VEykQ5O8HdcPnKPcspnOByJMFfdvbh6HXlrstPOuNm2dyw+CUIMlQpa0nEEs/fyB+PoeRYMPs6VNA1syOpH70EXslyfDiN+5eH3asUohvbe4fOjF1jyviQEYXZ2mSbL+D/5sw4x9uWpg/oa+DzWX7ACBUt+ZEwF7xMWIO2O48HWokUrshNB1ksfK+tBXf6mL9SDlxzPYfcR2INRQgllidhPZIqVHoD57HUSw== ahilt@aidans-mbp.lan +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCkf6aIs6bmOAZS+Q7yFaRzPnZPa3eExrDDKqGuikGoNDMP1VcPoyb0cYTZTG5X6YzFt5Blv95WWuw6WEBdUxIax/Z9V4H83A+KRvuwiRI9zU3FaKEeYb18hcHSclAWyjl+N7b9V2KzxVBJCkmdC3XBLp/geDRIbGusg40lySYzYhs73hTYs0CQWHcLIj1jX00hbIdbKyc/fq8ODIEOo/XojvjBQyPlT/BJ5fK08LO7kIBoeQ62iT8yG+J/2vch+WsMBeOt+agYKRSn9pv10+5SdP/emX4r5PkyTS8H3ysuequMUMv5w0rXAL53uTYpJELisNTl8pv2Y4VQKCh2Aj5989NFjcqBcv7KKTfvI3WVG5SNsOtu1tAmC05Xf3fdsb3BRVu7I0pCna26NOKRSh8eLy/uUfA4fUKOQyXr5yG3a+Vse57WZiPizOamhkjYTdvyBB8ad7vZST1ir1viSZl6ps+f3bhfx//DPKYpYyZIc6uDdGQMwFoMEhpTdKYopqGmny5LoR9J9LLeGDJd3M0bj/yyd+2/6cU+1KwjLO7fgyjSCjVUKEdG0HufwS/NZc1q3QT6OrXAd8lw5A4BoHDt+Mp8uRVz5508h7XIOC718nLuiJqwqh3dS6hkybGoBCIvh1BDWsEWOUi0Ygt+Ast3Qw4/eMqvmTCN32OIVtOBpQ== elisecastle@Elises-MBP \ No newline at end of file diff --git a/files/authorized_keys/vpn_authorized_keys_admin b/files/authorized_keys/vpn_authorized_keys_admin index a55a0717e..1c41caaa0 100644 --- a/files/authorized_keys/vpn_authorized_keys_admin +++ b/files/authorized_keys/vpn_authorized_keys_admin @@ -2,4 +2,5 @@ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDiVYoa9i91YL17xWF5kXpYh+PPTriZMAwiJWKkEtMJ ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDBFbx4eZLZEOTUc4d9kP8B2fg3HPA8phqJ7FKpykg87w300H8uTsupBPggxoPMPnpCKpG4aYqgKC5aHzv2TwiHyMnDN7CEtBBBDglWJpBFCheU73dDl66z/vny5tRHWs9utQNzEBPLxSqsGgZmmN8TtIxrMKZ9eX4/1d7o+8msikCYrKr170x0zXtSx5UcWj4yK1al5ZcZieZ4KVWk9/nPkD/k7Sa6JM1QxAVZObK/Y9oA6fjEFuRGdyUMxYx3hyR8ErNCM7kMf8Yn78ycNoKB5CDlLsVpPLcQlqALnBAg1XAowLduCCuOo8HlenM7TQqohB0DO9MCDyZPoiy0kieMBLBcaC7xikBXPDoV9lxgvJf1zbEdQVfWllsb1dNsuYNyMfwYRK+PttC/W37oJT64HJVWJ1O3cl63W69V1gDGUnjfayLjvbyo9llkqJetprfLhu2PfSDJ5jBlnKYnEj2+fZQb8pUrgyVOrhZJ3aKJAC3c665avfEFRDO3EV/cStzoAnHVYVpbR/EXyufYTh7Uvkej8l7g/CeQzxTq+0UovNjRA8UEXGaMWaLq1zZycc6Dx/m7HcZuNFdamM3eGWV+ZFPVBZhXHwZ1Ysq2mpBEYoMcKdoHe3EvFu3eKyrIzaqCLT5LQPfaPJaOistXBJNxDqL6vUhAtETmM5UjKGKZaQ== emalinowski@uchicago.edu ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKJR5N5VIU9qdSfCtlskzuQ7A5kNn8YPeXsoKq0HhYZSd4Aq+7gZ0tY0dFUKtXLpJsQVDTflINc7sLDDXNp3icuSMmxOeNgvBfi8WnzBxcATh3uqidPqE0hcnhVQbpsza1zk8jkOB2o8FfBdDTOSbgPESv/1dnGApfkZj96axERUCMzyyUSEmif2moWJaVv2Iv7O+xjQqIZcMXiAo5BCnTCFFKGVOphy65cOsbcE02tEloiZ3lMAPMamZGV7SMQiD3BusncnVctn/E1vDqeozItgDrTdajKqtW0Mt6JFONVFobzxS8AsqFwaHiikOZhKq2LoqgvbXZvNWH2zRELezP jawadq@Jawads-MacBook-Air.local ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC+iK0ZvY25lgwh4nNUTkD0bq2NES3cPEK+f52HEC2GSVI845ZOqX32kfNpDFT9zvspadOA6KwAgKsRphP/iV8k8WLjAYSYQ3sAE/enuW1+Cr0hhmtahA+uxOavUwsvJ93vIOlIlkD26gIUZTZeYUhi6Aa2FjWFTJ0CtxtUYEdBh+sqW3VoyVvOOA+2DnNYt7/pTrh0DwNxHX7+9TfkmRaVLD4xcdwNLx5N3Yyjgci+oGmw8HATYfSBTaGEXSKJflrN6TDqN87D2pJpMkEvYeZIktoU0kX4HwodrNfwhlruJ2PsePzZ28xlaaZz2fI/LGiqnwf1fRY10R5C/9RpcAcpcYaz305uBCUCI7GGbL9u7WC0W0NZsyaaybaKXyt97p/05os2oe/N5un0whv+NL8z5SLZnaelvttrmVKApvsCD/IqZv5b2PlDilY3L638eKmVOcHaLX/N67MeL9FKnipv2QPzaUKhMoEAtSPqdOWnlndt9dmMBlqT0BKmB85mm0k= ajoa@uchicago.edu -ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDXzpRFVdZMdW8G4hP1or6O26zHvAj+OLxP0G8kGzCiIdmCwuycurU1MhxatPLfEmWuc31FGdhD5xDb2taHEwdTcMTHaqGa/K03NDFm2Ary7FeVEuBuur1jSWHIq+o4wp9vtsAfBLmV89yxZU40BHauac5uhXcHXfQ7OeZBVZhkCdUcN2l21H44b6V3RAqqxaf4dOiapTd8YbMHMJmyeu5rMFbbW9zS8lXule4pNTREXfh3Zt9MYPZnZ2aV/hQV28KRRjWJnMXuPQxSqEKVDsVbKT9Hu0Re9I8cQLEakNQV5G5c0YDuQjzXL8rEiYKm2AEHlpri/IkOiKu0gKeyZDVTJjW1/n0fCYlcjOJ9AB5wlM6CtsdwBC4spN85E2oJrfvmKIMnRdqSQnLe+w/DyyaZJsMgvXjItB5tysOZI2BkM2Z2cQ3XFK91gwxEUVQHlbvWBI7Nl2VEykQ5O8HdcPnKPcspnOByJMFfdvbh6HXlrstPOuNm2dyw+CUIMlQpa0nEEs/fyB+PoeRYMPs6VNA1syOpH70EXslyfDiN+5eH3asUohvbe4fOjF1jyviQEYXZ2mSbL+D/5sw4x9uWpg/oa+DzWX7ACBUt+ZEwF7xMWIO2O48HWokUrshNB1ksfK+tBXf6mL9SDlxzPYfcR2INRQgllidhPZIqVHoD57HUSw== ahilt@aidans-mbp.lan \ No newline at end of file +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXzpRFVdZMdW8G4hP1or6O26zHvAj+OLxP0G8kGzCiIdmCwuycurU1MhxatPLfEmWuc31FGdhD5xDb2taHEwdTcMTHaqGa/K03NDFm2Ary7FeVEuBuur1jSWHIq+o4wp9vtsAfBLmV89yxZU40BHauac5uhXcHXfQ7OeZBVZhkCdUcN2l21H44b6V3RAqqxaf4dOiapTd8YbMHMJmyeu5rMFbbW9zS8lXule4pNTREXfh3Zt9MYPZnZ2aV/hQV28KRRjWJnMXuPQxSqEKVDsVbKT9Hu0Re9I8cQLEakNQV5G5c0YDuQjzXL8rEiYKm2AEHlpri/IkOiKu0gKeyZDVTJjW1/n0fCYlcjOJ9AB5wlM6CtsdwBC4spN85E2oJrfvmKIMnRdqSQnLe+w/DyyaZJsMgvXjItB5tysOZI2BkM2Z2cQ3XFK91gwxEUVQHlbvWBI7Nl2VEykQ5O8HdcPnKPcspnOByJMFfdvbh6HXlrstPOuNm2dyw+CUIMlQpa0nEEs/fyB+PoeRYMPs6VNA1syOpH70EXslyfDiN+5eH3asUohvbe4fOjF1jyviQEYXZ2mSbL+D/5sw4x9uWpg/oa+DzWX7ACBUt+ZEwF7xMWIO2O48HWokUrshNB1ksfK+tBXf6mL9SDlxzPYfcR2INRQgllidhPZIqVHoD57HUSw== ahilt@aidans-mbp.lan +ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQCkf6aIs6bmOAZS+Q7yFaRzPnZPa3eExrDDKqGuikGoNDMP1VcPoyb0cYTZTG5X6YzFt5Blv95WWuw6WEBdUxIax/Z9V4H83A+KRvuwiRI9zU3FaKEeYb18hcHSclAWyjl+N7b9V2KzxVBJCkmdC3XBLp/geDRIbGusg40lySYzYhs73hTYs0CQWHcLIj1jX00hbIdbKyc/fq8ODIEOo/XojvjBQyPlT/BJ5fK08LO7kIBoeQ62iT8yG+J/2vch+WsMBeOt+agYKRSn9pv10+5SdP/emX4r5PkyTS8H3ysuequMUMv5w0rXAL53uTYpJELisNTl8pv2Y4VQKCh2Aj5989NFjcqBcv7KKTfvI3WVG5SNsOtu1tAmC05Xf3fdsb3BRVu7I0pCna26NOKRSh8eLy/uUfA4fUKOQyXr5yG3a+Vse57WZiPizOamhkjYTdvyBB8ad7vZST1ir1viSZl6ps+f3bhfx//DPKYpYyZIc6uDdGQMwFoMEhpTdKYopqGmny5LoR9J9LLeGDJd3M0bj/yyd+2/6cU+1KwjLO7fgyjSCjVUKEdG0HufwS/NZc1q3QT6OrXAd8lw5A4BoHDt+Mp8uRVz5508h7XIOC718nLuiJqwqh3dS6hkybGoBCIvh1BDWsEWOUi0Ygt+Ast3Qw4/eMqvmTCN32OIVtOBpQ== elisecastle@Elises-MBP \ No newline at end of file diff --git a/tf_files/aws/commons/cluster.yaml b/tf_files/aws/commons/cluster.yaml index 811bbdc32..c4c6ee82c 100644 --- a/tf_files/aws/commons/cluster.yaml +++ b/tf_files/aws/commons/cluster.yaml @@ -9,6 +9,7 @@ sshAuthorizedKeys: - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC+iK0ZvY25lgwh4nNUTkD0bq2NES3cPEK+f52HEC2GSVI845ZOqX32kfNpDFT9zvspadOA6KwAgKsRphP/iV8k8WLjAYSYQ3sAE/enuW1+Cr0hhmtahA+uxOavUwsvJ93vIOlIlkD26gIUZTZeYUhi6Aa2FjWFTJ0CtxtUYEdBh+sqW3VoyVvOOA+2DnNYt7/pTrh0DwNxHX7+9TfkmRaVLD4xcdwNLx5N3Yyjgci+oGmw8HATYfSBTaGEXSKJflrN6TDqN87D2pJpMkEvYeZIktoU0kX4HwodrNfwhlruJ2PsePzZ28xlaaZz2fI/LGiqnwf1fRY10R5C/9RpcAcpcYaz305uBCUCI7GGbL9u7WC0W0NZsyaaybaKXyt97p/05os2oe/N5un0whv+NL8z5SLZnaelvttrmVKApvsCD/IqZv5b2PlDilY3L638eKmVOcHaLX/N67MeL9FKnipv2QPzaUKhMoEAtSPqdOWnlndt9dmMBlqT0BKmB85mm0k= ajoa@uchicago.edu - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCdIXKLMs14c8U9exX/sOoIcvOCZ4v2pKsjdM1VBA56GyI98E1R+hxTBecHeWri9MeQcZkrlmjqT3ZzCb87+n0W2LEWquLNfeheAEq61ogi0taxWEpnb4rIAr1U9aS3d0mk5NIIivrwaUHTIvUhH8mn4Pek0GgybZAsjN/MpZ9PZwUtXNmjZoY5gWR0QO4ZWu7ARknFoNcTXwpWyl/Khhal0KKhdB38y3MpJc03IIqhem15e78jRlko04CAZX3zlFAQwbxnrpgrJUMYeY8fZqpV6FiWC40yu+n9KwAZkmtrc45mkxahj8c3QtJ/Z3t33yXEN9PEHV6z104STYi2cPVD rpollard@news-MacBook-Pro.local - ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDXzpRFVdZMdW8G4hP1or6O26zHvAj+OLxP0G8kGzCiIdmCwuycurU1MhxatPLfEmWuc31FGdhD5xDb2taHEwdTcMTHaqGa/K03NDFm2Ary7FeVEuBuur1jSWHIq+o4wp9vtsAfBLmV89yxZU40BHauac5uhXcHXfQ7OeZBVZhkCdUcN2l21H44b6V3RAqqxaf4dOiapTd8YbMHMJmyeu5rMFbbW9zS8lXule4pNTREXfh3Zt9MYPZnZ2aV/hQV28KRRjWJnMXuPQxSqEKVDsVbKT9Hu0Re9I8cQLEakNQV5G5c0YDuQjzXL8rEiYKm2AEHlpri/IkOiKu0gKeyZDVTJjW1/n0fCYlcjOJ9AB5wlM6CtsdwBC4spN85E2oJrfvmKIMnRdqSQnLe+w/DyyaZJsMgvXjItB5tysOZI2BkM2Z2cQ3XFK91gwxEUVQHlbvWBI7Nl2VEykQ5O8HdcPnKPcspnOByJMFfdvbh6HXlrstPOuNm2dyw+CUIMlQpa0nEEs/fyB+PoeRYMPs6VNA1syOpH70EXslyfDiN+5eH3asUohvbe4fOjF1jyviQEYXZ2mSbL+D/5sw4x9uWpg/oa+DzWX7ACBUt+ZEwF7xMWIO2O48HWokUrshNB1ksfK+tBXf6mL9SDlxzPYfcR2INRQgllidhPZIqVHoD57HUSw== ahilt@aidans-mbp.lan + - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCkf6aIs6bmOAZS+Q7yFaRzPnZPa3eExrDDKqGuikGoNDMP1VcPoyb0cYTZTG5X6YzFt5Blv95WWuw6WEBdUxIax/Z9V4H83A+KRvuwiRI9zU3FaKEeYb18hcHSclAWyjl+N7b9V2KzxVBJCkmdC3XBLp/geDRIbGusg40lySYzYhs73hTYs0CQWHcLIj1jX00hbIdbKyc/fq8ODIEOo/XojvjBQyPlT/BJ5fK08LO7kIBoeQ62iT8yG+J/2vch+WsMBeOt+agYKRSn9pv10+5SdP/emX4r5PkyTS8H3ysuequMUMv5w0rXAL53uTYpJELisNTl8pv2Y4VQKCh2Aj5989NFjcqBcv7KKTfvI3WVG5SNsOtu1tAmC05Xf3fdsb3BRVu7I0pCna26NOKRSh8eLy/uUfA4fUKOQyXr5yG3a+Vse57WZiPizOamhkjYTdvyBB8ad7vZST1ir1viSZl6ps+f3bhfx//DPKYpYyZIc6uDdGQMwFoMEhpTdKYopqGmny5LoR9J9LLeGDJd3M0bj/yyd+2/6cU+1KwjLO7fgyjSCjVUKEdG0HufwS/NZc1q3QT6OrXAd8lw5A4BoHDt+Mp8uRVz5508h7XIOC718nLuiJqwqh3dS6hkybGoBCIvh1BDWsEWOUi0Ygt+Ast3Qw4/eMqvmTCN32OIVtOBpQ== elisecastle@Elises-MBP region: ${aws_region} kmsKeyArn: "${kms_key}" apiEndpoints: diff --git a/tf_files/aws/publicvm/root.tf b/tf_files/aws/publicvm/root.tf index d51f4663f..cf08ba645 100644 --- a/tf_files/aws/publicvm/root.tf +++ b/tf_files/aws/publicvm/root.tf @@ -122,6 +122,7 @@ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKJR5N5VIU9qdSfCtlskzuQ7A5kNn8YPeXsoKq0HhY ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQC+iK0ZvY25lgwh4nNUTkD0bq2NES3cPEK+f52HEC2GSVI845ZOqX32kfNpDFT9zvspadOA6KwAgKsRphP/iV8k8WLjAYSYQ3sAE/enuW1+Cr0hhmtahA+uxOavUwsvJ93vIOlIlkD26gIUZTZeYUhi6Aa2FjWFTJ0CtxtUYEdBh+sqW3VoyVvOOA+2DnNYt7/pTrh0DwNxHX7+9TfkmRaVLD4xcdwNLx5N3Yyjgci+oGmw8HATYfSBTaGEXSKJflrN6TDqN87D2pJpMkEvYeZIktoU0kX4HwodrNfwhlruJ2PsePzZ28xlaaZz2fI/LGiqnwf1fRY10R5C/9RpcAcpcYaz305uBCUCI7GGbL9u7WC0W0NZsyaaybaKXyt97p/05os2oe/N5un0whv+NL8z5SLZnaelvttrmVKApvsCD/IqZv5b2PlDilY3L638eKmVOcHaLX/N67MeL9FKnipv2QPzaUKhMoEAtSPqdOWnlndt9dmMBlqT0BKmB85mm0k= ajoa@uchicago.edu ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCdIXKLMs14c8U9exX/sOoIcvOCZ4v2pKsjdM1VBA56GyI98E1R+hxTBecHeWri9MeQcZkrlmjqT3ZzCb87+n0W2LEWquLNfeheAEq61ogi0taxWEpnb4rIAr1U9aS3d0mk5NIIivrwaUHTIvUhH8mn4Pek0GgybZAsjN/MpZ9PZwUtXNmjZoY5gWR0QO4ZWu7ARknFoNcTXwpWyl/Khhal0KKhdB38y3MpJc03IIqhem15e78jRlko04CAZX3zlFAQwbxnrpgrJUMYeY8fZqpV6FiWC40yu+n9KwAZkmtrc45mkxahj8c3QtJ/Z3t33yXEN9PEHV6z104STYi2cPVD rpollard@news-MacBook-Pro.local ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXzpRFVdZMdW8G4hP1or6O26zHvAj+OLxP0G8kGzCiIdmCwuycurU1MhxatPLfEmWuc31FGdhD5xDb2taHEwdTcMTHaqGa/K03NDFm2Ary7FeVEuBuur1jSWHIq+o4wp9vtsAfBLmV89yxZU40BHauac5uhXcHXfQ7OeZBVZhkCdUcN2l21H44b6V3RAqqxaf4dOiapTd8YbMHMJmyeu5rMFbbW9zS8lXule4pNTREXfh3Zt9MYPZnZ2aV/hQV28KRRjWJnMXuPQxSqEKVDsVbKT9Hu0Re9I8cQLEakNQV5G5c0YDuQjzXL8rEiYKm2AEHlpri/IkOiKu0gKeyZDVTJjW1/n0fCYlcjOJ9AB5wlM6CtsdwBC4spN85E2oJrfvmKIMnRdqSQnLe+w/DyyaZJsMgvXjItB5tysOZI2BkM2Z2cQ3XFK91gwxEUVQHlbvWBI7Nl2VEykQ5O8HdcPnKPcspnOByJMFfdvbh6HXlrstPOuNm2dyw+CUIMlQpa0nEEs/fyB+PoeRYMPs6VNA1syOpH70EXslyfDiN+5eH3asUohvbe4fOjF1jyviQEYXZ2mSbL+D/5sw4x9uWpg/oa+DzWX7ACBUt+ZEwF7xMWIO2O48HWokUrshNB1ksfK+tBXf6mL9SDlxzPYfcR2INRQgllidhPZIqVHoD57HUSw== ahilt@aidans-mbp.lan +ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQCkf6aIs6bmOAZS+Q7yFaRzPnZPa3eExrDDKqGuikGoNDMP1VcPoyb0cYTZTG5X6YzFt5Blv95WWuw6WEBdUxIax/Z9V4H83A+KRvuwiRI9zU3FaKEeYb18hcHSclAWyjl+N7b9V2KzxVBJCkmdC3XBLp/geDRIbGusg40lySYzYhs73hTYs0CQWHcLIj1jX00hbIdbKyc/fq8ODIEOo/XojvjBQyPlT/BJ5fK08LO7kIBoeQ62iT8yG+J/2vch+WsMBeOt+agYKRSn9pv10+5SdP/emX4r5PkyTS8H3ysuequMUMv5w0rXAL53uTYpJELisNTl8pv2Y4VQKCh2Aj5989NFjcqBcv7KKTfvI3WVG5SNsOtu1tAmC05Xf3fdsb3BRVu7I0pCna26NOKRSh8eLy/uUfA4fUKOQyXr5yG3a+Vse57WZiPizOamhkjYTdvyBB8ad7vZST1ir1viSZl6ps+f3bhfx//DPKYpYyZIc6uDdGQMwFoMEhpTdKYopqGmny5LoR9J9LLeGDJd3M0bj/yyd+2/6cU+1KwjLO7fgyjSCjVUKEdG0HufwS/NZc1q3QT6OrXAd8lw5A4BoHDt+Mp8uRVz5508h7XIOC718nLuiJqwqh3dS6hkybGoBCIvh1BDWsEWOUi0Ygt+Ast3Qw4/eMqvmTCN32OIVtOBpQ== elisecastle@Elises-MBP EOM ) ( From 1bab75c02b8368d33097a49fe90ec9128cadce15 Mon Sep 17 00:00:00 2001 From: jawadqur <55899496+jawadqur@users.noreply.github.com> Date: Mon, 1 Aug 2022 15:48:30 -0500 Subject: [PATCH 075/106] Update variables.tf (#2012) --- tf_files/aws/eks/variables.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tf_files/aws/eks/variables.tf b/tf_files/aws/eks/variables.tf index efd7e64da..b4275dc6b 100644 --- a/tf_files/aws/eks/variables.tf +++ b/tf_files/aws/eks/variables.tf @@ -37,7 +37,7 @@ variable "worker_drive_size" { } variable "eks_version" { - default = "1.16" + default = "1.21" } variable "workers_subnet_size" { From fdf9b44ede62a5f4ebea055e66e717fdb23809b7 Mon Sep 17 00:00:00 2001 From: Jing Huang <71466688+jingh8@users.noreply.github.com> Date: Mon, 1 Aug 2022 16:34:31 -0500 Subject: [PATCH 076/106] add thor to psql-fips-fix.sh (#2010) --- files/scripts/psql-fips-fix.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/scripts/psql-fips-fix.sh b/files/scripts/psql-fips-fix.sh index 01db2742a..fcbb6e20c 100644 --- a/files/scripts/psql-fips-fix.sh +++ b/files/scripts/psql-fips-fix.sh @@ -16,7 +16,7 @@ for name in indexd fence sheepdog peregrine; do update_pass $name $username $password done 
-for name in wts metadata gearbox audit arborist access-backend argo_db atlas argo; do +for name in wts metadata gearbox audit arborist access-backend argo_db atlas argo thor; do if [[ ! -z $(gen3 secrets decode $name-g3auto dbcreds.json) ]]; then username=$(gen3 secrets decode $name-g3auto dbcreds.json | jq -r .db_username) password=$(gen3 secrets decode $name-g3auto dbcreds.json | jq -r .db_password) From 5dd8d8ddafc5fb2605f226641841bd0d50f9a40f Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Tue, 2 Aug 2022 15:17:48 -0600 Subject: [PATCH 077/106] adding a role binding to grant gitops-sa permissions in the argo namespace (#2011) --- kube/services/jenkins/rolebinding-devops.yaml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/kube/services/jenkins/rolebinding-devops.yaml b/kube/services/jenkins/rolebinding-devops.yaml index 47c98e47b..2f262172e 100644 --- a/kube/services/jenkins/rolebinding-devops.yaml +++ b/kube/services/jenkins/rolebinding-devops.yaml @@ -11,3 +11,16 @@ roleRef: kind: ClusterRole name: admin apiGroup: "" +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: argo-binding + namespace: argo +subjects: +- kind: ServiceAccount + name: gitops-sa +roleRef: + kind: ClusterRole + name: admin + apiGroup: "" \ No newline at end of file From e5b80b72e170e70497caa6a035a4638b9ea3463d Mon Sep 17 00:00:00 2001 From: Fan Wang Date: Thu, 4 Aug 2022 09:11:31 -0500 Subject: [PATCH 078/106] Update web_whitelist (#2013) --- files/squid_whitelist/web_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index 219f6b41e..1bf67da16 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -5,6 +5,7 @@ accounts.google.com achecker.ca apache.github.io +api.epigraphdb.org api.monqcle.com biodata-integration-tests.net biorender.com From 
a6822747a27c8d2fd4ea565684acf4d64286315a Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Thu, 11 Aug 2022 23:42:08 -0500 Subject: [PATCH 079/106] kube-setup-ohdsi script (#2015) * feat: barebone kube-setup-ohdsi script * feat: ingress for kube-setup-ohdsi * feat: use g3kubectl apply * feat: add WebAPI and env variable naming * fix: back to roll * fix: proper path * fix: dns name for testing * feat: use subdomain * fix: port for ingress for ohdsi * fix: type NodePort for ohdsi services * feat: configmap with configuration * fix: missing export? * fix: indentation * fix: no need Atlas nginx config with new Ingress deployment * fix: extra networkpolicies * fix: liveness/readiness probes * fix: networkpolicies? * fix: networkpolicies! * fix: final for networkpolicies * feat: add secrets template with clients and db * fix: bash dummy function * feat: setup creds (based on superset setup script) * chore: remove unused files --- gen3/bin/kube-setup-ohdsi-tools.sh | 17 --- gen3/bin/kube-setup-ohdsi.sh | 134 ++++++++++++++++++ .../gen3/services/revproxy_netpolicy.yaml | 1 + kube/services/ohdsi-atlas/README.md | 117 --------------- kube/services/ohdsi-atlas/config-local.js | 31 ---- .../ohdsi-atlas/ohdsi-atlas-deploy.yaml | 9 -- .../ohdsi-atlas/ohdsi-atlas-service-elb.yaml | 18 --- .../ohdsi-atlas/ohdsi-atlas-service.yaml | 1 + .../ohdsi-webapi/ohdsi-webapi-deploy.yaml | 28 ++-- .../ohdsi-webapi-service-elb.yaml | 18 --- .../ohdsi-webapi/ohdsi-webapi-service.yaml | 1 + kube/services/ohdsi/ohdsi-configmap.yaml | 37 +++++ kube/services/ohdsi/ohdsi-ingress.yaml | 32 +++++ kube/services/ohdsi/ohdsi-secrets.yaml | 60 ++++++++ 14 files changed, 280 insertions(+), 224 deletions(-) delete mode 100644 gen3/bin/kube-setup-ohdsi-tools.sh create mode 100644 gen3/bin/kube-setup-ohdsi.sh delete mode 100644 kube/services/ohdsi-atlas/README.md delete mode 100644 kube/services/ohdsi-atlas/config-local.js delete mode 100644 kube/services/ohdsi-atlas/ohdsi-atlas-service-elb.yaml 
delete mode 100644 kube/services/ohdsi-webapi/ohdsi-webapi-service-elb.yaml create mode 100644 kube/services/ohdsi/ohdsi-configmap.yaml create mode 100644 kube/services/ohdsi/ohdsi-ingress.yaml create mode 100644 kube/services/ohdsi/ohdsi-secrets.yaml diff --git a/gen3/bin/kube-setup-ohdsi-tools.sh b/gen3/bin/kube-setup-ohdsi-tools.sh deleted file mode 100644 index 891cc48ee..000000000 --- a/gen3/bin/kube-setup-ohdsi-tools.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# -# Deploy Atlas/WebAPI into existing commons -# - -source "${GEN3_HOME}/gen3/lib/utils.sh" -gen3_load "gen3/lib/kube-setup-init" - -gen3 roll ohdsi-webapi -g3kubectl apply -f "${GEN3_HOME}/kube/services/ohdsi-webapi/ohdsi-webapi-service.yaml" -gen3 roll ohdsi-atlas -g3kubectl apply -f "${GEN3_HOME}/kube/services/ohdsi-atlas/ohdsi-atlas-service.yaml" -g3kubectl apply -f "${GEN3_HOME}/kube/services/ohdsi-atlas/ohdsi-atlas-service-elb.yaml" - -cat < /dev/null 2>&1 + secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client atlas --urls https://${atlas_hostname}/WebAPI/user/oauth/callback?client_name=OidcClient --username atlas --allowed-scopes openid profile email user | tail -1) + if [[ ! $secrets =~ (\'(.*)\', \'(.*)\') ]]; then + gen3_log_err "kube-setup-ohdsi" "Failed generating oidc client for atlas: $secrets" + return 1 + fi + fi + local FENCE_CLIENT_ID="${BASH_REMATCH[2]}" + local FENCE_CLIENT_SECRET="${BASH_REMATCH[3]}" + gen3_log_info "create ohdsi-secret" + mkdir -m 0700 -p "$(gen3_secrets_folder)/g3auto/ohdsi" + + cat - < /dev/null 2>&1; then + local credsPath="$(gen3_secrets_folder)/g3auto/ohdsi/appcreds.json" + if [ -f "$credsPath" ]; then + gen3 secrets sync + return 0 + fi + mkdir -p "$(dirname "$credsPath")" + if ! new_client > "$credsPath"; then + gen3_log_err "Failed to setup ohdsi fence client" + rm "$credsPath" || true + return 1 + fi + gen3 secrets sync + fi + + if ! 
g3kubectl describe secret ohdsi-g3auto | grep dbcreds.json > /dev/null 2>&1; then + gen3_log_info "create database" + if ! gen3 db setup ohdsi; then + gen3_log_err "Failed setting up database for ohdsi service" + return 1 + fi + gen3 secrets sync + fi +} + +setup_secrets() { + # ohdsi-secrets.yaml populate and apply. + gen3_log_info "Deploying secrets for ohdsi" + # subshell + + ( + if ! dbcreds="$(gen3 db creds ohdsi)"; then + gen3_log_err "unable to find db creds for ohdsi service" + return 1 + fi + + if ! appcreds="$(gen3 secrets decode ohdsi-g3auto appcreds.json)"; then + gen3_log_err "unable to find app creds for ohdsi service" + return 1 + fi + + local hostname=$(gen3 api hostname) + export DB_NAME=$(jq -r ".db_database" <<< "$dbcreds") + export DB_USER=$(jq -r ".db_username" <<< "$dbcreds") + export DB_PASS=$(jq -r ".db_password" <<< "$dbcreds") + export DB_HOST=$(jq -r ".db_host" <<< "$dbcreds") + + export FENCE_URL="https://${hostname}/user/user" + export FENCE_METADATA_URL="https://${hostname}/.well-known/openid-configuration" + export FENCE_CLIENT_ID=$(jq -r ".FENCE_CLIENT_ID" <<< "$appcreds") + export FENCE_CLIENT_SECRET=$(jq -r ".FENCE_CLIENT_SECRET" <<< "$appcreds") + envsubst <"${GEN3_HOME}/kube/services/ohdsi/ohdsi-secrets.yaml" | g3kubectl apply -f - + ) +} + +setup_ingress() { + certs=$(aws acm list-certificates --certificate-statuses ISSUED | jq --arg hostname $hostname -c '.CertificateSummaryList[] | select(.DomainName | contains("*."+$hostname))') + if [ "$certs" = "" ]; then + gen3_log_info "no certs found for *.${hostname}. exiting" + exit 22 + fi + gen3_log_info "Found ACM certificate for *.$hostname" + export ARN=$(jq -r .CertificateArn <<< $certs) + export ohdsi_hostname="atlas.${hostname}" + envsubst <${GEN3_HOME}/kube/services/ohdsi/ohdsi-ingress.yaml | g3kubectl apply -f - +} + +# main -------------------------------------- +# deploy superset +if [[ $# -gt 0 && "$1" == "new-client" ]]; then + new_client + exit $? 
+elif [[ $# -gt 0 && "$1" == "ingress" ]]; then + setup_ingress + exit $? +fi + +setup_creds + +setup_secrets +setup_ingress + +envsubst <${GEN3_HOME}/kube/services/ohdsi/ohdsi-configmap.yaml | g3kubectl apply -f - + +gen3 roll ohdsi-webapi +g3kubectl apply -f "${GEN3_HOME}/kube/services/ohdsi-webapi/ohdsi-webapi-service.yaml" + +gen3 roll ohdsi-atlas +g3kubectl apply -f "${GEN3_HOME}/kube/services/ohdsi-atlas/ohdsi-atlas-service.yaml" + +cat <:5432/ - datasource_ohdsi_schema: ohdsi - datasource_username: - datasource_password: - spring_jpa_properties_hibernate_default_schema: ohdsi - spring_jpa_properties_hibernate_dialect: org.hibernate.dialect.PostgreSQLDialect - spring_batch_repository_tableprefix: ohdsi.BATCH_ - flyway_datasource_driverClassName: org.postgresql.Driver - flyway_datasource_url: jdbc:postgresql://:5432/ - flyway_schemas: ohdsi - flyway_placeholders_ohdsiSchema: ohdsi - flyway_datasource_username: - flyway_datasource_password: - flyway_locations: classpath:db/migration/postgresql - # Zoe testing Atlas-Fence - security_cors_enabled: "true" - security_origin: "*" - security_token_expiration: "43200" - security_ssl_enabled: "false" - -# security_provider: DisabledSecurity - security_provider: AtlasRegularSecurity - - security_auth_windows_enabled: "false" - security_auth_kerberos_enabled: "false" - security_auth_openid_enabled: "true" - security_auth_facebook_enabled: "false" - security_auth_github_enabled: "false" - security_auth_google_enabled: "false" - security_auth_jdbc_enabled: "false" - security_auth_ldap_enabled: "false" - security_auth_ad_enabled: "false" - security_auth_cas_enabled: "false" - - security_db_datasource_schema: security - security_db_datasource_url: jdbc:postgresql://:5432/ - security_db_datasource_driverClassName: org.postgresql.Driver - security_db_datasource_username: - security_db_datasource_password: - - security_oid_clientId: - security_oid_apiSecret: - security_oid_url: https:///.well-known/openid-configuration - 
security_oid_redirectUrl: https:///atlas/#/welcome - security_oid_logoutUrl: https:///atlas/#/home - - security_oauth_callback_ui: https:///atlas/#/welcome - security_oauth_callback_api: https:///WebAPI/user/oauth/callback - security_oauth_callback_urlResolver: query - - logging_level_root: info - logging_level_org_ohdsi: info - logging_level_org_apache_shiro: info -``` - -## Making changes and redeploying to QA - -Example: we have some changes in `kube/services/ohdsi-atlas/config-local.js`. - -To redeploy to QA, follow these steps: -- delete old configmap `kubectl delete configmap ohdsi-atlas-config-local` -- get a copy of `config-local.js` into the current folder -- run the `kubectl create configmap ohdsi-atlas-config-local --from-file=config-local.js` on QA server -- assess results with `kubectl describe configmap ohdsi-atlas-config-local` -- and then restart Atlas with `gen3 roll ohdsi-atlas` -- watch pod status with `kubectl get pods -l app=ohdsi-atlas` - -To clear the browser cache (when making .js changes): -- go to https://atlas-qa-mickey.planx-pla.net/atlas/js/config-local.js and force-reload it to clear old code from browser cache diff --git a/kube/services/ohdsi-atlas/config-local.js b/kube/services/ohdsi-atlas/config-local.js deleted file mode 100644 index 73f9aa76d..000000000 --- a/kube/services/ohdsi-atlas/config-local.js +++ /dev/null @@ -1,31 +0,0 @@ -define([], function () { - var configLocal = {}; - // WebAPI - configLocal.api = { - name: 'Gen3', - url: 'https://atlas-qa-mickey.planx-pla.net/WebAPI/' - }; - configLocal.authProviders = [{ - "name": "Fence", - "url": "user/login/openid", - "ajax": false, - "icon": "fa fa-openid" - }]; - configLocal.cohortComparisonResultsEnabled = false; - configLocal.userAuthenticationEnabled = true; - configLocal.plpResultsEnabled = false; - return configLocal; -}); - -var parentOfThisIframe = window.parent; -var mouseoverCount = 0; - -console.log("Adding activity event listener..."); 
-window.addEventListener("mouseover", function(event) { - mouseoverCount++; - if (mouseoverCount % 20 == 0 && parentOfThisIframe) { - console.log("Activity detected. Atlas running in an iframe. Posting 'I'm alive' message..."); - parentOfThisIframe.postMessage("refresh token!", "*"); - mouseoverCount = 0; - } -}); diff --git a/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml b/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml index a155e92de..abb611e39 100644 --- a/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml +++ b/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml @@ -2,8 +2,6 @@ apiVersion: apps/v1 kind: Deployment metadata: name: ohdsi-atlas-deployment - annotations: - gen3.io/network-ingress: "ohdsi-webapi" spec: selector: # Only select pods based on the 'app' label @@ -39,9 +37,6 @@ spec: - name: ohdsi-atlas-config-local configMap: name: ohdsi-atlas-config-local - - name: ohdsi-atlas-nginx-webapi - configMap: - name: ohdsi-atlas-nginx-webapi containers: - name: ohdsi-atlas GEN3_OHDSI-ATLAS_IMAGE|-image: quay.io/cdis/ohdsi-atlas:latest-| @@ -63,10 +58,6 @@ spec: readOnly: true mountPath: /usr/share/nginx/html/atlas/js/config-local.js subPath: config-local.js - - name: ohdsi-atlas-nginx-webapi - readOnly: true - mountPath: /etc/nginx/conf.d/custom-conf/webapi.conf - subPath: webapi.conf imagePullPolicy: Always resources: requests: diff --git a/kube/services/ohdsi-atlas/ohdsi-atlas-service-elb.yaml b/kube/services/ohdsi-atlas/ohdsi-atlas-service-elb.yaml deleted file mode 100644 index eb59fea62..000000000 --- a/kube/services/ohdsi-atlas/ohdsi-atlas-service-elb.yaml +++ /dev/null @@ -1,18 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - name: ohdsi-atlas-service-elb - annotations: - service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp - service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443" - service.beta.kubernetes.io/aws-load-balancer-ssl-cert: see-kube-setup-ohdsi-atlas - 
service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy: "ELBSecurityPolicy-TLS-1-2-2017-01" -spec: - selector: - app: ohdsi-atlas - ports: - - protocol: TCP - port: 443 - targetPort: 8080 - name: https - type: LoadBalancer diff --git a/kube/services/ohdsi-atlas/ohdsi-atlas-service.yaml b/kube/services/ohdsi-atlas/ohdsi-atlas-service.yaml index f2f2f3501..65b8d1149 100644 --- a/kube/services/ohdsi-atlas/ohdsi-atlas-service.yaml +++ b/kube/services/ohdsi-atlas/ohdsi-atlas-service.yaml @@ -3,6 +3,7 @@ apiVersion: v1 metadata: name: ohdsi-atlas-service spec: + type: NodePort selector: app: ohdsi-atlas ports: diff --git a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml index 08f58efb4..75d916e59 100644 --- a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml +++ b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml @@ -2,8 +2,6 @@ apiVersion: apps/v1 kind: Deployment metadata: name: ohdsi-webapi-deployment - annotations: - gen3.io/network-ingress: "ohdsi-atlas" spec: selector: # Only select pods based on the 'app' label @@ -19,7 +17,9 @@ spec: metadata: labels: app: ohdsi-webapi - dbatlas: "yes" + dbohdsi-webapi: "yes" + dbomop-data: "yes" + internet: "yes" public: "yes" GEN3_DATE_LABEL spec: @@ -39,17 +39,17 @@ spec: containers: - name: ohdsi-webapi GEN3_OHDSI-WEBAPI_IMAGE|-image: quay.io/cdis/ohdsi-webapi:latest-| - # livenessProbe: - # httpGet: - # path: /WebAPI/source/sources - # port: 8080 - # initialDelaySeconds: 30 - # periodSeconds: 60 - # timeoutSeconds: 30 - # readinessProbe: - # httpGet: - # path: /WebAPI/source/sources - # port: 8080 + livenessProbe: + httpGet: + path: /WebAPI/info/ + port: 8080 + initialDelaySeconds: 30 + periodSeconds: 60 + timeoutSeconds: 30 + readinessProbe: + httpGet: + path: /WebAPI/info/ + port: 8080 ports: - containerPort: 8080 envFrom: diff --git a/kube/services/ohdsi-webapi/ohdsi-webapi-service-elb.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-service-elb.yaml 
deleted file mode 100644 index 6d3c4fd1e..000000000 --- a/kube/services/ohdsi-webapi/ohdsi-webapi-service-elb.yaml +++ /dev/null @@ -1,18 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - name: ohdsi-webapi-service-elb - annotations: - service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp - service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443" - service.beta.kubernetes.io/aws-load-balancer-ssl-cert: see-kube-setup-ohdsi-webapi - service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy: "ELBSecurityPolicy-TLS-1-2-2017-01" -spec: - selector: - app: ohdsi-webapi - ports: - - protocol: TCP - port: 443 - targetPort: 8080 - name: https - type: LoadBalancer diff --git a/kube/services/ohdsi-webapi/ohdsi-webapi-service.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-service.yaml index ebca42108..18ef9047d 100644 --- a/kube/services/ohdsi-webapi/ohdsi-webapi-service.yaml +++ b/kube/services/ohdsi-webapi/ohdsi-webapi-service.yaml @@ -3,6 +3,7 @@ apiVersion: v1 metadata: name: ohdsi-webapi-service spec: + type: NodePort selector: app: ohdsi-webapi ports: diff --git a/kube/services/ohdsi/ohdsi-configmap.yaml b/kube/services/ohdsi/ohdsi-configmap.yaml new file mode 100644 index 000000000..e2d712533 --- /dev/null +++ b/kube/services/ohdsi/ohdsi-configmap.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: ohdsi-atlas-config-local +data: + config-local.js: | + define([], function () { + var configLocal = {}; + // WebAPI + configLocal.api = { + name: 'Gen3', + url: 'https://atlas.$hostname/WebAPI/' + }; + configLocal.authProviders = [{ + "name": "Fence", + "url": "user/login/openid", + "ajax": false, + "icon": "fa fa-openid" + }]; + configLocal.cohortComparisonResultsEnabled = false; + configLocal.userAuthenticationEnabled = true; + configLocal.plpResultsEnabled = false; + return configLocal; + }); + + var parentOfThisIframe = window.parent; + var mouseoverCount = 0; + + console.log("Adding activity event listener..."); + 
window.addEventListener("mouseover", function(event) { + mouseoverCount++; + if (mouseoverCount % 20 == 0 && parentOfThisIframe) { + console.log("Activity detected. Atlas running in an iframe. Posting 'I'm alive' message..."); + parentOfThisIframe.postMessage("refresh token!", "*"); + mouseoverCount = 0; + } + }); diff --git a/kube/services/ohdsi/ohdsi-ingress.yaml b/kube/services/ohdsi/ohdsi-ingress.yaml new file mode 100644 index 000000000..487ee44e7 --- /dev/null +++ b/kube/services/ohdsi/ohdsi-ingress.yaml @@ -0,0 +1,32 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ohdsi-ingress + annotations: + # TODO: Make this configurable + alb.ingress.kubernetes.io/scheme: internet-facing + alb.ingress.kubernetes.io/tags: Environment=$vpc_name + alb.ingress.kubernetes.io/certificate-arn: $ARN + alb.ingress.kubernetes.io/group.name: "$vpc_name" + alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]' + alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}' +spec: + ingressClassName: alb + rules: + - host: $ohdsi_hostname + http: + paths: + - path: /atlas + pathType: Prefix + backend: + service: + name: ohdsi-atlas-service + port: + number: 80 + - path: /WebAPI + pathType: Prefix + backend: + service: + name: ohdsi-webapi-service + port: + number: 80 diff --git a/kube/services/ohdsi/ohdsi-secrets.yaml b/kube/services/ohdsi/ohdsi-secrets.yaml new file mode 100644 index 000000000..7b84c7964 --- /dev/null +++ b/kube/services/ohdsi/ohdsi-secrets.yaml @@ -0,0 +1,60 @@ +apiVersion: v1 +kind: Secret +metadata: + name: ohdsi-webapi-config +type: Opaque +stringData: + datasource_driverClassName: org.postgresql.Driver + datasource_url: jdbc:postgresql://$DB_HOST:5432/$DB_NAME + datasource_ohdsi_schema: ohdsi + datasource_username: $DB_USER + datasource_password: $DB_PASS + spring_jpa_properties_hibernate_default_schema: ohdsi + 
spring_jpa_properties_hibernate_dialect: org.hibernate.dialect.PostgreSQLDialect + spring_batch_repository_tableprefix: ohdsi.BATCH_ + flyway_datasource_driverClassName: org.postgresql.Driver + flyway_datasource_url: jdbc:postgresql://$DB_HOST:5432/$DB_NAME + flyway_schemas: ohdsi + flyway_placeholders_ohdsiSchema: ohdsi + flyway_datasource_username: $DB_USER + flyway_datasource_password: $DB_PASS + flyway_locations: classpath:db/migration/postgresql + # Zoe testing Atlas-Fence + security_cors_enabled: "true" + security_origin: "*" + security_token_expiration: "43200" + security_ssl_enabled: "false" + +# security_provider: DisabledSecurity + security_provider: AtlasRegularSecurity + + security_auth_windows_enabled: "false" + security_auth_kerberos_enabled: "false" + security_auth_openid_enabled: "true" + security_auth_facebook_enabled: "false" + security_auth_github_enabled: "false" + security_auth_google_enabled: "false" + security_auth_jdbc_enabled: "false" + security_auth_ldap_enabled: "false" + security_auth_ad_enabled: "false" + security_auth_cas_enabled: "false" + + security_db_datasource_schema: security + security_db_datasource_url: jdbc:postgresql://$DB_HOST:5432/$DB_NAME + security_db_datasource_driverClassName: org.postgresql.Driver + security_db_datasource_username: $DB_USER + security_db_datasource_password: $DB_PASS + + security_oid_clientId: $FENCE_CLIENT_ID + security_oid_apiSecret: $FENCE_CLIENT_SECRET + security_oid_url: https://$hostname/.well-known/openid-configuration + security_oid_redirectUrl: https://atlas.$hostname/atlas/#/welcome + security_oid_logoutUrl: https://atlas.$hostname/atlas/#/home + + security_oauth_callback_ui: https://atlas.$hostname/atlas/#/welcome + security_oauth_callback_api: https://atlas.$hostname/WebAPI/user/oauth/callback + security_oauth_callback_urlResolver: query + + logging_level_root: info + logging_level_org_ohdsi: info + logging_level_org_apache_shiro: info From 4f7619ae0c6319c3c9e746d50dfe8959d6d403c2 Mon Sep 
17 00:00:00 2001 From: Jing Huang <71466688+jingh8@users.noreply.github.com> Date: Fri, 12 Aug 2022 09:34:27 -0500 Subject: [PATCH 080/106] add kube-setup-thor (#2002) * add kube-setup-thor * create tables * add githubtoken check * add jira token * disable auth * create table in deployment * add jenkins secret back * add thor-admin * fix init path * Update kube/services/revproxy/gen3.nginx.conf/thor-service.conf * Update thor-service.conf * add slack api token Co-authored-by: Atharva Rane <41084525+atharvar28@users.noreply.github.com> Co-authored-by: Hara Prasad --- gen3/bin/kube-setup-thor.sh | 70 +++++++++++++++++++ .../gen3.nginx.conf/thor-service.conf | 22 ++++-- kube/services/thor/thor-deploy.yaml | 32 +++++++-- 3 files changed, 115 insertions(+), 9 deletions(-) create mode 100644 gen3/bin/kube-setup-thor.sh diff --git a/gen3/bin/kube-setup-thor.sh b/gen3/bin/kube-setup-thor.sh new file mode 100644 index 000000000..50de4d5bb --- /dev/null +++ b/gen3/bin/kube-setup-thor.sh @@ -0,0 +1,70 @@ +#!/bin/bash +# +# Deploy the thor service. +# + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/gen3setup" + +setup_database() { + gen3_log_info "setting up database for thor service ..." + + if g3kubectl describe secret thor-g3auto > /dev/null 2>&1; then + gen3_log_info "thor-g3auto secret already configured" + return 0 + fi + if [[ -n "$JENKINS_HOME" || ! -f "$(gen3_secrets_folder)/creds.json" ]]; then + gen3_log_err "skipping db setup in non-adminvm environment" + return 0 + fi + # Setup .env file that thor consumes + if [[ ! -f "$secretsFolder/thor.env" || ! -f "$secretsFolder/base64Authz.txt" ]]; then + local secretsFolder="$(gen3_secrets_folder)/g3auto/thor" + if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then + if ! gen3 db setup thor; then + gen3_log_err "Failed setting up database for thor service" + return 1 + fi + fi + if [[ ! 
-f "$secretsFolder/dbcreds.json" ]]; then + gen3_log_err "dbcreds not present in Gen3Secrets/" + return 1 + fi + + # go ahead and rotate the password whenever we regen this file + local password="$(gen3 random)" # pragma: allowlist secret + cat - > "$secretsFolder/thor.env" < "$secretsFolder/base64Authz.txt" + fi + gen3 secrets sync 'setup thor-g3auto secrets' +} + +github_token="$(cat $(gen3_secrets_folder)/g3auto/thor/github_token.json)" # pragma: allowlist secret +jira_api_token="$(cat $(gen3_secrets_folder)/g3auto/thor/jira_api_token.json)" # pragma: allowlist secret + +if [[ -z "$github_token" ]]; then + gen3_log_err "missing github credential for thor" + exit 1 +fi +if [[ -z "$jira_api_token" ]]; then + gen3_log_err "missing jira credential for thor" + exit 1 +fi + +if ! setup_database; then + gen3_log_err "kube-setup-thor bailing out - database failed setup" + exit 1 +fi + +gen3 roll thor +g3kubectl apply -f "${GEN3_HOME}/kube/services/thor/thor-service.yaml" + +gen3_log_info "The thor service has been deployed onto the kubernetes cluster" \ No newline at end of file diff --git a/kube/services/revproxy/gen3.nginx.conf/thor-service.conf b/kube/services/revproxy/gen3.nginx.conf/thor-service.conf index e5f522ebb..15bf4d40e 100644 --- a/kube/services/revproxy/gen3.nginx.conf/thor-service.conf +++ b/kube/services/revproxy/gen3.nginx.conf/thor-service.conf @@ -2,18 +2,30 @@ if ($csrf_check !~ ^ok-\S.+$) { return 403 "failed csrf check"; } + + set $proxy_service "thor-service"; + set $upstream http://thor-service$des_domain; + rewrite ^/thor/(.*) /$1 break; + proxy_pass $upstream; + proxy_redirect http://$host/ https://$host/thor/; + } + + location /thor-admin/ { + if ($csrf_check !~ ^ok-\S.+$) { + return 403 "failed csrf check"; + } error_page 403 @errorworkspace; - set $authz_resource "/thor"; + set $authz_resource "/thor_admin"; set $authz_method "access"; - set $authz_service "thor"; + set $authz_service "thor_admin"; # be careful - sub-request runs in same 
context as this request - auth_request /gen3-authz; - + auth_request /gen3-authz; + set $proxy_service "thor-service"; set $upstream http://thor-service$des_domain; rewrite ^/thor/(.*) /$1 break; proxy_pass $upstream; - proxy_redirect http://$host/ https://$host/thor/; + proxy_redirect http://$host/ https://$host/thor-admin/; } diff --git a/kube/services/thor/thor-deploy.yaml b/kube/services/thor/thor-deploy.yaml index dbe46a734..088a3ca53 100644 --- a/kube/services/thor/thor-deploy.yaml +++ b/kube/services/thor/thor-deploy.yaml @@ -45,16 +45,26 @@ spec: secretKeyRef: name: thor-g3auto key: "jenkins_api_token.json" - - name: JENKINS2_API_TOKEN + - name: JENKINS_JOB_TOKEN valueFrom: secretKeyRef: name: thor-g3auto - key: "jenkins2_api_token.json" - - name: JENKINS_JOB_TOKEN + key: "jenkins_job_token.json" + - name: GITHUB_TOKEN valueFrom: secretKeyRef: name: thor-g3auto - key: "jenkins_job_token.json" + key: "github_token.json" + - name: JIRA_API_TOKEN + valueFrom: + secretKeyRef: + name: thor-g3auto + key: "jira_api_token.json" + - name: SLACK_API_TOKEN + valueFrom: + secretKeyRef: + name: thor-g3auto + key: "slacktoken.json" imagePullPolicy: Always volumeMounts: - name: config-volume-g3auto @@ -65,3 +75,17 @@ spec: - name: config-volume-g3auto secret: secretName: thor-g3auto + initContainers: + - name: thor-db-create + image: quay.io/cdis/thor:master + imagePullPolicy: Always + volumeMounts: + - name: config-volume-g3auto + readOnly: true + mountPath: /src/thor.env + subPath: thor.env + command: ["/bin/sh"] + args: + - "-c" + - | + /env/bin/python /src/src/thor/create_all_tables.py From 01ee5330ce132825619f6e129f5aaee5370fb8ef Mon Sep 17 00:00:00 2001 From: Spencer Myles Axelrod Date: Wed, 17 Aug 2022 13:42:39 -0700 Subject: [PATCH 081/106] Fix gen3qa-run (#1928) * add {data} attr to token scopes * add data to scopes Co-authored-by: Alexander VanTol --- kube/services/jobs/gen3qa-check-bucket-access-job.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) 
diff --git a/kube/services/jobs/gen3qa-check-bucket-access-job.yaml b/kube/services/jobs/gen3qa-check-bucket-access-job.yaml index 45ccab34d..547689635 100644 --- a/kube/services/jobs/gen3qa-check-bucket-access-job.yaml +++ b/kube/services/jobs/gen3qa-check-bucket-access-job.yaml @@ -116,14 +116,14 @@ spec: fi fi echo "generate access token" - echo "fence-create --path fence token-create --type access_token --username $TEST_OPERATOR --scopes openid,user,test-client --exp $TOKEN_EXPIRATION" + echo "fence-create --path fence token-create --type access_token --username $TEST_OPERATOR --scopes openid,user,test-client,data --exp $TOKEN_EXPIRATION" tempFile="$(mktemp -p /tmp token.txt_XXXXXX)" success=false count=0 sleepTime=10 # retry loop while [[ $count -lt 3 && $success == false ]]; do - if fence-create --path fence token-create --type access_token --username $TEST_OPERATOR --scopes openid,user,test-client --exp $TOKEN_EXPIRATION > "$tempFile"; then + if fence-create --path fence token-create --type access_token --username $TEST_OPERATOR --scopes openid,user,test-client,data --exp $TOKEN_EXPIRATION > "$tempFile"; then echo "fence-create success!" 
tail -1 "$tempFile" > /mnt/shared/access_token.txt # base64 --decode complains about invalid characters - don't know why From 6ded7b6119922eb99b18ed21649d7810bf8bfdbf Mon Sep 17 00:00:00 2001 From: bill winslow Date: Wed, 17 Aug 2022 19:42:56 -0500 Subject: [PATCH 082/106] adding process collection --- kube/services/datadog/values.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kube/services/datadog/values.yaml b/kube/services/datadog/values.yaml index fea2c20af..47896e4f0 100644 --- a/kube/services/datadog/values.yaml +++ b/kube/services/datadog/values.yaml @@ -72,15 +72,15 @@ datadog: # datadog.processAgent.processCollection -- Set this to true to enable process collection in process monitoring agent ## Requires processAgent.enabled to be set to true to have any effect - processCollection: false + processCollection: true # datadog.processAgent.stripProcessArguments -- Set this to scrub all arguments from collected processes ## Requires processAgent.enabled and processAgent.processCollection to be set to true to have any effect ## ref: https://docs.datadoghq.com/infrastructure/process/?tab=linuxwindows#process-arguments-scrubbing - stripProcessArguments: false + stripProcessArguments: true # datadog.processAgent.processDiscovery -- Enables or disables autodiscovery of integrations - processDiscovery: false + processDiscovery: true ## Enable systemProbe agent and provide custom configs systemProbe: From d424b4cd3a50b5522df726b5071dc316432fcf62 Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Thu, 18 Aug 2022 15:15:17 -0700 Subject: [PATCH 083/106] pull branch in initContainer (#2025) --- kube/services/thor/thor-deploy.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/thor/thor-deploy.yaml b/kube/services/thor/thor-deploy.yaml index 088a3ca53..ac4649352 100644 --- a/kube/services/thor/thor-deploy.yaml +++ b/kube/services/thor/thor-deploy.yaml @@ -77,7 +77,7 @@ spec: secretName: thor-g3auto 
initContainers: - name: thor-db-create - image: quay.io/cdis/thor:master + GEN3_THOR_IMAGE imagePullPolicy: Always volumeMounts: - name: config-volume-g3auto From 9ce4cca5d172dda409a24c6478e661e95f6dfe87 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Fri, 19 Aug 2022 11:57:57 -0500 Subject: [PATCH 084/106] Fix for exposing WebAPI on https (#2021) * feat: nginx for WebAPI * fix: configmap definition * feat: use less CPU * feat: expose nginx in WebAPI service Co-authored-by: jawadqur <55899496+jawadqur@users.noreply.github.com> --- gen3/bin/kube-setup-ohdsi.sh | 2 ++ .../ohdsi-webapi/ohdsi-webapi-deploy.yaml | 25 +++++++++++++++-- .../ohdsi-webapi-reverse-proxy-config.yaml | 27 +++++++++++++++++++ .../ohdsi-webapi/ohdsi-webapi-service.yaml | 2 +- 4 files changed, 53 insertions(+), 3 deletions(-) create mode 100644 kube/services/ohdsi-webapi/ohdsi-webapi-reverse-proxy-config.yaml diff --git a/gen3/bin/kube-setup-ohdsi.sh b/gen3/bin/kube-setup-ohdsi.sh index 891155051..0a4d3b7a8 100644 --- a/gen3/bin/kube-setup-ohdsi.sh +++ b/gen3/bin/kube-setup-ohdsi.sh @@ -91,6 +91,8 @@ setup_secrets() { export FENCE_CLIENT_ID=$(jq -r ".FENCE_CLIENT_ID" <<< "$appcreds") export FENCE_CLIENT_SECRET=$(jq -r ".FENCE_CLIENT_SECRET" <<< "$appcreds") envsubst <"${GEN3_HOME}/kube/services/ohdsi/ohdsi-secrets.yaml" | g3kubectl apply -f - + + envsubst '$hostname' <"${GEN3_HOME}/kube/services/ohdsi-webapi/ohdsi-webapi-reverse-proxy-config.yaml" | g3kubectl apply -f - ) } diff --git a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml index 75d916e59..f720ec530 100644 --- a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml +++ b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml @@ -36,6 +36,10 @@ spec: - ohdsi-webapi topologyKey: "kubernetes.io/hostname" automountServiceAccountToken: false + volumes: + - name: ohdsi-webapi-reverse-proxy-config + configMap: + name: ohdsi-webapi-reverse-proxy-config containers: - name: 
ohdsi-webapi GEN3_OHDSI-WEBAPI_IMAGE|-image: quay.io/cdis/ohdsi-webapi:latest-| @@ -58,8 +62,25 @@ spec: imagePullPolicy: Always resources: requests: - cpu: 1 + cpu: '0.5' memory: 1Gi limits: - cpu: 1 + cpu: '0.5' memory: 1Gi + - name: ohdsi-webapi-reverse-proxy + image: nginx:1.23 + ports: + - containerPort: 80 + volumeMounts: + - name: ohdsi-webapi-reverse-proxy-config + readOnly: true + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + imagePullPolicy: Always + resources: + requests: + cpu: '0.5' + memory: 1Gi + limits: + cpu: '0.5' + memory: 1Gi diff --git a/kube/services/ohdsi-webapi/ohdsi-webapi-reverse-proxy-config.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-reverse-proxy-config.yaml new file mode 100644 index 000000000..d31948e49 --- /dev/null +++ b/kube/services/ohdsi-webapi/ohdsi-webapi-reverse-proxy-config.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: ohdsi-webapi-reverse-proxy-config +data: + nginx.conf: | + user nginx; + worker_processes 1; + events { + worker_connections 10240; + } + http { + server { + listen 80; + server_name localhost; + resolver kube-dns.kube-system.svc.cluster.local ipv6=off; + location / { + set $proxy_service "ohdsi-webapi"; + # upstream is written to logs + set $upstream http://localhost:8080; + proxy_pass $upstream; + proxy_set_header Host atlas.$hostname; + proxy_redirect http://atlas.$hostname/ https://atlas.$hostname/; + client_max_body_size 0; + } + } + } diff --git a/kube/services/ohdsi-webapi/ohdsi-webapi-service.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-service.yaml index 18ef9047d..d5711915f 100644 --- a/kube/services/ohdsi-webapi/ohdsi-webapi-service.yaml +++ b/kube/services/ohdsi-webapi/ohdsi-webapi-service.yaml @@ -9,5 +9,5 @@ spec: ports: - protocol: TCP port: 80 - targetPort: 8080 + targetPort: 80 name: http From 1d265f77ce385179f1d8f4ec42d143f100ba9fef Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Fri, 19 Aug 2022 11:58:19 -0500 Subject: [PATCH 085/106] feat: 
update superset deployment (#2022) --- kube/services/superset/superset-deploy.yaml | 90 ++++++++++++------- kube/services/superset/superset-redis.yaml | 12 +-- .../superset/superset-secrets-template.yaml | 1 - 3 files changed, 63 insertions(+), 40 deletions(-) diff --git a/kube/services/superset/superset-deploy.yaml b/kube/services/superset/superset-deploy.yaml index 222c9a660..72b9b88be 100644 --- a/kube/services/superset/superset-deploy.yaml +++ b/kube/services/superset/superset-deploy.yaml @@ -22,7 +22,7 @@ metadata: name: superset-config labels: app: superset - chart: superset-0.6.1 + chart: superset-0.7.1 release: "superset" heritage: "Helm" type: Opaque @@ -86,7 +86,7 @@ stringData: logging.info("Oauth2 provider: {0}.".format(provider)) if provider == 'fence': # As example, this line request a GET to base_url + '/' + userDetails with Bearer Authentication, - # and expects that authorization server checks the token, and response with user details + # and expects that authorization server checks the token, and response with user details me = self.appbuilder.sm.oauth_remotes[provider].get(env('FENCE_URL')).json() logging.info("user_data: {0}".format(me)) # logging.info("user_data as dir: {0}".format(dir(me))) @@ -97,13 +97,13 @@ stringData: from flask_appbuilder.security.manager import AUTH_OAUTH AUTH_TYPE = AUTH_OAUTH OAUTH_PROVIDERS = [ - { 'name':'fence', - 'token_key':'access_token', # Name of the token in the response of access_token_url - 'icon':'fa-address-card', # Icon for the provider + { 'name': 'fence', + 'token_key': 'access_token', # Name of the token in the response of access_token_url + 'icon': 'fa fa-openid', # Icon for the provider 'remote_app': { 'client_id': env('FENCE_CLIENT_ID'), # Client Id (Identify Superset application) 'client_secret': env('FENCE_CLIENT_SECRET'), # Secret for this Client Id (Identify Superset application) - 'client_kwargs':{ + 'client_kwargs': { 'scope': 'openid user data' }, 'server_metadata_url': 
env('FENCE_METADATA_URL') @@ -119,7 +119,9 @@ stringData: AUTH_USER_REGISTRATION = True # The default user self registration role - AUTH_USER_REGISTRATION_ROLE = "Public" + AUTH_USER_REGISTRATION_ROLE = 'Public' + + AUTH_USER_REGISTRATION_ROLE_JMESPATH = "contains(['@uchicago.edu'], email) && 'Admin' || 'Public'" superset_init.sh: | @@ -130,14 +132,6 @@ stringData: echo "Initializing roles..." superset init - echo "Creating admin user..." - superset fab create-admin \ - --username admin \ - --firstname Superset \ - --lastname Admin \ - --email admin@superset.com \ - --password admin \ - || true if [ -f "/app/configs/import_datasources.yaml" ]; then echo "Importing database connections.... " @@ -177,7 +171,7 @@ metadata: name: superset labels: app: superset - chart: superset-0.6.1 + chart: superset-0.7.1 release: superset heritage: Helm spec: @@ -214,7 +208,7 @@ metadata: name: superset-worker labels: app: superset-worker - chart: superset-0.6.1 + chart: superset-0.7.1 release: superset heritage: Helm spec: @@ -226,18 +220,20 @@ spec: template: metadata: annotations: - checksum/superset_config.py: 44092ea9a5a0d886f392d2804bb2af8fb356a891351005bee3b868b114036f01 - checksum/connections: a91716d6d1088e870fbe02159dc0b066dd011885aa08a22fbe60ea1cd4720f82 + checksum/superset_config.py: 955c3e88940f522fe4d9ad60d105ab4537e290697d135703c8a01aeb6c1a3d8d + checksum/connections: c44da43c5f3426c3c4a25f3235e3e23452ce1cf713ad059eaef7767e175a5eb4 checksum/extraConfigs: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a checksum/extraSecrets: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a checksum/extraSecretEnv: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a - checksum/configOverrides: fdad02b1a387b3699858d0e2c9bd002705d72ccc3df97e969f1b7ff910b7b352 + checksum/configOverrides: 4b1ff4f862a95242ea509b5dc5f7d87c47faf1815de5ea21a46b3fde8e576bf4 checksum/configOverridesFiles: 
44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a labels: app: superset-worker release: superset netnolimit: "yes" + dbsuperset: "yes" + public: "yes" spec: securityContext: runAsUser: 0 @@ -249,12 +245,12 @@ spec: envFrom: - secretRef: name: 'superset-env' - image: 'busybox:latest' + image: 'busybox:1.35' imagePullPolicy: 'IfNotPresent' name: wait-for-postgres containers: - name: superset - image: "apache/superset:latest" + image: "apache/superset:2.0.0" imagePullPolicy: Always command: ["/bin/sh","-c",". /app/pythonpath/superset_bootstrap.sh; celery --app=superset.tasks.celery_app:app worker"] env: @@ -268,7 +264,9 @@ spec: mountPath: "/app/pythonpath" readOnly: true resources: - {} + limits: + cpu: 1 + memory: 1Gi volumes: - name: superset-config secret: @@ -297,7 +295,7 @@ metadata: name: superset labels: app: superset - chart: superset-0.6.1 + chart: superset-0.7.1 release: superset heritage: Helm spec: @@ -310,19 +308,21 @@ spec: metadata: annotations: # Force reload on config changes - checksum/superset_config.py: 44092ea9a5a0d886f392d2804bb2af8fb356a891351005bee3b868b114036f01 - checksum/superset_init.sh: e6b1e8eac1f7a79a07a6c72a0e2ee6e09654eeb439c6bbe61bfd676917c41e02 + checksum/superset_config.py: 955c3e88940f522fe4d9ad60d105ab4537e290697d135703c8a01aeb6c1a3d8d + checksum/superset_init.sh: ff251d03d362c4a3ff1451d24893d5d12811f67edc84efa39484a84c59c3f883 checksum/superset_bootstrap.sh: a6edf034118d68cef7203cc3181bb6c72b6244cdedf270ee4accc9ae9ff92b2e - checksum/connections: a91716d6d1088e870fbe02159dc0b066dd011885aa08a22fbe60ea1cd4720f82 + checksum/connections: c44da43c5f3426c3c4a25f3235e3e23452ce1cf713ad059eaef7767e175a5eb4 checksum/extraConfigs: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a checksum/extraSecrets: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a checksum/extraSecretEnv: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a - checksum/configOverrides: 
fdad02b1a387b3699858d0e2c9bd002705d72ccc3df97e969f1b7ff910b7b352 + checksum/configOverrides: 4b1ff4f862a95242ea509b5dc5f7d87c47faf1815de5ea21a46b3fde8e576bf4 checksum/configOverridesFiles: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a labels: app: superset release: superset netnolimit: "yes" + dbsuperset: "yes" + public: "yes" spec: securityContext: runAsUser: 0 @@ -334,12 +334,12 @@ spec: envFrom: - secretRef: name: 'superset-env' - image: 'busybox:latest' + image: 'busybox:1.35' imagePullPolicy: 'IfNotPresent' name: wait-for-postgres containers: - name: superset - image: "apache/superset:latest" + image: "apache/superset:2.0.0" imagePullPolicy: Always command: ["/bin/sh","-c",". /app/pythonpath/superset_bootstrap.sh; /usr/bin/run-server.sh"] env: @@ -356,8 +356,28 @@ spec: - name: http containerPort: 8088 protocol: TCP + livenessProbe: + httpGet: + path: /health + port: http + initialDelaySeconds: 15 + timeoutSeconds: 1 + failureThreshold: 3 + periodSeconds: 15 + successThreshold: 1 + readinessProbe: + httpGet: + path: /health + port: http + initialDelaySeconds: 15 + timeoutSeconds: 1 + failureThreshold: 3 + periodSeconds: 15 + successThreshold: 1 resources: - {} + limits: + cpu: '0.25' + memory: 256Mi volumes: - name: superset-config @@ -405,12 +425,12 @@ spec: envFrom: - secretRef: name: 'superset-env' - image: 'busybox:latest' + image: 'busybox:1.35' imagePullPolicy: 'IfNotPresent' name: wait-for-postgres containers: - name: superset-init-db - image: "apache/superset:latest" + image: "apache/superset:2.0.0" envFrom: - secretRef: name: superset-env @@ -421,7 +441,9 @@ spec: readOnly: true command: ["/bin/sh","-c",". /app/pythonpath/superset_bootstrap.sh; . 
/app/pythonpath/superset_init.sh"] resources: - {} + limits: + cpu: '0.25' + memory: 256Mi volumes: - name: superset-config secret: diff --git a/kube/services/superset/superset-redis.yaml b/kube/services/superset/superset-redis.yaml index c1c77f391..875e3030b 100644 --- a/kube/services/superset/superset-redis.yaml +++ b/kube/services/superset/superset-redis.yaml @@ -1,3 +1,4 @@ +--- # Source: superset/charts/redis/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount @@ -185,6 +186,8 @@ metadata: helm.sh/chart: redis-16.3.1 app.kubernetes.io/instance: superset app.kubernetes.io/managed-by: Helm + annotations: + {} spec: type: ClusterIP clusterIP: None @@ -216,6 +219,7 @@ spec: - name: tcp-redis port: 6379 targetPort: redis + nodePort: null selector: app.kubernetes.io/name: redis app.kubernetes.io/instance: superset @@ -256,9 +260,9 @@ spec: app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: master annotations: - checksum/configmap: b765300878bf502421423c6a14a4c4ea2fa093089a9649b698e6301f5c5815b2 - checksum/health: f25ecd3c820553a892da51cdc4ae25d85969f58033bdf527cd9a2bb05fcbcb83 - checksum/scripts: 6a772f276bc7b11f7beaf7add93416a2438de881b4085cf2b9c8e09453cf37e5 + checksum/configmap: 5fb78a3f5ce9ca1af5b7223f9cebe42f832ebc64f37e09a2fc8c8b29bb7101b0 + checksum/health: 2ea27c28e44af78b1d3dc1373aa2ac24ba2b215f788de4a0f0c9e02cbb79c533 + checksum/scripts: c351ebe638f6967b5bc76c2f38c28e2f7f65bc93846a1cd7786e2cbff9d51620 checksum/secret: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 spec: @@ -276,8 +280,6 @@ spec: app.kubernetes.io/name: redis app.kubernetes.io/instance: superset app.kubernetes.io/component: master - namespaces: - - "qa-mickey" topologyKey: kubernetes.io/hostname weight: 1 nodeAffinity: diff --git a/kube/services/superset/superset-secrets-template.yaml b/kube/services/superset/superset-secrets-template.yaml index 5d300d6a6..8a3c7a2a6 100644 --- a/kube/services/superset/superset-secrets-template.yaml +++ 
b/kube/services/superset/superset-secrets-template.yaml @@ -39,4 +39,3 @@ stringData: FENCE_CLIENT_SECRET: $FENCE_CLIENT_SECRET FENCE_METADATA_URL: $FENCE_METADATA_URL SECRET_KEY: $SECRET_KEY ---- From 32257ce69e2c5c53d4873e72c330977d65fef408 Mon Sep 17 00:00:00 2001 From: Spencer Myles Axelrod Date: Fri, 19 Aug 2022 16:26:07 -0700 Subject: [PATCH 086/106] use 'gen3-qa-controller:fix_gen3qa_get_check' img (#2023) --- kube/services/jobs/gen3qa-check-bucket-access-job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/jobs/gen3qa-check-bucket-access-job.yaml b/kube/services/jobs/gen3qa-check-bucket-access-job.yaml index 547689635..c95516ca9 100644 --- a/kube/services/jobs/gen3qa-check-bucket-access-job.yaml +++ b/kube/services/jobs/gen3qa-check-bucket-access-job.yaml @@ -10,7 +10,7 @@ spec: spec: containers: - name: gen3qa-check-bucket-access - GEN3_GEN3_QA_CONTROLLER_IMAGE|-image: quay.io/cdis/gen3-qa-controller:0.5-| + GEN3_GEN3_QA_CONTROLLER_IMAGE|-image: quay.io/cdis/gen3-qa-controller:fix_gen3qa_get_check-| workingDir: /var/sdet_home imagePullPolicy: Always env: From 1929409481b70deaa7033fd9f8df3a2fdcf31ba4 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Mon, 22 Aug 2022 13:29:43 -0600 Subject: [PATCH 087/106] GPE-512 (#2018) * adding a bash script and read me to update kubeconfig. 
* Rely on AWS cli to regnerate valid kubeconfig --- doc/update-kubeconfig.md | 24 ++++++++++++++++++++++++ gen3/bin/kube-setup-workvm.sh | 6 +++--- gen3/bin/update-kubeconfig.sh | 30 ++++++++++++++++++++++++++++++ 3 files changed, 57 insertions(+), 3 deletions(-) create mode 100644 doc/update-kubeconfig.md create mode 100644 gen3/bin/update-kubeconfig.sh diff --git a/doc/update-kubeconfig.md b/doc/update-kubeconfig.md new file mode 100644 index 000000000..0aec8f7ac --- /dev/null +++ b/doc/update-kubeconfig.md @@ -0,0 +1,24 @@ +# TL;DR + +kubectl 1.24.0 introduces a breaking change, so the older kubeconfig doesn't work anymore. + +https://github.com/aws/aws-cli/issues/6920 + +Updates Kubeconfig API version, args, and command to get rid of the following error: +error: exec plugin: invalid apiVersion "client.authentication.k8s.io/v1alpha1" + +This error occurs when the client kubectl version is updated and the kubeconfig remains the same. + +This requires AWS cli v2.7.0 or higher. + +## Use + +### Run +``` +gen3 update-kubeconfig +``` + + +This command backs up existing kubeconfig file and regenerates a valid kubeconfig file using AWS cli. Also persists the current namespace to the context. + + diff --git a/gen3/bin/kube-setup-workvm.sh b/gen3/bin/kube-setup-workvm.sh index 206e00cf0..4b47be0fa 100644 --- a/gen3/bin/kube-setup-workvm.sh +++ b/gen3/bin/kube-setup-workvm.sh @@ -40,14 +40,14 @@ if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then if aws --version; then version="$(aws --version | awk '{ print $1 }' | awk -F / '{ print $2 }')" fi - if semver_ge "$version" "2.1.15"; then + if semver_ge "$version" "2.7.0"; then gen3_log_info "awscli up to date" return 0 fi # update to latest version ( # subshell export DEBIAN_FRONTEND=noninteractive - if [[ -f /usr/local/bin/aws ]] && ! semver_ge "$version" "2.0.0"; then + if [[ -f /usr/local/bin/aws ]] && ! 
semver_ge "$version" "2.7.0"; then sudo rm /usr/local/bin/aws fi cd $HOME @@ -56,7 +56,7 @@ if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then cd $temp_dir curl -o awscli.zip https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip unzip awscli.zip - if semver_ge "$version" "2.0.0"; then + if semver_ge "$version" "2.7.0"; then yes | sudo ./aws/install --update else yes | sudo ./aws/install diff --git a/gen3/bin/update-kubeconfig.sh b/gen3/bin/update-kubeconfig.sh new file mode 100644 index 000000000..a7cdb7bae --- /dev/null +++ b/gen3/bin/update-kubeconfig.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/gen3setup" + +cd "${HOME}/Gen3Secrets/" + +aws_version="0.0.0" +if aws --version 2>&1 > /dev/null; then + aws_version="$(aws --version | awk '{ print $1 }' | awk -F / '{ print $2 }')" +fi +if ! semver_ge "$aws_version" "2.7.0"; then + gen3_log_err "awscli is on version $aws_version. Please update to latest version before running this command again. \nHint: 'gen3 kube-setup-workvm' can take care of that for you." + exit 0 +fi + +namespace=$(gen3 api namespace) + +if [ ! -z "$KUBECONFIG" ]; then + gen3_log_info "Backing up existing kubeconfig located at $KUBECONFIG" + mv "$KUBECONFIG" "$KUBECONFIG.backup" +else + gen3_log_warn "KUBECONFIG env var is not set. Cannot take backup of existing kubeconfig." +fi + +gen3_log_info "Updating kubeconfig by running 'aws eks update-kubeconfig --name $(gen3 api environment)'" +aws eks update-kubeconfig --name $(gen3 api environment) + +gen3_log_info "Setting namespace to $namespace. 
('kubectl config set-context --current --namespace=$namespace')" +kubectl config set-context --current --namespace=$namespace \ No newline at end of file From 649023fb5b03688a2a73063b5697755cf54bd351 Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Tue, 23 Aug 2022 07:31:29 -0700 Subject: [PATCH 088/106] Update thor-deploy.yaml (#2028) --- kube/services/thor/thor-deploy.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/thor/thor-deploy.yaml b/kube/services/thor/thor-deploy.yaml index ac4649352..5e86e73c6 100644 --- a/kube/services/thor/thor-deploy.yaml +++ b/kube/services/thor/thor-deploy.yaml @@ -34,7 +34,7 @@ spec: automountServiceAccountToken: false containers: - name: thor - image: quay.io/cdis/thor:master + GEN3_THOR_IMAGE env: - name: RUNNING_IN_QAPLANETV1 value: "true" From 6b95f1128c33071780dca0d2c5c18571da9dcc1a Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Fri, 26 Aug 2022 14:32:09 -0700 Subject: [PATCH 089/106] add env var to thor deploy (#2031) --- kube/services/thor/thor-deploy.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kube/services/thor/thor-deploy.yaml b/kube/services/thor/thor-deploy.yaml index 5e86e73c6..00e57076c 100644 --- a/kube/services/thor/thor-deploy.yaml +++ b/kube/services/thor/thor-deploy.yaml @@ -65,6 +65,11 @@ spec: secretKeyRef: name: thor-g3auto key: "slacktoken.json" + - name: SLACK_WEBHOOK + valueFrom: + secretKeyRef: + name: thor-g3auto + key: "slack_webhook.json" imagePullPolicy: Always volumeMounts: - name: config-volume-g3auto From eeb7fd85928ab17682117cc2eab3706e88498206 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Thu, 1 Sep 2022 11:39:56 -0500 Subject: [PATCH 090/106] =?UTF-8?q?fix(tf-tests):=20Updated=20image=20sear?= =?UTF-8?q?ch=20critera=20to=20match=20submodule=20and=20fi=E2=80=A6=20(#2?= =?UTF-8?q?034)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(tf-tests): Updated image search critera to match submodule and 
fix tests * fix(tf-tests): Updated image search critera to match submodule and fix tests Co-authored-by: Edward Malinowski --- kube/services/jobs/cogwheel-register-client-job.yaml | 2 +- tf_files/aws/utility_vm/variables.tf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/kube/services/jobs/cogwheel-register-client-job.yaml b/kube/services/jobs/cogwheel-register-client-job.yaml index 3458ef163..03461619b 100644 --- a/kube/services/jobs/cogwheel-register-client-job.yaml +++ b/kube/services/jobs/cogwheel-register-client-job.yaml @@ -23,7 +23,7 @@ spec: secretName: cogwheel-g3auto containers: - name: cogwheel - GEN3_COGWHEEL_IMAGE + GEN3_COGWHEEL_IMAGE|-image: quay.io/cdis/cogwheel:master-| imagePullPolicy: Always env: - name: HOSTNAME diff --git a/tf_files/aws/utility_vm/variables.tf b/tf_files/aws/utility_vm/variables.tf index 605b7de85..cbd9580cf 100644 --- a/tf_files/aws/utility_vm/variables.tf +++ b/tf_files/aws/utility_vm/variables.tf @@ -38,7 +38,7 @@ variable "instance_type" { } variable "image_name_search_criteria" { - default = "ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-2018*" + default = "ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*" } variable "extra_vars" { From 20ae6b054374a19cff5ed12bb11b7c3be312e16c Mon Sep 17 00:00:00 2001 From: jawadqur <55899496+jawadqur@users.noreply.github.com> Date: Thu, 1 Sep 2022 14:33:15 -0500 Subject: [PATCH 091/106] Update update-kubeconfig.sh (#2036) --- gen3/bin/update-kubeconfig.sh | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/gen3/bin/update-kubeconfig.sh b/gen3/bin/update-kubeconfig.sh index a7cdb7bae..0da60bcb7 100644 --- a/gen3/bin/update-kubeconfig.sh +++ b/gen3/bin/update-kubeconfig.sh @@ -17,14 +17,16 @@ fi namespace=$(gen3 api namespace) if [ ! 
-z "$KUBECONFIG" ]; then - gen3_log_info "Backing up existing kubeconfig located at $KUBECONFIG" - mv "$KUBECONFIG" "$KUBECONFIG.backup" + if [ -f "$FILE" ]; then + gen3_log_info "Backing up existing kubeconfig located at $KUBECONFIG" + mv "$KUBECONFIG" "$KUBECONFIG.backup" + fi else gen3_log_warn "KUBECONFIG env var is not set. Cannot take backup of existing kubeconfig." fi -gen3_log_info "Updating kubeconfig by running 'aws eks update-kubeconfig --name $(gen3 api environment)'" -aws eks update-kubeconfig --name $(gen3 api environment) +gen3_log_info "Updating kubeconfig by running 'aws eks update-kubeconfig --name $vpc_name'" +aws eks update-kubeconfig --name $vpc_name gen3_log_info "Setting namespace to $namespace. ('kubectl config set-context --current --namespace=$namespace')" -kubectl config set-context --current --namespace=$namespace \ No newline at end of file +kubectl config set-context --current --namespace=$namespace From ba4026b4404f6970d525d42b73b911f7c8ceec77 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Thu, 1 Sep 2022 16:28:08 -0500 Subject: [PATCH 092/106] fix(kube-dns-autoscaler): Updated kube-dns-autoscaler to work with newer k8 version (#2035) Co-authored-by: Edward Malinowski --- gen3/bin/kube-setup-system-services.sh | 4 ++++ .../kube-dns-autoscaler/dns-horizontal-autoscaler.yaml | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/gen3/bin/kube-setup-system-services.sh b/gen3/bin/kube-setup-system-services.sh index 4321c6cf3..7a75a33f8 100644 --- a/gen3/bin/kube-setup-system-services.sh +++ b/gen3/bin/kube-setup-system-services.sh @@ -18,6 +18,7 @@ gen3_load "gen3/gen3setup" kubeproxy=${kubeproxy:-1.16.13} coredns=${coredns:-1.6.6} +kubednsautoscaler=${kubednsautoscaler:-1.8.6} cni=${cni:-1.11.0} calico=${calico:-1.7.8} @@ -32,11 +33,13 @@ done kube_proxy_image="602401143452.dkr.ecr.us-east-1.amazonaws.com/eks/kube-proxy:v${kubeproxy}-eksbuild.1" 
coredns_image="602401143452.dkr.ecr.us-east-1.amazonaws.com/eks/coredns:v${coredns}" +kubednsautoscaler_image="k8s.gcr.io/cpa/cluster-proportional-autoscaler:${kubednsautoscaler}" cni_image="https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v${cni}/config/master/aws-k8s-cni.yaml" calico_yaml="https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v${calico}/config/master/calico.yaml" g3kubectl set image daemonset.apps/kube-proxy -n kube-system kube-proxy=${kube_proxy_image} g3kubectl set image --namespace kube-system deployment.apps/coredns coredns=${coredns_image} +g3k_kv_filter "${GEN3_HOME}/kube/services/kube-dns-autoscaler/dns-horizontal-autoscaler.yaml" SERVICE "coredns" IMAGE "$kubednsautoscaler_image" | g3kubectl apply -f - g3kubectl apply -f ${cni_image} g3kubectl apply -f ${calico_yaml} @@ -68,3 +71,4 @@ g3kubectl apply -f ${calico_yaml} envsubst < $GEN3_HOME/kube/services/kube-proxy/kube-proxy-daemonset.yaml > $tempFile g3kubectl apply -f $tempFile ) + diff --git a/kube/services/kube-dns-autoscaler/dns-horizontal-autoscaler.yaml b/kube/services/kube-dns-autoscaler/dns-horizontal-autoscaler.yaml index 1235c1d2b..c0d4ba8ff 100644 --- a/kube/services/kube-dns-autoscaler/dns-horizontal-autoscaler.yaml +++ b/kube/services/kube-dns-autoscaler/dns-horizontal-autoscaler.yaml @@ -33,7 +33,7 @@ rules: - apiGroups: [""] resources: ["replicationcontrollers/scale"] verbs: ["get", "update"] - - apiGroups: ["extensions"] + - apiGroups: ["extensions","apps"] resources: ["deployments/scale", "replicasets/scale"] verbs: ["get", "update"] # Remove the configmaps rule once below issue is fixed: @@ -82,7 +82,7 @@ spec: priorityClassName: system-cluster-critical containers: - name: autoscaler - image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.2.0 + image: IMAGE|-k8s.gcr.io/cpa/cluster-proportional-autoscaler:1.8.6-| resources: requests: cpu: "20m" From b61f3a701c247beaecf6e737a92d73f12e07a681 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Fri, 2 Sep 2022 
06:16:35 -0500 Subject: [PATCH 093/106] chore(awshelper-psql13): Updated awshelper image to run psql13 client (#2024) * chore(awshelper-psql13): Updated awshelper image to run psql13 client * chore(awshelper-psql13): Updated awshelper image to run psql13 client * chore(awshelper-psql13): Updated awshelper image to run psql13 client Co-authored-by: Edward Malinowski Co-authored-by: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> --- Docker/awshelper/Dockerfile | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/Docker/awshelper/Dockerfile b/Docker/awshelper/Dockerfile index 4b7437ed6..961dd8cd4 100644 --- a/Docker/awshelper/Dockerfile +++ b/Docker/awshelper/Dockerfile @@ -26,7 +26,6 @@ RUN apt-get update && apt-get upgrade -y \ net-tools \ openssh-client \ openssh-server \ - postgresql-client \ python3 \ python3-dev \ python3-pip \ @@ -70,6 +69,12 @@ RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* /var/log/* +# Install postgres 13 client +RUN curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc| gpg --dearmor -o /etc/apt/trusted.gpg.d/postgresql.gpg && \ + echo "deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -cs`-pgdg main" | tee /etc/apt/sources.list.d/pgdg.list && \ + apt-get update && \ + apt-get install -y postgresql-client-13 + RUN useradd -m -s /bin/bash ubuntu && \ ( echo "ubuntu:gen3" | chpasswd ) From 15824f7e342aef93d28f8d93ffed985331c510ed Mon Sep 17 00:00:00 2001 From: emalinowski Date: Fri, 2 Sep 2022 06:17:06 -0500 Subject: [PATCH 094/106] fix(squid-healthcheck): Fixed typo to allow for healtcheck script to properly run (#2032) Co-authored-by: Edward Malinowski --- flavors/squid_auto/squid_running_on_docker.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flavors/squid_auto/squid_running_on_docker.sh b/flavors/squid_auto/squid_running_on_docker.sh index fda884743..05607f304 100644 --- 
a/flavors/squid_auto/squid_running_on_docker.sh +++ b/flavors/squid_auto/squid_running_on_docker.sh @@ -176,7 +176,7 @@ EOF # Copy the updatewhitelist.sh script to the home directory cp ${SUB_FOLDER}/flavors/squid_auto/updatewhitelist-docker.sh ${HOME_FOLDER}/updatewhitelist.sh chmod +x ${HOME_FOLDER}/updatewhitelist.sh - cp ${SUB_FOLDER}/flavors/squid_auto/healthcheck.sh ${HOME_FOLDER}/healtcheck.sh + cp ${SUB_FOLDER}/flavors/squid_auto/healthcheck.sh ${HOME_FOLDER}/healthcheck.sh chmod +x ${HOME_FOLDER}/healthcheck.sh crontab -l > crontab_file; echo "*/15 * * * * ${HOME_FOLDER}/updatewhitelist.sh >/dev/null 2>&1" >> crontab_file From 3f856e52cc88315b73426dafb2df70218ee05418 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Tue, 6 Sep 2022 10:55:27 -0500 Subject: [PATCH 095/106] feat(opencost-setup): Added opencost config (#2014) * feat(opencost-setup): Added opencost config * feat(opencost-setup): Added opencost config * feat(opencost-setup): Added opencost config * feat(opencost-setup): Added opencost config * feat(opencost-setup): Added opencost config * feat(opencost-setup): Added opencost config * feat(opencost-setup): Added opencost config * feat(opencost-setup): Added opencost config * feat(opencost-setup): Added opencost config * feat(opencost-setup): Added opencost config Co-authored-by: Edward Malinowski --- gen3/bin/kube-setup-kubecost.sh | 32 +- gen3/bin/kube-setup-prometheus.sh | 37 +- .../kube-proxy/kube-proxy-daemonset.yaml | 5 + .../kubecost-master/kubecost-alb.yaml | 2 +- .../kubecost-standalone/kubecost-alb.yaml | 2 +- .../kubecost-cost-analyzer-service.yaml | 20 + .../kubecost-standalone/object-store.yaml | 5 +- .../kubecost-standalone/thanos-deploy.yaml | 221 ++ kube/services/kubecost-standalone/values.yaml | 20 +- kube/services/monitoring/thanos-deploy.yaml | 220 ++ .../thanos.yaml} | 9 +- kube/services/monitoring/values.yaml | 3356 +++++++++++++++++ .../gen3.nginx.conf/kubecost-service.conf | 33 + 13 files changed, 3924 insertions(+), 38 
deletions(-) create mode 100644 kube/services/kubecost-standalone/kubecost-cost-analyzer-service.yaml create mode 100644 kube/services/kubecost-standalone/thanos-deploy.yaml create mode 100644 kube/services/monitoring/thanos-deploy.yaml rename kube/services/{kubecost-slave/object-store.yaml => monitoring/thanos.yaml} (71%) create mode 100644 kube/services/monitoring/values.yaml create mode 100644 kube/services/revproxy/gen3.nginx.conf/kubecost-service.conf diff --git a/gen3/bin/kube-setup-kubecost.sh b/gen3/bin/kube-setup-kubecost.sh index bcdf8854d..1514e8b21 100644 --- a/gen3/bin/kube-setup-kubecost.sh +++ b/gen3/bin/kube-setup-kubecost.sh @@ -38,11 +38,11 @@ gen3_setup_kubecost_service_account() { aws iam attach-role-policy --role-name "$roleName" --policy-arn "arn:aws:iam::$accountID:policy/$vpc_name-Kubecost-CUR-policy" 1>&2 #gen3 awsrole sa-annotate "$saName" "$roleName" "kubecost" kubectl delete sa -n kubecost $saName - thanosRoleName="$vpc_name-thanos-user" - thanosSaName="thanos-service-account" - gen3 awsrole create "$thanosRoleName" "$thanosSaName" "kubecost" || return 1 - aws iam attach-role-policy --role-name "$thanosRoleName" --policy-arn "arn:aws:iam::$accountID:policy/$vpc_name-Kubecost-Thanos-policy" 1>&2 - gen3 awsrole sa-annotate "$thanosSaName" "$thanosRoleName" "kubecost" + #thanosRoleName="$vpc_name-thanos-user" + #thanosSaName="thanos-service-account" + #gen3 awsrole create "$thanosRoleName" "$thanosSaName" "kubecost" || return 1 + #aws iam attach-role-policy --role-name "$thanosRoleName" --policy-arn "arn:aws:iam::$accountID:policy/$vpc_name-Kubecost-Thanos-policy" 1>&2 + #gen3 awsrole sa-annotate "$thanosSaName" "$thanosRoleName" "kubecost" } gen3_delete_kubecost_service_account() { @@ -95,23 +95,27 @@ gen3_setup_kubecost() { else valuesFile="$XDG_RUNTIME_DIR/values_$$.yaml" valuesTemplate="${GEN3_HOME}/kube/services/kubecost-standalone/values.yaml" - thanosValuesFile="$XDG_RUNTIME_DIR/object-store.yaml" - 
thanosValuesTemplate="${GEN3_HOME}/kube/services/kubecost-standalone/object-store.yaml" + #thanosValuesFile="$XDG_RUNTIME_DIR/object-store.yaml" + #thanosValuesTemplate="${GEN3_HOME}/kube/services/kubecost-standalone/object-store.yaml" g3k_kv_filter $valuesTemplate KUBECOST_TOKEN "${kubecostToken}" KUBECOST_SA "eks.amazonaws.com/role-arn: arn:aws:iam::$accountID:role/gen3_service/$roleName" THANOS_SA "$thanosSaName" ATHENA_BUCKET "s3://$s3Bucket" ATHENA_DATABASE "athenacurcfn_$vpc_name" ATHENA_TABLE "${vpc_name}_cur" AWS_ACCOUNT_ID "$accountID" AWS_REGION "$awsRegion" > $valuesFile gen3_kubecost_create_alb fi - kubectl delete secret -n kubecost kubecost-thanos || true - kubectl delete secret -n kubecost thanos || true - g3k_kv_filter $thanosValuesTemplate AWS_REGION $awsRegion KUBECOST_S3_BUCKET $s3Bucket > $thanosValuesFile - kubectl create secret generic kubecost-thanos -n kubecost --from-file=$thanosValuesFile - kubectl create secret generic thanos -n kubecost --from-file=$thanosValuesFile + #kubectl delete secret -n kubecost kubecost-thanos || true + #kubectl delete secret -n kubecost thanos || true + #g3k_kv_filter $thanosValuesTemplate AWS_REGION $awsRegion KUBECOST_S3_BUCKET $s3Bucket > $thanosValuesFile + #kubectl create secret generic kubecost-thanos -n kubecost --from-file=$thanosValuesFile + #kubectl create secret generic thanos -n kubecost --from-file=$thanosValuesFile # Need to setup thanos config + gen3 kube-setup-certs + gen3 kube-setup-prometheus + g3kubectl delete secret -n kubecost cert-kubecost-cost-analyzer || true + g3kubectl create secret generic "cert-kubecost-cost-analyzer" "--from-file=tls.crt=$(gen3_secrets_folder)/credentials/kubecost-cost-analyzer-service.crt" "--from-file=tls.key=$(gen3_secrets_folder)/credentials/kubecost-cost-analyzer-service.key" -n kubecost || true helm repo add kubecost https://kubecost.github.io/cost-analyzer/ --force-update 2> >(grep -v 'This is insecure' >&2) helm repo update 2> >(grep -v 'This is insecure' >&2) 
if [[ -z $disablePrometheus ]]; then - helm upgrade --install kubecost kubecost/cost-analyzer -n kubecost -f ${valuesFile} -f https://raw.githubusercontent.com/kubecost/cost-analyzer-helm-chart/develop/cost-analyzer/values-thanos.yaml + helm upgrade --install kubecost kubecost/cost-analyzer -n kubecost -f ${valuesFile} else - helm upgrade --install kubecost kubecost/cost-analyzer -n kubecost -f ${valuesFile} -f https://raw.githubusercontent.com/kubecost/cost-analyzer-helm-chart/develop/cost-analyzer/values-thanos.yaml --set prometheus.fqdn=http://$prometheusService.$prometheusNamespace.svc --set prometheus.enabled=false + helm upgrade --install kubecost kubecost/cost-analyzer -n kubecost -f ${valuesFile} fi else gen3_log_info "kube-setup-kubecost exiting - kubecost already deployed, use --force true to redeploy" diff --git a/gen3/bin/kube-setup-prometheus.sh b/gen3/bin/kube-setup-prometheus.sh index 00b4ee9b8..848c33389 100644 --- a/gen3/bin/kube-setup-prometheus.sh +++ b/gen3/bin/kube-setup-prometheus.sh @@ -17,7 +17,8 @@ function helm_repository() { if ! helm repo list > /dev/null 2>&1; then # helm3 has no default repo, need to add it manually - helm repo add stable https://charts.helm.sh/stable --force-update + #helm repo add stable https://charts.helm.sh/stable --force-update + helm repo add prometheus-community https://prometheus-community.github.io/helm-charts helm repo update fi } @@ -55,21 +56,23 @@ function deploy_prometheus() # but we only have one prometheus. # helm_repository - if (! g3kubectl --namespace=prometheus get deployment prometheus-server > /dev/null 2>&1) || [[ "$1" == "--force" ]]; then - if (! g3kubectl get namespace prometheus > /dev/null 2>&1); + if (! g3kubectl --namespace=monitoring get deployment prometheus-server > /dev/null 2>&1) || [[ "$1" == "--force" ]]; then + if (! 
g3kubectl get namespace monitoring> /dev/null 2>&1); then - g3kubectl create namespace prometheus - g3kubectl label namespace prometheus app=prometheus + g3kubectl create namespace monitoring + g3kubectl label namespace namespace app=prometheus fi - if (g3kubectl --namespace=prometheus get deployment prometheus-server > /dev/null 2>&1); + if (g3kubectl --namespace=monitoring get deployment prometheus-server > /dev/null 2>&1); then - delete_prometheus + #delete_prometheus + echo "skipping delete" fi if ! g3kubectl get storageclass prometheus > /dev/null 2>&1; then g3kubectl apply -f "${GEN3_HOME}/kube/services/monitoring/prometheus-storageclass.yaml" fi - gen3 arun helm upgrade --install prometheus stable/prometheus --namespace prometheus -f "${GEN3_HOME}/kube/services/monitoring/prometheus-values.yaml" + deploy_thanos + gen3 arun helm upgrade --install prometheus prometheus-community/kube-prometheus-stack --namespace monitoring -f "${GEN3_HOME}/kube/services/monitoring/values.yaml" else gen3_log_info "Prometheus is already installed, use --force to try redeploying" fi @@ -107,6 +110,24 @@ function deploy_grafana() fi } +function deploy_thanos() { + if [[ -z $vpc_name ]]; then + local vpc_name="$(gen3 api environment)" + fi + roleName="$vpc_name-thanos-role" + saName="thanos" + bucketName="$vpc_name-thanos-bucket" + gen3 s3 create "$bucketName" + gen3 awsrole create "$roleName" "$saName" "monitoring" || return 1 + gen3 s3 attach-bucket-policy "$bucketName" --read-write --role-name ${roleName} || true + thanosValuesFile="$XDG_RUNTIME_DIR/thanos.yaml" + thanosValuesTemplate="${GEN3_HOME}/kube/services/monitoring/thanos.yaml" + g3k_kv_filter $thanosValuesTemplate S3_BUCKET $bucketName > $thanosValuesFile + g3kubectl delete secret -n monitoring thanos-objstore-config || true + g3kubectl create secret generic -n monitoring thanos-objstore-config --from-file="$thanosValuesFile" + g3kubectl apply -f "${GEN3_HOME}/kube/services/monitoring/thanos-deploy.yaml" +} + command="" 
if [[ $# -gt 0 && ! "$1" =~ ^-*force ]]; then command="$1" diff --git a/kube/services/kube-proxy/kube-proxy-daemonset.yaml b/kube/services/kube-proxy/kube-proxy-daemonset.yaml index 3e32f0bc5..13672e72b 100644 --- a/kube/services/kube-proxy/kube-proxy-daemonset.yaml +++ b/kube/services/kube-proxy/kube-proxy-daemonset.yaml @@ -150,6 +150,11 @@ spec: containers: - name: kube-proxy image: ${kube_proxy_image} + ports: + - containerPort: 10249 + hostPort: 10249 + name: metrics + protocol: TCP resources: requests: cpu: 100m diff --git a/kube/services/kubecost-master/kubecost-alb.yaml b/kube/services/kubecost-master/kubecost-alb.yaml index 9a0fc4ef7..24fbe7edc 100644 --- a/kube/services/kubecost-master/kubecost-alb.yaml +++ b/kube/services/kubecost-master/kubecost-alb.yaml @@ -16,4 +16,4 @@ spec: service: name: kubecost-cost-analyzer port: - number: 9090 \ No newline at end of file + number: 443 \ No newline at end of file diff --git a/kube/services/kubecost-standalone/kubecost-alb.yaml b/kube/services/kubecost-standalone/kubecost-alb.yaml index 9a0fc4ef7..24fbe7edc 100644 --- a/kube/services/kubecost-standalone/kubecost-alb.yaml +++ b/kube/services/kubecost-standalone/kubecost-alb.yaml @@ -16,4 +16,4 @@ spec: service: name: kubecost-cost-analyzer port: - number: 9090 \ No newline at end of file + number: 443 \ No newline at end of file diff --git a/kube/services/kubecost-standalone/kubecost-cost-analyzer-service.yaml b/kube/services/kubecost-standalone/kubecost-cost-analyzer-service.yaml new file mode 100644 index 000000000..3f4db5944 --- /dev/null +++ b/kube/services/kubecost-standalone/kubecost-cost-analyzer-service.yaml @@ -0,0 +1,20 @@ +kind: Service +apiVersion: v1 +metadata: + name: kubecost-cost-analyzer-service +spec: + selector: + app: indexd + release: production + ports: + - protocol: TCP + port: 80 + targetPort: 80 + name: http + nodePort: null + - protocol: TCP + port: 443 + targetPort: 443 + name: https + nodePort: null + type: ClusterIP \ No newline at end 
of file diff --git a/kube/services/kubecost-standalone/object-store.yaml b/kube/services/kubecost-standalone/object-store.yaml index bcfadc752..bd38ac3e9 100644 --- a/kube/services/kubecost-standalone/object-store.yaml +++ b/kube/services/kubecost-standalone/object-store.yaml @@ -4,6 +4,7 @@ config: endpoint: "s3.amazonaws.com" region: AWS_REGION insecure: false + aws_sdk_auth: true signature_version2: false put_user_metadata: "X-Amz-Acl": "bucket-owner-full-control" @@ -13,4 +14,6 @@ config: insecure_skip_verify: false trace: enable: true - part_size: 134217728 \ No newline at end of file + part_size: 134217728 + sse_config: + type: "SSE-S3" \ No newline at end of file diff --git a/kube/services/kubecost-standalone/thanos-deploy.yaml b/kube/services/kubecost-standalone/thanos-deploy.yaml new file mode 100644 index 000000000..8c9493d60 --- /dev/null +++ b/kube/services/kubecost-standalone/thanos-deploy.yaml @@ -0,0 +1,221 @@ +--- +# querier-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: thanos-query + namespace: monitoring + labels: + app: thanos-query +spec: + replicas: 1 + selector: + matchLabels: + app: thanos-query + template: + metadata: + labels: + app: thanos-query + spec: + containers: + - name: thanos-query + image: quay.io/thanos/thanos:v0.23.0 + args: + - 'query' + - '--log.level=debug' + - '--query.replica-label=prometheus_replica' + - '--store=prometheus-kube-prometheus-thanos-discovery.monitoring.svc:10901' + resources: + requests: + cpu: '100m' + memory: '64Mi' + limits: + cpu: '250m' + memory: '256Mi' + ports: + - name: http + containerPort: 10902 + - name: grpc + containerPort: 10901 + - name: cluster + containerPort: 10900 + +--- +# querier-service-servicemonitor.yaml +apiVersion: v1 +kind: Service +metadata: + name: thanos-query + labels: + app: thanos-query + release: prometheus-operator + jobLabel: thanos + namespace: monitoring +spec: + selector: + app: thanos-query + ports: + - port: 9090 + protocol: TCP + 
targetPort: http + name: http-query +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: prom-thanos-query + namespace: monitoring +spec: + jobLabel: thanos + selector: + matchLabels: + app: thanos-query + namespaceSelector: + matchNames: + - 'monitoring' + endpoints: + - port: http-query + path: /metrics + interval: 5s + +--- +# store-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: thanos-store + namespace: monitoring + labels: + app: thanos-store +spec: + serviceName: 'thanos-store' + replicas: 1 + selector: + matchLabels: + app: thanos-store + template: + metadata: + labels: + app: thanos-store + spec: + containers: + - name: thanos-store + image: quay.io/thanos/thanos:v0.23.0 + args: + - 'store' + - '--log.level=debug' + - '--data-dir=/var/thanos/store' + - '--objstore.config-file=/config/thanos.yaml' + ports: + - name: http + containerPort: 10902 + - name: grpc + containerPort: 10901 + - name: cluster + containerPort: 10900 + volumeMounts: + - name: config + mountPath: /config/ + readOnly: true + - name: data + mountPath: /var/thanos/store + volumes: + - name: data + emptyDir: {} + - name: config + secret: + secretName: thanos-objstore-config +--- +# store-servicemonitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: thanos-store + namespace: monitoring + labels: + release: prom-op +spec: + jobLabel: thanos + endpoints: + - port: http + path: /metrics + interval: 30s + selector: + matchLabels: + app: thanos-store +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: thanos-compactor + namespace: monitoring + labels: + app: thanos-compactor +spec: + serviceName: 'thanos-compactor' + replicas: 1 + selector: + matchLabels: + app: thanos-compactor + template: + metadata: + labels: + app: thanos-compactor + spec: + containers: + - name: thanos-compactor + image: quay.io/thanos/thanos:v0.23.0 + args: + - 'compact' + - '--log.level=debug' + - 
'--data-dir=/var/thanos/store' + - '--objstore.config-file=/config/thanos.yaml' + - '--wait' + ports: + - name: http + containerPort: 10902 + volumeMounts: + - name: config + mountPath: /config/ + readOnly: true + - name: data + mountPath: /var/thanos/store + volumes: + - name: data + emptyDir: {} + - name: config + secret: + secretName: thanos-objstore-config +--- +# compactor-service-servicemonitor.yaml +apiVersion: v1 +kind: Service +metadata: + name: thanos-compactor + labels: + app: thanos-compactor + namespace: monitoring +spec: + selector: + app: thanos-compactor + ports: + - port: 10902 + name: http +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: thanos-compactor + namespace: monitoring + labels: + release: prom-op +spec: + jobLabel: thanos + endpoints: + - port: http + path: /metrics + interval: 30s + selector: + matchLabels: + app: thanos-compactor + diff --git a/kube/services/kubecost-standalone/values.yaml b/kube/services/kubecost-standalone/values.yaml index cd9117f85..6235dee4b 100644 --- a/kube/services/kubecost-standalone/values.yaml +++ b/kube/services/kubecost-standalone/values.yaml @@ -2,6 +2,11 @@ kubecostToken: KUBECOST_TOKEN +global: + prometheus: + enabled: false + fqdn: http://prometheus-operated.monitoring.svc:9090 + serviceAccount: create: true # Set this to false if you're bringing your own service account. 
annotations: @@ -19,17 +24,12 @@ kubecostProductConfigs: # awsSpotDataRegion: AWS_kubecostProductConfigs_awsSpotDataRegion # awsSpotDataBucket: AWS_kubecostProductConfigs_awsSpotDataBucket -prometheus: - serviceAccounts: - server: - create: false - name: "THANOS_SA" +kubecostFrontend: + tls: + enabled: true + secretName: "cert-kubecost-cost-analyzer" + networkCosts: enabled: true -thanos: - store: - serviceAccount: "THANOS_SA" - compact: - serviceAccount: "THANOS_SA" \ No newline at end of file diff --git a/kube/services/monitoring/thanos-deploy.yaml b/kube/services/monitoring/thanos-deploy.yaml new file mode 100644 index 000000000..74c98dc19 --- /dev/null +++ b/kube/services/monitoring/thanos-deploy.yaml @@ -0,0 +1,220 @@ +--- +# querier-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: thanos-query + namespace: monitoring + labels: + app: thanos-query +spec: + replicas: 1 + selector: + matchLabels: + app: thanos-query + template: + metadata: + labels: + app: thanos-query + spec: + containers: + - name: thanos-query + image: quay.io/thanos/thanos:v0.23.0 + args: + - 'query' + - '--log.level=debug' + - '--query.replica-label=prometheus_replica' + - '--store=prometheus-kube-prometheus-thanos-discovery.monitoring.svc:10901' + resources: + requests: + cpu: '100m' + memory: '64Mi' + limits: + cpu: '250m' + memory: '256Mi' + ports: + - name: http + containerPort: 10902 + - name: grpc + containerPort: 10901 + - name: cluster + containerPort: 10900 + +--- +# querier-service-servicemonitor.yaml +apiVersion: v1 +kind: Service +metadata: + name: thanos-query + labels: + app: thanos-query + release: prometheus-operator + jobLabel: thanos + namespace: monitoring +spec: + selector: + app: thanos-query + ports: + - port: 9090 + protocol: TCP + targetPort: http + name: http-query +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: prom-thanos-query + namespace: monitoring +spec: + jobLabel: thanos + selector: + matchLabels: 
+ app: thanos-query + namespaceSelector: + matchNames: + - 'monitoring' + endpoints: + - port: http-query + path: /metrics + interval: 5s + +--- +# store-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: thanos-store + namespace: monitoring + labels: + app: thanos-store +spec: + serviceName: 'thanos-store' + replicas: 1 + selector: + matchLabels: + app: thanos-store + template: + metadata: + labels: + app: thanos-store + spec: + containers: + - name: thanos-store + image: quay.io/thanos/thanos:v0.23.0 + args: + - 'store' + - '--log.level=debug' + - '--data-dir=/var/thanos/store' + - '--objstore.config-file=/config/thanos.yaml' + ports: + - name: http + containerPort: 10902 + - name: grpc + containerPort: 10901 + - name: cluster + containerPort: 10900 + volumeMounts: + - name: config + mountPath: /config/ + readOnly: true + - name: data + mountPath: /var/thanos/store + volumes: + - name: data + emptyDir: {} + - name: config + secret: + secretName: thanos-objstore-config +--- +# store-servicemonitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: thanos-store + namespace: monitoring + labels: + release: prom-op +spec: + jobLabel: thanos + endpoints: + - port: http + path: /metrics + interval: 30s + selector: + matchLabels: + app: thanos-store +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: thanos-compactor + namespace: monitoring + labels: + app: thanos-compactor +spec: + serviceName: 'thanos-compactor' + replicas: 1 + selector: + matchLabels: + app: thanos-compactor + template: + metadata: + labels: + app: thanos-compactor + spec: + containers: + - name: thanos-compactor + image: quay.io/thanos/thanos:v0.23.0 + args: + - 'compact' + - '--log.level=debug' + - '--data-dir=/var/thanos/store' + - '--objstore.config-file=/config/thanos.yaml' + - '--wait' + ports: + - name: http + containerPort: 10902 + volumeMounts: + - name: config + mountPath: /config/ + readOnly: true + - name: data + 
mountPath: /var/thanos/store + volumes: + - name: data + emptyDir: {} + - name: config + secret: + secretName: thanos-objstore-config +--- +# compactor-service-servicemonitor.yaml +apiVersion: v1 +kind: Service +metadata: + name: thanos-compactor + labels: + app: thanos-compactor + namespace: monitoring +spec: + selector: + app: thanos-compactor + ports: + - port: 10902 + name: http +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: thanos-compactor + namespace: monitoring + labels: + release: prom-op +spec: + jobLabel: thanos + endpoints: + - port: http + path: /metrics + interval: 30s + selector: + matchLabels: + app: thanos-compactor \ No newline at end of file diff --git a/kube/services/kubecost-slave/object-store.yaml b/kube/services/monitoring/thanos.yaml similarity index 71% rename from kube/services/kubecost-slave/object-store.yaml rename to kube/services/monitoring/thanos.yaml index bcfadc752..e5106f22f 100644 --- a/kube/services/kubecost-slave/object-store.yaml +++ b/kube/services/monitoring/thanos.yaml @@ -1,9 +1,10 @@ type: S3 config: - bucket: KUBECOST_S3_BUCKET + bucket: S3_BUCKET endpoint: "s3.amazonaws.com" - region: AWS_REGION + region: us-east-1 insecure: false + aws_sdk_auth: true signature_version2: false put_user_metadata: "X-Amz-Acl": "bucket-owner-full-control" @@ -13,4 +14,6 @@ config: insecure_skip_verify: false trace: enable: true - part_size: 134217728 \ No newline at end of file + part_size: 134217728 + sse_config: + type: "SSE-S3" \ No newline at end of file diff --git a/kube/services/monitoring/values.yaml b/kube/services/monitoring/values.yaml new file mode 100644 index 000000000..448df4cde --- /dev/null +++ b/kube/services/monitoring/values.yaml @@ -0,0 +1,3356 @@ +# Default values for kube-prometheus-stack. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +## Provide a name in place of kube-prometheus-stack for `app:` labels +## +nameOverride: "" + +## Override the deployment namespace +## +namespaceOverride: "" + +## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: 1.16.6 +## +kubeTargetVersionOverride: "" + +## Allow kubeVersion to be overridden while creating the ingress +## +kubeVersionOverride: "" + +## Provide a name to substitute for the full names of resources +## +fullnameOverride: "" + +## Labels to apply to all resources +## +commonLabels: {} +# scmhash: abc123 +# myLabel: aakkmd + +## Create default rules for monitoring the cluster +## +defaultRules: + create: true + rules: + alertmanager: true + etcd: true + configReloaders: true + general: true + k8s: true + kubeApiserverAvailability: true + kubeApiserverBurnrate: true + kubeApiserverHistogram: true + kubeApiserverSlos: true + kubelet: true + kubeProxy: true + kubePrometheusGeneral: true + kubePrometheusNodeRecording: true + kubernetesApps: true + kubernetesResources: true + kubernetesStorage: true + kubernetesSystem: true + kubeScheduler: true + kubeStateMetrics: true + network: true + node: true + nodeExporterAlerting: true + nodeExporterRecording: true + prometheus: true + prometheusOperator: true + + ## Reduce app namespace alert scope + appNamespacesTarget: ".*" + + ## Labels for default rules + labels: {} + ## Annotations for default rules + annotations: {} + + ## Additional labels for PrometheusRule alerts + additionalRuleLabels: {} + + ## Additional annotations for PrometheusRule alerts + additionalRuleAnnotations: {} + + ## Prefix for runbook URLs. Use this to override the first part of the runbookURLs that is common to all rules. + runbookUrl: "https://runbooks.prometheus-operator.dev/runbooks" + + ## Disabled PrometheusRule alerts + disabled: {} + # KubeAPIDown: true + # NodeRAIDDegraded: true + +## Deprecated way to provide custom recording or alerting rules to be deployed into the cluster. 
+## +# additionalPrometheusRules: [] +# - name: my-rule-file +# groups: +# - name: my_group +# rules: +# - record: my_record +# expr: 100 * my_record + +## Provide custom recording or alerting rules to be deployed into the cluster. +## +additionalPrometheusRulesMap: {} +# rule-name: +# groups: +# - name: my_group +# rules: +# - record: my_record +# expr: 100 * my_record + +## +global: + rbac: + create: true + + ## Create ClusterRoles that extend the existing view, edit and admin ClusterRoles to interact with prometheus-operator CRDs + ## Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles + createAggregateClusterRoles: false + pspEnabled: false + pspAnnotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Reference to one or more secrets to be used when pulling images + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + imagePullSecrets: [] + # - name: "image-pull-secret" + # or + # - "image-pull-secret" + +## Configuration for alertmanager +## ref: https://prometheus.io/docs/alerting/alertmanager/ +## +alertmanager: + + ## Deploy alertmanager + ## + enabled: true + + ## Annotations for Alertmanager + ## + annotations: {} + + ## Api that prometheus will use to communicate with alertmanager. Possible values are v1, v2 + ## + apiVersion: v2 + + ## Service account for Alertmanager to use. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + create: true + name: "" + annotations: {} + + ## Configure pod disruption budgets for Alertmanager + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + ## This configuration is immutable once created and will require the PDB to be deleted to be changed + ## https://github.com/kubernetes/kubernetes/issues/45398 + ## + podDisruptionBudget: + enabled: false + minAvailable: 1 + maxUnavailable: "" + + ## Alertmanager configuration directives + ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file + ## https://prometheus.io/webtools/alerting/routing-tree-editor/ + ## + config: + global: + resolve_timeout: 5m + inhibit_rules: + - source_matchers: + - 'severity = critical' + target_matchers: + - 'severity =~ warning|info' + equal: + - 'namespace' + - 'alertname' + - source_matchers: + - 'severity = warning' + target_matchers: + - 'severity = info' + equal: + - 'namespace' + - 'alertname' + - source_matchers: + - 'alertname = InfoInhibitor' + target_matchers: + - 'severity = info' + equal: + - 'namespace' + route: + group_by: ['namespace'] + group_wait: 30s + group_interval: 5m + repeat_interval: 12h + receiver: 'null' + routes: + - receiver: 'null' + matchers: + - alertname =~ "InfoInhibitor|Watchdog" + receivers: + - name: 'null' + templates: + - '/etc/alertmanager/config/*.tmpl' + + ## Pass the Alertmanager configuration directives through Helm's templating + ## engine. 
If the Alertmanager configuration contains Alertmanager templates, + ## they'll need to be properly escaped so that they are not interpreted by + ## Helm + ## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function + ## https://prometheus.io/docs/alerting/configuration/#tmpl_string + ## https://prometheus.io/docs/alerting/notifications/ + ## https://prometheus.io/docs/alerting/notification_examples/ + tplConfig: false + + ## Alertmanager template files to format alerts + ## By default, templateFiles are placed in /etc/alertmanager/config/ and if + ## they have a .tmpl file suffix will be loaded. See config.templates above + ## to change, add other suffixes. If adding other suffixes, be sure to update + ## config.templates above to include those suffixes. + ## ref: https://prometheus.io/docs/alerting/notifications/ + ## https://prometheus.io/docs/alerting/notification_examples/ + ## + templateFiles: {} + # + ## An example template: + # template_1.tmpl: |- + # {{ define "cluster" }}{{ .ExternalURL | reReplaceAll ".*alertmanager\\.(.*)" "$1" }}{{ end }} + # + # {{ define "slack.myorg.text" }} + # {{- $root := . 
-}} + # {{ range .Alerts }} + # *Alert:* {{ .Annotations.summary }} - `{{ .Labels.severity }}` + # *Cluster:* {{ template "cluster" $root }} + # *Description:* {{ .Annotations.description }} + # *Graph:* <{{ .GeneratorURL }}|:chart_with_upwards_trend:> + # *Runbook:* <{{ .Annotations.runbook }}|:spiral_note_pad:> + # *Details:* + # {{ range .Labels.SortedPairs }} - *{{ .Name }}:* `{{ .Value }}` + # {{ end }} + # {{ end }} + # {{ end }} + + ingress: + enabled: true + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + annotations: {} + + labels: {} + + ## Redirect ingress to an additional defined port on the service + # servicePort: 8081 + + ## Hosts must be provided if Ingress is enabled. + ## + hosts: [] + #- prometheus.emalinowskiv1.planx-pla.net + # - alertmanager.domain.com + + ## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix + ## + paths: [] + # - / + + ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) + ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types + # pathType: ImplementationSpecific + + ## TLS configuration for Alertmanager Ingress + ## Secret must be manually created in the namespace + ## + tls: [] + # - secretName: alertmanager-general-tls + # hosts: + # - alertmanager.example.com + + ## Configuration for Alertmanager secret + ## + secret: + annotations: {} + + ## Configuration for creating an Ingress that will map to each Alertmanager replica service + ## alertmanager.servicePerReplica must be enabled + ## + ingressPerReplica: + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See 
https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + annotations: {} + labels: {} + + ## Final form of the hostname for each per replica ingress is + ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }} + ## + ## Prefix for the per replica ingress that will have `-$replicaNumber` + ## appended to the end + hostPrefix: "" + ## Domain that will be used for the per replica ingress + hostDomain: "" + + ## Paths to use for ingress rules + ## + paths: [] + # - / + + ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) + ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types + # pathType: ImplementationSpecific + + ## Secret name containing the TLS certificate for alertmanager per replica ingress + ## Secret must be manually created in the namespace + tlsSecretName: "" + + ## Separated secret for each per replica Ingress. 
Can be used together with cert-manager + ## + tlsSecretPerReplica: + enabled: false + ## Final form of the secret for each per replica ingress is + ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }} + ## + prefix: "alertmanager" + + ## Configuration for Alertmanager service + ## + service: + annotations: {} + labels: {} + clusterIP: "" + + ## Port for Alertmanager Service to listen on + ## + port: 9093 + ## To be used with a proxy extraContainer port + ## + targetPort: 9093 + ## Port to expose on each node + ## Only used if service.type is 'NodePort' + ## + nodePort: 30903 + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + + ## Additional ports to open for Alertmanager service + additionalPorts: [] + # additionalPorts: + # - name: authenticated + # port: 8081 + # targetPort: 8081 + + externalIPs: [] + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## Service type + ## + type: ClusterIP + + ## Configuration for creating a separate Service for each statefulset Alertmanager replica + ## + servicePerReplica: + enabled: false + annotations: {} + + ## Port for Alertmanager Service per replica to listen on + ## + port: 9093 + + ## To be used with a proxy extraContainer port + targetPort: 9093 + + ## Port to expose on each node + ## Only used if servicePerReplica.type is 'NodePort' + ## + nodePort: 30904 + + ## Loadbalancer source IP ranges + ## Only used if servicePerReplica.type is "LoadBalancer" + loadBalancerSourceRanges: [] + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## Service type + ## + type: ClusterIP + + ## If true, create a serviceMonitor for alertmanager + ## + serviceMonitor: + ## Scrape interval. 
If not set, the Prometheus default scrape interval is used. + ## + interval: "" + selfMonitor: true + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. + scheme: "" + + ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. + ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig + tlsConfig: {} + + bearerTokenFile: + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Settings affecting alertmanagerSpec + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerspec + ## + alertmanagerSpec: + ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata + ## Metadata Labels and Annotations gets propagated to the Alertmanager pods. 
+ ## + podMetadata: {} + + ## Image of Alertmanager + ## + image: + repository: quay.io/prometheus/alertmanager + tag: v0.24.0 + sha: "" + + ## If true then the user will be responsible to provide a secret with alertmanager configuration + ## So when true the config part will be ignored (including templateFiles) and the one in the secret will be used + ## + useExistingSecret: false + + ## Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the + ## Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/. + ## + secrets: [] + + ## ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods. + ## The ConfigMaps are mounted into /etc/alertmanager/configmaps/. + ## + configMaps: [] + + ## ConfigSecret is the name of a Kubernetes Secret in the same namespace as the Alertmanager object, which contains configuration for + ## this Alertmanager instance. Defaults to 'alertmanager-' The secret is mounted into /etc/alertmanager/config. + ## + # configSecret: + + ## WebTLSConfig defines the TLS parameters for HTTPS + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerwebspec + web: {} + + ## AlertmanagerConfigs to be selected to merge and configure Alertmanager with. + ## + alertmanagerConfigSelector: {} + ## Example which selects all alertmanagerConfig resources + ## with label "alertconfig" with values any of "example-config" or "example-config-2" + # alertmanagerConfigSelector: + # matchExpressions: + # - key: alertconfig + # operator: In + # values: + # - example-config + # - example-config-2 + # + ## Example which selects all alertmanagerConfig resources with label "role" set to "example-config" + # alertmanagerConfigSelector: + # matchLabels: + # role: example-config + + ## Namespaces to be selected for AlertmanagerConfig discovery. If nil, only check own namespace. 
+ ## + alertmanagerConfigNamespaceSelector: {} + ## Example which selects all namespaces + ## with label "alertmanagerconfig" with values any of "example-namespace" or "example-namespace-2" + # alertmanagerConfigNamespaceSelector: + # matchExpressions: + # - key: alertmanagerconfig + # operator: In + # values: + # - example-namespace + # - example-namespace-2 + + ## Example which selects all namespaces with label "alertmanagerconfig" set to "enabled" + # alertmanagerConfigNamespaceSelector: + # matchLabels: + # alertmanagerconfig: enabled + + ## AlermanagerConfig to be used as top level configuration + ## + alertmanagerConfiguration: {} + ## Example with select a global alertmanagerconfig + # alertmanagerConfiguration: + # name: global-alertmanager-Configuration + + ## Define Log Format + # Use logfmt (default) or json logging + logFormat: logfmt + + ## Log level for Alertmanager to be configured with. + ## + logLevel: info + + ## Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the + ## running cluster equal to the expected size. + replicas: 1 + + ## Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression + ## [0-9]+(ms|s|m|h) (milliseconds seconds minutes hours). + ## + retention: 120h + + ## Storage is the definition of how storage will be used by the Alertmanager instances. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md + ## + storage: {} + # volumeClaimTemplate: + # spec: + # storageClassName: gluster + # accessModes: ["ReadWriteOnce"] + # resources: + # requests: + # storage: 50Gi + # selector: {} + + + ## The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs. This is necessary if Alertmanager is not served from root of a DNS name. string false + ## + externalUrl: + + ## The route prefix Alertmanager registers HTTP handlers for. 
This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true, + ## but the server serves requests under a different route prefix. For example for use with kubectl proxy. + ## + routePrefix: / + + ## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions. + ## + paused: false + + ## Define which Nodes the Pods are scheduled on. + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Define resources requests and limits for single Pods. + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # requests: + # memory: 400Mi + + ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node. + ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided. + ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node. + ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured. + ## + podAntiAffinity: "" + + ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity. + ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone + ## + podAntiAffinityTopologyKey: kubernetes.io/hostname + + ## Assign custom affinity rules to the alertmanager instance + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + affinity: {} + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/e2e-az-name + # operator: In + # values: + # - e2e-az1 + # - e2e-az2 + + ## If specified, the pod's tolerations. 
+ ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + + ## If specified, the pod's topology spread constraints. + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app: alertmanager + + ## SecurityContext holds pod-level security attributes and common container settings. + ## This defaults to non root user with uid 1000 and gid 2000. *v1.PodSecurityContext false + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 2000 + + ## ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP. + ## Note this is only for the Alertmanager UI, not the gossip communication. + ## + listenLocal: false + + ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod. + ## + containers: [] + # containers: + # - name: oauth-proxy + # image: quay.io/oauth2-proxy/oauth2-proxy:v7.1.2 + # args: + # - --upstream=http://127.0.0.1:9093 + # - --http-address=0.0.0.0:8081 + # - ... + # ports: + # - containerPort: 8081 + # name: oauth-proxy + # protocol: TCP + # resources: {} + + # Additional volumes on the output StatefulSet definition. + volumes: [] + + # Additional VolumeMounts on the output StatefulSet definition. + volumeMounts: [] + + ## InitContainers allows injecting additional initContainers. 
This is meant to allow doing some changes + ## (permissions, dir tree) on mounted volumes before starting prometheus + initContainers: [] + + ## Priority class assigned to the Pods + ## + priorityClassName: "" + + ## AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster. + ## + additionalPeers: [] + + ## PortName to use for Alert Manager. + ## + portName: "http-web" + + ## ClusterAdvertiseAddress is the explicit address to advertise in cluster. Needs to be provided for non RFC1918 [1] (public) addresses. [1] RFC1918: https://tools.ietf.org/html/rfc1918 + ## + clusterAdvertiseAddress: false + + ## ForceEnableClusterMode ensures Alertmanager does not deactivate the cluster mode when running with a single replica. + ## Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each. + forceEnableClusterMode: false + + ## ExtraSecret can be used to store various data in an extra secret + ## (use it for example to store hashed basic auth credentials) + extraSecret: + ## if not set, name will be auto generated + # name: "" + annotations: {} + data: {} + # auth: | + # foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0 + # someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c. + +## Using default values from https://github.com/grafana/helm-charts/blob/main/charts/grafana/values.yaml +## +grafana: + enabled: true + namespaceOverride: "" + + ## ForceDeployDatasources Create datasource configmap even if grafana deployment has been disabled + ## + forceDeployDatasources: false + + ## ForceDeployDashboard Create dashboard configmap even if grafana deployment has been disabled + ## + forceDeployDashboards: false + + ## Deploy default dashboards + ## + defaultDashboardsEnabled: true + + ## Timezone for the default dashboards + ## Other options are: browser or a specific timezone, i.e. 
Europe/Luxembourg + ## + defaultDashboardsTimezone: utc + + adminPassword: prom-operator #pragma: allowlist secret + + rbac: + ## If true, Grafana PSPs will be created + ## + pspEnabled: false + + ingress: + ## If true, Grafana Ingress will be created + ## + enabled: false + + ## IngressClassName for Grafana Ingress. + ## Should be provided if Ingress is enable. + ## + # ingressClassName: nginx + + ## Annotations for Grafana Ingress + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + + ## Labels to be added to the Ingress + ## + labels: {} + + ## Hostnames. + ## Must be provided if Ingress is enable. + ## + # hosts: + # - grafana.domain.com + hosts: [] + + ## Path for grafana ingress + path: / + + ## TLS configuration for grafana Ingress + ## Secret must be manually created in the namespace + ## + tls: [] + # - secretName: grafana-general-tls + # hosts: + # - grafana.example.com + + sidecar: + dashboards: + enabled: true + label: grafana_dashboard + labelValue: "1" + + ## Annotations for Grafana dashboard configmaps + ## + annotations: {} + multicluster: + global: + enabled: false + etcd: + enabled: false + provider: + allowUiUpdates: false + datasources: + enabled: true + defaultDatasourceEnabled: true + + uid: prometheus + + ## URL of prometheus datasource + ## + # url: http://prometheus-stack-prometheus:9090/ + + # If not defined, will use prometheus.prometheusSpec.scrapeInterval or its default + # defaultDatasourceScrapeInterval: 15s + + ## Annotations for Grafana datasource configmaps + ## + annotations: {} + + ## Create datasource for each Pod of Prometheus StatefulSet; + ## this uses headless service `prometheus-operated` which is + ## created by Prometheus Operator + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/0fee93e12dc7c2ea1218f19ae25ec6b893460590/pkg/prometheus/statefulset.go#L255-L286 + createPrometheusReplicasDatasources: false + label: grafana_datasource + labelValue: "1" + + 
## Field with internal link pointing to existing data source in Grafana. + ## Can be provisioned via additionalDataSources + exemplarTraceIdDestinations: {} + # datasourceUid: Jaeger + # traceIdLabelName: trace_id + + extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /etc/grafana/ssl/ + # configMap: certs-configmap + # readOnly: true + + deleteDatasources: [] + # - name: example-datasource + # orgId: 1 + + ## Configure additional grafana datasources (passed through tpl) + ## ref: http://docs.grafana.org/administration/provisioning/#datasources + additionalDataSources: [] + # - name: prometheus-sample + # access: proxy + # basicAuth: true + # basicAuthPassword: pass + # basicAuthUser: daco + # editable: false + # jsonData: + # tlsSkipVerify: true + # orgId: 1 + # type: prometheus + # url: https://{{ printf "%s-prometheus.svc" .Release.Name }}:9090 + # version: 1 + + ## Passed to grafana subchart and used by servicemonitor below + ## + service: + portName: http-web + + serviceMonitor: + # If true, a ServiceMonitor CRD is created for a prometheus operator + # https://github.com/coreos/prometheus-operator + # + enabled: true + + # Path to use for scraping metrics. Might be different if server.root_url is set + # in grafana.ini + path: "/metrics" + + # namespace: monitoring (defaults to use the namespace this chart is deployed to) + + # labels for the ServiceMonitor + labels: {} + + # Scrape interval. If not set, the Prometheus default scrape interval is used. 
+ # + interval: "" + scheme: http + tlsConfig: {} + scrapeTimeout: 30s + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + +## Component scraping the kube api server +## +kubeApiServer: + enabled: true + tlsConfig: + serverName: kubernetes + insecureSkipVerify: false + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + jobLabel: component + selector: + matchLabels: + component: apiserver + provider: kubernetes + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: + # Drop excessively noisy apiserver buckets. 
+ - action: drop + regex: apiserver_request_duration_seconds_bucket;(0.15|0.2|0.3|0.35|0.4|0.45|0.6|0.7|0.8|0.9|1.25|1.5|1.75|2|3|3.5|4|4.5|6|7|8|9|15|25|40|50) + sourceLabels: + - __name__ + - le + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: + # - __meta_kubernetes_namespace + # - __meta_kubernetes_service_name + # - __meta_kubernetes_endpoint_port_name + # action: keep + # regex: default;kubernetes;https + # - targetLabel: __address__ + # replacement: kubernetes.default.svc:443 + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + +## Component scraping the kubelet and kubelet-hosted cAdvisor +## +kubelet: + enabled: true + namespace: kube-system + + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## Enable scraping the kubelet over https. For requirements to enable this see + ## https://github.com/prometheus-operator/prometheus-operator/issues/926 + ## + https: true + + ## Enable scraping /metrics/cadvisor from kubelet's service + ## + cAdvisor: true + + ## Enable scraping /metrics/probes from kubelet's service + ## + probes: true + + ## Enable scraping /metrics/resource from kubelet's service + ## This is disabled by default because container metrics are already exposed by cAdvisor + ## + resource: false + # From kubernetes 1.18, /metrics/resource/v1alpha1 renamed to /metrics/resource + resourcePath: "/metrics/resource/v1alpha1" + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + cAdvisorMetricRelabelings: + # Drop less useful container CPU metrics. + - sourceLabels: [__name__] + action: drop + regex: 'container_cpu_(cfs_throttled_seconds_total|load_average_10s|system_seconds_total|user_seconds_total)' + # Drop less useful container / always zero filesystem metrics. + - sourceLabels: [__name__] + action: drop + regex: 'container_fs_(io_current|io_time_seconds_total|io_time_weighted_seconds_total|reads_merged_total|sector_reads_total|sector_writes_total|writes_merged_total)' + # Drop less useful / always zero container memory metrics. + - sourceLabels: [__name__] + action: drop + regex: 'container_memory_(mapped_file|swap)' + # Drop less useful container process metrics. + - sourceLabels: [__name__] + action: drop + regex: 'container_(file_descriptors|tasks_state|threads_max)' + # Drop container spec metrics that overlap with kube-state-metrics. + - sourceLabels: [__name__] + action: drop + regex: 'container_spec.*' + # Drop cgroup metrics with no pod. + - sourceLabels: [id, pod] + action: drop + regex: '.+;' + # - sourceLabels: [__name__, image] + # separator: ; + # regex: container_([a-z_]+); + # replacement: $1 + # action: drop + # - sourceLabels: [__name__] + # separator: ; + # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) + # replacement: $1 + # action: drop + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + probesMetricRelabelings: [] + # - sourceLabels: [__name__, image] + # separator: ; + # regex: container_([a-z_]+); + # replacement: $1 + # action: drop + # - sourceLabels: [__name__] + # separator: ; + # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) + # replacement: $1 + # action: drop + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + ## metrics_path is required to match upstream rules and charts + cAdvisorRelabelings: + - sourceLabels: [__metrics_path__] + targetLabel: metrics_path + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + probesRelabelings: + - sourceLabels: [__metrics_path__] + targetLabel: metrics_path + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + resourceRelabelings: + - sourceLabels: [__metrics_path__] + targetLabel: metrics_path + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - sourceLabels: [__name__, image] + # separator: ; + # regex: container_([a-z_]+); + # replacement: $1 + # action: drop + # - sourceLabels: [__name__] + # separator: ; + # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) + # replacement: $1 + # action: drop + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + ## metrics_path is required to match upstream rules and charts + relabelings: + - sourceLabels: [__metrics_path__] + targetLabel: metrics_path + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + +## Component scraping the kube controller manager +## +kubeControllerManager: + enabled: true + + ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on + ## + endpoints: [] + # - 10.141.4.22 + # - 10.141.4.23 + # - 10.141.4.24 + + ## If using kubeControllerManager.endpoints only the port and targetPort are used + ## + service: + enabled: true + ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change + ## of default port in Kubernetes 1.22. + ## + port: null + targetPort: null + # selector: + # component: kube-controller-manager + + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## Enable scraping kube-controller-manager over https. 
+ ## Requires proper certs (not self-signed) and delegated authentication/authorization checks. + ## If null or unset, the value is determined dynamically based on target Kubernetes version. + ## + https: null + + # Skip TLS certificate validation when scraping + insecureSkipVerify: null + + # Name of the server to use when validating TLS certificate + serverName: null + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + +## Component scraping coreDns. Use either this or kubeDns +## +coreDns: + enabled: true + service: + port: 9153 + targetPort: 9153 + # selector: + # k8s-app: kube-dns + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + +## Component scraping kubeDns. Use either this or coreDns +## +kubeDns: + enabled: false + service: + dnsmasq: + port: 10054 + targetPort: 10054 + skydns: + port: 10055 + targetPort: 10055 + # selector: + # k8s-app: kube-dns + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + dnsmasqMetricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + dnsmasqRelabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + +## Component scraping etcd +## +kubeEtcd: + enabled: true + + ## If your etcd is not deployed as a pod, specify IPs it can be found on + ## + endpoints: [] + # - 10.141.4.22 + # - 10.141.4.23 + # - 10.141.4.24 + + ## Etcd service. If using kubeEtcd.endpoints only the port and targetPort are used + ## + service: + enabled: true + port: 2379 + targetPort: 2379 + # selector: + # component: etcd + + ## Configure secure access to the etcd cluster by loading a secret into prometheus and + ## specifying security configuration below. For example, with a secret named etcd-client-cert + ## + ## serviceMonitor: + ## scheme: https + ## insecureSkipVerify: false + ## serverName: localhost + ## caFile: /etc/prometheus/secrets/etcd-client-cert/etcd-ca + ## certFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client + ## keyFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key + ## + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + scheme: http + insecureSkipVerify: false + serverName: "" + caFile: "" + certFile: "" + keyFile: "" + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + +## Component scraping kube scheduler +## +kubeScheduler: + enabled: true + + ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on + ## + endpoints: [] + # - 10.141.4.22 + # - 10.141.4.23 + # - 10.141.4.24 + + ## If using kubeScheduler.endpoints only the port and targetPort are used + ## + service: + enabled: true + ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change + ## of default port in Kubernetes 1.23. + ## + port: null + targetPort: null + # selector: + # component: kube-scheduler + + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + ## Enable scraping kube-scheduler over https. + ## Requires proper certs (not self-signed) and delegated authentication/authorization checks. + ## If null or unset, the value is determined dynamically based on target Kubernetes version. + ## + https: null + + ## Skip TLS certificate validation when scraping + insecureSkipVerify: null + + ## Name of the server to use when validating TLS certificate + serverName: null + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + +## Component scraping kube proxy +## +kubeProxy: + enabled: true + + ## If your kube proxy is not deployed as a pod, specify IPs it can be found on + ## + endpoints: [] + # - 10.141.4.22 + # - 10.141.4.23 + # - 10.141.4.24 + + service: + enabled: true + port: 10249 + targetPort: 10249 + selector: + k8s-app: kube-proxy + + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## Enable scraping kube-proxy over https. + ## Requires proper certs (not self-signed) and delegated authentication/authorization checks + ## + https: false + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + +## Component scraping kube state metrics +## +kubeStateMetrics: + enabled: true + +## Configuration for kube-state-metrics subchart +## +kube-state-metrics: + namespaceOverride: "" + rbac: + create: true + releaseLabel: true + prometheus: + monitor: + enabled: true + + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## Scrape Timeout. If not set, the Prometheus default scrape timeout is used. + ## + scrapeTimeout: "" + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + # Keep labels from scraped data, overriding server-side labels + ## + honorLabels: true + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + selfMonitor: + enabled: false + +## Deploy node exporter as a daemonset to all nodes +## +nodeExporter: + enabled: true + +## Configuration for prometheus-node-exporter subchart +## +prometheus-node-exporter: + namespaceOverride: "" + podLabels: + ## Add the 'node-exporter' label to be used by serviceMonitor to match standard common usage in rules and grafana dashboards + ## + jobLabel: node-exporter + extraArgs: + - --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/) + - --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ + service: + portName: http-metrics + prometheus: + monitor: + enabled: true + + jobLabel: jobLabel + + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## How long until a scrape request times out. If not set, the Prometheus default scape timeout is used. + ## + scrapeTimeout: "" + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - sourceLabels: [__name__] + # separator: ; + # regex: ^node_mountstats_nfs_(event|operations|transport)_.+ + # replacement: $1 + # action: drop + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + rbac: + ## If true, create PSPs for node-exporter + ## + pspEnabled: false + +## Manages Prometheus and Alertmanager components +## +prometheusOperator: + enabled: true + + ## Prometheus-Operator v0.39.0 and later support TLS natively. + ## + tls: + enabled: true + # Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants + tlsMinVersion: VersionTLS13 + # The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules. + internalPort: 10250 + + ## Admission webhook support for PrometheusRules resources added in Prometheus Operator 0.30 can be enabled to prevent incorrectly formatted + ## rules from making their way into prometheus and potentially preventing the container from starting + admissionWebhooks: + failurePolicy: Fail + enabled: true + ## A PEM encoded CA bundle which will be used to validate the webhook's server certificate. + ## If unspecified, system trust roots on the apiserver are used. + caBundle: "" + ## If enabled, generate a self-signed certificate, then patch the webhook configurations with the generated data. + ## On chart upgrades (or if the secret exists) the cert will not be re-generated. You can use this to provide your own + ## certs ahead of time if you wish. 
+ ## + patch: + enabled: true + image: + repository: k8s.gcr.io/ingress-nginx/kube-webhook-certgen + tag: v1.1.1 + sha: "" + pullPolicy: IfNotPresent + resources: {} + ## Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + podAnnotations: {} + nodeSelector: {} + affinity: {} + tolerations: [] + + ## SecurityContext holds pod-level security attributes and common container settings. + ## This defaults to non root user with uid 2000 and gid 2000. *v1.PodSecurityContext false + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 + + # Use certmanager to generate webhook certs + certManager: + enabled: false + # self-signed root certificate + rootCert: + duration: "" # default to be 5y + admissionCert: + duration: "" # default to be 1y + # issuerRef: + # name: "issuer" + # kind: "ClusterIssuer" + + ## Namespaces to scope the interaction of the Prometheus Operator and the apiserver (allow list). + ## This is mutually exclusive with denyNamespaces. Setting this to an empty object will disable the configuration + ## + namespaces: {} + # releaseNamespace: true + # additional: + # - kube-system + + ## Namespaces not to scope the interaction of the Prometheus Operator (deny list). + ## + denyNamespaces: [] + + ## Filter namespaces to look for prometheus-operator custom resources + ## + alertmanagerInstanceNamespaces: [] + prometheusInstanceNamespaces: [] + thanosRulerInstanceNamespaces: [] + + ## The clusterDomain value will be added to the cluster.peer option of the alertmanager. 
+ ## Without this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated:9094 (default value) + ## With this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated.namespace.svc.cluster-domain:9094 + ## + # clusterDomain: "cluster.local" + + ## Service account for Alertmanager to use. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + create: true + name: "" + + ## Configuration for Prometheus operator service + ## + service: + annotations: {} + labels: {} + clusterIP: "" + + ## Port to expose on each node + ## Only used if service.type is 'NodePort' + ## + nodePort: 30080 + + nodePortTls: 30443 + + ## Additional ports to open for Prometheus service + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services + ## + additionalPorts: [] + + ## Loadbalancer IP + ## Only use if service.type is "LoadBalancer" + ## + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## Service type + ## NodePort, ClusterIP, LoadBalancer + ## + type: ClusterIP + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + ## Annotations to add to the operator deployment + ## + annotations: {} + + ## Labels to add to the operator pod + ## + podLabels: {} + + ## Annotations to add to the operator pod + ## + podAnnotations: {} + + ## Assign a PriorityClassName to pods if set + # priorityClassName: "" + + ## Define Log Format + # Use logfmt (default) or json logging + # logFormat: logfmt + + ## Decrease log verbosity to errors only + # logLevel: error + + ## If true, the operator will create and maintain a service for scraping 
kubelets + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/helm/prometheus-operator/README.md + ## + kubeletService: + enabled: true + namespace: kube-system + ## Use '{{ template "kube-prometheus-stack.fullname" . }}-kubelet' by default + name: "" + + ## Create a servicemonitor for the operator + ## + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + ## Scrape timeout. If not set, the Prometheus default scrape timeout is used. + scrapeTimeout: "" + selfMonitor: true + + ## Metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Resource limits & requests + ## + resources: {} + # limits: + # cpu: 200m + # memory: 200Mi + # requests: + # cpu: 100m + # memory: 100Mi + + # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), + # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working + ## + hostNetwork: false + + ## Define which Nodes the Pods are scheduled on. 
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Tolerations for use with node taints + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + + ## Assign custom affinity rules to the prometheus operator + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + affinity: {} + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/e2e-az-name + # operator: In + # values: + # - e2e-az1 + # - e2e-az2 + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + securityContext: + fsGroup: 65534 + runAsGroup: 65534 + runAsNonRoot: true + runAsUser: 65534 + + ## Container-specific security context configuration + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + containerSecurityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + + ## Prometheus-operator image + ## + image: + repository: quay.io/prometheus-operator/prometheus-operator + tag: v0.57.0 + sha: "" + pullPolicy: IfNotPresent + + ## Prometheus image to use for prometheuses managed by the operator + ## + # prometheusDefaultBaseImage: quay.io/prometheus/prometheus + + ## Alertmanager image to use for alertmanagers managed by the operator + ## + # alertmanagerDefaultBaseImage: quay.io/prometheus/alertmanager + + ## Prometheus-config-reloader + ## + prometheusConfigReloader: + # image to use for config and rule reloading + image: + repository: quay.io/prometheus-operator/prometheus-config-reloader + tag: v0.57.0 + sha: "" + + # resource config for prometheusConfigReloader + resources: + requests: + cpu: 200m + memory: 50Mi + limits: + cpu: 
200m + memory: 50Mi + + ## Thanos side-car image when configured + ## + thanosImage: + repository: quay.io/thanos/thanos + tag: v0.25.2 + sha: "" + + ## Set a Field Selector to filter watched secrets + ## + secretFieldSelector: "" + +## Deploy a Prometheus instance +## +prometheus: + + enabled: true + + ## Annotations for Prometheus + ## + annotations: {} + + ## Service account for Prometheuses to use. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + create: false + name: "thanos" + annotations: {} + + # Service for thanos service discovery on sidecar + # Enable this can make Thanos Query can use + # `--store=dnssrv+_grpc._tcp.${kube-prometheus-stack.fullname}-thanos-discovery.${namespace}.svc.cluster.local` to discovery + # Thanos sidecar on prometheus nodes + # (Please remember to change ${kube-prometheus-stack.fullname} and ${namespace}. Not just copy and paste!) + thanosService: + enabled: true + annotations: {} + labels: {} + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## Service type + ## + type: ClusterIP + + ## gRPC port config + portName: grpc + port: 10901 + targetPort: "grpc" + + ## HTTP port config (for metrics) + httpPortName: http + httpPort: 10902 + targetHttpPort: "http" + + ## ClusterIP to assign + # Default is to make this a headless service ("None") + clusterIP: "None" + + ## Port to expose on each node, if service type is NodePort + ## + nodePort: 30901 + httpNodePort: 30902 + + # ServiceMonitor to scrape Sidecar metrics + # Needs thanosService to be enabled as well + thanosServiceMonitor: + enabled: true + interval: "" + + ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. + scheme: "" + + ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. 
+ ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig + tlsConfig: {} + + bearerTokenFile: + + ## Metric relabel configs to apply to samples before ingestion. + metricRelabelings: [] + + ## relabel configs to apply to samples before ingestion. + relabelings: [] + + # Service for external access to sidecar + # Enabling this creates a service to expose thanos-sidecar outside the cluster. + thanosServiceExternal: + enabled: false + annotations: {} + labels: {} + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + ## gRPC port config + portName: grpc + port: 10901 + targetPort: "grpc" + + ## HTTP port config (for metrics) + httpPortName: http + httpPort: 10902 + targetHttpPort: "http" + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## Service type + ## + type: LoadBalancer + + ## Port to expose on each node + ## + nodePort: 30901 + httpNodePort: 30902 + + ## Configuration for Prometheus service + ## + service: + annotations: {} + labels: {} + clusterIP: "" + + ## Port for Prometheus Service to listen on + ## + port: 9090 + + ## To be used with a proxy extraContainer port + targetPort: 9090 + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + ## Port to expose on each node + ## Only used if service.type is 'NodePort' + ## + nodePort: 30090 + + ## Loadbalancer IP + ## Only use if service.type is "LoadBalancer" + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## Service type + ## + type: ClusterIP + + ## Additional port to define in the Service + additionalPorts: [] + # additionalPorts: + # - name: authenticated + # port: 8081 + # targetPort: 8081 + + ## 
Consider that all endpoints are considered "ready" even if the Pods themselves are not + ## Ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec + publishNotReadyAddresses: false + + sessionAffinity: "" + + ## Configuration for creating a separate Service for each statefulset Prometheus replica + ## + servicePerReplica: + enabled: false + annotations: {} + + ## Port for Prometheus Service per replica to listen on + ## + port: 9090 + + ## To be used with a proxy extraContainer port + targetPort: 9090 + + ## Port to expose on each node + ## Only used if servicePerReplica.type is 'NodePort' + ## + nodePort: 30091 + + ## Loadbalancer source IP ranges + ## Only used if servicePerReplica.type is "LoadBalancer" + loadBalancerSourceRanges: [] + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## Service type + ## + type: ClusterIP + + ## Configure pod disruption budgets for Prometheus + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + ## This configuration is immutable once created and will require the PDB to be deleted to be changed + ## https://github.com/kubernetes/kubernetes/issues/45398 + ## + podDisruptionBudget: + enabled: false + minAvailable: 1 + maxUnavailable: "" + + # Ingress exposes thanos sidecar outside the cluster + thanosIngress: + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + annotations: {} + labels: {} + servicePort: 10901 + + ## Port to expose on each node + ## Only used if service.type is 'NodePort' + ## + nodePort: 30901 + + ## Hosts must be provided if Ingress is enabled. 
+ ## + hosts: [] + # - thanos-gateway.domain.com + + ## Paths to use for ingress rules + ## + paths: [] + # - / + + ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) + ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types + # pathType: ImplementationSpecific + + ## TLS configuration for Thanos Ingress + ## Secret must be manually created in the namespace + ## + tls: [] + # - secretName: thanos-gateway-tls + # hosts: + # - thanos-gateway.domain.com + # + + ## ExtraSecret can be used to store various data in an extra secret + ## (use it for example to store hashed basic auth credentials) + extraSecret: + ## if not set, name will be auto generated + # name: "" + annotations: {} + data: {} + # auth: | + # foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0 + # someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c. + + ingress: + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + annotations: {} + labels: {} + + ## Redirect ingress to an additional defined port on the service + # servicePort: 8081 + + ## Hostnames. + ## Must be provided if Ingress is enabled. 
+ ## + # hosts: + # - prometheus.domain.com + hosts: [] + + ## Paths to use for ingress rules - one path should match the prometheusSpec.routePrefix + ## + paths: [] + # - / + + ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) + ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types + # pathType: ImplementationSpecific + + ## TLS configuration for Prometheus Ingress + ## Secret must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-general-tls + # hosts: + # - prometheus.example.com + + ## Configuration for creating an Ingress that will map to each Prometheus replica service + ## prometheus.servicePerReplica must be enabled + ## + ingressPerReplica: + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + annotations: {} + labels: {} + + ## Final form of the hostname for each per replica ingress is + ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }} + ## + ## Prefix for the per replica ingress that will have `-$replicaNumber` + ## appended to the end + hostPrefix: "" + ## Domain that will be used for the per replica ingress + hostDomain: "" + + ## Paths to use for ingress rules + ## + paths: [] + # - / + + ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) + ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types + # pathType: ImplementationSpecific + + ## Secret name containing the TLS certificate for Prometheus per replica ingress + ## Secret must be manually created in the namespace + tlsSecretName: 
"" + + ## Separated secret for each per replica Ingress. Can be used together with cert-manager + ## + tlsSecretPerReplica: + enabled: false + ## Final form of the secret for each per replica ingress is + ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }} + ## + prefix: "prometheus" + + ## Configure additional options for default pod security policy for Prometheus + ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + podSecurityPolicy: + allowedCapabilities: [] + allowedHostPaths: [] + volumes: [] + + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + selfMonitor: true + + ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. + scheme: "" + + ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. + ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#tlsconfig + tlsConfig: {} + + bearerTokenFile: + + ## Metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Settings affecting prometheusSpec + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#prometheusspec + ## + prometheusSpec: + ## If true, pass --storage.tsdb.max-block-duration=2h to prometheus. 
This is already done if using Thanos + ## + disableCompaction: false + ## APIServerConfig + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#apiserverconfig + ## + apiserverConfig: {} + + ## Interval between consecutive scrapes. + ## Defaults to 30s. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/release-0.44/pkg/prometheus/promcfg.go#L180-L183 + ## + scrapeInterval: "" + + ## Number of seconds to wait for target to respond before erroring + ## + scrapeTimeout: "" + + ## Interval between consecutive evaluations. + ## + evaluationInterval: "" + + ## ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP. + ## + listenLocal: false + + ## EnableAdminAPI enables Prometheus the administrative HTTP API which includes functionality such as deleting time series. + ## This is disabled by default. + ## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis + ## + enableAdminAPI: false + + ## WebTLSConfig defines the TLS parameters for HTTPS + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#webtlsconfig + web: {} + + # EnableFeatures API enables access to Prometheus disabled features. + # ref: https://prometheus.io/docs/prometheus/latest/disabled_features/ + enableFeatures: [] + # - exemplar-storage + + ## Image of Prometheus. + ## + image: + repository: quay.io/prometheus/prometheus + tag: v2.36.1 + sha: "" + + ## Tolerations for use with node taints + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + + ## If specified, the pod's topology spread constraints. 
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app: prometheus + + ## Alertmanagers to which alerts will be sent + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerendpoints + ## + ## Default configuration will connect to the alertmanager deployed as part of this release + ## + alertingEndpoints: [] + # - name: "" + # namespace: "" + # port: http + # scheme: http + # pathPrefix: "" + # tlsConfig: {} + # bearerTokenFile: "" + # apiVersion: v2 + + ## External labels to add to any time series or alerts when communicating with external systems + ## + externalLabels: {} + + ## enable --web.enable-remote-write-receiver flag on prometheus-server + ## + enableRemoteWriteReceiver: false + + ## Name of the external label used to denote replica name + ## + replicaExternalLabelName: "" + + ## If true, the Operator won't add the external label used to denote replica name + ## + replicaExternalLabelNameClear: false + + ## Name of the external label used to denote Prometheus instance name + ## + prometheusExternalLabelName: "" + + ## If true, the Operator won't add the external label used to denote Prometheus instance name + ## + prometheusExternalLabelNameClear: false + + ## External URL at which Prometheus will be reachable. + ## + externalUrl: "" + + ## Define which Nodes the Pods are scheduled on. + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. + ## The Secrets are mounted into /etc/prometheus/secrets/. Secrets changes after initial creation of a Prometheus object are not + ## reflected in the running Pods. 
To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated + ## with the new list of secrets. + ## + secrets: [] + + ## ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. + ## The ConfigMaps are mounted into /etc/prometheus/configmaps/. + ## + configMaps: [] + + ## QuerySpec defines the query command line flags when starting Prometheus. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#queryspec + ## + query: {} + + ## Namespaces to be selected for PrometheusRules discovery. + ## If nil, select own namespace. Namespaces to be selected for ServiceMonitor discovery. + ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#namespaceselector for usage + ## + ruleNamespaceSelector: {} + + ## If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the + ## prometheus resource to be created with selectors based on values in the helm deployment, + ## which will also match the PrometheusRule resources created + ## + ruleSelectorNilUsesHelmValues: true + + ## PrometheusRules to be selected for target discovery. 
+ ## If {}, select all PrometheusRules + ## + ruleSelector: {} + ## Example which select all PrometheusRules resources + ## with label "prometheus" with values any of "example-rules" or "example-rules-2" + # ruleSelector: + # matchExpressions: + # - key: prometheus + # operator: In + # values: + # - example-rules + # - example-rules-2 + # + ## Example which select all PrometheusRules resources with label "role" set to "example-rules" + # ruleSelector: + # matchLabels: + # role: example-rules + + ## If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the + ## prometheus resource to be created with selectors based on values in the helm deployment, + ## which will also match the servicemonitors created + ## + serviceMonitorSelectorNilUsesHelmValues: true + + ## ServiceMonitors to be selected for target discovery. + ## If {}, select all ServiceMonitors + ## + serviceMonitorSelector: {} + ## Example which selects ServiceMonitors with label "prometheus" set to "somelabel" + # serviceMonitorSelector: + # matchLabels: + # prometheus: somelabel + + ## Namespaces to be selected for ServiceMonitor discovery. + ## + serviceMonitorNamespaceSelector: {} + ## Example which selects ServiceMonitors in namespaces with label "prometheus" set to "somelabel" + # serviceMonitorNamespaceSelector: + # matchLabels: + # prometheus: somelabel + + ## If true, a nil or {} value for prometheus.prometheusSpec.podMonitorSelector will cause the + ## prometheus resource to be created with selectors based on values in the helm deployment, + ## which will also match the podmonitors created + ## + podMonitorSelectorNilUsesHelmValues: true + + ## PodMonitors to be selected for target discovery. + ## If {}, select all PodMonitors + ## + podMonitorSelector: {} + ## Example which selects PodMonitors with label "prometheus" set to "somelabel" + # podMonitorSelector: + # matchLabels: + # prometheus: somelabel + + ## Namespaces to be selected for PodMonitor discovery. 
+ ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#namespaceselector for usage + ## + podMonitorNamespaceSelector: {} + + ## If true, a nil or {} value for prometheus.prometheusSpec.probeSelector will cause the + ## prometheus resource to be created with selectors based on values in the helm deployment, + ## which will also match the probes created + ## + probeSelectorNilUsesHelmValues: true + + ## Probes to be selected for target discovery. + ## If {}, select all Probes + ## + probeSelector: {} + ## Example which selects Probes with label "prometheus" set to "somelabel" + # probeSelector: + # matchLabels: + # prometheus: somelabel + + ## Namespaces to be selected for Probe discovery. + ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#namespaceselector for usage + ## + probeNamespaceSelector: {} + + ## How long to retain metrics + ## + retention: 2d + + ## Maximum size of metrics + ## + retentionSize: "" + + ## Enable compression of the write-ahead log using Snappy. + ## + walCompression: false + + ## If true, the Operator won't process any Prometheus configuration changes + ## + paused: false + + ## Number of replicas of each shard to deploy for a Prometheus deployment. + ## Number of replicas multiplied by shards is the total number of Pods created. + ## + replicas: 1 + + ## EXPERIMENTAL: Number of shards to distribute targets onto. + ## Number of replicas multiplied by shards is the total number of Pods created. + ## Note that scaling down shards will not reshard data onto remaining instances, it must be manually moved. + ## Increasing shards will not reshard data either but it will continue to be available from the same instances. + ## To query globally use Thanos sidecar and Thanos querier or remote write data to a central location. + ## Sharding is done on the content of the `__address__` target meta-label. 
+ ## + shards: 1 + + ## Log level for Prometheus be configured in + ## + logLevel: info + + ## Log format for Prometheus be configured in + ## + logFormat: logfmt + + ## Prefix used to register routes, overriding externalUrl route. + ## Useful for proxies that rewrite URLs. + ## + routePrefix: / + + ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata + ## Metadata Labels and Annotations gets propagated to the prometheus pods. + ## + podMetadata: {} + # labels: + # app: prometheus + # k8s-app: prometheus + + ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node. + ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided. + ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node. + ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured. + podAntiAffinity: "" + + ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity. + ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone + ## + podAntiAffinityTopologyKey: kubernetes.io/hostname + + ## Assign custom affinity rules to the prometheus instance + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + affinity: {} + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/e2e-az-name + # operator: In + # values: + # - e2e-az1 + # - e2e-az2 + + ## The remote_read spec configuration for Prometheus. 
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#remotereadspec + remoteRead: [] + # - url: http://remote1/read + ## additionalRemoteRead is appended to remoteRead + additionalRemoteRead: [] + + ## The remote_write spec configuration for Prometheus. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#remotewritespec + remoteWrite: [] + # - url: http://remote1/push + ## additionalRemoteWrite is appended to remoteWrite + additionalRemoteWrite: [] + + ## Enable/Disable Grafana dashboards provisioning for prometheus remote write feature + remoteWriteDashboards: false + + ## Resource limits & requests + ## + resources: {} + # requests: + # memory: 400Mi + + ## Prometheus StorageSpec for persistent data + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md + ## + storageSpec: + ## Using PersistentVolumeClaim + ## + volumeClaimTemplate: + spec: + storageClassName: standard + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 500Gi + #selector: {} + + ## Using tmpfs volume + ## + # emptyDir: + # medium: Memory + + # Additional volumes on the output StatefulSet definition. + volumes: [] + + # Additional VolumeMounts on the output StatefulSet definition. + volumeMounts: [] + + ## AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations + ## are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form + ## as specified in the official Prometheus documentation: + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are + ## appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility + ## to break upgrades of Prometheus. 
It is advised to review Prometheus release notes to ensure that no incompatible + ## scrape configs are going to break Prometheus after the upgrade. + ## AdditionalScrapeConfigs can be defined as a list or as a templated string. + ## + ## The scrape configuration example below will find master nodes, provided they have the name .*mst.*, relabel the + ## port to 2379 and allow etcd scraping provided it is running on all Kubernetes master nodes + ## + additionalScrapeConfigs: + - job_name: statsd_ambassador_gen3 + metrics_path: '/metrics' + static_configs: + - targets: + - statsd-exporter.default.svc.cluster.local:9102 + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - job_name: kubecost + honor_labels: true + scrape_interval: 1m + scrape_timeout: 10s + metrics_path: /metrics + scheme: http + static_configs: + - targets: + - kubecost-cost-analyzer.kubecost.svc.cluster.local:9003 + + # If scrape config contains a repetitive section, you may want to use a template. 
+ # In the following example, you can see how to define `gce_sd_configs` for multiple zones + + + #additionalScrapeConfigs: [] + # - job_name: kube-etcd + # kubernetes_sd_configs: + # - role: node + # scheme: https + # tls_config: + # ca_file: /etc/prometheus/secrets/etcd-client-cert/etcd-ca + # cert_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client + # key_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key + # relabel_configs: + # - action: labelmap + # regex: __meta_kubernetes_node_label_(.+) + # - source_labels: [__address__] + # action: replace + # targetLabel: __address__ + # regex: ([^:;]+):(\d+) + # replacement: ${1}:2379 + # - source_labels: [__meta_kubernetes_node_name] + # action: keep + # regex: .*mst.* + # - source_labels: [__meta_kubernetes_node_name] + # action: replace + # targetLabel: node + # regex: (.*) + # replacement: ${1} + # metric_relabel_configs: + # - regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone) + # action: labeldrop + # + ## If scrape config contains a repetitive section, you may want to use a template. + ## In the following example, you can see how to define `gce_sd_configs` for multiple zones + # additionalScrapeConfigs: | + # - job_name: "node-exporter" + # gce_sd_configs: + # {{range $zone := .Values.gcp_zones}} + # - project: "project1" + # zone: "{{$zone}}" + # port: 9100 + # {{end}} + # relabel_configs: + # ... + + + ## If additional scrape configurations are already deployed in a single secret file you can use this section. + ## Expected values are the secret name and key + ## Cannot be used with additionalScrapeConfigs + additionalScrapeConfigsSecret: {} + # enabled: false + # name: + # key: + + ## additionalPrometheusSecretsAnnotations allows to add annotations to the kubernetes secret. 
This can be useful + ## when deploying via spinnaker to disable versioning on the secret, strategy.spinnaker.io/versioned: 'false' + additionalPrometheusSecretsAnnotations: {} + + ## AdditionalAlertManagerConfigs allows for manual configuration of alertmanager jobs in the form as specified + ## in the official Prometheus documentation https://prometheus.io/docs/prometheus/latest/configuration/configuration/#. + ## AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator. + ## As AlertManager configs are appended, the user is responsible to make sure it is valid. Note that using this + ## feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release + ## notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade. + ## + additionalAlertManagerConfigs: [] + # - consul_sd_configs: + # - server: consul.dev.test:8500 + # scheme: http + # datacenter: dev + # tag_separator: ',' + # services: + # - metrics-prometheus-alertmanager + + ## If additional alertmanager configurations are already deployed in a single secret, or you want to manage + ## them separately from the helm deployment, you can use this section. + ## Expected values are the secret name and key + ## Cannot be used with additionalAlertManagerConfigs + additionalAlertManagerConfigsSecret: {} + # name: + # key: + + ## AdditionalAlertRelabelConfigs allows specifying Prometheus alert relabel configurations. Alert relabel configurations specified are appended + ## to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the + ## official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs. + ## As alert relabel configs are appended, the user is responsible to make sure it is valid. 
Note that using this feature may expose the + ## possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible alert relabel + ## configs are going to break Prometheus after the upgrade. + ## + additionalAlertRelabelConfigs: [] + # - separator: ; + # regex: prometheus_replica + # replacement: $1 + # action: labeldrop + + ## If additional alert relabel configurations are already deployed in a single secret, or you want to manage + ## them separately from the helm deployment, you can use this section. + ## Expected values are the secret name and key + ## Cannot be used with additionalAlertRelabelConfigs + additionalAlertRelabelConfigsSecret: {} + # name: + # key: + + ## SecurityContext holds pod-level security attributes and common container settings. + ## This defaults to non root user with uid 1000 and gid 2000. + ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md + ## + securityContext: + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 2000 + + ## Priority class assigned to the Pods + ## + priorityClassName: "" + + ## Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment. + ## This section is experimental, it may change significantly without deprecation notice in any release. + ## This is experimental and may change significantly without backward compatibility in any release. 
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#thanosspec + ## + thanos: + objectStorageConfig: + key: thanos.yaml + name: thanos-objstore-config + image: quay.io/thanos/thanos:v0.25.2 + #image: thanosio/thanos:v0.24.0 + version: v0.25.2 + tag: v0.25.2 + # secretProviderClass: + # provider: s3 + # parameters: + # secrets: | + # - resourceName: "projects/$PROJECT_ID/secrets/testsecret/versions/latest" + # fileName: "objstore.yaml" + # objectStorageConfigFile: /var/secrets/object-store.yaml + + ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod. + ## if using proxy extraContainer update targetPort with proxy container port + containers: [] + # containers: + # - name: oauth-proxy + # image: quay.io/oauth2-proxy/oauth2-proxy:v7.1.2 + # args: + # - --upstream=http://127.0.0.1:9093 + # - --http-address=0.0.0.0:8081 + # - ... + # ports: + # - containerPort: 8081 + # name: oauth-proxy + # protocol: TCP + # resources: {} + + ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes + ## (permissions, dir tree) on mounted volumes before starting prometheus + initContainers: [] + + ## PortName to use for Prometheus. + ## + portName: "http-web" + + ## ArbitraryFSAccessThroughSMs configures whether configuration based on a service monitor can access arbitrary files + ## on the file system of the Prometheus container e.g. bearer token files. + arbitraryFSAccessThroughSMs: false + + ## OverrideHonorLabels if set to true overrides all user configured honor_labels. If HonorLabels is set in ServiceMonitor + ## or PodMonitor to true, this overrides honor_labels to false. + overrideHonorLabels: false + + ## OverrideHonorTimestamps allows to globally enforce honoring timestamps in all scrape configs. 
+ overrideHonorTimestamps: false + + ## IgnoreNamespaceSelectors if set to true will ignore NamespaceSelector settings from the podmonitor and servicemonitor + ## configs, and they will only discover endpoints within their current namespace. Defaults to false. + ignoreNamespaceSelectors: false + + ## EnforcedNamespaceLabel enforces adding a namespace label of origin for each alert and metric that is user created. + ## The label value will always be the namespace of the object that is being created. + ## Disabled by default + enforcedNamespaceLabel: "" + + ## PrometheusRulesExcludedFromEnforce - list of prometheus rules to be excluded from enforcing of adding namespace labels. + ## Works only if enforcedNamespaceLabel set to true. Make sure both ruleNamespace and ruleName are set for each pair + ## Deprecated, use `excludedFromEnforcement` instead + prometheusRulesExcludedFromEnforce: [] + + ## ExcludedFromEnforcement - list of object references to PodMonitor, ServiceMonitor, Probe and PrometheusRule objects + ## to be excluded from enforcing a namespace label of origin. + ## Works only if enforcedNamespaceLabel set to true. + ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#objectreference + excludedFromEnforcement: [] + + ## QueryLogFile specifies the file to which PromQL queries are logged. Note that this location must be writable, + ## and can be persisted using an attached volume. Alternatively, the location can be set to a stdout location such + ## as /dev/stdout to log query information to the default Prometheus log stream. This is only available in versions + ## of Prometheus >= 2.16.0. For more details, see the Prometheus docs (https://prometheus.io/docs/guides/query-log/) + queryLogFile: false + + ## EnforcedSampleLimit defines global limit on number of scraped samples that will be accepted. This overrides any SampleLimit + ## set per ServiceMonitor or/and PodMonitor.
It is meant to be used by admins to enforce the SampleLimit to keep overall + ## number of samples/series under the desired limit. Note that if SampleLimit is lower that value will be taken instead. + enforcedSampleLimit: false + + ## EnforcedTargetLimit defines a global limit on the number of scraped targets. This overrides any TargetLimit set + ## per ServiceMonitor or/and PodMonitor. It is meant to be used by admins to enforce the TargetLimit to keep the overall + ## number of targets under the desired limit. Note that if TargetLimit is lower, that value will be taken instead, except + ## if either value is zero, in which case the non-zero value will be used. If both values are zero, no limit is enforced. + enforcedTargetLimit: false + + + ## Per-scrape limit on number of labels that will be accepted for a sample. If more than this number of labels are present + ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions + ## 2.27.0 and newer. + enforcedLabelLimit: false + + ## Per-scrape limit on length of labels name that will be accepted for a sample. If a label name is longer than this number + ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions + ## 2.27.0 and newer. + enforcedLabelNameLengthLimit: false + + ## Per-scrape limit on length of labels value that will be accepted for a sample. If a label value is longer than this + ## number post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus + ## versions 2.27.0 and newer. + enforcedLabelValueLengthLimit: false + + ## AllowOverlappingBlocks enables vertical compaction and vertical query merge in Prometheus. This is still experimental + ## in Prometheus so it may change in any upcoming release. 
+ allowOverlappingBlocks: false + + additionalRulesForClusterRole: [] + # - apiGroups: [ "" ] + # resources: + # - nodes/proxy + # verbs: [ "get", "list", "watch" ] + + additionalServiceMonitors: [] + ## Name of the ServiceMonitor to create + ## + # - name: "" + + ## Additional labels to set used for the ServiceMonitorSelector. Together with standard labels from + ## the chart + ## + # additionalLabels: {} + + ## Service label for use in assembling a job name of the form