diff --git a/.github/workflows/build_awshelper.yaml b/.github/workflows/build_awshelper.yaml
new file mode 100644
index 000000000..36b5745db
--- /dev/null
+++ b/.github/workflows/build_awshelper.yaml
@@ -0,0 +1,19 @@
+name: Build awshelper image
+
+# Always build this image because it contains all the cloud-automation files.
+# Some jobs depend on arbitrary files and we need to test them with updated awshelper images.
+on: push
+
+jobs:
+  awshelper:
+    name: awshelper
+    uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
+    with:
+      DOCKERFILE_LOCATION: "./Docker/awshelper/Dockerfile"
+      OVERRIDE_REPO_NAME: "awshelper"
+    secrets:
+      ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
+      ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
+      QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
+      QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
+
diff --git a/.github/workflows/build_python3.10.yaml b/.github/workflows/build_python3.10.yaml
new file mode 100644
index 000000000..80d2d7623
--- /dev/null
+++ b/.github/workflows/build_python3.10.yaml
@@ -0,0 +1,23 @@
+name: Build Python 3.10 image
+
+on:
+  push:
+    paths:
+      - .github/workflows/build_python3.10.yaml
+      - Docker/python-nginx/python3.10-buster/**
+
+jobs:
+  python_3-10:
+    name: Python 3.10
+    uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
+    with:
+      DOCKERFILE_LOCATION: "./Docker/python-nginx/python3.10-buster/Dockerfile"
+      DOCKERFILE_BUILD_CONTEXT: "./Docker/python-nginx/python3.10-buster"
+      OVERRIDE_REPO_NAME: "python"
+      OVERRIDE_TAG_NAME: "python3.10-buster-$(echo ${GITHUB_REF#refs/*/} | tr / _)"
+    secrets:
+      ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
+      ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
+      QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
+      QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
+
diff --git a/.github/workflows/build_python3.9.yaml b/.github/workflows/build_python3.9.yaml
new file mode 100644
index 000000000..540e0d4ec
--- /dev/null
+++ b/.github/workflows/build_python3.9.yaml
@@ -0,0 +1,23 @@
+name: Build Python 3.9 image
+
+on:
+  push:
+    paths:
+      - .github/workflows/build_python3.9.yaml
+      - Docker/python-nginx/python3.9-buster/**
+
+jobs:
+  python_3-9:
+    name: Python 3.9
+    uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
+    with:
+      DOCKERFILE_LOCATION: "./Docker/python-nginx/python3.9-buster/Dockerfile"
+      DOCKERFILE_BUILD_CONTEXT: "./Docker/python-nginx/python3.9-buster"
+      OVERRIDE_REPO_NAME: "python"
+      OVERRIDE_TAG_NAME: "python3.9-buster-$(echo ${GITHUB_REF#refs/*/} | tr / _)"
+    secrets:
+      ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
+      ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
+      QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
+      QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
+
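The `OVERRIDE_TAG_NAME` value in the two Python workflows above is expanded by a shell step inside the reusable workflow, not by Actions expression syntax. A minimal local sanity check of the same expansion, with `GITHUB_REF` set by hand the way Actions sets it on a branch push, looks like this:

```bash
# Hypothetical local check of the OVERRIDE_TAG_NAME tag derivation.
# On a push, Actions sets GITHUB_REF to refs/heads/<branch>; we fake it here.
GITHUB_REF="refs/heads/feat/py310-bump"

# ${GITHUB_REF#refs/*/} strips the shortest "refs/*/" prefix (refs/heads/),
# and tr rewrites the remaining slashes as underscores so the result is a
# valid Docker tag component.
echo "python3.10-buster-$(echo ${GITHUB_REF#refs/*/} | tr / _)"
# prints: python3.10-buster-feat_py310-bump
```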
diff --git a/.github/workflows/image_build_push.yaml b/.github/workflows/image_build_push.yaml
deleted file mode 100644
index 51543f0fe..000000000
--- a/.github/workflows/image_build_push.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-name: Build Python Base Images and Push to Quay and ECR
-
-on: push
-
-jobs:
-  python_3-9:
-    name: Python 3.9 Build and Push
-    uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
-    with:
-      DOCKERFILE_LOCATION: "./Docker/python-nginx/python3.9-buster/Dockerfile"
-      DOCKERFILE_BUILD_CONTEXT: "./Docker/python-nginx/python3.9-buster"
-      OVERRIDE_REPO_NAME: "python"
-      OVERRIDE_TAG_NAME: "python3.9-buster-$(echo ${GITHUB_REF#refs/*/} | tr / _)"
-    secrets:
-      ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
-      ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
-      QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
-      QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
-  python_3-10:
-    name: Python 3.10 Build and Push
-    uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
-    with:
-      DOCKERFILE_LOCATION: "./Docker/python-nginx/python3.10-buster/Dockerfile"
-      DOCKERFILE_BUILD_CONTEXT: "./Docker/python-nginx/python3.10-buster"
-      OVERRIDE_REPO_NAME: "python"
-      OVERRIDE_TAG_NAME: "python3.10-buster-$(echo ${GITHUB_REF#refs/*/} | tr / _)"
-    secrets:
-      ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
-      ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
-      QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
-      QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
-  awshelper:
-    name: AwsHelper Build and Push
-    uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
-    with:
-      DOCKERFILE_LOCATION: "./Docker/awshelper/Dockerfile"
-      OVERRIDE_REPO_NAME: "awshelper"
-    secrets:
-      ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
-      ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
-      QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
-      QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
diff --git a/.github/workflows/image_build_push_jenkins.yaml b/.github/workflows/image_build_push_jenkins.yaml
index ffea50ace..094417fe5 100644
--- a/.github/workflows/image_build_push_jenkins.yaml
+++ b/.github/workflows/image_build_push_jenkins.yaml
@@ -1,58 +1,63 @@
-name: Build Jenkins images and push to Quay
+name: Build Jenkins images
 
 on:
   push:
     paths:
+      - .github/workflows/image_build_push_jenkins.yaml
       - Docker/jenkins/**
 
 jobs:
   jenkins:
-    name: Jenkins Build and Push
+    name: Jenkins
     uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
     with:
       DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins/Dockerfile"
       DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins"
       OVERRIDE_REPO_NAME: "jenkins"
       USE_QUAY_ONLY: true
+      BUILD_PLATFORMS: "linux/amd64"
     secrets:
       ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
       ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
       QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
       QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
   jenkins2:
-    name: Jenkins2 Build and Push
+    name: Jenkins2
     uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
     with:
       DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins2/Dockerfile"
       DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins2"
       OVERRIDE_REPO_NAME: "jenkins2"
       USE_QUAY_ONLY: true
+      BUILD_PLATFORMS: "linux/amd64"
    secrets:
       ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
       ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
       QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
       QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
   jenkins-ci-worker:
-    name: Jenkins-CI-Worker Build and Push
+    name: Jenkins-CI-Worker
     uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
     with:
       DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins-CI-Worker/Dockerfile"
       DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins-CI-Worker"
       OVERRIDE_REPO_NAME: "gen3-ci-worker"
       USE_QUAY_ONLY: true
+      BUILD_PLATFORMS: "linux/amd64"
     secrets:
       ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
       ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
       QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
       QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
   jenkins-qa-worker:
-    name: Jenkins-QA-Worker Build and Push
+    name: Jenkins-QA-Worker
     uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
     with:
       DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins-Worker/Dockerfile"
       DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins-Worker"
       OVERRIDE_REPO_NAME: "gen3-qa-worker"
       USE_QUAY_ONLY: true
+      BUILD_PLATFORMS: "linux/amd64"
     secrets:
       ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
       ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
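The new `BUILD_PLATFORMS` input pins the Jenkins images to a single architecture. The actual build step lives in the reusable workflow in uc-cdis/.github, so the invocation below is only a sketch of what a single-platform buildx build amounts to; the image name and tag are made up for illustration:

```bash
# Sketch of a single-platform docker buildx build, roughly what a
# BUILD_PLATFORMS: "linux/amd64" input would translate to inside the
# reusable image_build_push workflow (assumed, not confirmed by this diff).
docker buildx build \
  --platform linux/amd64 \
  --file ./Docker/jenkins/Jenkins/Dockerfile \
  --tag quay.io/cdis/jenkins:example-tag \
  --push \
  ./Docker/jenkins/Jenkins
```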
diff --git a/.github/workflows/image_build_push_squid.yaml b/.github/workflows/image_build_push_squid.yaml
index 2849f0cc5..ce1761d3c 100644
--- a/.github/workflows/image_build_push_squid.yaml
+++ b/.github/workflows/image_build_push_squid.yaml
@@ -1,13 +1,14 @@
-name: Build Squid images and push to Quay
+name: Build Squid images
 
 on:
   push:
     paths:
+      - .github/workflows/image_build_push_squid.yaml
       - Docker/squid/**
 
 jobs:
   squid:
-    name: Squid Build and Push
+    name: Squid image
     uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
     with:
       DOCKERFILE_LOCATION: "./Docker/squid/Dockerfile"
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 2e3ce795b..82034495d 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,6 +1,6 @@
 repos:
 - repo: git@github.com:Yelp/detect-secrets
-  rev: v0.13.1
+  rev: v1.4.0
   hooks:
   - id: detect-secrets
     args: ['--baseline', '.secrets.baseline']
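The baseline rewrite below follows from this detect-secrets bump from v0.13.1 to v1.4.0: the 1.x series uses a versioned schema with a `filters_used` section and a `filename` field on every result, so the whole file is regenerated. Assuming the standard detect-secrets CLI, the regeneration can be reproduced with:

```bash
pip install detect-secrets==1.4.0                  # same rev as the pre-commit hook above
detect-secrets scan --baseline .secrets.baseline   # rescan and rewrite the baseline in place
detect-secrets audit .secrets.baseline             # interactively review each finding
```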
"detect_secrets.filters.heuristic.is_sequential_string" + }, + { + "path": "detect_secrets.filters.heuristic.is_swagger_file" + }, + { + "path": "detect_secrets.filters.heuristic.is_templated_secret" + } + ], "results": { "Chef/repo/data_bags/README.md": [ { - "hashed_secret": "8a9250639e092d90f164792e35073a9395bff366", - "is_secret": false, - "is_verified": false, - "line_number": 45, - "type": "Secret Keyword" - }, - { + "type": "Secret Keyword", + "filename": "Chef/repo/data_bags/README.md", "hashed_secret": "6367c48dd193d56ea7b0baad25b19455e529f5ee", - "is_secret": false, "is_verified": false, - "line_number": 51, - "type": "Secret Keyword" + "line_number": 38 } ], - "Docker/jenkins/Jenkins-CI-Worker/Dockerfile": [ + "Docker/sidecar/service.key": [ { - "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", + "type": "Private Key", + "filename": "Docker/sidecar/service.key", + "hashed_secret": "1348b145fa1a555461c1b790a2f66614781091e9", "is_verified": false, - "line_number": 122, - "type": "Secret Keyword" + "line_number": 1 } ], - "Docker/jenkins/Jenkins-Worker/Dockerfile": [ + "Jenkins/Stacks/Jenkins/jenkins.env.sample": [ { - "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", + "type": "Secret Keyword", + "filename": "Jenkins/Stacks/Jenkins/jenkins.env.sample", + "hashed_secret": "f41a52528dd2d592d2c05de5f388101c2948aa98", "is_verified": false, - "line_number": 136, - "type": "Secret Keyword" + "line_number": 5 } ], - "Docker/jenkins/Jenkins/Dockerfile": [ + "Jenkinsfile": [ { - "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", + "type": "Secret Keyword", + "filename": "Jenkinsfile", + "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", "is_verified": false, - "line_number": 110, - "type": "Secret Keyword" - } - ], - "Docker/jenkins/Jenkins2/Dockerfile": [ + "line_number": 144 + }, { - "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", + "type": "Secret Keyword", + "filename": "Jenkinsfile", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 110, - "type": "Secret Keyword" + "line_number": 147 } ], - "Docker/sidecar/service.key": [ + "ansible/roles/slurm/README.md": [ { - "hashed_secret": "1348b145fa1a555461c1b790a2f66614781091e9", - "is_secret": false, + "type": "Base64 High Entropy String", + "filename": "ansible/roles/slurm/README.md", + "hashed_secret": "4acfde1ff9c353ba2ef0dbe0df73bda2743cba42", "is_verified": false, - "line_number": 1, - "type": "Private Key" + "line_number": 86 } ], - "Jenkins/Stacks/Jenkins/jenkins.env.sample": [ + "apis_configs/fence_settings.py": [ { - "hashed_secret": "eecee33686ac5861c2a7edc8b46bd0e5432bfddd", - "is_secret": false, + "type": "Basic Auth Credentials", + "filename": "apis_configs/fence_settings.py", + "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", "is_verified": false, - "line_number": 5, - "type": "Secret Keyword" + "line_number": 80 } ], - "ansible/roles/awslogs/defaults/main.yaml": [ + "apis_configs/peregrine_settings.py": [ { - "hashed_secret": "9d4e1e23bd5b727046a9e3b4b7db57bd8d6ee684", - "is_secret": false, + "type": "Basic Auth Credentials", + "filename": "apis_configs/peregrine_settings.py", + "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", "is_verified": false, - "line_number": 30, - "type": "Basic Auth Credentials" + "line_number": 46 } ], - "ansible/roles/slurm/README.md": [ - { - "hashed_secret": "4acfde1ff9c353ba2ef0dbe0df73bda2743cba42", - "is_secret": false, - "is_verified": false, - "line_number": 86, - 
"type": "Base64 High Entropy String" - }, + "apis_configs/sheepdog_settings.py": [ { - "hashed_secret": "579649582303921502d9e6d3f8755f13fdd2b476", - "is_secret": false, + "type": "Basic Auth Credentials", + "filename": "apis_configs/sheepdog_settings.py", + "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", "is_verified": false, - "line_number": 86, - "type": "Secret Keyword" + "line_number": 46 } ], - "apis_configs/config_helper.py": [ + "aws-inspec/kubernetes/chef_inspec-cron.yaml": [ { - "hashed_secret": "bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "aws-inspec/kubernetes/chef_inspec-cron.yaml", + "hashed_secret": "a3ba27250861948a554629a0e21168821ddfa9f1", "is_verified": false, - "line_number": 66, - "type": "Basic Auth Credentials" + "line_number": 35 } ], - "apis_configs/fence_credentials.json": [ + "doc/api.md": [ { - "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, + "type": "Hex High Entropy String", + "filename": "doc/api.md", + "hashed_secret": "625de83a7517422051911680cc803921ff99db90", "is_verified": false, - "line_number": 23, - "type": "Secret Keyword" + "line_number": 47 } ], - "apis_configs/fence_settings.py": [ + "doc/gen3OnK8s.md": [ { - "hashed_secret": "3ef0fb8a603abdc0b6caac44a23fdc6792f77ddf", - "is_secret": false, + "type": "Secret Keyword", + "filename": "doc/gen3OnK8s.md", + "hashed_secret": "55c100ba37d2df35ec1e5f5d6302f060387df6cc", "is_verified": false, - "line_number": 6, - "type": "Basic Auth Credentials" + "line_number": 113 }, { - "hashed_secret": "b60d121b438a380c343d5ec3c2037564b82ffef3", - "is_secret": false, + "type": "Secret Keyword", + "filename": "doc/gen3OnK8s.md", + "hashed_secret": "262d8e9b8ac5f06e7612dfb608f7267f88679801", "is_verified": false, - "line_number": 58, - "type": "Secret Keyword" + "line_number": 120 }, { - "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", - "is_secret": false, + "type": "Secret Keyword", + "filename": "doc/gen3OnK8s.md", + "hashed_secret": "1c17e556736c4d23933f99d199e7c2c572895fd2", + "is_verified": false, + "line_number": 143 + }, + { + "type": "Secret Keyword", + "filename": "doc/gen3OnK8s.md", + "hashed_secret": "76a4acaf31b815aa2c41cc2a2176b11fa9edf00a", + "is_verified": false, + "line_number": 145 + }, + { + "type": "Secret Keyword", + "filename": "doc/gen3OnK8s.md", + "hashed_secret": "9d678cbce5a343920f754d5836f03346ee01cde5", "is_verified": false, - "line_number": 80, - "type": "Basic Auth Credentials" + "line_number": 154 } ], - "apis_configs/indexd_settings.py": [ + "files/scripts/psql-fips-fix.sh": [ { - "hashed_secret": "0a0d18c85e096611b5685b62bc60ec534d19bacc", - "is_secret": false, + "type": "Secret Keyword", + "filename": "files/scripts/psql-fips-fix.sh", + "hashed_secret": "2f1aa1e2a58704b452a5dd60ab1bd2b761bf296a", "is_verified": false, - "line_number": 59, - "type": "Basic Auth Credentials" + "line_number": 9 } ], - "apis_configs/peregrine_settings.py": [ + "gen3/bin/bucket-manifest.sh": [ { - "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/bin/bucket-manifest.sh", + "hashed_secret": "2be88ca4242c76e8253ac62474851065032d6833", "is_verified": false, - "line_number": 46, - "type": "Basic Auth Credentials" + "line_number": 58 } ], - "apis_configs/sheepdog_settings.py": [ + "gen3/bin/bucket-replicate.sh": [ { - "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", - "is_secret": false, + "type": 
"Secret Keyword", + "filename": "gen3/bin/bucket-replicate.sh", + "hashed_secret": "2be88ca4242c76e8253ac62474851065032d6833", "is_verified": false, - "line_number": 46, - "type": "Basic Auth Credentials" + "line_number": 39 } ], - "doc/Gen3-data-upload.md": [ + "gen3/bin/secrets.sh": [ { - "hashed_secret": "b8bd20d4a2701dc3aba0efbbf325f1359392d93e", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/bin/secrets.sh", + "hashed_secret": "fb6220478aaba649aac37271a1d7c6317abc03a6", "is_verified": false, - "line_number": 26, - "type": "Secret Keyword" + "line_number": 135 } ], - "doc/api.md": [ + "gen3/lib/aws.sh": [ { - "hashed_secret": "625de83a7517422051911680cc803921ff99db90", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/lib/aws.sh", + "hashed_secret": "6b44a330b450ee550c081410c6b705dfeaa105ce", "is_verified": false, - "line_number": 47, - "type": "Hex High Entropy String" + "line_number": 640 } ], - "doc/gen3OnK8s.md": [ + "gen3/lib/bootstrap/templates/Gen3Secrets/apis_configs/fence-config.yaml": [ { - "hashed_secret": "2db6d21d365f544f7ca3bcfb443ac96898a7a069", - "is_secret": false, + "type": "Basic Auth Credentials", + "filename": "gen3/lib/bootstrap/templates/Gen3Secrets/apis_configs/fence-config.yaml", + "hashed_secret": "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3", "is_verified": false, - "line_number": 113, - "type": "Secret Keyword" - }, + "line_number": 33 + } + ], + "gen3/lib/bootstrap/templates/cdis-manifest/manifests/sower/sower.json": [ { - "hashed_secret": "ff9ee043d85595eb255c05dfe32ece02a53efbb2", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/lib/bootstrap/templates/cdis-manifest/manifests/sower/sower.json", + "hashed_secret": "0447a636536df0264b2000403fbefd69f603ceb1", "is_verified": false, - "line_number": 143, - "type": "Secret Keyword" + "line_number": 54 }, { - "hashed_secret": "70374248fd7129088fef42b8f568443f6dce3a48", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/lib/bootstrap/templates/cdis-manifest/manifests/sower/sower.json", + "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966", "is_verified": false, - "line_number": 170, - "type": "Secret Keyword" + "line_number": 60 }, { - "hashed_secret": "bcf22dfc6fb76b7366b1f1675baf2332a0e6a7ce", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/lib/bootstrap/templates/cdis-manifest/manifests/sower/sower.json", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", "is_verified": false, - "line_number": 189, - "type": "Secret Keyword" + "line_number": 108 } ], - "doc/kube-setup-data-ingestion-job.md": [ + "gen3/lib/onprem.sh": [ + { + "type": "Secret Keyword", + "filename": "gen3/lib/onprem.sh", + "hashed_secret": "29e52a9bac8f274fa41c51fce9c98eba0dd99cb3", + "is_verified": false, + "line_number": 68 + }, { - "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/lib/onprem.sh", + "hashed_secret": "50f013532a9770a2c2cfdc38b7581dd01df69b70", "is_verified": false, - "line_number": 30, - "type": "Secret Keyword" + "line_number": 84 } ], - "doc/logs.md": [ + "gen3/lib/testData/default/expectedFenceResult.yaml": [ + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 68 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + 
"hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 71 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 74 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 84 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 87 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 90 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 93 + }, { - "hashed_secret": "9addbf544119efa4a64223b649750a510f0d463f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", "is_verified": false, - "line_number": 6, - "type": "Secret Keyword" + "line_number": 96 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 99 } ], - "doc/slurm_cluster.md": [ + "gen3/lib/testData/default/expectedSheepdogResult.yaml": [ + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedSheepdogResult.yaml", + "hashed_secret": "ec9c944c51e87322de8d22e3ca9e2be1ad8fee0d", + "is_verified": false, + "line_number": 60 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedSheepdogResult.yaml", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", + "is_verified": false, + "line_number": 63 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedSheepdogResult.yaml", + "hashed_secret": "e43756046ad1763d6946575fed0e05130a154bd2", + "is_verified": false, + "line_number": 69 + }, { - "hashed_secret": "2ace62c1befa19e3ea37dd52be9f6d508c5163e6", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedSheepdogResult.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 184, - "type": "Secret Keyword" + "line_number": 72 } ], - "files/dashboard/usage-reports/package-lock.json": [ + "gen3/lib/testData/etlconvert/expected2.yaml": [ + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/expected2.yaml", + "hashed_secret": "fe54e5e937d642307ec155b47ac8a214cb40d474", + "is_verified": false, + "line_number": 10 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/expected2.yaml", + "hashed_secret": "cea0e701e53c42bede2212b22f58f9ff8324da55", + "is_verified": false, + "line_number": 13 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/expected2.yaml", + "hashed_secret": "d98d72830f08c9a8b96ed11d3d96ae9e71b72a26", + "is_verified": false, + "line_number": 16 + 
}, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/expected2.yaml", + "hashed_secret": "667fd45d415f73f4132cf0ed11452beb51117b12", + "is_verified": false, + "line_number": 18 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/expected2.yaml", + "hashed_secret": "c2599d515ba3be74ed58821485ba769fc565e424", + "is_verified": false, + "line_number": 33 + }, { - "hashed_secret": "65ecd0650541b6caecdb6986f1871c2e6a95bdfe", - "is_secret": false, + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/expected2.yaml", + "hashed_secret": "6ec5eb29e2884f0c9731493b38902e37c2d672ba", "is_verified": false, - "line_number": 10, - "type": "Base64 High Entropy String" + "line_number": 35 }, { - "hashed_secret": "e35a49e53bb97044b35cc0e4d963b4ac49e9ac7e", - "is_secret": false, + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/expected2.yaml", + "hashed_secret": "99126b74731670a59b663d5320712564ec7b5f22", "is_verified": false, - "line_number": 15, - "type": "Base64 High Entropy String" + "line_number": 36 } ], - "gen3/bin/api.sh": [ + "gen3/lib/testData/etlconvert/users2.yaml": [ + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/users2.yaml", + "hashed_secret": "cea0e701e53c42bede2212b22f58f9ff8324da55", + "is_verified": false, + "line_number": 543 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/users2.yaml", + "hashed_secret": "d98d72830f08c9a8b96ed11d3d96ae9e71b72a26", + "is_verified": false, + "line_number": 553 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/users2.yaml", + "hashed_secret": "fe54e5e937d642307ec155b47ac8a214cb40d474", + "is_verified": false, + "line_number": 558 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/users2.yaml", + "hashed_secret": "667fd45d415f73f4132cf0ed11452beb51117b12", + "is_verified": false, + "line_number": 568 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/users2.yaml", + "hashed_secret": "c2599d515ba3be74ed58821485ba769fc565e424", + "is_verified": false, + "line_number": 643 + }, { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/users2.yaml", + "hashed_secret": "6ec5eb29e2884f0c9731493b38902e37c2d672ba", "is_verified": false, - "line_number": 407, - "type": "Secret Keyword" + "line_number": 653 }, { - "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a", - "is_secret": false, + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/users2.yaml", + "hashed_secret": "99126b74731670a59b663d5320712564ec7b5f22", "is_verified": false, - "line_number": 477, - "type": "Secret Keyword" + "line_number": 658 } ], - "gen3/bin/kube-dev-namespace.sh": [ + "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml": [ + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 71 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 74 + }, + { + "type": 
"Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 77 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 87 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 90 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 93 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 96 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", + "is_verified": false, + "line_number": 99 + }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 135, - "type": "Secret Keyword" + "line_number": 102 } ], - "gen3/bin/kube-setup-argo.sh": [ + "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml": [ + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml", + "hashed_secret": "ec9c944c51e87322de8d22e3ca9e2be1ad8fee0d", + "is_verified": false, + "line_number": 63 + }, { - "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", "is_verified": false, - "line_number": 217, - "type": "Secret Keyword" + "line_number": 66 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml", + "hashed_secret": "e43756046ad1763d6946575fed0e05130a154bd2", + "is_verified": false, + "line_number": 72 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 75 } ], - "gen3/bin/kube-setup-certs.sh": [ + "gen3/test/secretsTest.sh": [ { - "hashed_secret": "2e9ee120fd25e31048598693aca91d5473898a99", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/test/secretsTest.sh", + "hashed_secret": "c2c715092ef59cba22520f109f041efca84b8938", "is_verified": false, - "line_number": 50, - "type": "Secret Keyword" + "line_number": 25 } ], - "gen3/bin/kube-setup-dashboard.sh": [ + "gen3/test/terraformTest.sh": [ { - "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "6b44a330b450ee550c081410c6b705dfeaa105ce", "is_verified": false, - "line_number": 40, - "type": 
"Secret Keyword" + "line_number": 156 }, { - "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "d869db7fe62fb07c25a0403ecaea55031744b5fb", "is_verified": false, - "line_number": 41, - "type": "Secret Keyword" - } - ], - "gen3/bin/kube-setup-data-ingestion-job.sh": [ + "line_number": 163 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "1cc07dccfdf640eb0e403e490a873a5536759009", + "is_verified": false, + "line_number": 172 + }, + { + "type": "Secret Keyword", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "1cc07dccfdf640eb0e403e490a873a5536759009", + "is_verified": false, + "line_number": 172 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "185a71a740ef6b9b21c84e6eaa47b89c7de181ef", + "is_verified": false, + "line_number": 175 + }, { - "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "185a71a740ef6b9b21c84e6eaa47b89c7de181ef", + "is_verified": false, + "line_number": 175 + }, + { + "type": "Secret Keyword", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "212e1d3823c8c9af9e4c0c172164ee292b9a6768", + "is_verified": false, + "line_number": 311 + }, + { + "type": "Secret Keyword", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "cb80dbb67a1a5bdf4957eea1473789f1c65357c6", + "is_verified": false, + "line_number": 312 + }, + { + "type": "Secret Keyword", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "5f35c25f4bf588b5fad46e249fcd9221f5257ce4", "is_verified": false, - "line_number": 37, - "type": "Secret Keyword" + "line_number": 313 }, { - "hashed_secret": "8695a632956b1b0ea7b66993dcc98732da39148c", - "is_secret": false, + "type": "Secret Keyword", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "5308421b43dde5775f1993bd25a8163070d65598", "is_verified": false, - "line_number": 102, - "type": "Secret Keyword" + "line_number": 314 } ], - "gen3/bin/kube-setup-dicom-server.sh": [ + "kube/services/access-backend/access-backend-deploy.yaml": [ { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/access-backend/access-backend-deploy.yaml", + "hashed_secret": "dbf88a0c3d905c669c0fd13bf8172bb34d4b1168", "is_verified": false, - "line_number": 43, - "type": "Secret Keyword" + "line_number": 60 } ], - "gen3/bin/kube-setup-jenkins.sh": [ + "kube/services/acronymbot/acronymbot-deploy.yaml": [ { - "hashed_secret": "05ea760643a5c0a9bacb3544dc844ac79938a51f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/acronymbot/acronymbot-deploy.yaml", + "hashed_secret": "600833390a6b9891d0d8a5f6e3326abb237ac8ca", "is_verified": false, - "line_number": 18, - "type": "Secret Keyword" - }, + "line_number": 49 + } + ], + "kube/services/arborist/arborist-deploy-2.yaml": [ { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/arborist/arborist-deploy-2.yaml", + "hashed_secret": "6c57cdfdaaf3cde7a1da6aa94c7d8e46502c4bab", "is_verified": false, - "line_number": 22, - "type": "Secret Keyword" + "line_number": 59 } ], - "gen3/bin/kube-setup-metadata.sh": [ + 
"kube/services/arborist/arborist-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/arborist/arborist-deploy.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 64 + }, { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/arborist/arborist-deploy.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 35, - "type": "Secret Keyword" + "line_number": 67 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/arborist/arborist-deploy.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 70 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/arborist/arborist-deploy.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 77 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/arborist/arborist-deploy.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 80 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/arborist/arborist-deploy.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 83 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/arborist/arborist-deploy.yaml", + "hashed_secret": "ea73fcfdaa415890d5fde24d3b2245671be32f73", + "is_verified": false, + "line_number": 86 } ], - "gen3/bin/kube-setup-revproxy.sh": [ + "kube/services/argo/workflows/fence-usersync-wf.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 108 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 111 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 114 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 117 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml", + "hashed_secret": "ea73fcfdaa415890d5fde24d3b2245671be32f73", + "is_verified": false, + "line_number": 120 + } + ], + "kube/services/argocd/values.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/argocd/values.yaml", + "hashed_secret": "bfc1b86ce643b65bd540989213254b01fd6ad418", + "is_verified": false, + "line_number": 1489 + } + ], + "kube/services/arranger/arranger-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/arranger/arranger-deploy.yaml", + "hashed_secret": "0db22b31c9add2d3c76743c0ac6fbc99bb8b4761", + "is_verified": false, + "line_number": 61 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/arranger/arranger-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 64 + } + ], + "kube/services/audit-service/audit-service-deploy.yaml": [ + { 
+ "type": "Secret Keyword", + "filename": "kube/services/audit-service/audit-service-deploy.yaml", + "hashed_secret": "42cde1c58c36d8bb5804a076e55ac6ec07ef99fc", + "is_verified": false, + "line_number": 64 + } + ], + "kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml", + "hashed_secret": "7f834ccb442433fc12ec9532f75c3a4b6a748d4c", + "is_verified": false, + "line_number": 46 + } + ], + "kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 56 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml", + "hashed_secret": "5949b79e0c7082dc78d543cde662871a4f8b8913", + "is_verified": false, + "line_number": 59 + } + ], + "kube/services/cogwheel/cogwheel-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/cogwheel/cogwheel-deploy.yaml", + "hashed_secret": "09b772df628fd10bca646b6a877eb661122210ab", + "is_verified": false, + "line_number": 35 + } + ], + "kube/services/cohort-middleware/cohort-middleware-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/cohort-middleware/cohort-middleware-deploy.yaml", + "hashed_secret": "bf22f6c4bd03572f1ef593efc3eb1a7e0b6dcab4", + "is_verified": false, + "line_number": 62 + } + ], + "kube/services/dashboard/dashboard-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/dashboard/dashboard-deploy.yaml", + "hashed_secret": "9e722d12ce045c8718ab803ed465b2fbe199f3d3", + "is_verified": false, + "line_number": 61 + } + ], + "kube/services/datadog/values.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/datadog/values.yaml", + "hashed_secret": "4a8ce7ae6a8a7f2624e232b61b18c2ac9789c44b", + "is_verified": false, + "line_number": 23 + } + ], + "kube/services/datasim/datasim-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/datasim/datasim-deploy.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 63 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/datasim/datasim-deploy.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 66 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/datasim/datasim-deploy.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 72 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/datasim/datasim-deploy.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 76 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/datasim/datasim-deploy.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 79 + } + ], + "kube/services/dicom-server/dicom-server-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/dicom-server/dicom-server-deploy.yaml", + "hashed_secret": "706168ac2565a93cceffe2202ac45d3d31c075fb", + "is_verified": false, + "line_number": 40 + } + ], + "kube/services/fence/fence-canary-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": 
"dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 68 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 71 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 74 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 84 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 87 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 90 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 93 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", + "is_verified": false, + "line_number": 96 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 99 + } + ], + "kube/services/fence/fence-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 71 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 74 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 77 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 87 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 90 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 93 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 96 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", + "is_verified": false, + "line_number": 99 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": 
false, + "line_number": 102 + } + ], + "kube/services/fenceshib/fenceshib-canary-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 62 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 65 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 68 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 78 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 81 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 84 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 87 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", + "is_verified": false, + "line_number": 90 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 93 + } + ], + "kube/services/fenceshib/fenceshib-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 69 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 72 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 75 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 85 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 88 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 91 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 94 + }, + { + "type": "Secret 
Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", + "is_verified": false, + "line_number": 97 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 100 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "6c4789c3be186fd5dcbf06723462ccdd2c86dc37", + "is_verified": false, + "line_number": 103 + } + ], + "kube/services/frontend-framework/frontend-framework-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/frontend-framework/frontend-framework-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 54 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/frontend-framework/frontend-framework-deploy.yaml", + "hashed_secret": "6607b403f74e62246fc6a3c938feffc5a34a7e49", + "is_verified": false, + "line_number": 57 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/frontend-framework/frontend-framework-deploy.yaml", + "hashed_secret": "4b0bb3e58651fe56ee23e59aa6a3cb96dc61ddd2", + "is_verified": false, + "line_number": 60 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/frontend-framework/frontend-framework-deploy.yaml", + "hashed_secret": "e3c7565314f404e3883929f003c65a02a80366e9", + "is_verified": false, + "line_number": 66 + } + ], + "kube/services/frontend-framework/frontend-framework-root-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/frontend-framework/frontend-framework-root-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 54 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/frontend-framework/frontend-framework-root-deploy.yaml", + "hashed_secret": "6607b403f74e62246fc6a3c938feffc5a34a7e49", + "is_verified": false, + "line_number": 57 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/frontend-framework/frontend-framework-root-deploy.yaml", + "hashed_secret": "4b0bb3e58651fe56ee23e59aa6a3cb96dc61ddd2", + "is_verified": false, + "line_number": 60 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/frontend-framework/frontend-framework-root-deploy.yaml", + "hashed_secret": "e3c7565314f404e3883929f003c65a02a80366e9", + "is_verified": false, + "line_number": 66 + } + ], + "kube/services/gdcapi/gdcapi-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/gdcapi/gdcapi-deploy.yaml", + "hashed_secret": "e8c2f0bacaffbf2f9897217c6770413879945296", + "is_verified": false, + "line_number": 38 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/gdcapi/gdcapi-deploy.yaml", + "hashed_secret": "517cded9f3e3ab79237fde330b97a93f5a943316", + "is_verified": false, + "line_number": 41 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/gdcapi/gdcapi-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 44 + } + ], + "kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml", + "hashed_secret": "38ded89f83435a558169dedb91a38f72d6cebf41", + "is_verified": false, + "line_number": 27 + } + ], + 
"kube/services/google-sa-validation/google-sa-validation-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 54 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 57 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 63 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 67 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 70 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 73 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 76 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", + "is_verified": false, + "line_number": 79 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 82 + } + ], + "kube/services/guppy/guppy-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/guppy/guppy-deploy.yaml", + "hashed_secret": "0db22b31c9add2d3c76743c0ac6fbc99bb8b4761", + "is_verified": false, + "line_number": 65 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/guppy/guppy-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 68 + } + ], + "kube/services/indexd/indexd-canary-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/indexd/indexd-canary-deploy.yaml", + "hashed_secret": "0b701c1fabb6ba47a7d47d455e3696d207014bd3", + "is_verified": false, + "line_number": 59 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/indexd/indexd-canary-deploy.yaml", + "hashed_secret": "aee98a99696237d70b6854ee4c2d9e42bc696039", + "is_verified": false, + "line_number": 62 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/indexd/indexd-canary-deploy.yaml", + "hashed_secret": "bdecca54d39013d43d3b7f05f2927eaa7df375dc", + "is_verified": false, + "line_number": 68 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/indexd/indexd-canary-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 71 + } + ], + "kube/services/indexd/indexd-deploy.yaml": [ + { + "type": "Secret Keyword", 
+ "filename": "kube/services/indexd/indexd-deploy.yaml", + "hashed_secret": "0b701c1fabb6ba47a7d47d455e3696d207014bd3", + "is_verified": false, + "line_number": 63 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/indexd/indexd-deploy.yaml", + "hashed_secret": "aee98a99696237d70b6854ee4c2d9e42bc696039", + "is_verified": false, + "line_number": 66 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/indexd/indexd-deploy.yaml", + "hashed_secret": "bdecca54d39013d43d3b7f05f2927eaa7df375dc", + "is_verified": false, + "line_number": 72 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/indexd/indexd-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 75 + } + ], + "kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml", + "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", + "is_verified": false, + "line_number": 143 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 146 + } + ], + "kube/services/jenkins-worker/jenkins-worker-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins-worker/jenkins-worker-deploy.yaml", + "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", + "is_verified": false, + "line_number": 150 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins-worker/jenkins-worker-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 153 + } + ], + "kube/services/jenkins/jenkins-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins/jenkins-deploy.yaml", + "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", + "is_verified": false, + "line_number": 157 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins/jenkins-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 160 + } + ], + "kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml", + "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", + "is_verified": false, + "line_number": 143 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 146 + } + ], + "kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml", + "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", + "is_verified": false, + "line_number": 146 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 149 + } + ], + "kube/services/jenkins2/jenkins2-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins2/jenkins2-deploy.yaml", + "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", + "is_verified": false, + "line_number": 153 + }, 
+ { + "type": "Secret Keyword", + "filename": "kube/services/jenkins2/jenkins2-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 156 + } + ], + "kube/services/jobs/arborist-rm-expired-access-cronjob.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/arborist-rm-expired-access-cronjob.yaml", + "hashed_secret": "6c57cdfdaaf3cde7a1da6aa94c7d8e46502c4bab", + "is_verified": false, + "line_number": 37 + } + ], + "kube/services/jobs/arborist-rm-expired-access-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/arborist-rm-expired-access-job.yaml", + "hashed_secret": "6c57cdfdaaf3cde7a1da6aa94c7d8e46502c4bab", + "is_verified": false, + "line_number": 37 + } + ], + "kube/services/jobs/arboristdb-create-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/arboristdb-create-job.yaml", + "hashed_secret": "6c57cdfdaaf3cde7a1da6aa94c7d8e46502c4bab", + "is_verified": false, + "line_number": 33 + } + ], + "kube/services/jobs/aws-bucket-replicate-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/aws-bucket-replicate-job.yaml", + "hashed_secret": "deb02468778f4041fb189654698ac948e436732d", + "is_verified": false, + "line_number": 33 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/aws-bucket-replicate-job.yaml", + "hashed_secret": "abe72fcb190ed9c73eb20e198c73a97605b95063", + "is_verified": false, + "line_number": 36 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/aws-bucket-replicate-job.yaml", + "hashed_secret": "ca3cdac59f2bfa45cb014190e4509bf6becf28fb", + "is_verified": false, + "line_number": 42 + } + ], + "kube/services/jobs/bucket-manifest-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/bucket-manifest-job.yaml", + "hashed_secret": "6c36710fe8825b381388d7005f2c9b5c70175fba", + "is_verified": false, + "line_number": 33 + } + ], + "kube/services/jobs/bucket-replicate-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/bucket-replicate-job.yaml", + "hashed_secret": "84954f7729144580d612cbb0517aeca8880e3483", + "is_verified": false, + "line_number": 46 + } + ], + "kube/services/jobs/bucket-replication-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/bucket-replication-job.yaml", + "hashed_secret": "84954f7729144580d612cbb0517aeca8880e3483", + "is_verified": false, + "line_number": 32 + } + ], + "kube/services/jobs/bucket-size-report-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/bucket-size-report-job.yaml", + "hashed_secret": "7cccf62cb63863d9d3baabed4f576eb0f7039735", + "is_verified": false, + "line_number": 34 + } + ], + "kube/services/jobs/cedar-ingestion-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/cedar-ingestion-job.yaml", + "hashed_secret": "e1c426d126dcc618dcd0686fc718d509ca6ee3b8", + "is_verified": false, + "line_number": 54 + } + ], + "kube/services/jobs/client-modify-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/client-modify-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 41 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/client-modify-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 44 + }, + { + "type": "Secret Keyword", + "filename": 
"kube/services/jobs/client-modify-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 50 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/client-modify-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 54 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/client-modify-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 57 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/client-modify-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 60 + } + ], + "kube/services/jobs/cogwheel-register-client-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/cogwheel-register-client-job.yaml", + "hashed_secret": "09b772df628fd10bca646b6a877eb661122210ab", + "is_verified": false, + "line_number": 40 + } + ], + "kube/services/jobs/config-fence-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/config-fence-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 44 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/config-fence-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 54 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/config-fence-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 57 + } + ], + "kube/services/jobs/covid19-etl-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/covid19-etl-job.yaml", + "hashed_secret": "a7a2b42615b2b256a7c601c77c426e5d6cafb212", + "is_verified": false, + "line_number": 34 + } + ], + "kube/services/jobs/covid19-notebook-etl-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/covid19-notebook-etl-job.yaml", + "hashed_secret": "a7a2b42615b2b256a7c601c77c426e5d6cafb212", + "is_verified": false, + "line_number": 33 + } + ], + "kube/services/jobs/data-ingestion-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/data-ingestion-job.yaml", + "hashed_secret": "81e4388059839f71aed21999aa51095c7e545094", + "is_verified": false, + "line_number": 34 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/data-ingestion-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 48 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/data-ingestion-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 51 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/data-ingestion-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 54 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/data-ingestion-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 60 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/data-ingestion-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 63 + }, + { + "type": "Secret Keyword", + "filename": 
"kube/services/jobs/data-ingestion-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 66 + } + ], + "kube/services/jobs/etl-cronjob.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/etl-cronjob.yaml", + "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966", + "is_verified": false, + "line_number": 38 + } + ], + "kube/services/jobs/etl-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/etl-job.yaml", + "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966", + "is_verified": false, + "line_number": 35 + } + ], + "kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 43 + } + ], + "kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 36 + } + ], + "kube/services/jobs/fence-db-migrate-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-db-migrate-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 36 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-db-migrate-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 39 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-db-migrate-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 42 + } + ], + "kube/services/jobs/fence-delete-expired-clients-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-delete-expired-clients-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 38 + } + ], + "kube/services/jobs/fence-visa-update-cronjob.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-visa-update-cronjob.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 42 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-visa-update-cronjob.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 45 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-visa-update-cronjob.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 48 + } + ], + "kube/services/jobs/fence-visa-update-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-visa-update-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 36 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-visa-update-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 39 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-visa-update-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + 
"line_number": 42 + } + ], + "kube/services/jobs/fencedb-create-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fencedb-create-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 33 + } + ], + "kube/services/jobs/gdcdb-create-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gdcdb-create-job.yaml", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", + "is_verified": false, + "line_number": 33 + } + ], + "kube/services/jobs/gen3qa-check-bucket-access-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gen3qa-check-bucket-access-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 177 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gen3qa-check-bucket-access-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 180 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gen3qa-check-bucket-access-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 186 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gen3qa-check-bucket-access-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 190 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gen3qa-check-bucket-access-job.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 193 + } + ], + "kube/services/jobs/gentestdata-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gentestdata-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 67 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gentestdata-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 70 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gentestdata-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 76 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gentestdata-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 80 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gentestdata-job.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 83 + } + ], + "kube/services/jobs/google-bucket-manifest-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-bucket-manifest-job.yaml", + "hashed_secret": "5ca8fff7767e5dd6ebed80e2c8eab66d6f3bf5eb", + "is_verified": false, + "line_number": 31 + } + ], + "kube/services/jobs/google-bucket-replicate-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-bucket-replicate-job.yaml", + "hashed_secret": "b6f0ec0b08da77656ced48427841e28d7a8a81d6", + "is_verified": false, + "line_number": 35 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-bucket-replicate-job.yaml", + "hashed_secret": "abe72fcb190ed9c73eb20e198c73a97605b95063", + "is_verified": false, + "line_number": 38 + }, + { + "type": "Secret Keyword", + "filename": 
"kube/services/jobs/google-bucket-replicate-job.yaml", + "hashed_secret": "ca3cdac59f2bfa45cb014190e4509bf6becf28fb", + "is_verified": false, + "line_number": 41 + } + ], + "kube/services/jobs/google-create-bucket-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-create-bucket-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 78 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-create-bucket-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 81 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-create-bucket-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 84 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-create-bucket-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 91 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-create-bucket-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 94 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-create-bucket-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 97 + } + ], + "kube/services/jobs/google-delete-expired-access-cronjob.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-access-cronjob.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 43 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-access-cronjob.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 46 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-access-cronjob.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 49 + } + ], + "kube/services/jobs/google-delete-expired-access-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-access-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 36 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-access-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 39 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-access-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 42 + } + ], + "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 48 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 51 + }, + { + "type": "Secret Keyword", + "filename": 
"kube/services/jobs/google-delete-expired-service-account-cronjob.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 57 + }, { - "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 38, - "type": "Secret Keyword" + "line_number": 61 }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 55, - "type": "Secret Keyword" + "line_number": 64 }, { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 57, - "type": "Secret Keyword" + "line_number": 67 } ], - "gen3/bin/kube-setup-secrets.sh": [ + "kube/services/jobs/google-delete-expired-service-account-job.yaml": [ { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 79, - "type": "Secret Keyword" + "line_number": 40 }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 82, - "type": "Secret Keyword" + "line_number": 43 }, { - "hashed_secret": "6f7531b95bbc99ac25a5cc82edb825f319c5dee8", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 95, - "type": "Secret Keyword" - } - ], - "gen3/bin/kube-setup-sftp.sh": [ + "line_number": 49 + }, { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 36, - "type": "Secret Keyword" + "line_number": 53 }, { - "hashed_secret": "83d11e3aec005a3b9a2077c6800683e202a95af4", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 51, - "type": "Secret Keyword" - } - ], - "gen3/bin/kube-setup-sheepdog.sh": [ + "line_number": 56 + }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 33, - "type": 
"Secret Keyword" + "line_number": 59 } ], - "gen3/bin/kube-setup-sower-jobs.sh": [ + "kube/services/jobs/google-init-proxy-groups-cronjob.yaml": [ { - "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 25, - "type": "Secret Keyword" + "line_number": 48 }, { - "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 26, - "type": "Secret Keyword" + "line_number": 51 }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 120, - "type": "Secret Keyword" + "line_number": 54 }, { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 122, - "type": "Secret Keyword" - } - ], - "gen3/bin/kube-setup-ssjdispatcher.sh": [ + "line_number": 61 + }, { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 117, - "type": "Secret Keyword" + "line_number": 64 }, { - "hashed_secret": "7992309146efaa8da936e34b0bd33242cd0e9f93", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 184, - "type": "Secret Keyword" + "line_number": 67 }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", "is_verified": false, - "line_number": 197, - "type": "Secret Keyword" + "line_number": 70 } ], - "gen3/lib/aws.sh": [ + "kube/services/jobs/google-init-proxy-groups-job.yaml": [ { - "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 640, - "type": "Secret Keyword" + "line_number": 40 }, { - "hashed_secret": "5b4b6c62d3d99d202f095c38c664eded8f640ce8", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 660, - "type": "Secret Keyword" - } - ], - "gen3/lib/bootstrap/templates/Gen3Secrets/apis_configs/fence-config.yaml": [ + "line_number": 43 + }, { - "hashed_secret": "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3", - 
"is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 33, - "type": "Basic Auth Credentials" + "line_number": 46 }, { - "hashed_secret": "5d07e1b80e448a213b392049888111e1779a52db", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 286, - "type": "Secret Keyword" - } - ], - "gen3/lib/bootstrap/templates/Gen3Secrets/creds.json": [ + "line_number": 53 + }, { - "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 26, - "type": "Secret Keyword" - } - ], - "gen3/lib/bootstrap/templates/Gen3Secrets/g3auto/dbfarm/servers.json": [ + "line_number": 56 + }, { - "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 5, - "type": "Secret Keyword" - } - ], - "gen3/lib/logs/utils.sh": [ + "line_number": 59 + }, { - "hashed_secret": "76143b4ffc8aa2a53f9700ce229f904e69f1e8b5", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", "is_verified": false, - "line_number": 3, - "type": "Secret Keyword" + "line_number": 62 } ], - "gen3/lib/manifestDefaults/hatchery/hatchery.json": [ + "kube/services/jobs/google-manage-account-access-cronjob.yaml": [ { - "hashed_secret": "0da0e0005ca04acb407af2681d0bede6d9406039", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 78, - "type": "Secret Keyword" - } - ], - "gen3/lib/onprem.sh": [ + "line_number": 48 + }, { - "hashed_secret": "29e52a9bac8f274fa41c51fce9c98eba0dd99cb3", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 68, - "type": "Secret Keyword" + "line_number": 51 }, { - "hashed_secret": "50f013532a9770a2c2cfdc38b7581dd01df69b70", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 84, - "type": "Secret Keyword" - } - ], - "gen3/lib/secrets/rotate-postgres.sh": [ + "line_number": 54 + }, { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 162, - "type": "Secret Keyword" + "line_number": 61 }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, + "type": 
"Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 250, - "type": "Secret Keyword" - } - ], - "gen3/lib/testData/etlconvert/expected2.yaml": [ + "line_number": 64 + }, { - "hashed_secret": "fe54e5e937d642307ec155b47ac8a214cb40d474", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 10, - "type": "Base64 High Entropy String" - }, + "line_number": 67 + } + ], + "kube/services/jobs/google-manage-account-access-job.yaml": [ { - "hashed_secret": "cea0e701e53c42bede2212b22f58f9ff8324da55", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 13, - "type": "Base64 High Entropy String" + "line_number": 40 }, { - "hashed_secret": "d98d72830f08c9a8b96ed11d3d96ae9e71b72a26", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 16, - "type": "Base64 High Entropy String" + "line_number": 43 }, { - "hashed_secret": "667fd45d415f73f4132cf0ed11452beb51117b12", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 18, - "type": "Base64 High Entropy String" + "line_number": 46 }, { - "hashed_secret": "c2599d515ba3be74ed58821485ba769fc565e424", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 33, - "type": "Base64 High Entropy String" + "line_number": 53 }, { - "hashed_secret": "6ec5eb29e2884f0c9731493b38902e37c2d672ba", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 35, - "type": "Base64 High Entropy String" + "line_number": 56 }, { - "hashed_secret": "99126b74731670a59b663d5320712564ec7b5f22", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 36, - "type": "Base64 High Entropy String" + "line_number": 59 } ], - "gen3/test/secretsTest.sh": [ + "kube/services/jobs/google-manage-keys-cronjob.yaml": [ { - "hashed_secret": "c2c715092ef59cba22520f109f041efca84b8938", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 25, - "type": "Secret Keyword" - } - ], - "gen3/test/terraformTest.sh": [ + "line_number": 48 + }, { - "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml", + 
"hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 156, - "type": "Secret Keyword" + "line_number": 51 }, { - "hashed_secret": "1cc07dccfdf640eb0e403e490a873a5536759009", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 172, - "type": "Base64 High Entropy String" + "line_number": 54 }, { - "hashed_secret": "185a71a740ef6b9b21c84e6eaa47b89c7de181ef", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 175, - "type": "Base64 High Entropy String" + "line_number": 61 }, { - "hashed_secret": "329b7cd8191942bedd337107934d365c43a86e6c", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 175, - "type": "Secret Keyword" - } - ], - "kube/services/datadog/values.yaml": [ + "line_number": 64 + }, { - "hashed_secret": "52330dffa4d0795b4199a66428e54eca228e1661", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 23, - "type": "Secret Keyword" + "line_number": 67 } ], - "kube/services/fenceshib/fenceshib-configmap.yaml": [ + "kube/services/jobs/google-manage-keys-job.yaml": [ { - "hashed_secret": "a985e14b9d6744a2d04f29347693b55c116e478c", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 375, - "type": "Base64 High Entropy String" + "line_number": 40 }, { - "hashed_secret": "adc747bc5eb82ef4b017f5c3759dcee5aa28c36f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 376, - "type": "Base64 High Entropy String" + "line_number": 43 }, { - "hashed_secret": "59b1702ff0eaf92c9271cbd12f587de97df7e13b", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 377, - "type": "Base64 High Entropy String" + "line_number": 46 }, { - "hashed_secret": "b4a748bbfbbca8925d932a47ab3dcb970d34caf5", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 378, - "type": "Base64 High Entropy String" + "line_number": 53 }, { - "hashed_secret": "af646701a84f7dd9f0e87753f54def881326e78a", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 379, - "type": "Base64 High Entropy String" + "line_number": 56 }, { - "hashed_secret": "20c15ad9742124dc06e1612282c49bb443ebcbd9", - "is_secret": false, + "type": "Secret 
Keyword", + "filename": "kube/services/jobs/google-manage-keys-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 380, - "type": "Base64 High Entropy String" - }, + "line_number": 59 + } + ], + "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml": [ { - "hashed_secret": "9caded71b967a11b7a6cd0f20db91f06f3517d12", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 381, - "type": "Base64 High Entropy String" + "line_number": 48 }, { - "hashed_secret": "8f19501bc9241b71f7b6db929fb35ab12635dcd7", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 382, - "type": "Base64 High Entropy String" + "line_number": 51 }, { - "hashed_secret": "d6220f6a55df1ed11c4250f42ab07bb9da20541a", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 383, - "type": "Base64 High Entropy String" + "line_number": 54 }, { - "hashed_secret": "dadd9b96636f9529f2547d05d754dc310ceba0c3", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 384, - "type": "Base64 High Entropy String" + "line_number": 61 }, { - "hashed_secret": "3074bc66584550e20c3697a28f67a0762394943c", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 385, - "type": "Base64 High Entropy String" + "line_number": 64 }, { - "hashed_secret": "823131319b4c4b4688f44d3e832bfa9696f16b52", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 386, - "type": "Base64 High Entropy String" - }, + "line_number": 67 + } + ], + "kube/services/jobs/google-verify-bucket-access-group-job.yaml": [ { - "hashed_secret": "015b780cbfb76988caf52de8ac974a6781e53110", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 387, - "type": "Base64 High Entropy String" + "line_number": 40 }, { - "hashed_secret": "5c8fac33207d74d667680ade09447ea8f43b76d7", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 388, - "type": "Base64 High Entropy String" + "line_number": 43 }, { - "hashed_secret": "c0c4bb09d8394e8f001e337bd27ccac355433d9e", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml", + "hashed_secret": 
"1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 389, - "type": "Base64 High Entropy String" + "line_number": 46 }, { - "hashed_secret": "f95631bcbbbc56e18487dcb242cfb1b3e74b16a1", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 390, - "type": "Base64 High Entropy String" + "line_number": 53 }, { - "hashed_secret": "01a692ab6232e0882a313d148981bab58ab98f53", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 391, - "type": "Base64 High Entropy String" + "line_number": 56 }, { - "hashed_secret": "658060a680d415ce6690ad2c3b622ddb33ddd50a", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 392, - "type": "Base64 High Entropy String" - }, + "line_number": 59 + } + ], + "kube/services/jobs/graph-create-job.yaml": [ { - "hashed_secret": "80915b0bd9daa5e1f95cad573892980b1b5a2294", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/graph-create-job.yaml", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", "is_verified": false, - "line_number": 393, - "type": "Base64 High Entropy String" - }, + "line_number": 33 + } + ], + "kube/services/jobs/indexd-authz-job.yaml": [ { - "hashed_secret": "cc55977b293d8cdca8a2c19dfea6874e70057c41", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/indexd-authz-job.yaml", + "hashed_secret": "0b701c1fabb6ba47a7d47d455e3696d207014bd3", "is_verified": false, - "line_number": 394, - "type": "Base64 High Entropy String" + "line_number": 32 }, { - "hashed_secret": "e400ed02add75dd5f3a8c212857acf12027437d1", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/indexd-authz-job.yaml", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", "is_verified": false, - "line_number": 395, - "type": "Base64 High Entropy String" + "line_number": 35 }, { - "hashed_secret": "2e819c8baa3b0508a32b77de258655b3f3a6f7cb", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/indexd-authz-job.yaml", + "hashed_secret": "aee98a99696237d70b6854ee4c2d9e42bc696039", "is_verified": false, - "line_number": 396, - "type": "Base64 High Entropy String" - }, + "line_number": 38 + } + ], + "kube/services/jobs/indexd-userdb-job.yaml": [ { - "hashed_secret": "546ed926d58ea5492ab6adb8be94a67aa44ac433", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/indexd-userdb-job.yaml", + "hashed_secret": "0b701c1fabb6ba47a7d47d455e3696d207014bd3", "is_verified": false, - "line_number": 397, - "type": "Base64 High Entropy String" + "line_number": 40 }, { - "hashed_secret": "f056f2deceed268e7af6dbdaf2577079c76e006a", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/indexd-userdb-job.yaml", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", "is_verified": false, - "line_number": 398, - "type": "Base64 High Entropy String" + "line_number": 43 }, { - "hashed_secret": "d75efee28f4798c3a9c6f44b78a8500513ef28b2", - "is_secret": 
false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/indexd-userdb-job.yaml", + "hashed_secret": "aee98a99696237d70b6854ee4c2d9e42bc696039", "is_verified": false, - "line_number": 399, - "type": "Base64 High Entropy String" - }, + "line_number": 46 + } + ], + "kube/services/jobs/metadata-aggregate-sync-job.yaml": [ { - "hashed_secret": "7803ae08cdc22a5e0b025eff3c9ef0628eedc165", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/metadata-aggregate-sync-job.yaml", + "hashed_secret": "e14f65c8ca7f3b27a0f0f5463569954841e162c9", "is_verified": false, - "line_number": 419, - "type": "Base64 High Entropy String" + "line_number": 31 }, { - "hashed_secret": "b8b61e87f5b58b0eeb597b2122ea0cea2ccab3d9", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/metadata-aggregate-sync-job.yaml", + "hashed_secret": "c27babf45eb0ed87329e69c7d47dba611e859c5d", "is_verified": false, - "line_number": 420, - "type": "Base64 High Entropy String" - }, + "line_number": 34 + } + ], + "kube/services/jobs/metadata-delete-expired-objects-job.yaml": [ { - "hashed_secret": "787745fc904c3bd7eddc3d1aab683a376c13890f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/metadata-delete-expired-objects-job.yaml", + "hashed_secret": "0cc8bac3fabe63722716d1e6fe04a8dded1e3ad0", "is_verified": false, - "line_number": 423, - "type": "Base64 High Entropy String" - }, + "line_number": 24 + } + ], + "kube/services/jobs/remove-objects-from-clouds-job.yaml": [ { - "hashed_secret": "81361d672f238f505a6246ef9b655ee2f48d67e7", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/remove-objects-from-clouds-job.yaml", + "hashed_secret": "deb02468778f4041fb189654698ac948e436732d", "is_verified": false, - "line_number": 424, - "type": "Base64 High Entropy String" + "line_number": 34 }, { - "hashed_secret": "7c98bff76ac3f273d15ed9bc3dd5294d323ab577", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/remove-objects-from-clouds-job.yaml", + "hashed_secret": "b6f0ec0b08da77656ced48427841e28d7a8a81d6", "is_verified": false, - "line_number": 425, - "type": "Base64 High Entropy String" + "line_number": 37 }, { - "hashed_secret": "46038fc88daceed8dd46817ca45c72ae0270fdd4", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/remove-objects-from-clouds-job.yaml", + "hashed_secret": "ca3cdac59f2bfa45cb014190e4509bf6becf28fb", "is_verified": false, - "line_number": 426, - "type": "Base64 High Entropy String" - }, + "line_number": 43 + } + ], + "kube/services/jobs/replicate-validation-job.yaml": [ { - "hashed_secret": "acad0c57b4f5cbed1b4863ed06d02784180a9f92", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/replicate-validation-job.yaml", + "hashed_secret": "deb02468778f4041fb189654698ac948e436732d", "is_verified": false, - "line_number": 427, - "type": "Base64 High Entropy String" + "line_number": 34 }, { - "hashed_secret": "1b57f49a6ee337c16ecd6aabfc0dff3b3821cd09", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/replicate-validation-job.yaml", + "hashed_secret": "b6f0ec0b08da77656ced48427841e28d7a8a81d6", "is_verified": false, - "line_number": 428, - "type": "Base64 High Entropy String" + "line_number": 37 }, { - "hashed_secret": "5b688158be36e8b3f265a462ed599dcf69290084", - "is_secret": false, + "type": "Secret Keyword", + "filename": 
"kube/services/jobs/replicate-validation-job.yaml", + "hashed_secret": "abe72fcb190ed9c73eb20e198c73a97605b95063", "is_verified": false, - "line_number": 429, - "type": "Base64 High Entropy String" + "line_number": 40 }, { - "hashed_secret": "965996e12c8b50b3c325d96003e8984a4ece658a", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/replicate-validation-job.yaml", + "hashed_secret": "ca3cdac59f2bfa45cb014190e4509bf6becf28fb", "is_verified": false, - "line_number": 430, - "type": "Base64 High Entropy String" - }, + "line_number": 43 + } + ], + "kube/services/jobs/s3sync-cronjob.yaml": [ { - "hashed_secret": "584f0c58e764e948af1a35c9e60447aa0f84c6f5", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/s3sync-cronjob.yaml", + "hashed_secret": "27f6dfe15698a3bfaa183c84701cfb2bf4115415", "is_verified": false, - "line_number": 431, - "type": "Base64 High Entropy String" - }, + "line_number": 44 + } + ], + "kube/services/jobs/usersync-job.yaml": [ { - "hashed_secret": "bcaf897786d060a675ee9d654a84ae8baf96e9d0", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/usersync-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 432, - "type": "Base64 High Entropy String" + "line_number": 64 }, { - "hashed_secret": "0c09277fa183e06d32065f9386a3b4190b445df3", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/usersync-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 433, - "type": "Base64 High Entropy String" + "line_number": 67 }, { - "hashed_secret": "5a51be06b305d6664e4afd25f21869b0f8b5039b", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/usersync-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 434, - "type": "Base64 High Entropy String" + "line_number": 70 }, { - "hashed_secret": "b38404f8853d734e3d03577b2c1084b4540c8708", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/usersync-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 435, - "type": "Base64 High Entropy String" + "line_number": 77 }, { - "hashed_secret": "126ccc602cffcb8292beb57137f7f6719e317b72", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/usersync-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 436, - "type": "Base64 High Entropy String" + "line_number": 80 }, { - "hashed_secret": "6681c1d7e1d327642a32cb8864ad51e4b8f981e5", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/usersync-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 437, - "type": "Base64 High Entropy String" + "line_number": 83 }, { - "hashed_secret": "7f7b1f316ece195e5f584fe2faf6f9edc6942c6f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/usersync-job.yaml", + "hashed_secret": "ea73fcfdaa415890d5fde24d3b2245671be32f73", "is_verified": false, - "line_number": 439, - "type": "Base64 High Entropy String" - }, + "line_number": 86 + } + ], + "kube/services/jobs/useryaml-job.yaml": [ { - "hashed_secret": "bb908c7bc655057f2edc42815c5dff82e9dea529", - "is_secret": false, + "type": "Secret 
Keyword", + "filename": "kube/services/jobs/useryaml-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 440, - "type": "Base64 High Entropy String" + "line_number": 40 }, { - "hashed_secret": "bc2a0d18e3dd142df7b34e95342d47bf8aadabcb", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/useryaml-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 441, - "type": "Base64 High Entropy String" + "line_number": 43 }, { - "hashed_secret": "d60f0bcea109bb6edb6e45fd387f5f2c86e49e1a", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/useryaml-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 442, - "type": "Base64 High Entropy String" + "line_number": 46 }, { - "hashed_secret": "e549dd40a741557cc1c4e377df0a141354e22688", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/useryaml-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 443, - "type": "Base64 High Entropy String" + "line_number": 53 }, { - "hashed_secret": "2dd2486dae84cad50387c20bf687b6fbc6162b58", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/useryaml-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 444, - "type": "Base64 High Entropy String" + "line_number": 56 }, { - "hashed_secret": "71622010fc7eb09d9273f59c548bde6a5da5dc0e", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/useryaml-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 445, - "type": "Base64 High Entropy String" + "line_number": 59 }, { - "hashed_secret": "6f0115cf53bd49ec990c562ac6cbfc452c83cd46", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/jobs/useryaml-job.yaml", + "hashed_secret": "ea73fcfdaa415890d5fde24d3b2245671be32f73", "is_verified": false, - "line_number": 446, - "type": "Base64 High Entropy String" - }, + "line_number": 65 + } + ], + "kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml": [ { - "hashed_secret": "70dddd534b2f9bb70871fefe0845b79c3b69363f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 448, - "type": "Base64 High Entropy String" + "line_number": 56 }, { - "hashed_secret": "acf3536b0416aa99608b0be17e87655370ece829", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml", + "hashed_secret": "fb7ea689a364feb7aafbf8d553eb77073fa7ba11", "is_verified": false, - "line_number": 449, - "type": "Base64 High Entropy String" - }, + "line_number": 59 + } + ], + "kube/services/kubecost-standalone/thanos-deploy.yaml": [ { - "hashed_secret": "1d13ee35c7279c1fae1c6474ed47611994273e41", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/kubecost-standalone/thanos-deploy.yaml", + "hashed_secret": "064376809efc3acda5bd341aca977e149b989696", "is_verified": false, - "line_number": 450, - "type": "Base64 High Entropy String" - }, + "line_number": 127 + } + ], + "kube/services/kubecost-standalone/values.yaml": [ + { + "type": 
"Secret Keyword", + "filename": "kube/services/kubecost-standalone/values.yaml", + "hashed_secret": "ec9786daee68e3541963a51299160859fe4db663", + "is_verified": false, + "line_number": 30 + } + ], + "kube/services/manifestservice/manifestservice-deploy.yaml": [ { - "hashed_secret": "d38cf89b25bd7378cdb4e00b4b59293001dd500b", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/manifestservice/manifestservice-deploy.yaml", + "hashed_secret": "3da2c49c267b6c58401bbf05e379b38d20434f78", "is_verified": false, - "line_number": 451, - "type": "Base64 High Entropy String" + "line_number": 61 }, { - "hashed_secret": "1648f34ce2f1b563a8ed1c6d5d55b5e76a395903", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/manifestservice/manifestservice-deploy.yaml", + "hashed_secret": "469e0c2b1a67aa94955bae023ddc727be31581a7", "is_verified": false, - "line_number": 452, - "type": "Base64 High Entropy String" + "line_number": 64 }, { - "hashed_secret": "9bf63f6f49fb01ff80959bc5a60c8688df92cc02", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/manifestservice/manifestservice-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 453, - "type": "Base64 High Entropy String" + "line_number": 67 } ], - "kube/services/jobs/indexd-authz-job.yaml": [ + "kube/services/metadata/metadata-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/metadata/metadata-deploy.yaml", + "hashed_secret": "e14f65c8ca7f3b27a0f0f5463569954841e162c9", + "is_verified": false, + "line_number": 61 + }, { - "hashed_secret": "bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/metadata/metadata-deploy.yaml", + "hashed_secret": "c27babf45eb0ed87329e69c7d47dba611e859c5d", "is_verified": false, - "line_number": 87, - "type": "Basic Auth Credentials" + "line_number": 66 } ], "kube/services/monitoring/grafana-values.yaml": [ { + "type": "Secret Keyword", + "filename": "kube/services/monitoring/grafana-values.yaml", "hashed_secret": "2ae868079d293e0a185c671c7bcdac51df36e385", - "is_secret": false, "is_verified": false, - "line_number": 162, - "type": "Secret Keyword" + "line_number": 162 }, { - "hashed_secret": "7a64ff8446b06d38dc271019994f13823a2cbcf4", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/monitoring/grafana-values.yaml", + "hashed_secret": "5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8", "is_verified": false, - "line_number": 166, - "type": "Secret Keyword" + "line_number": 331 } ], - "kube/services/revproxy/helpers.js": [ + "kube/services/monitoring/thanos-deploy.yaml": [ { - "hashed_secret": "1d278d3c888d1a2fa7eed622bfc02927ce4049af", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/monitoring/thanos-deploy.yaml", + "hashed_secret": "064376809efc3acda5bd341aca977e149b989696", "is_verified": false, - "line_number": 10, - "type": "Base64 High Entropy String" + "line_number": 130 } ], - "kube/services/revproxy/helpersTest.js": [ + "kube/services/ohif-viewer/ohif-viewer-deploy.yaml": [ { - "hashed_secret": "e029d4904cc728879d70030572bf37d4510367cb", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/ohif-viewer/ohif-viewer-deploy.yaml", + "hashed_secret": "3f87db80519a9ae7d8112f4e0d4cc81441181818", "is_verified": false, - "line_number": 22, - "type": "JSON Web Token" + "line_number": 40 } ], - 
"kube/services/superset/superset-deploy.yaml": [ + "kube/services/orthanc/orthanc-deploy.yaml": [ { - "hashed_secret": "96e4aceb7cf284be363aa248a32a7cc89785a9f7", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/orthanc/orthanc-deploy.yaml", + "hashed_secret": "3f87db80519a9ae7d8112f4e0d4cc81441181818", "is_verified": false, - "line_number": 38, - "type": "Secret Keyword" + "line_number": 41 } ], - "kube/services/superset/superset-redis.yaml": [ + "kube/services/peregrine/peregrine-canary-deploy.yaml": [ { - "hashed_secret": "4af3596275edcb7cd5cc6c3c38bc10479902a08f", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/peregrine/peregrine-canary-deploy.yaml", + "hashed_secret": "6131c35d7eebdbc17a314bef8aac75b87323cff3", "is_verified": false, - "line_number": 166, - "type": "Secret Keyword" + "line_number": 61 }, { - "hashed_secret": "244f421f896bdcdd2784dccf4eaf7c8dfd5189b5", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/peregrine/peregrine-canary-deploy.yaml", + "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966", "is_verified": false, - "line_number": 266, - "type": "Secret Keyword" + "line_number": 64 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/peregrine/peregrine-canary-deploy.yaml", + "hashed_secret": "990a3202b5c94aa5e5997e7dc1a218e457f8b8ec", + "is_verified": false, + "line_number": 70 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/peregrine/peregrine-canary-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 73 } ], - "kube/services/superset/values.yaml": [ + "kube/services/peregrine/peregrine-deploy.yaml": [ { - "hashed_secret": "6f803b24314c39062efe38d0c1da8c472f47eab3", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/peregrine/peregrine-deploy.yaml", + "hashed_secret": "6131c35d7eebdbc17a314bef8aac75b87323cff3", "is_verified": false, - "line_number": 54, - "type": "Secret Keyword" + "line_number": 67 }, { - "hashed_secret": "6eae3a5b062c6d0d79f070c26e6d62486b40cb46", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/peregrine/peregrine-deploy.yaml", + "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966", "is_verified": false, - "line_number": 86, - "type": "Secret Keyword" + "line_number": 70 }, { - "hashed_secret": "3eb416223e9e69e6bb8ee19793911ad1ad2027d8", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/peregrine/peregrine-deploy.yaml", + "hashed_secret": "990a3202b5c94aa5e5997e7dc1a218e457f8b8ec", "is_verified": false, - "line_number": 212, - "type": "Secret Keyword" + "line_number": 76 }, { - "hashed_secret": "ff55435345834a3fe224936776c2aa15f6ed5358", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/peregrine/peregrine-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 79 + } + ], + "kube/services/pidgin/pidgin-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/pidgin/pidgin-deploy.yaml", + "hashed_secret": "49af232c7adfcd54a40202e06261396a757e4ddd", "is_verified": false, - "line_number": 396, - "type": "Secret Keyword" + "line_number": 59 }, { - "hashed_secret": "98a84a63e5633d17e3b27b69695f87aa7189e9dc", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/pidgin/pidgin-deploy.yaml", + "hashed_secret": 
"9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 503, - "type": "Secret Keyword" + "line_number": 62 } ], - "package-lock.json": [ + "kube/services/portal/portal-deploy.yaml": [ { - "hashed_secret": "0656ad0df3af4633dc369f13d5e8806973c5fd9d", + "type": "Secret Keyword", + "filename": "kube/services/portal/portal-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 1481, - "type": "Base64 High Entropy String" + "line_number": 55 }, { - "hashed_secret": "00091d875d922437c5fc9e6067a08e78c2482e87", + "type": "Secret Keyword", + "filename": "kube/services/portal/portal-deploy.yaml", + "hashed_secret": "5c5a8e158ad2d8544f73cd5422072d414f497faa", "is_verified": false, - "line_number": 1489, - "type": "Base64 High Entropy String" + "line_number": 58 }, { - "hashed_secret": "c4e5cc37e115bf7d86e76e3d799705bf691e4d00", + "type": "Secret Keyword", + "filename": "kube/services/portal/portal-deploy.yaml", + "hashed_secret": "619551216e129bbc5322678abf9c9210c0327cfb", "is_verified": false, - "line_number": 1521, - "type": "Base64 High Entropy String" + "line_number": 61 }, { - "hashed_secret": "0512e37fbedf1d16828680a038a241b4780a5c04", + "type": "Secret Keyword", + "filename": "kube/services/portal/portal-deploy.yaml", + "hashed_secret": "e3c7565314f404e3883929f003c65a02a80366e9", "is_verified": false, - "line_number": 1547, - "type": "Base64 High Entropy String" - }, + "line_number": 67 + } + ], + "kube/services/portal/portal-root-deploy.yaml": [ { - "hashed_secret": "01868fd50edbfe6eb91e5b01209b543adc6857af", + "type": "Secret Keyword", + "filename": "kube/services/portal/portal-root-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 1611, - "type": "Base64 High Entropy String" + "line_number": 55 }, { - "hashed_secret": "a6f48bf1e398deffc7fd31da17c3506b46c97a93", + "type": "Secret Keyword", + "filename": "kube/services/portal/portal-root-deploy.yaml", + "hashed_secret": "5c5a8e158ad2d8544f73cd5422072d414f497faa", "is_verified": false, - "line_number": 1640, - "type": "Base64 High Entropy String" + "line_number": 58 }, { - "hashed_secret": "85ce358dbdec0996cf3ccd2bf1c6602af68c181e", + "type": "Secret Keyword", + "filename": "kube/services/portal/portal-root-deploy.yaml", + "hashed_secret": "619551216e129bbc5322678abf9c9210c0327cfb", "is_verified": false, - "line_number": 1648, - "type": "Base64 High Entropy String" + "line_number": 61 }, { - "hashed_secret": "6f9bfb49cb818d2fe07592515e4c3f7a0bbd7e0e", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/portal/portal-root-deploy.yaml", + "hashed_secret": "e3c7565314f404e3883929f003c65a02a80366e9", "is_verified": false, - "line_number": 1664, - "type": "Base64 High Entropy String" - }, + "line_number": 67 + } + ], + "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml": [ { - "hashed_secret": "7098a3e6d6d2ec0a40f04fe12509c5c6f4c49c0e", + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 1683, - "type": "Base64 High Entropy String" + "line_number": 74 }, { - "hashed_secret": "1664ad175bba1795a7ecad572bae7e0740b94f56", + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", 
"is_verified": false, - "line_number": 1733, - "type": "Base64 High Entropy String" + "line_number": 77 }, { - "hashed_secret": "1ec4ce2eb945ce2f816dcb6ebdd1e10247f439a3", + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 1742, - "type": "Base64 High Entropy String" + "line_number": 80 }, { - "hashed_secret": "a7af5768a6d936e36f28e1030d7f894d7aaf555e", + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 1755, - "type": "Base64 High Entropy String" + "line_number": 90 }, { - "hashed_secret": "6fbc7dd864586173160874f2a86ca7d2d552cb85", + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 1769, - "type": "Base64 High Entropy String" + "line_number": 93 }, { - "hashed_secret": "81a961f2c89c6209328b74a8768e30fd76c3ac72", + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 1855, - "type": "Base64 High Entropy String" + "line_number": 96 }, { - "hashed_secret": "797d4751c536c421cb82b9f62e0a804af30d78f5", + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", "is_verified": false, - "line_number": 1889, - "type": "Base64 High Entropy String" + "line_number": 99 }, { - "hashed_secret": "0d55babfa89f240142c0adfc7b560500a1d3ae7c", + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", "is_verified": false, - "line_number": 1894, - "type": "Base64 High Entropy String" + "line_number": 102 }, { - "hashed_secret": "e9fdc3025cd10bd8aa4508611e6b7b7a9d650a2c", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 1921, - "type": "Base64 High Entropy String" - }, + "line_number": 105 + } + ], + "kube/services/qa-dashboard/qa-dashboard-deployment.yaml": [ { - "hashed_secret": "4cf9419259c0ce8eee84b468af3c72db8b001620", + "type": "Secret Keyword", + "filename": "kube/services/qa-dashboard/qa-dashboard-deployment.yaml", + "hashed_secret": "253939a955a575ac69f409e5914dd0191b704760", "is_verified": false, - "line_number": 1950, - "type": "Base64 High Entropy String" - }, + "line_number": 63 + } + ], + "kube/services/qabot/qabot-deploy.yaml": [ { - "hashed_secret": "24816e3eb4308e247bde7c1d09ffb7b79c519b71", + "type": "Secret Keyword", + "filename": "kube/services/qabot/qabot-deploy.yaml", + "hashed_secret": "a9fa7aa8c08b647c3fb696e6598642d4a63e25be", "is_verified": false, - "line_number": 1983, - "type": "Base64 High Entropy String" - }, + "line_number": 86 + } + ], + "kube/services/requestor/requestor-deploy.yaml": [ { - "hashed_secret": "e9adfe8a333d45f4776fe0eab31608be5d7b6a7d", + "type": "Secret Keyword", + "filename": 
"kube/services/requestor/requestor-deploy.yaml", + "hashed_secret": "15debe4170aa5b89858d939f4c0644307ae7789b", "is_verified": false, - "line_number": 2004, - "type": "Base64 High Entropy String" - }, + "line_number": 61 + } + ], + "kube/services/revproxy/gen3.nginx.conf/indexd-service.conf": [ { - "hashed_secret": "03d6fb388dd1b185129b14221f7127715822ece6", + "type": "Secret Keyword", + "filename": "kube/services/revproxy/gen3.nginx.conf/indexd-service.conf", + "hashed_secret": "f89523833036f85fed37ce3ebf25492189bc9397", "is_verified": false, - "line_number": 2013, - "type": "Base64 High Entropy String" - }, + "line_number": 41 + } + ], + "kube/services/revproxy/gen3.nginx.conf/metadata-service.conf": [ { - "hashed_secret": "ee161bb3f899720f95cee50a5f9ef9c9ed96278b", + "type": "Secret Keyword", + "filename": "kube/services/revproxy/gen3.nginx.conf/metadata-service.conf", + "hashed_secret": "18c0871af26eb9875c0f840b13211f097c133fd2", "is_verified": false, - "line_number": 2046, - "type": "Base64 High Entropy String" - }, + "line_number": 24 + } + ], + "kube/services/revproxy/helpers.js": [ { - "hashed_secret": "ebeb5b574fa1ed24a40248275e6136759e766466", + "type": "Base64 High Entropy String", + "filename": "kube/services/revproxy/helpers.js", + "hashed_secret": "1d278d3c888d1a2fa7eed622bfc02927ce4049af", "is_verified": false, - "line_number": 2078, - "type": "Base64 High Entropy String" - }, + "line_number": 10 + } + ], + "kube/services/revproxy/helpersTest.js": [ { - "hashed_secret": "a6a555a428522ccf439fd516ce7c7e269274363f", + "type": "Base64 High Entropy String", + "filename": "kube/services/revproxy/helpersTest.js", + "hashed_secret": "389c3ec21b7325359051e97ff569b078843d2d37", "is_verified": false, - "line_number": 2083, - "type": "Base64 High Entropy String" + "line_number": 19 }, { - "hashed_secret": "f7f85d9f7c87f1e576dcaf4cf50f35728f9a3265", - "is_secret": false, + "type": "JSON Web Token", + "filename": "kube/services/revproxy/helpersTest.js", + "hashed_secret": "e029d4904cc728879d70030572bf37d4510367cb", "is_verified": false, - "line_number": 2111, - "type": "Base64 High Entropy String" - }, + "line_number": 22 + } + ], + "kube/services/revproxy/revproxy-deploy.yaml": [ { - "hashed_secret": "3f1646b60abe74297d2f37a1eee5dc771ad834fc", + "type": "Secret Keyword", + "filename": "kube/services/revproxy/revproxy-deploy.yaml", + "hashed_secret": "c7a87a61893a647e29289845cb51e61afb06800b", "is_verified": false, - "line_number": 2138, - "type": "Base64 High Entropy String" + "line_number": 74 }, { - "hashed_secret": "fd933c71e82d5519ae0cb0779b370d02f6935759", + "type": "Secret Keyword", + "filename": "kube/services/revproxy/revproxy-deploy.yaml", + "hashed_secret": "b3a4e2dea4c1fae8c58a07a84065b73b3a2d831c", "is_verified": false, - "line_number": 2143, - "type": "Base64 High Entropy String" + "line_number": 77 }, { - "hashed_secret": "7090aa59cb52ad1f1810b08c4ac1ddf5c8fce523", + "type": "Secret Keyword", + "filename": "kube/services/revproxy/revproxy-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 2150, - "type": "Base64 High Entropy String" - }, + "line_number": 80 + } + ], + "kube/services/sftp/sftp-deploy.yaml": [ { - "hashed_secret": "756444bea4ea3d67844d8ddf58ad32356e9c2430", + "type": "Secret Keyword", + "filename": "kube/services/sftp/sftp-deploy.yaml", + "hashed_secret": "9fdebf62e477d59d25730744c8b3089c67c3db85", "is_verified": false, - "line_number": 2188, - "type": "Base64 High Entropy String" - }, + 
"line_number": 39 + } + ], + "kube/services/sheepdog/sheepdog-canary-deploy.yaml": [ { - "hashed_secret": "f74135fdd6b8dafdfb01ebbc61c5e5c24ee27cf8", + "type": "Secret Keyword", + "filename": "kube/services/sheepdog/sheepdog-canary-deploy.yaml", + "hashed_secret": "ec9c944c51e87322de8d22e3ca9e2be1ad8fee0d", "is_verified": false, - "line_number": 2291, - "type": "Base64 High Entropy String" + "line_number": 58 }, { - "hashed_secret": "56fbae787f4aed7d0632e95840d71bd378d3a36f", + "type": "Secret Keyword", + "filename": "kube/services/sheepdog/sheepdog-canary-deploy.yaml", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", "is_verified": false, - "line_number": 2303, - "type": "Base64 High Entropy String" + "line_number": 61 }, { - "hashed_secret": "81cb6be182eb79444202c4563080aee75296a672", + "type": "Secret Keyword", + "filename": "kube/services/sheepdog/sheepdog-canary-deploy.yaml", + "hashed_secret": "e43756046ad1763d6946575fed0e05130a154bd2", "is_verified": false, - "line_number": 2308, - "type": "Base64 High Entropy String" + "line_number": 67 }, { - "hashed_secret": "f0f3f7bce32184893046ac5f8cc80da56c3ca539", + "type": "Secret Keyword", + "filename": "kube/services/sheepdog/sheepdog-canary-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 70 + } + ], + "kube/services/sheepdog/sheepdog-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/sheepdog/sheepdog-deploy.yaml", + "hashed_secret": "ec9c944c51e87322de8d22e3ca9e2be1ad8fee0d", "is_verified": false, - "line_number": 2317, - "type": "Base64 High Entropy String" + "line_number": 63 }, { - "hashed_secret": "097893233346336f4003acfb6eb173ee59e648f0", + "type": "Secret Keyword", + "filename": "kube/services/sheepdog/sheepdog-deploy.yaml", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", "is_verified": false, - "line_number": 2327, - "type": "Base64 High Entropy String" + "line_number": 66 }, { - "hashed_secret": "bb14c3b4ef4a9f2e86ffdd44b88d9b6729419671", + "type": "Secret Keyword", + "filename": "kube/services/sheepdog/sheepdog-deploy.yaml", + "hashed_secret": "e43756046ad1763d6946575fed0e05130a154bd2", "is_verified": false, - "line_number": 2332, - "type": "Base64 High Entropy String" + "line_number": 72 }, { - "hashed_secret": "71344a35cff67ef081920095d1406601fb5e9b97", + "type": "Secret Keyword", + "filename": "kube/services/sheepdog/sheepdog-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 2340, - "type": "Base64 High Entropy String" + "line_number": 75 + } + ], + "kube/services/shiny/shiny-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/shiny/shiny-deploy.yaml", + "hashed_secret": "327a1bbc6dc0ce857472ee9162a3415133862d50", + "is_verified": false, + "line_number": 43 + } + ], + "kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml", + "hashed_secret": "7f932449df74fc78573fea502df8a484aef3f69d", + "is_verified": false, + "line_number": 61 + } + ], + "kube/services/superset/superset-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/superset/superset-deploy.yaml", + "hashed_secret": "3e9d1737117ff62b23e37aedc72b522b0134997a", + "is_verified": false, + "line_number": 235 }, { - "hashed_secret": "eb3db6990fd43477a35dfeffc90b3f1ffa83c7bd", + "type": "Secret Keyword", + "filename": 
"kube/services/superset/superset-deploy.yaml", + "hashed_secret": "6ac08eaa58d425783ff8b5a38fe16ee66c0bce15", + "is_verified": false, + "line_number": 311 + } + ], + "kube/services/superset/superset-redis.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/superset/superset-redis.yaml", + "hashed_secret": "9fe1c31809da38c55b2b64bfab47b92bc5f6b7b9", + "is_verified": false, + "line_number": 265 + } + ], + "kube/services/superset/values.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/superset/values.yaml", + "hashed_secret": "9a09d4081ddc128a80384712ce6df3578e6bc58e", "is_verified": false, - "line_number": 2349, - "type": "Base64 High Entropy String" + "line_number": 173 }, { - "hashed_secret": "266288bdc14807b538d1e48a5891e361fa9b4a14", + "type": "Secret Keyword", + "filename": "kube/services/superset/values.yaml", + "hashed_secret": "118c413f3fc929a1624f4c3e1da1e3d24377a693", "is_verified": false, - "line_number": 2357, - "type": "Base64 High Entropy String" + "line_number": 299 }, { - "hashed_secret": "800477261175fd21f23e7321923e1fba6ae55471", + "type": "Secret Keyword", + "filename": "kube/services/superset/values.yaml", + "hashed_secret": "d2a8d1ddfa75398366cff06545380c73481ec17d", "is_verified": false, - "line_number": 2369, - "type": "Base64 High Entropy String" + "line_number": 445 }, { - "hashed_secret": "3f0c251b9c2c21454445a98fde6915ceacde2136", + "type": "Secret Keyword", + "filename": "kube/services/superset/values.yaml", + "hashed_secret": "98a84a63e5633d17e3b27b69695f87aa7189e9dc", + "is_verified": false, + "line_number": 459 + } + ], + "kube/services/thor/thor-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/thor/thor-deploy.yaml", + "hashed_secret": "1f3f96a3887209d0dda357e5516231ee9c5cd9a7", + "is_verified": false, + "line_number": 100 + } + ], + "kube/services/tube/tube-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/tube/tube-deploy.yaml", + "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966", "is_verified": false, - "line_number": 2387, - "type": "Base64 High Entropy String" + "line_number": 58 } ], - "tf_files/aws/cognito/README.md": [ + "kube/services/ws-storage/ws-storage-deploy.yaml": [ { - "hashed_secret": "f6920f370a30262b7dd70e97293c73ec89739b70", - "is_secret": false, + "type": "Secret Keyword", + "filename": "kube/services/ws-storage/ws-storage-deploy.yaml", + "hashed_secret": "ec2d9395e11f353370a4abac21a1565641b35ce9", "is_verified": false, - "line_number": 106, - "type": "Secret Keyword" + "line_number": 66 + } + ], + "kube/services/wts/wts-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/wts/wts-deploy.yaml", + "hashed_secret": "5de687ae886f19c3cb68d4980e3f2e77cca3db9e", + "is_verified": false, + "line_number": 65 + } + ], + "packer/buildAll.sh": [ + { + "type": "Secret Keyword", + "filename": "packer/buildAll.sh", + "hashed_secret": "6e1d66a1596528c308e601c10aa0b92d53606ab9", + "is_verified": false, + "line_number": 15 + } + ], + "packer/variables.example.json": [ + { + "type": "Secret Keyword", + "filename": "packer/variables.example.json", + "hashed_secret": "a3a0648a036bebf78ba1a1eb498a66081059da10", + "is_verified": false, + "line_number": 5 } ], "tf_files/aws/commons/README.md": [ { - "hashed_secret": "d02e53411e8cb4cd709778f173f7bc9a3455f8ed", - "is_secret": false, + "type": "Secret Keyword", + "filename": "tf_files/aws/commons/README.md", + "hashed_secret": "5f02a3fb14ab1ce5c18c362b04b8ffc603ea5951", "is_verified": 
false, - "line_number": 60, - "type": "Secret Keyword" + "line_number": 60 }, { - "hashed_secret": "9dc0da3613af850c5a018b0a88a5626fb8888e4e", - "is_secret": false, + "type": "Secret Keyword", + "filename": "tf_files/aws/commons/README.md", + "hashed_secret": "49cfceed8aa8df159e53aa5c5951cad48a3f1216", "is_verified": false, - "line_number": 78, - "type": "Secret Keyword" + "line_number": 67 + }, + { + "type": "Secret Keyword", + "filename": "tf_files/aws/commons/README.md", + "hashed_secret": "18ad13589ca5fb3c432d7d9f0fe49f8ed6e2c478", + "is_verified": false, + "line_number": 70 } ], "tf_files/aws/eks/sample.tfvars": [ { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/eks/sample.tfvars", "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884", - "is_secret": false, "is_verified": false, - "line_number": 107, - "type": "Hex High Entropy String" + "line_number": 107 } ], "tf_files/aws/eks/variables.tf": [ { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/eks/variables.tf", "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884", - "is_secret": false, "is_verified": false, - "line_number": 133, - "type": "Hex High Entropy String" + "line_number": 133 } ], "tf_files/aws/modules/common-logging/README.md": [ { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/common-logging/README.md", "hashed_secret": "83442aa5a16cb1992731c32367ef464564388017", - "is_secret": false, - "is_verified": false, - "line_number": 57, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "fd4a4637ac99de2c1d89155d66d1f3de15d231a2", - "is_secret": false, "is_verified": false, - "line_number": 59, - "type": "Hex High Entropy String" + "line_number": 57 } ], "tf_files/aws/modules/common-logging/lambda_function.py": [ { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/common-logging/lambda_function.py", "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", - "is_secret": false, "is_verified": false, - "line_number": 18, - "type": "Hex High Entropy String" + "line_number": 18 }, { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/common-logging/lambda_function.py", "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", - "is_secret": false, "is_verified": false, - "line_number": 18, - "type": "Base64 High Entropy String" + "line_number": 18 + }, + { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/common-logging/lambda_function.py", + "hashed_secret": "a4752db26b4774d3429878f36ceb7b61805ffd94", + "is_verified": false, + "line_number": 18 }, { - "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", - "is_secret": false, + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/common-logging/lambda_function.py", + "hashed_secret": "b979d8d0c0e8413c20a5597f789e31f0a2b2ff3a", "is_verified": false, - "line_number": 18, - "type": "Hex High Entropy String" + "line_number": 18 }, { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/common-logging/lambda_function.py", "hashed_secret": "4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff", - "is_secret": false, "is_verified": false, - "line_number": 30, - "type": "Hex High Entropy String" + "line_number": 30 } ], "tf_files/aws/modules/common-logging/testLambda.py": [ { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/common-logging/testLambda.py", "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", - "is_secret": false, "is_verified": false, - 
"line_number": 5, - "type": "Hex High Entropy String" + "line_number": 5 }, { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/common-logging/testLambda.py", "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", - "is_secret": false, "is_verified": false, - "line_number": 5, - "type": "Base64 High Entropy String" + "line_number": 5 + }, + { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/common-logging/testLambda.py", + "hashed_secret": "a4752db26b4774d3429878f36ceb7b61805ffd94", + "is_verified": false, + "line_number": 5 }, { - "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", - "is_secret": false, + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/common-logging/testLambda.py", + "hashed_secret": "b979d8d0c0e8413c20a5597f789e31f0a2b2ff3a", "is_verified": false, - "line_number": 5, - "type": "Hex High Entropy String" + "line_number": 5 + }, + { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/common-logging/testLambda.py", + "hashed_secret": "4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff", + "is_verified": false, + "line_number": 10 } ], "tf_files/aws/modules/eks/variables.tf": [ { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/eks/variables.tf", "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884", - "is_secret": false, "is_verified": false, - "line_number": 113, - "type": "Hex High Entropy String" + "line_number": 113 } ], "tf_files/aws/modules/management-logs/README.md": [ { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/management-logs/README.md", "hashed_secret": "83442aa5a16cb1992731c32367ef464564388017", - "is_secret": false, "is_verified": false, - "line_number": 54, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "fd4a4637ac99de2c1d89155d66d1f3de15d231a2", - "is_secret": false, - "is_verified": false, - "line_number": 56, - "type": "Hex High Entropy String" + "line_number": 54 } ], "tf_files/aws/modules/management-logs/lambda_function.py": [ { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/management-logs/lambda_function.py", "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", - "is_secret": false, "is_verified": false, - "line_number": 18, - "type": "Hex High Entropy String" + "line_number": 18 }, { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/management-logs/lambda_function.py", "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", - "is_secret": false, "is_verified": false, - "line_number": 18, - "type": "Base64 High Entropy String" + "line_number": 18 + }, + { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/management-logs/lambda_function.py", + "hashed_secret": "a4752db26b4774d3429878f36ceb7b61805ffd94", + "is_verified": false, + "line_number": 18 }, { - "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", - "is_secret": false, + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/management-logs/lambda_function.py", + "hashed_secret": "b979d8d0c0e8413c20a5597f789e31f0a2b2ff3a", "is_verified": false, - "line_number": 18, - "type": "Hex High Entropy String" + "line_number": 18 }, { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/management-logs/lambda_function.py", "hashed_secret": "4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff", - "is_secret": false, "is_verified": false, - "line_number": 30, - "type": "Hex High Entropy String" + 
"line_number": 30 } ], "tf_files/aws/modules/management-logs/testLambda.py": [ { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/management-logs/testLambda.py", "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", - "is_secret": false, "is_verified": false, - "line_number": 5, - "type": "Hex High Entropy String" + "line_number": 5 }, { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/management-logs/testLambda.py", "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", - "is_secret": false, "is_verified": false, - "line_number": 5, - "type": "Base64 High Entropy String" + "line_number": 5 }, { - "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", - "is_secret": false, + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/management-logs/testLambda.py", + "hashed_secret": "a4752db26b4774d3429878f36ceb7b61805ffd94", "is_verified": false, - "line_number": 5, - "type": "Hex High Entropy String" + "line_number": 5 }, { - "hashed_secret": "3cf8eb4e9254e1d6cc523da01f8b798b9a83101a", - "is_secret": false, + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/management-logs/testLambda.py", + "hashed_secret": "b979d8d0c0e8413c20a5597f789e31f0a2b2ff3a", "is_verified": false, - "line_number": 6, - "type": "Base64 High Entropy String" + "line_number": 5 }, { - "hashed_secret": "51118900cd675df1b44f254057398f3e52902a5d", - "is_secret": false, + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/management-logs/testLambda.py", + "hashed_secret": "3cf8eb4e9254e1d6cc523da01f8b798b9a83101a", "is_verified": false, - "line_number": 6, - "type": "Hex High Entropy String" + "line_number": 6 }, { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/management-logs/testLambda.py", "hashed_secret": "60a6dfc8d43cd2f5c6292899fc2f94f2d4fc32c4", - "is_secret": false, "is_verified": false, - "line_number": 6, - "type": "Hex High Entropy String" + "line_number": 6 + }, + { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/management-logs/testLambda.py", + "hashed_secret": "d484ccb4ced21e0149078377f14b913bf5c613d0", + "is_verified": false, + "line_number": 6 } ], "tf_files/aws/slurm/README.md": [ { - "hashed_secret": "fd85d792fa56981cf6a8d2a5c0857c74af86e99d", - "is_secret": false, + "type": "Secret Keyword", + "filename": "tf_files/aws/slurm/README.md", + "hashed_secret": "c16686250cd583de64e02a47a8b194cd5578b2a1", "is_verified": false, - "line_number": 83, - "type": "Secret Keyword" + "line_number": 83 } ], "tf_files/azure/cloud.tf": [ { - "hashed_secret": "7c1a4b52b64e4106041971c345a1f3eab58fb2a4", - "is_secret": false, + "type": "Secret Keyword", + "filename": "tf_files/azure/cloud.tf", + "hashed_secret": "38d930120a56321ceaa147b2bc1f19db53a0b993", "is_verified": false, - "line_number": 424, - "type": "Secret Keyword" + "line_number": 361 } ], "tf_files/gcp-bwg/roots/commons_setup/variables/answerfile-commons_setup-001.template.tfvars": [ { + "type": "Secret Keyword", + "filename": "tf_files/gcp-bwg/roots/commons_setup/variables/answerfile-commons_setup-001.template.tfvars", "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", - "is_secret": false, "is_verified": false, - "line_number": 231, - "type": "Secret Keyword" + "line_number": 231 } ], "tf_files/gcp-bwg/roots/templates/answerfile-commons_setup-001.template.tfvars": [ { + "type": "Secret Keyword", + "filename": 
"tf_files/gcp-bwg/roots/templates/answerfile-commons_setup-001.template.tfvars", "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", - "is_secret": false, "is_verified": false, - "line_number": 231, - "type": "Secret Keyword" + "line_number": 231 } ], "tf_files/gcp-bwg/roots/templates/answerfile-env-tenant.user.tfvars_NO_APP_SETUP": [ { + "type": "Secret Keyword", + "filename": "tf_files/gcp-bwg/roots/templates/answerfile-env-tenant.user.tfvars_NO_APP_SETUP", "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", - "is_secret": false, "is_verified": false, - "line_number": 262, - "type": "Secret Keyword" + "line_number": 262 } ], - "tf_files/gcp/commons/sample.tfvars": [ + "tf_files/gcp/commons/root.tf": [ { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, + "type": "Secret Keyword", + "filename": "tf_files/gcp/commons/root.tf", + "hashed_secret": "013b6be0bd7ef38a9ee3472cec65c208a19421e6", "is_verified": false, - "line_number": 11, - "type": "Secret Keyword" - }, + "line_number": 65 + } + ], + "tf_files/gcp/commons/sample.tfvars": [ { - "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d", - "is_secret": false, + "type": "Secret Keyword", + "filename": "tf_files/gcp/commons/sample.tfvars", + "hashed_secret": "6b44a330b450ee550c081410c6b705dfeaa105ce", "is_verified": false, - "line_number": 26, - "type": "Secret Keyword" + "line_number": 26 }, { - "hashed_secret": "253c7b5e7c83a86346fc4501495b130813f08105", - "is_secret": false, - "is_verified": false, - "line_number": 37, - "type": "Secret Keyword" - } - ], - "tf_files/shared/modules/k8s_configs/creds.tpl": [ - { - "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, + "type": "Secret Keyword", + "filename": "tf_files/gcp/commons/sample.tfvars", + "hashed_secret": "791191ef9eafc75f5dd28e37df837b4991556876", "is_verified": false, - "line_number": 8, - "type": "Secret Keyword" + "line_number": 31 } ] }, - "version": "0.13.1", - "word_list": { - "file": null, - "hash": null - } + "generated_at": "2024-03-07T21:26:14Z" } diff --git a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile index afb1fca9f..6eeb8f4fd 100644 --- a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile +++ b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile @@ -1,11 +1,10 @@ -FROM jenkins/jnlp-slave:4.13.3-1-jdk11 +FROM jenkins/inbound-agent:jdk21 USER root ENV DEBIAN_FRONTEND=noninteractive -# install python -RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base wget +RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils build-essential zip unzip jq less vim gettext-base RUN set -xe && apt-get update \ && apt-get install -y lsb-release \ @@ -16,7 +15,6 @@ RUN set -xe && apt-get update \ libffi-dev \ libssl-dev \ libghc-regex-pcre-dev \ - linux-headers-amd64 \ libcurl4-openssl-dev \ libncurses5-dev \ libncursesw5-dev \ @@ -27,20 +25,19 @@ RUN set -xe && apt-get update \ libbz2-dev \ libexpat1-dev \ liblzma-dev \ - python-virtualenv \ lua5.3 \ r-base \ software-properties-common \ sudo \ tk-dev \ + wget \ zlib1g-dev \ zsh \ ca-certificates-java \ - openjdk-11-jre-headless \ && ln -s /usr/bin/lua5.3 /usr/local/bin/lua # Use jdk11 -ENV JAVA_HOME="/usr/lib/jvm/java-11-openjdk-amd64" +ENV JAVA_HOME="/opt/java/openjdk" ENV PATH="$JAVA_HOME/bin:$PATH" COPY ./certfix.sh /certfix.sh @@ -56,32 +53,34 @@ RUN 
export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" \ google-cloud-sdk-cbt \ kubectl +# install go - https://go.dev/doc/install +RUN wget https://go.dev/dl/go1.21.0.linux-amd64.tar.gz \ + && rm -rf /usr/local/go \ + && tar -C /usr/local -xzf go1.21.0.linux-amd64.tar.gz +ENV PATH="$PATH:/usr/local/go/bin" +RUN go version + # # install docker tools: -# * https://docs.docker.com/install/linux/docker-ce/debian/#install-docker-ce-1 -# * https://docs.docker.com/compose/install/#install-compose # -RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \ - && add-apt-repository \ - "deb [arch=amd64] https://download.docker.com/linux/debian \ - $(lsb_release -cs) \ - stable" \ - && apt-get update \ - && apt-get install -y docker-ce \ - && curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose \ - && chmod a+rx /usr/local/bin/docker-compose +RUN sudo install -m 0755 -d /etc/apt/keyrings \ + && curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg \ + && sudo chmod a+r /etc/apt/keyrings/docker.gpg \ + && echo \ + "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \ + "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null \ + && apt-get update && apt-get install -y docker-ce # install nodejs RUN curl -sL https://deb.nodesource.com/setup_14.x | bash - -RUN apt-get update && apt-get install -y nodejs +RUN apt-get update && apt-get install -y nodejs npm -# add psql: https://www.postgresql.org/download/linux/debian/ -RUN DISTRO="$(lsb_release -c -s)" \ - && echo "deb http://apt.postgresql.org/pub/repos/apt/ ${DISTRO}-pgdg main" > /etc/apt/sources.list.d/pgdg.list \ - && wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ - && apt-get update \ - && apt-get install -y postgresql-client-13 libpq-dev \ - && rm -rf /var/lib/apt/lists/* +# Install postgres 13 client +RUN curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc| gpg --dearmor -o /etc/apt/trusted.gpg.d/postgresql.gpg && \ + echo "deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -cs`-pgdg main" | tee /etc/apt/sources.list.d/pgdg.list && \ + apt-get update && \ + apt-get install -y postgresql-client-13 # Copy sh script responsible for installing Python COPY install-python3.8.sh /root/tmp/install-python3.8.sh @@ -98,7 +97,7 @@ RUN sed -i 's/python3/python3.8/' /usr/bin/lsb_release && \ sed -i 's/python3/python3.8/' /usr/bin/add-apt-repository # install aws cli, poetry, pytest, etc. 
-RUN set -xe && python3.8 -m pip install --upgrade pip && python3.8 -m pip install awscli --upgrade && python3.8 -m pip install pytest --upgrade && python3.8 -m pip install poetry && python3.8 -m pip install PyYAML --upgrade && python3.8 -m pip install lxml --upgrade && python3.8 -m pip install yq --upgrade && python3.8 -m pip install datadog --upgrade +RUN set -xe && python3.8 -m pip install --upgrade pip setuptools && python3.8 -m pip install awscli --upgrade && python3.8 -m pip install pytest --upgrade && python3.8 -m pip install poetry && python3.8 -m pip install PyYAML --upgrade && python3.8 -m pip install lxml --upgrade && python3.8 -m pip install yq --upgrade && python3.8 -m pip install datadog --upgrade # install terraform RUN curl -o /tmp/terraform.zip https://releases.hashicorp.com/terraform/0.11.15/terraform_0.11.15_linux_amd64.zip \ @@ -117,6 +116,9 @@ RUN curl -sS -o - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-ke && apt-get -y update \ && apt-get -y install google-chrome-stable +# data-simulator needs "/usr/share/dict/words" to generate data that isn't random strings +RUN apt-get install --reinstall wamerican + # update /etc/sudoers RUN sed 's/^%sudo/#%sudo/' /etc/sudoers > /etc/sudoers.bak \ && /bin/echo -e "\n%sudo ALL=(ALL:ALL) NOPASSWD:ALL\n" >> /etc/sudoers.bak \ diff --git a/Docker/jenkins/Jenkins-Worker/Dockerfile b/Docker/jenkins/Jenkins-Worker/Dockerfile index 7b1d460cc..fec6b3203 100644 --- a/Docker/jenkins/Jenkins-Worker/Dockerfile +++ b/Docker/jenkins/Jenkins-Worker/Dockerfile @@ -1,18 +1,14 @@ -FROM jenkins/jnlp-slave:4.13.3-1-jdk11 +FROM jenkins/inbound-agent:jdk21 USER root ENV DEBIAN_FRONTEND=noninteractive -# install python and pip and aws cli -RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip build-essential libgit2-dev zip unzip less vim gettext-base wget -RUN set -xe && python -m pip install awscli --upgrade && python -m pip install pytest --upgrade && python -m pip install PyYAML --upgrade && python -m pip install lxml --upgrade -RUN set -xe && python3 -m pip install pytest --upgrade && python3 -m pip install PyYAML --upgrade -RUN set -xe && python -m pip install yq --upgrade && python3 -m pip install yq --upgrade -RUN set -xe && python3 -m pip install pandas --upgrade +RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils build-essential zip unzip jq less vim gettext-base RUN apt-get update \ && apt-get install -y lsb-release \ + git \ apt-transport-https \ r-base \ libffi-dev \ @@ -35,22 +31,30 @@ RUN apt-get update \ lua5.3 \ software-properties-common \ sudo \ + wget \ && ln -s /usr/bin/lua5.3 /usr/local/bin/lua # install Ruby. RUN apt-get install -y ruby-full -# install GIT from buster-backports -RUN echo "deb http://deb.debian.org/debian buster-backports main" > /etc/apt/sources.list.d/buster-backports.list \ - && apt-get update \ - && apt-get -t=buster-backports -y install git=1:2.30.* +# +# install docker tools: +# +RUN sudo install -m 0755 -d /etc/apt/keyrings \ + && curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg \ + && sudo chmod a+r /etc/apt/keyrings/docker.gpg \ + && echo \ + "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \ + "$(. 
/etc/os-release && echo "$VERSION_CODENAME")" stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null \ + && apt-get update && apt-get install -y docker-ce # install k6 to run load tests RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys C5AD17C747E3415A3642D57D77C6C491D6AC1D69 \ && echo "deb https://dl.k6.io/deb stable main" | tee /etc/apt/sources.list.d/k6.list \ && apt-get update \ && apt-get install k6 - + # install xk6-browser RUN cd /opt && wget --quiet https://github.com/grafana/xk6-browser/releases/download/v0.3.0/xk6-browser-v0.3.0-linux-amd64.tar.gz \ && tar -xvzf /opt/xk6-browser-v0.3.0-linux-amd64.tar.gz @@ -71,15 +75,13 @@ RUN wget https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 && c # # install docker tools: -# * https://docs.docker.com/install/linux/docker-ce/debian/#install-docker-ce-1 -# * https://docs.docker.com/compose/install/#install-compose # RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \ && /usr/bin/add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian \ $(lsb_release -c -s) \ stable" \ && apt-get update \ - && apt-get install -y docker-ce \ + && apt-get install -y docker-ce-cli \ && curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose \ && chmod a+rx /usr/local/bin/docker-compose @@ -118,6 +120,7 @@ RUN chmod +x /root/tmp/install-python3.9.sh; sync && \ bash /root/tmp/install-python3.9.sh && \ rm -rf /root/tmp/install-python3.9.sh && \ unlink /usr/bin/python3 && \ + ln -s /usr/local/bin/python3.9 /usr/bin/python && \ ln -s /usr/local/bin/python3.9 /usr/bin/python3 RUN env diff --git a/Docker/jenkins/Jenkins/Dockerfile b/Docker/jenkins/Jenkins/Dockerfile index a872ee1dd..04ebe5864 100644 --- a/Docker/jenkins/Jenkins/Dockerfile +++ b/Docker/jenkins/Jenkins/Dockerfile @@ -1,11 +1,10 @@ -FROM jenkins/jenkins:2.375 +FROM jenkins/jenkins:2.426.3-lts-jdk21 USER root ENV DEBIAN_FRONTEND=noninteractive -# install python -RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base wget +RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils build-essential zip unzip jq less vim gettext-base RUN set -xe && apt-get update \ && apt-get install -y lsb-release \ @@ -30,6 +29,7 @@ RUN set -xe && apt-get update \ software-properties-common \ sudo \ tk-dev \ + wget \ zlib1g-dev \ zsh \ && ln -s /usr/bin/lua5.3 /usr/local/bin/lua @@ -45,18 +45,15 @@ RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" \ # # install docker tools: -# * https://docs.docker.com/install/linux/docker-ce/debian/#install-docker-ce-1 -# * https://docs.docker.com/compose/install/#install-compose # -RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \ - && add-apt-repository \ - "deb [arch=amd64] https://download.docker.com/linux/debian \ - $(lsb_release -cs) \ - stable" \ - && apt-get update \ - && apt-get install -y docker-ce \ - && curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose \ - && chmod a+rx /usr/local/bin/docker-compose +RUN sudo install -m 0755 -d /etc/apt/keyrings \ + && curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg \ + && sudo chmod a+r /etc/apt/keyrings/docker.gpg \ + && echo \ + 
"deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \ + "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null \ + && apt-get update && apt-get install -y docker-ce # install nodejs RUN curl -sL https://deb.nodesource.com/setup_18.x | bash - @@ -81,8 +78,8 @@ RUN chmod +x /root/tmp/install-python3.8.sh; sync && \ ln -s /Python-3.8.0/python /usr/bin/python3 # Fix shebang for lsb_release -RUN sed -i 's/python3/python3.5/' /usr/bin/lsb_release && \ - sed -i 's/python3/python3.5/' /usr/bin/add-apt-repository +RUN sed -i 's/python3/python3.8/' /usr/bin/lsb_release && \ + sed -i 's/python3/python3.8/' /usr/bin/add-apt-repository # install aws cli, poetry, pytest, etc. RUN set -xe && python3 -m pip install --upgrade pip && python3 -m pip install awscli --upgrade && python3 -m pip install pytest --upgrade && python3 -m pip install poetry && python3 -m pip install PyYAML --upgrade && python3 -m pip install lxml --upgrade && python3 -m pip install yq --upgrade diff --git a/Docker/jenkins/Jenkins2/Dockerfile b/Docker/jenkins/Jenkins2/Dockerfile index 59cb5672e..e6b73bc76 100644 --- a/Docker/jenkins/Jenkins2/Dockerfile +++ b/Docker/jenkins/Jenkins2/Dockerfile @@ -1,11 +1,10 @@ -FROM jenkins/jenkins:2.375 +FROM jenkins/jenkins:2.426.3-lts-jdk21 USER root ENV DEBIAN_FRONTEND=noninteractive -# install python -RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base wget +RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils build-essential zip unzip jq less vim gettext-base RUN set -xe && apt-get update \ && apt-get install -y lsb-release \ @@ -30,6 +29,7 @@ RUN set -xe && apt-get update \ software-properties-common \ sudo \ tk-dev \ + wget \ zlib1g-dev \ zsh \ && ln -s /usr/bin/lua5.3 /usr/local/bin/lua @@ -45,18 +45,16 @@ RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" \ # # install docker tools: -# * https://docs.docker.com/install/linux/docker-ce/debian/#install-docker-ce-1 -# * https://docs.docker.com/compose/install/#install-compose # -RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \ - && add-apt-repository \ - "deb [arch=amd64] https://download.docker.com/linux/debian \ - $(lsb_release -cs) \ - stable" \ - && apt-get update \ - && apt-get install -y docker-ce \ - && curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose \ - && chmod a+rx /usr/local/bin/docker-compose +RUN sudo install -m 0755 -d /etc/apt/keyrings \ + && curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg \ + && sudo chmod a+r /etc/apt/keyrings/docker.gpg \ + && echo \ + "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \ + "$(. 
/etc/os-release && echo "$VERSION_CODENAME")" stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null \ + && apt-get update && apt-get install -y docker-ce + # install nodejs RUN curl -sL https://deb.nodesource.com/setup_18.x | bash - diff --git a/Jenkinsfile b/Jenkinsfile index 4e3470ded..908c2d01a 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -99,8 +99,8 @@ spec: resources: requests: cpu: 0.2 - memory: 200Mi - ephemeral-storage: 200Mi + memory: 400Mi + ephemeral-storage: 1Gi env: - name: AWS_DEFAULT_REGION value: us-east-1 @@ -134,8 +134,8 @@ spec: readOnly: true mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt" subPath: "ca.pem" - - name: dockersock - mountPath: "/var/run/docker.sock" + - name: containerdsock + mountPath: "/var/run/containerd/containerd.sock" serviceAccount: jenkins-service serviceAccountName: jenkins-service volumes: @@ -145,9 +145,9 @@ spec: - name: ca-volume secret: secretName: "service-ca" - - name: dockersock + - name: containerdsock hostPath: - path: /var/run/docker.sock + path: /var/run/containerd/containerd.sock ''' defaultContainer 'shell' } @@ -293,8 +293,8 @@ spec: script { try { if(!skipUnitTests) { - sh '/usr/bin/pip3 install boto3 --upgrade --user' - sh '/usr/bin/pip3 install kubernetes --upgrade --user' + sh '/usr/local/bin/pip3 install boto3 --upgrade --user' + sh '/usr/local/bin/pip3 install kubernetes --upgrade --user' sh 'python3 -m pytest cloud-automation/apis_configs/' sh 'python3 -m pytest cloud-automation/gen3/lib/dcf/' sh 'cd cloud-automation/tf_files/aws/modules/common-logging && python3 -m pytest testLambda.py'
diff --git a/doc/s3-to-google-replication.md b/doc/s3-to-google-replication.md
new file mode 100644
index 000000000..82d0374c7
--- /dev/null
+++ b/doc/s3-to-google-replication.md
@@ -0,0 +1,68 @@
+# S3 to Google Cloud Storage Replication Pipeline
+
+This document guides you through setting up a replication pipeline from AWS S3 to Google Cloud Storage (GCS) using VPC Service Controls and Storage Transfer Service. This solution follows security best practices, ensuring that data transfer between AWS S3 and GCS is secure and efficient.
+
+## Table of Contents
+
+- [Prerequisites](#prerequisites)
+- [Step-by-step Guide](#step-by-step-guide)
+  - [Setup VPC Service Controls](#setup-vpc-service-controls)
+  - [Initiate Storage Transfer Service](#initiate-storage-transfer-service)
+- [Compliance Benefits](#compliance-benefits)
+- [Cost Benefit Analysis](#cost-benefit-analysis)
+
+## Prerequisites
+
+1. **AWS account** with access to the S3 bucket.
+2. **Google Cloud account** with permissions to create buckets in GCS and set up VPC Service Controls and Storage Transfer Service.
+3. Familiarity with AWS IAM for S3 bucket access and Google Cloud IAM for GCS access.
+
+## Step-by-step Guide
+
+### Setup VPC Service Controls
+
+1. **Access the VPC Service Controls** in the Google Cloud Console.
+2. **Create a new VPC Service Control perimeter**.
+   - Name the perimeter and choose the desired region.
+   - Add the necessary GCP services. Be sure to include `storagetransfer.googleapis.com` for Storage Transfer Service.
+3. **Setup VPC Service Control Policy** to allow connections from AWS.
+   - Use the [documentation](https://cloud.google.com/vpc-service-controls/docs/set-up) to guide the setup.
+
+### Initiate Storage Transfer Service
+
+1. Navigate to **Storage Transfer Service** in the Google Cloud Console.
+2. Click **Create Transfer Job**.
+3. **Select Source**: Choose the Amazon S3 bucket and provide the necessary details.
+   - Ensure you have the necessary permissions for the S3 bucket in AWS IAM.
+4. **Select Destination**: Choose your GCS bucket.
+5. **Schedule & Advanced Settings**: Set the frequency and conditions for the transfer. Consider setting up notifications for job completion or errors.
+6. **Review & Create**: Confirm the details and initiate the transfer job.
+
+## Compliance Benefits
+
+Setting up a secure replication pipeline from AWS S3 to GCS using VPC Service Controls and Storage Transfer Service offers the following compliance benefits:
+
+1. **Data Security**: The VPC Service Controls provide an additional layer of security by ensuring that the transferred data remains within a defined security perimeter, reducing potential data leak risks.
+2. **Auditability**: Both AWS and GCS offer logging and monitoring tools that can provide audit trails for data transfer. This can help in meeting regulatory compliance requirements.
+3. **Consistent Data Replication**: The Storage Transfer Service ensures that data in GCS is up to date with the source S3 bucket, which is essential for consistent backup and disaster recovery strategies.
+
+## Cost Benefit Analysis
+
+**Benefits**:
+
+1. **Data Redundancy**: Having data stored in multiple cloud providers can be part of a robust disaster recovery strategy.
+2. **Flexibility**: Replicating data to GCS provides flexibility in multi-cloud strategies, enabling seamless migrations or usage of GCP tools and services.
+3. **Security**: Utilizing VPC Service Controls strengthens the security posture.
+
+**Costs**:
+
+1. **Data Transfer Costs**: Both AWS and Google Cloud might charge for data transfer. It's crucial to analyze the cost, especially for large data transfers.
+2. **Storage Costs**: Storing data redundantly incurs additional storage costs in GCS.
+
+**Analysis**:
+
+To stay in compliance, we require multiple copies of our data in separate datacenters or clouds. Our security audit underscored the importance of not keeping data in a single cloud. It may be expensive to transfer data from AWS to GCP and to store it in two clouds simultaneously, but when we need to, this solution offers a straightforward way to achieve compliance.
+
+---
+
+Please note that while this guide is based on the provided Google Cloud documentation, it's crucial to refer to the original [documentation](https://cloud.google.com/architecture/transferring-data-from-amazon-s3-to-cloud-storage-using-vpc-service-controls-and-storage-transfer-service) for the most accurate and up-to-date information.
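For readers who want to script this instead of clicking through the console, here is a minimal sketch using the `google-cloud-storage-transfer` Python client. This is an editor's illustration, not part of the guide above: the project ID, the bucket names, and the choice to read AWS keys from `AWS_ACCESS_KEY_ID`/`AWS_SECRET_ACCESS_KEY` environment variables are all assumptions.

```python
"""Minimal sketch: create a daily S3 -> GCS transfer job via Storage Transfer Service.

Assumes `pip install google-cloud-storage-transfer` plus GCP application-default
credentials; every name below is a placeholder, not a value from the guide.
"""
import os

from google.cloud import storage_transfer


def create_s3_to_gcs_transfer_job(project_id: str, s3_bucket: str, gcs_bucket: str) -> None:
    client = storage_transfer.StorageTransferServiceClient()

    request = storage_transfer.CreateTransferJobRequest(
        {
            "transfer_job": {
                "project_id": project_id,
                "description": "Replicate S3 bucket to GCS for compliance",
                "status": storage_transfer.TransferJob.Status.ENABLED,
                # daily schedule starting on this date; omit "schedule" for a one-time run
                "schedule": {"schedule_start_date": {"year": 2024, "month": 1, "day": 1}},
                "transfer_spec": {
                    "aws_s3_data_source": {
                        "bucket_name": s3_bucket,
                        "aws_access_key": {
                            "access_key_id": os.environ["AWS_ACCESS_KEY_ID"],
                            "secret_access_key": os.environ["AWS_SECRET_ACCESS_KEY"],
                        },
                    },
                    "gcs_data_sink": {"bucket_name": gcs_bucket},
                },
            }
        }
    )
    result = client.create_transfer_job(request)
    print(f"Created transfer job: {result.name}")


if __name__ == "__main__":
    # placeholder names for illustration only
    create_s3_to_gcs_transfer_job("my-gcp-project", "my-source-s3-bucket", "my-gcs-sink-bucket")
```

If the job must run inside the VPC Service Controls perimeter described in the guide, remember that `storagetransfer.googleapis.com` needs to be among the services protected by that perimeter.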
diff --git a/files/authorized_keys/squid_authorized_keys_user b/files/authorized_keys/squid_authorized_keys_user index 46b43a030..4b35fecd9 100644 --- a/files/authorized_keys/squid_authorized_keys_user +++ b/files/authorized_keys/squid_authorized_keys_user @@ -18,4 +18,5 @@ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKJR5N5VIU9qdSfCtlskzuQ7A5kNn8YPeXsoKq0HhY ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC3vyd6a7tsANi149ylPQYS8Gsp/SxJyhdK/j6arv77KbM0EIzzUiclFLnMKcqUQ263FrPyx3a3UP80R77ayCnwcEHrxlJrYfyFUva8vtmI9mu8VE7oXvuR/jcOyXM9NosxyYacL/p6W5X4r8tqo/gJFjmls1YRfu3JPlTgTT0VzGJu+B6rLEsw53c37VVzSaCtu/jBOjyxI1/UaNg1cd+hcfoQxJ9zSDqqE7ZUNOc3zHP+1AGYCQ/CJsNrDl2OkppIdC9He5jgjLhyD7yvyarI+oF05oHknol/K1hXK+yxIkF2Ou5krfjw7TMBvD+JbQVb35vL9acXFF20+lHLRLbobPU/6ZZTup3q7IRm5OWaL2CJtYZbJvicKW0Ep+vTzaiQjK71L6UxcIvnzvbP9Dnatv1GBMMDaQxAa4Lood8NG2ty1yfLN972akGqBlwJASXMRd/ogzxv2KSH9w6HHYoc2WpDhUtNHmjwX1FSLYPW3qx5ICMW6j9gR2u1tG4Ohzp1CmYVElnRHbnBrTkLde65Vqedk2tQy8fcopH59ZASIuR4GbhCb2SiNkr1PHEvfhLMzg/UCSnnhX9vUNhkPjJRy/bdL3pOt/77lpIQUqQBArOiZmfG8OD0q4+3Nr+c9v5bSSvynjirlKk+wb8sKyOoSAXdFeovL/A0BUKUjCtsXQ== dev@test.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQChK/8JjpUeWcF/1Ea2M4mSbLz1tOfpq74xD2USxE54kx7VoN1G7ylV76yqSIeRq1e7PPBEg5ZD1aXUJnlI32RwLJ5kaHnoB82Ta+Fv1B/vVoHCObcALfiHPpwPf1kM2liWEB0EhYcz1OUv3YQriPqjiRoWfnbw60GIyzhpWZhKRq0zlISOaTYdV9kafX+N7M6/gSU0632TgUwwsStYrffEleyrC/Lh+4UaESozWoPFiZLl2eMCKfZNFBB99HTFifImW2yC6Ag1QhCd1i3NpfiYuaSDH7WR3slPRSd8DiUAwGC2DkIuWPp3bhaAv2V4mtLIBAaTZsINIACB2+w7yf9yvCGtdobCmp4AA7ik9rEkRLk/Jff0YBHd6Z4qyIuRht3ZeWXIYSK1zOlPfs4lPUgvbjlPgMVFV2CrvOTnS+YZdW+8AklwRC3HDPD8wv3H/eGxl3K0vHWTBbTb774nVNfRDw81wcezCXFNUn4p2he7fgKcxs/rnMsYUcY8JJNR7Iz+NNIGUCom6HFwCMQdangFMHUW5TxxrlJcwVRaAns1M6g3ilYO+uvN/XsgCpZWYWnv5rBk8qz6dBM7gpc8tSr6Hvr7/vlghF3jpL+mQiW+7vUL+UZrUFNyoacUcQ+NuxKacHtHQKuRDyWofp+CB2b2a744F3mpkxx74HIkiZ72mQ== dev@test.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDDTX+pQvGrQVXmHGDhBP+632tgbb1j+BQWkrsUkDJGzwFiGs4dgqDs2eC+aDVq2LFz4xj0SgussFAKciB45OgmSZKX5yUE3Oo/lqov0Bb5f85iBHGv/X/JiuIYaq8GJklVyyo1sfKLUK1SOal6bE1WofezyTyDsdrHjIU50quzW7nB1CmL6rekIv/+df/seut4b3De1d2uX5WGGtcvQ5yTSgBW5aabMAJ2V9WlP/6Dw040Kq0MyKV01cIJ1HAjFhP58gbf3Eytz3AqqJVT6u0QroxhesCgKTyGcAyYy3airI/N0FHdC5oABVEJ6dKyy1rYvOchuxYeVMVVWn0vS7mZ+vP7dqaDmgEUU2qmTPBQZV2xBWCdpfyUYYARW2JzlEaySbmA+yoxFBsquunVbIgUGNEUbxefsFdM3k5pS6I1uuEM0ATYH5iNz84nKKCcksGlib0i/pEtra6N/mFF7yjHYBRb/E/VCZig0gKezDJWu/DO0emJA+kdQpqp48U+qFrSWkuiO0dCQYl3VCVo8vedgMGPjr8MbUjU7o8W1+DYyjFM8HYMknRNdVAqAoK+cedw9mAWVGpKFrl61caGTFck0634nAVFUmfGTh9XRaZeFdDnivxnqP837gcsdKnEGYnkrxWap97XeXzK0P0Svy1zBfUQyzU5vrHfHt2H7ILDMw== prodv1-usersync-sftp -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDaO/doqHANcTZFEqZOoy9kKgbxu0d/cS1nEINlFcoQ/jnCG7huznWnWiYgnlkS6/Op9VrDp6qG/UBDye2mTvAh2FHPsOzSGvgml3dPYB5fy6G/xoXd7NJnIxttwFUvk4GuLZ40s24WCcXoFGJ2vaSAVYr0q6lmqOqk6jp1/lNj4+QFD4mcH2//jTscSFNseRII2NECu+PnnWAuYFOIHH1IODOvInEivUvN6VBX410D7iD7cEdhgiYitFZH6Cp6ubWG7OUKdZYv0067eO6HDDzl7y+BBUf3DF6Lr8gqtGXVqmAB9UqeBJ8pP3pNWKbgAa8sHvS8JxElCIc+4EM5dTI2OrDYKiuCTPZEC14WEFZLKqH7tjQFuZe0jfVRtoFNmKWClCgkJDWpyIkdR+qHcnOwlYkUVN3B02WVu4kTfox2ZUz65tLspJNAxAjYVrI7+c6LTQHSJwMcAMYcehR3vuqAfKE7xM6ReNxRQXsWaasdJgT2IJKj7vHu/G9GVycjiheg3zakJ9rr+63I68XlHNnTtfjIl/jgIHgcU18ggbwkwjL3xk39YttutlAaNAGUYCsopn/HdK8A86KvTCwHGEKtubgEHmv1oRAOooVaNes1oko2y9Saaqee52bsvwfeTLgxXB43d9GOWLoyBlgprDiufssFHoiJKQlgrqEwtg+vYQ== giangbui0816@gmail.com \ No newline at end of file +ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDaO/doqHANcTZFEqZOoy9kKgbxu0d/cS1nEINlFcoQ/jnCG7huznWnWiYgnlkS6/Op9VrDp6qG/UBDye2mTvAh2FHPsOzSGvgml3dPYB5fy6G/xoXd7NJnIxttwFUvk4GuLZ40s24WCcXoFGJ2vaSAVYr0q6lmqOqk6jp1/lNj4+QFD4mcH2//jTscSFNseRII2NECu+PnnWAuYFOIHH1IODOvInEivUvN6VBX410D7iD7cEdhgiYitFZH6Cp6ubWG7OUKdZYv0067eO6HDDzl7y+BBUf3DF6Lr8gqtGXVqmAB9UqeBJ8pP3pNWKbgAa8sHvS8JxElCIc+4EM5dTI2OrDYKiuCTPZEC14WEFZLKqH7tjQFuZe0jfVRtoFNmKWClCgkJDWpyIkdR+qHcnOwlYkUVN3B02WVu4kTfox2ZUz65tLspJNAxAjYVrI7+c6LTQHSJwMcAMYcehR3vuqAfKE7xM6ReNxRQXsWaasdJgT2IJKj7vHu/G9GVycjiheg3zakJ9rr+63I68XlHNnTtfjIl/jgIHgcU18ggbwkwjL3xk39YttutlAaNAGUYCsopn/HdK8A86KvTCwHGEKtubgEHmv1oRAOooVaNes1oko2y9Saaqee52bsvwfeTLgxXB43d9GOWLoyBlgprDiufssFHoiJKQlgrqEwtg+vYQ== giangbui0816@gmail.com +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDTpJ2l8nfOvhJ4Y3pjadFU69nfJBRuE0BaHE22LK9qflFWdhGW+T/x8Qy9406DFXCh6KED+q9lC+N4nR92AfgFNaBmkXZkzWLoXcqO1IWRexHwTqAUcrtLjpx5wNdCr3+vv9hWhXtvYg8ewnrZc+WxYde4EUmXbhzPXbg0SkBXTr6bpYhs6inyttfBeJNxbeydrW7cmhFiAdOkm03o3AXdH86PNlWVfVHy8OHHzf4fbvlJlOx7OeB+wOyQUr3DW+IWBLQFJk4uyagn/ECV9OIQpxoJFTQjcSrJ6v/GqlY5PImM6YxL8NlZu46CDIxagaZkum+iJ8dtPYr6tJuLiP5Ny0Gsl1X5DoKlstgyqqPNYTnZVS4GSS5Hyxm6HmodZ78OR5+vAoyWKZ3unXU5Dbkz0Qxq9VtrGo2xd0M+dDi/7YazRpLL0tc39w48Wl7KD3jFzoesZp1JHeEGLdGXlGCw8AM1FT0WDf28ShTRds6uWPGvMtM3XkVDPMLFwroKv1RCErmqLYod4HOMuwlmdRvtDGYb3NYsliOnHPiT9nhu2J6KmT1jj8uFOLyTaJCArtBqIsXscP3R4o0wBlQl3FniMdiK7ESkv8DUaOr1Co+/3wX9n/p/BW5bxuq1R9HpNyKsrALyNJUkquVT+5aPcNKXvmAeHAw/D0TYzy6ZKBpnDw== kyuleburton@Kyules-MacBook-Pro.local diff --git a/files/dashboard/maintenance-page/index.html b/files/dashboard/maintenance-page/index.html index a3e34479b..fac49e64e 100644 --- a/files/dashboard/maintenance-page/index.html +++ b/files/dashboard/maintenance-page/index.html @@ -16,7 +16,7 @@ @@ -27,12 +27,12 @@
[maintenance-page markup lost in extraction: both hunks of this diff modify only HTML tags. The visible copy ("This site is under maintenance..." and "Please check back later.") is unchanged context; the changed lines swap the page image for one whose alt text is "A shiba dog looking into the distance".]
diff --git a/files/scripts/ecr-access-job-requirements.txt b/files/scripts/ecr-access-job-requirements.txt
new file mode 100644
index 000000000..bb6d4b847
--- /dev/null
+++ b/files/scripts/ecr-access-job-requirements.txt
@@ -0,0 +1 @@
+boto3<2
diff --git a/files/scripts/ecr-access-job.md b/files/scripts/ecr-access-job.md
new file mode 100644
index 000000000..9659b186b
--- /dev/null
+++ b/files/scripts/ecr-access-job.md
@@ -0,0 +1,85 @@
+# ecr-access-job
+
+### How to run
+
+Configure `global.ecr-access-job-role-arn` to the ARN of the `EcrRepoPolicyUpdateRole` role (described below) in the `manifest.json` file.
+
+Run `gen3 kube-setup-ecr-access-cronjob` to set up the ECR access cronjob.
+
+### What does it do?
+
+The job runs the `ecr-access-job.py` script.
+
+This script updates the configuration of ECR repositories so that users can access the repositories that were created for them.
+
+It queries a DynamoDB table which has the following (simplified) structure:
+| user_id            | workspace_type       | account_id |
+| ------------------ | -------------------- | ---------- |
+| user1@username.com | Direct Pay           | 123456     |
+| user2@username.com | Direct Pay           | 789012     |
+| user1@username.com | Other workspace type |            |
+
+and then allows each AWS account to access the appropriate ECR repositories. The users' ECR repositories are based on their username as stored in the table. For example, `user1@username.com`'s ECR repository is assumed to be `nextflow-approved/user1-40username-2ecom` (see the escaping sketch below).
+
+### Access needed
+
+- "EcrRepoPolicyUpdateRole" role in the account (Acct1) that contains the ECR repositories:
+
+**Note:** `kube-setup-ecr-access-cronjob.sh` assumes this role already exists.
+
+Permissions:
+```
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Sid": "UpdateEcrRepoPolicy",
+            "Effect": "Allow",
+            "Action": "ecr:SetRepositoryPolicy",
+            "Resource": "arn:aws:ecr:us-east-1::repository/nextflow-approved/*"
+        }
+    ]
+}
+```
+
+Trust policy (allows Acct2):
+```
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Sid": "AllowAssumingRole",
+            "Effect": "Allow",
+            "Principal": {
+                "AWS": "arn:aws:iam:::root"
+            },
+            "Action": "sts:AssumeRole"
+        }
+    ]
+}
+```
+
+- Policy in the account (Acct2) that contains the DynamoDB table (created automatically by `kube-setup-ecr-access-job.sh`):
+```
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Sid": "ReadDynamoDB",
+            "Effect": "Allow",
+            "Action": [
+                "dynamodb:Scan"
+            ],
+            "Resource": "arn:aws:dynamodb:::table/"
+        },
+        {
+            "Sid": "AssumeEcrRole",
+            "Effect": "Allow",
+            "Action": [
+                "sts:AssumeRole"
+            ],
+            "Resource": "arn:aws:iam:::role/"
+        }
+    ]
+}
+```
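The repository name `nextflow-approved/user1-40username-2ecom` above comes from Hatchery-style escaping of the username. Here is a standalone illustration of that rule; the helper name `escape_username` is ours, while the actual script below implements it as `escapism`:

```python
def escape_username(username: str) -> str:
    # Hatchery-style escaping: any character outside [a-z0-9] is replaced by
    # "-" plus its two-digit hex code, yielding a valid ECR repository name.
    safe = "abcdefghijklmnopqrstuvwxyz0123456789"
    return "".join(c if c in safe else "-{0:02x}".format(ord(c)) for c in username)


# "@" is hex 40 and "." is hex 2e:
assert escape_username("user1@username.com") == "user1-40username-2ecom"
```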
+ We need to escape the username in the same way it's escaped by Hatchery's `escapism` function because + special chars cannot be used in an ECR repo name, and so that the ECR repo generated here matches the + name expected by Hatchery. + """ + safeBytes = "abcdefghijklmnopqrstuvwxyz0123456789" + escaped = "" + for v in string: + if v not in safeBytes: + hexCode = "{0:02x}".format(ord(v)) + escaped += "-" + hexCode + else: + escaped += v + return escaped + + +def get_configs() -> (str, str): + table_name = os.environ.get("PAY_MODELS_DYNAMODB_TABLE") + if not table_name: + raise Exception("Missing 'PAY_MODELS_DYNAMODB_TABLE' environment variable") + + ecr_role_arn = os.environ.get("ECR_ACCESS_JOB_ARN") + if not ecr_role_arn: + raise Exception("Missing 'ECR_ACCESS_JOB_ARN' environment variable") + + return table_name, ecr_role_arn + + +def query_usernames_and_account_ids(table_name: str) -> List[dict]: + """ + Returns: + List[dict]: [ { "user_id": "user1@username.com", "account_id": "123456" } ] + """ + if MAIN_ACCOUNT_CREDS["key_id"]: + session = boto3.Session( + aws_access_key_id=MAIN_ACCOUNT_CREDS["key_id"], + aws_secret_access_key=MAIN_ACCOUNT_CREDS["key_secret"], + ) + else: + session = boto3.Session() + dynamodb = session.resource("dynamodb", region_name=REGION) + table = dynamodb.Table(table_name) + + # get usernames and AWS account IDs from DynamoDB + queried_keys = ["user_id", "account_id"] + filter_expr = Attr("workspace_type").eq("Direct Pay") + proj = ", ".join("#" + key for key in queried_keys) + expr = {"#" + key: key for key in queried_keys} + response = table.scan( + FilterExpression=filter_expr, + ProjectionExpression=proj, + ExpressionAttributeNames=expr, + ) + assert response.get("ResponseMetadata", {}).get("HTTPStatusCode") == 200, response + items = response["Items"] + # if the response is paginated, get the rest of the items + while response["Count"] > 0: + if "LastEvaluatedKey" not in response: + break + response = table.scan( + FilterExpression=filter_expr, + ProjectionExpression=proj, + ExpressionAttributeNames=expr, + ExclusiveStartKey=response["LastEvaluatedKey"], + ) + assert ( + response.get("ResponseMetadata", {}).get("HTTPStatusCode") == 200 + ), response + items.extend(response["Items"]) + + return items + + +def update_access_in_ecr(repo_to_account_ids: List[dict], ecr_role_arn: str) -> None: + # get access to ECR in the account that contains the ECR repos + if MAIN_ACCOUNT_CREDS["key_id"]: + sts = boto3.client( + "sts", + aws_access_key_id=MAIN_ACCOUNT_CREDS["key_id"], + aws_secret_access_key=MAIN_ACCOUNT_CREDS["key_secret"], + ) + else: + sts = boto3.client("sts") + assumed_role = sts.assume_role( + RoleArn=ecr_role_arn, + DurationSeconds=900, # minimum time for aws assume role as per boto docs + RoleSessionName=f"ecr-access-assume-role-{str(uuid.uuid4())[:8]}", + ) + assert "Credentials" in assumed_role, "Unable to assume role" + ecr = boto3.client( + "ecr", + aws_access_key_id=assumed_role["Credentials"]["AccessKeyId"], + aws_secret_access_key=assumed_role["Credentials"]["SecretAccessKey"], + aws_session_token=assumed_role["Credentials"]["SessionToken"], + ) + + # for each ECR repo, whitelist the account IDs so users can access the repo + for repo, account_ids in repo_to_account_ids.items(): + print(f"Allowing AWS accounts {account_ids} to use ECR repository '{repo}'") + policy = { + "Version": "2008-10-17", + "Statement": [ + { + "Sid": "AllowCrossAccountPull", + "Effect": "Allow", + "Principal": { + "AWS": [ + f"arn:aws:iam::{account_id}:root" + for 
account_id in account_ids + ] + }, + "Action": [ + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:GetAuthorizationToken", + "ecr:GetDownloadUrlForLayer", + ], + } + ], + } + # Note that this is overwriting the repo policy, not appending to it. This means we can't have 2 dynamodb + # tables pointing at the same set of ECR repos: the repos would only allow the accounts in the table for + # which the script was run most recently. eg QA and Staging can't use the same ECR repos. + # Appending is not possible since this code will eventually rely on Arborist for authorization information + # and we'll need to overwrite in order to remove expired access. + try: + ecr.set_repository_policy( + repositoryName=repo, + policyText=json.dumps(policy), + ) + except Exception as e: + print(f" Unable to update '{repo}'; skipping it: {e}") + + +def main() -> None: + table_name, ecr_role_arn = get_configs() + items = query_usernames_and_account_ids(table_name) + + # construct mapping: { ECR repo url: [ AWS account IDs with access ] } + ecr_repo_prefix = "nextflow-approved" + repo_to_account_ids = { + f"{ecr_repo_prefix}/{escapism(e['user_id'])}": [e["account_id"]] + for e in items + if "account_id" in e + } + print( + "Mapping of ECR repository to allowed AWS accounts:\n", + json.dumps(repo_to_account_ids, indent=2), + ) + + update_access_in_ecr(repo_to_account_ids, ecr_role_arn) + + +if __name__ == "__main__": + main() diff --git a/files/scripts/healdata/heal-cedar-data-ingest.py b/files/scripts/healdata/heal-cedar-data-ingest.py index 1235c6f58..e0c4b3c46 100644 --- a/files/scripts/healdata/heal-cedar-data-ingest.py +++ b/files/scripts/healdata/heal-cedar-data-ingest.py @@ -1,15 +1,19 @@ import argparse +import copy +import json import sys import requests import pydash +from uuid import UUID # Defines how a field in metadata is going to be mapped into a key in filters FILTER_FIELD_MAPPINGS = { - "Study Type.study_stage": "Study Type", - "Data.data_type": "Data Type", - "Study Type.study_subject_type": "Subject Type", - "Human Subject Applicability.gender_applicability": "Gender", - "Human Subject Applicability.age_applicability": "Age" + "study_metadata.study_type.study_stage": "Study Type", + "study_metadata.data.data_type": "Data Type", + "study_metadata.study_type.study_subject_type": "Subject Type", + "study_metadata.human_subject_applicability.gender_applicability": "Gender", + "study_metadata.human_subject_applicability.age_applicability": "Age", + "research_program": "Research Program", } # Defines how to handle special cases for values in filters @@ -21,19 +25,54 @@ "Questionnaire/Survey/Assessment - unvalidated instrument": "Questionnaire/Survey/Assessment", "Cis Male": "Male", "Cis Female": "Female", - "Trans Male": "Female-to-male transsexual", - "Trans Female": "Male-to-female transsexual", - "Agender, Non-binary, gender non-conforming": "Other", - "Gender Queer": "Other", - "Intersex": "Intersexed", - "Buisness Development": "Business Development" + "Trans Male": "Transgender man/trans man/female-to-male (FTM)", + "Female-to-male transsexual": "Transgender man/trans man/female-to-male (FTM)", + "Trans Female": "Transgender woman/trans woman/male-to-female (MTF)", + "Male-to-female transsexual": "Transgender woman/trans woman/male-to-female (MTF)", + "Agender, Non-binary, gender non-conforming": "Genderqueer/gender nonconforming/neither exclusively male nor female", + "Gender Queer": "Genderqueer/gender nonconforming/neither exclusively male nor female", + "Intersex": 
"Genderqueer/gender nonconforming/neither exclusively male nor female", + "Intersexed": "Genderqueer/gender nonconforming/neither exclusively male nor female", + "Buisness Development": "Business Development", } # Defines field that we don't want to include in the filters OMITTED_VALUES_MAPPING = { - "Human Subject Applicability.gender_applicability": "Not applicable" + "study_metadata.human_subject_applicability.gender_applicability": "Not applicable" } +# repository links +REPOSITORY_STUDY_ID_LINK_TEMPLATE = { + "NIDDK Central": "https://repository.niddk.nih.gov/studies//", + "NIDA Data Share": "https://datashare.nida.nih.gov/study/", + "NICHD DASH": "https://dash.nichd.nih.gov/study/", + "ICPSR": "https://www.icpsr.umich.edu/web/ICPSR/studies/", + "BioSystics-AP": "https://biosystics-ap.com/assays/assaystudy//", +} + + +def is_valid_uuid(uuid_to_test, version=4): + """ + Check if uuid_to_test is a valid UUID. + + Parameters + ---------- + uuid_to_test : str + version : {1, 2, 3, 4} + + Returns + ------- + `True` if uuid_to_test is a valid UUID, otherwise `False`. + + """ + + try: + uuid_obj = UUID(uuid_to_test, version=version) + except ValueError: + return False + return str(uuid_obj) == uuid_to_test + + def update_filter_metadata(metadata_to_update): filter_metadata = [] for metadata_field_key, filter_field_key in FILTER_FIELD_MAPPINGS.items(): @@ -45,19 +84,91 @@ def update_filter_metadata(metadata_to_update): print(filter_field_values) raise TypeError("Neither a string nor a list") for filter_field_value in filter_field_values: - if (metadata_field_key, filter_field_value) in OMITTED_VALUES_MAPPING.items(): + if ( + metadata_field_key, + filter_field_value, + ) in OMITTED_VALUES_MAPPING.items(): continue if filter_field_value in SPECIAL_VALUE_MAPPINGS: filter_field_value = SPECIAL_VALUE_MAPPINGS[filter_field_value] - filter_metadata.append({"key": filter_field_key, "value": filter_field_value}) + filter_metadata.append( + {"key": filter_field_key, "value": filter_field_value} + ) filter_metadata = pydash.uniq(filter_metadata) metadata_to_update["advSearchFilters"] = filter_metadata + # Retain these from existing tags + save_tags = ["Data Repository"] + tags = [tag for tag in metadata_to_update["tags"] if tag["category"] in save_tags] + # Add any new tags from advSearchFilters + for f in metadata_to_update["advSearchFilters"]: + if f["key"] == "Gender": + continue + tag = {"name": f["value"], "category": f["key"]} + if tag not in tags: + tags.append(tag) + metadata_to_update["tags"] = tags return metadata_to_update + +def get_client_token(client_id: str, client_secret: str): + try: + token_url = f"http://revproxy-service/user/oauth2/token" + headers = {"Content-Type": "application/x-www-form-urlencoded"} + params = {"grant_type": "client_credentials"} + data = "scope=openid user data" + + token_result = requests.post( + token_url, + params=params, + headers=headers, + data=data, + auth=(client_id, client_secret), + ) + token = token_result.json()["access_token"] + except: + raise Exception("Could not get token") + return token + + +def get_related_studies(serial_num, guid, hostname): + related_study_result = [] + + if serial_num: + mds = requests.get( + f"http://revproxy-service/mds/metadata?nih_reporter.project_num_split.serial_num={serial_num}&data=true&limit=2000" + ) + if mds.status_code == 200: + related_study_metadata = mds.json() + + for ( + related_study_metadata_key, + related_study_metadata_value, + ) in related_study_metadata.items(): + if related_study_metadata_key == 
guid or ( + related_study_metadata_value["_guid_type"] != "discovery_metadata" + and related_study_metadata_value["_guid_type"] + != "unregistered_discovery_metadata" + ): + # do nothing for self, or for archived studies + continue + title = ( + related_study_metadata_value.get("gen3_discovery", {}) + .get("study_metadata", {}) + .get("minimal_info", {}) + .get("study_name", "") + ) + link = ( + f"https://{hostname}/portal/discovery/{related_study_metadata_key}/" + ) + related_study_result.append({"title": title, "link": link}) + return related_study_result + + parser = argparse.ArgumentParser() parser.add_argument("--directory", help="CEDAR Directory ID for registering ") -parser.add_argument("--access_token", help="User access token") +parser.add_argument("--cedar_client_id", help="The CEDAR client id") +parser.add_argument("--cedar_client_secret", help="The CEDAR client secret") parser.add_argument("--hostname", help="Hostname") @@ -66,95 +177,220 @@ def update_filter_metadata(metadata_to_update): if not args.directory: print("Directory ID is required!") sys.exit(1) -if not args.access_token: - print("User access token is required!") +if not args.cedar_client_id: + print("CEDAR client id is required!") + sys.exit(1) +if not args.cedar_client_secret: + print("CEDAR client secret is required!") sys.exit(1) if not args.hostname: print("Hostname is required!") sys.exit(1) dir_id = args.directory -access_token = args.access_token +client_id = args.cedar_client_id +client_secret = args.cedar_client_secret hostname = args.hostname -token_header = {"Authorization": 'bearer ' + access_token} +print("Getting CEDAR client access token") +access_token = get_client_token(client_id, client_secret) +token_header = {"Authorization": "bearer " + access_token} limit = 10 offset = 0 -# initalize this to be bigger than our inital call so we can go through while loop +# initialize this to be bigger than our initial call so we can go through while loop total = 100 -while((limit + offset <= total)): +if not is_valid_uuid(dir_id): + print("Directory ID is not in UUID format!") + sys.exit(1) + +while limit + offset <= total: # Get the metadata from cedar to register print("Querying CEDAR...") - cedar = requests.get(f"http://revproxy-service/cedar/get-instance-by-directory/{dir_id}?limit={limit}&offset={offset}", headers=token_header) + cedar = requests.get( + f"http://revproxy-service/cedar/get-instance-by-directory/{dir_id}?limit={limit}&offset={offset}", + headers=token_header, + ) # If we get metadata back now register with MDS if cedar.status_code == 200: metadata_return = cedar.json() if "metadata" not in metadata_return: - print("Got 200 from CEDAR wrapper but no metadata in body, something is not right!") + print( + "Got 200 from CEDAR wrapper but no metadata in body, something is not right!" 
+ ) sys.exit(1) total = metadata_return["metadata"]["totalCount"] returned_records = len(metadata_return["metadata"]["records"]) print(f"Successfully got {returned_records} record(s) from CEDAR directory") for cedar_record in metadata_return["metadata"]["records"]: - if "appl_id" not in cedar_record: + # get the appl id from cedar for querying in our MDS + cedar_appl_id = pydash.get( + cedar_record, "metadata_location.nih_application_id" + ) + if cedar_appl_id is None: print("This record doesn't have appl_id, skipping...") continue - # get the appl id from cedar for querying in our MDS - cedar_appl_id = str(cedar_record["appl_id"]) - # Get the metadata record for the nih_application_id - mds = requests.get(f"http://revproxy-service/mds/metadata?gen3_discovery.appl_id={cedar_appl_id}&data=true") + mds = requests.get( + f"http://revproxy-service/mds/metadata?gen3_discovery.study_metadata.metadata_location.nih_application_id={cedar_appl_id}&data=true" + ) if mds.status_code == 200: mds_res = mds.json() # the query result key is the record of the metadata. If it doesn't return anything then our query failed. if len(list(mds_res.keys())) == 0 or len(list(mds_res.keys())) > 1: - print("Query returned nothing for ", cedar_appl_id, "appl id") + print("Query returned nothing for", cedar_appl_id, "appl id") continue # get the key for our mds record - cedar_record_id = list(mds_res.keys())[0] + mds_record_guid = list(mds_res.keys())[0] - mds_res = mds_res[cedar_record_id] - mds_cedar_register_data_body = {} + mds_res = mds_res[mds_record_guid] + mds_cedar_register_data_body = {**mds_res} mds_discovery_data_body = {} + mds_clinical_trials = {} if mds_res["_guid_type"] == "discovery_metadata": print("Metadata is already registered. Updating MDS record") elif mds_res["_guid_type"] == "unregistered_discovery_metadata": - print("Metadata is has not been registered. Registering it in MDS record") + print( + "Metadata has not been registered. Registering it in MDS record" + ) + else: + print( + f"This metadata record has a special GUID type \"{mds_res['_guid_type']}\" and will be skipped" + ) continue - pydash.merge(mds_discovery_data_body, mds_res["gen3_discovery"], cedar_record) - mds_discovery_data_body = update_filter_metadata(mds_discovery_data_body) + if "clinicaltrials_gov" in cedar_record: + mds_clinical_trials = cedar_record["clinicaltrials_gov"] + del cedar_record["clinicaltrials_gov"] + + # some special handling for this field, because its parent will be deleted before we merge the CEDAR and MDS SLMD to avoid duplicated values + cedar_record_other_study_websites = cedar_record.get( + "metadata_location", {} + ).get("other_study_websites", []) + del cedar_record["metadata_location"] + + mds_res["gen3_discovery"]["study_metadata"].update(cedar_record) + mds_res["gen3_discovery"]["study_metadata"]["metadata_location"][ + "other_study_websites" + ] = cedar_record_other_study_websites + + # setup citations + doi_citation = mds_res["gen3_discovery"]["study_metadata"].get( + "doi_citation", "" + ) + mds_res["gen3_discovery"]["study_metadata"]["citation"][ + "heal_platform_citation" + ] = doi_citation + + # setup repository_study_link + data_repositories = ( + mds_res.get("gen3_discovery", {}) + .get("study_metadata", {}) + .get("metadata_location", {}) + .get("data_repositories", []) + ) + repository_citation = "Users must also include a citation to the data as specified by the local repository." 
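+ # The extra sentence below is appended to the citation once at least one repository entry gets a repository_study_link filled in.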
+ repository_citation_additional_text = ' The link to the study page at the local repository can be found in the "Data" tab.' + for repository in data_repositories: + if ( + repository["repository_name"] + and repository["repository_name"] + in REPOSITORY_STUDY_ID_LINK_TEMPLATE + and repository["repository_study_ID"] + ): + repository_study_link = REPOSITORY_STUDY_ID_LINK_TEMPLATE[ + repository["repository_name"] + ].replace("", repository["repository_study_ID"]) + repository.update( + {"repository_study_link": repository_study_link} + ) + if ( + repository_citation_additional_text + not in repository_citation + ): + repository_citation += repository_citation_additional_text + if len(data_repositories): + data_repositories[0] = { + **data_repositories[0], + "repository_citation": repository_citation, + } + + mds_res["gen3_discovery"]["study_metadata"]["metadata_location"][ + "data_repositories" + ] = copy.deepcopy(data_repositories) + + # set up related studies + serial_num = None + try: + serial_num = ( + mds_res.get("nih_reporter", {}) + .get("project_num_split", {}) + .get("serial_num", None) + ) + except Exception: + print("Unable to get serial number for study") + + if serial_num is None: + print("Unable to get serial number for study") + + related_study_result = get_related_studies( + serial_num, mds_record_guid, hostname + ) + mds_res["gen3_discovery"]["related_studies"] = copy.deepcopy(related_study_result) + + # merge data from cedar that is not study level metadata into a level higher + deleted_keys = [] + for key, value in mds_res["gen3_discovery"]["study_metadata"].items(): + if not isinstance(value, dict): + mds_res["gen3_discovery"][key] = value + deleted_keys.append(key) + for key in deleted_keys: + del mds_res["gen3_discovery"]["study_metadata"][key] + + mds_discovery_data_body = update_filter_metadata( + mds_res["gen3_discovery"] + ) + mds_cedar_register_data_body["gen3_discovery"] = mds_discovery_data_body + if mds_clinical_trials: + mds_cedar_register_data_body["clinicaltrials_gov"] = { + **mds_cedar_register_data_body.get("clinicaltrials_gov", {}), + **mds_clinical_trials, + } + mds_cedar_register_data_body["_guid_type"] = "discovery_metadata" - print("Metadata is now being registered.") - mds_put = requests.put(f"http://revproxy-service/mds/metadata/{cedar_record_id}", + print(f"Metadata {mds_record_guid} is now being registered.") + mds_put = requests.put( + f"http://revproxy-service/mds/metadata/{mds_record_guid}", headers=token_header, - json = mds_cedar_register_data_body + json=mds_cedar_register_data_body, ) if mds_put.status_code == 200: - print(f"Successfully registered: {cedar_record_id}") + print(f"Successfully registered: {mds_record_guid}") else: - print(f"Failed to register: {cedar_record_id}. Might not be MDS admin") + print( + f"Failed to register: {mds_record_guid}. 
Might not be MDS admin" + ) print(f"Status from MDS: {mds_put.status_code}") else: print(f"Failed to get information from MDS: {mds.status_code}") + else: + print( + f"Failed to get information from CEDAR wrapper service: {cedar.status_code}" + ) + if offset + limit == total: break offset = offset + limit if (offset + limit) > total: - limit = (offset + limit) - total - - -else: - print(f"Failed to get information from CEDAR wrapper service: {cedar.status_code}") + limit = total - offset diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index c36194765..6896314ab 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -7,14 +7,15 @@ achecker.ca apache.github.io api.epigraphdb.org api.monqcle.com +awslabs.github.io biodata-integration-tests.net marketing.biorender.com clinicaltrials.gov +charts.bitnami.com ctds-planx.atlassian.net data.cityofchicago.org dataguids.org api.login.yahoo.com -api.snapcraft.io apt.kubernetes.io argoproj.github.io archive.cloudera.com @@ -33,6 +34,7 @@ cernvm.cern.ch charts.bitnami.com charts.helm.sh cloud.r-project.org +coredns.github.io coreos.com covidstoplight.org cpan.mirrors.tds.net @@ -69,11 +71,14 @@ ftp.ussg.iu.edu fmwww.bc.edu gcr.io get.helm.sh +ghcr.io git.io go.googlesource.com golang.org gopkg.in grafana.com +grafana.github.io +helm.elastic.co http.us.debian.org ifconfig.io ingress.coralogix.us @@ -123,6 +128,7 @@ orcid.org pgp.mit.edu ppa.launchpad.net prometheus-community.github.io +proxy.golang.org public.ecr.aws pubmirrors.dal.corespace.com reflector.westga.edu @@ -138,8 +144,10 @@ repo.dimenoc.com repos.mia.quadranet.com repos.redrockhost.com repos.sensuapp.org +repo.vmware.com repository.cloudera.com resource.metadatacenter.org +rmq.n3c.ncats.io rules.emergingthreats.net rweb.quant.ku.edu sa-update.dnswl.org @@ -158,3 +166,5 @@ www.rabbitmq.com www.uniprot.org vpodc.org yahoo.com +idp.stage.qdr.org +stage.qdr.org \ No newline at end of file diff --git a/files/squid_whitelist/web_wildcard_whitelist b/files/squid_whitelist/web_wildcard_whitelist index a8c765814..b71ee76c2 100644 --- a/files/squid_whitelist/web_wildcard_whitelist +++ b/files/squid_whitelist/web_wildcard_whitelist @@ -21,6 +21,7 @@ .centos.org .ceph.com .chef.io +.chordshealth.org .clamav.net .cloud.google.com .cloudfront.net @@ -31,6 +32,7 @@ .data-commons.org .datadoghq.com .datastage.io +.ddog-gov.com .diseasedatahub.org .docker.com .docker.io @@ -38,6 +40,7 @@ .dph.illinois.gov .elasticsearch.org .erlang-solutions.com +.external-secrets.io .extjs.com .fedoraproject.org .gen3.org @@ -94,9 +97,12 @@ .sks-keyservers.net .slack.com .slack-msgs.com +.snapcraft.io +.snapcraftcontent.com .sourceforge.net .southsideweekly.com .theanvil.io +.tigera.io .twistlock.com .ubuntu.com .ucsc.edu diff --git a/flavors/squid_auto/squid_running_on_docker.sh b/flavors/squid_auto/squid_running_on_docker.sh index 05607f304..812a9f738 100644 --- a/flavors/squid_auto/squid_running_on_docker.sh +++ b/flavors/squid_auto/squid_running_on_docker.sh @@ -8,6 +8,9 @@ DISTRO=$(awk -F '[="]*' '/^NAME/ { print $2 }' < /etc/os-release) WORK_USER="ubuntu" if [[ $DISTRO == "Amazon Linux" ]]; then WORK_USER="ec2-user" + if [[ $(awk -F '[="]*' '/^VERSION_ID/ { print $2 }' < /etc/os-release) == "2023" ]]; then + DISTRO="al2023" + fi fi HOME_FOLDER="/home/${WORK_USER}" SUB_FOLDER="${HOME_FOLDER}/cloud-automation" @@ -60,6 +63,8 @@ fi function install_basics(){ if [[ $DISTRO == "Ubuntu" ]]; then apt -y install atop + elif [[ $DISTRO == "al2023" 
]]; then + sudo dnf install cronie nc -y fi } @@ -69,10 +74,18 @@ function install_docker(){ # Docker ############################################################### # Install docker from sources - curl -fsSL ${DOCKER_DOWNLOAD_URL}/gpg | sudo apt-key add - - add-apt-repository "deb [arch=amd64] ${DOCKER_DOWNLOAD_URL} $(lsb_release -cs) stable" - apt update - apt install -y docker-ce + if [[ $DISTRO == "Ubuntu" ]]; then + curl -fsSL ${DOCKER_DOWNLOAD_URL}/gpg | sudo apt-key add - + add-apt-repository "deb [arch=amd64] ${DOCKER_DOWNLOAD_URL} $(lsb_release -cs) stable" + apt update + apt install -y docker-ce + else + sudo yum update -y + sudo yum install -y docker + # Start and enable Docker service + sudo systemctl start docker + sudo systemctl enable docker + fi mkdir -p /etc/docker cp ${SUB_FOLDER}/flavors/squid_auto/startup_configs/docker-daemon.json /etc/docker/daemon.json chmod -R 0644 /etc/docker @@ -201,8 +214,10 @@ function install_awslogs { if [[ $DISTRO == "Ubuntu" ]]; then wget ${AWSLOGS_DOWNLOAD_URL} -O amazon-cloudwatch-agent.deb dpkg -i -E ./amazon-cloudwatch-agent.deb - else + elif [[ $DISTRO == "Amazon Linux" ]]; then sudo yum install amazon-cloudwatch-agent nc -y + elif [[ $DISTRO == "al2023" ]]; then + sudo dnf install amazon-cloudwatch-agent -y fi # Configure the AWS logs @@ -292,6 +307,19 @@ function main(){ --volume ${SQUID_CACHE_DIR}:${SQUID_CACHE_DIR} \ --volume ${SQUID_CONFIG_DIR}:${SQUID_CONFIG_DIR}:ro \ quay.io/cdis/squid:${SQUID_IMAGE_TAG} + + max_attempts=10 + attempt_counter=0 + while [ $attempt_counter -lt $max_attempts ]; do + #((attempt_counter++)) + sleep 10 + if [[ -z "$(sudo lsof -i:3128)" ]]; then + echo "Squid not healthy, restarting." + docker restart squid + else + echo "Squid healthy" + fi + done } main diff --git a/flavors/squid_auto/startup_configs/squid.conf b/flavors/squid_auto/startup_configs/squid.conf index 653026200..b1e44810a 100644 --- a/flavors/squid_auto/startup_configs/squid.conf +++ b/flavors/squid_auto/startup_configs/squid.conf @@ -56,7 +56,6 @@ http_access deny all persistent_request_timeout 5 seconds -cache_dir ufs /var/cache/squid 100 16 256 pid_filename /var/run/squid/squid.pid # vi:syntax=squid.conf diff --git a/gen3/bin/awsrole.sh b/gen3/bin/awsrole.sh index 476e7d003..dd19ea7a4 100644 --- a/gen3/bin/awsrole.sh +++ b/gen3/bin/awsrole.sh @@ -20,18 +20,22 @@ gen3_awsrole_help() { # NOTE: service-account to role is 1 to 1 # # @param serviceAccount to link to the role +# @param flag (optional) - specify a flag to use a different trust policy # function gen3_awsrole_ar_policy() { local serviceAccount="$1" shift || return 1 - if [[ ! -z $1 ]]; then - local namespace=$1 + if [[ -z $1 ]] || [[ $1 == -* ]]; then + namespace=$(gen3 db namespace) else - local namespace=$(gen3 db namespace) + namespace=$1 + shift fi local issuer_url local account_id local vpc_name + local flag=$flag + vpc_name="$(gen3 api environment)" || return 1 issuer_url="$(aws eks describe-cluster \ --name ${vpc_name} \ @@ -42,7 +46,42 @@ function gen3_awsrole_ar_policy() { local provider_arn="arn:aws:iam::${account_id}:oidc-provider/${issuer_url}" - cat - < config.tfvars @@ -182,10 +226,14 @@ gen3_awsrole_create() { gen3_log_err "use: gen3 awsrole create roleName saName" return 1 fi - if [[ ! -z $1 ]]; then - local namespace=$1 + if [[ -z $1 ]] || [[ $1 == -* ]]; then + namespace=$(gen3 db namespace) else - local namespace=$(gen3 db namespace) + namespace=$1 + shift + fi + if [[ ! 
-z $1 ]]; then + flag=$1 fi # do simple validation of name local regexp="^[a-z][a-z0-9\-]*$" @@ -200,6 +248,7 @@ EOF return 1 fi + # check if the name is already used by another entity local entity_type entity_type=$(_get_entity_type $rolename) @@ -216,9 +265,11 @@ EOF fi TF_IN_AUTOMATION="true" - if ! _tfplan_role $rolename $saName $namespace; then + + if ! _tfplan_role $rolename $saName $namespace $flag; then return 1 fi + if ! _tfapply_role $rolename; then return 1 fi @@ -367,4 +418,4 @@ gen3_awsrole() { # Let testsuite source file if [[ -z "$GEN3_SOURCE_ONLY" ]]; then gen3_awsrole "$@" -fi +fi \ No newline at end of file diff --git a/gen3/bin/create-es7-cluster.sh b/gen3/bin/create-es7-cluster.sh new file mode 100644 index 000000000..553dc2652 --- /dev/null +++ b/gen3/bin/create-es7-cluster.sh @@ -0,0 +1,64 @@ +#!/bin/bash + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/gen3setup" + +# Save the new and old cluster names to vars +environment=`gen3 api environment` +existing_cluster_name="$environment-gen3-metadata" +new_cluster_name="$environment-gen3-metadata-2" + +# Gather existing cluster information +cluster_info=$(aws es describe-elasticsearch-domain --domain-name "$existing_cluster_name") + +# Extract relevant information from the existing cluster +instance_type=`echo "$cluster_info" | jq -r '.DomainStatus.ElasticsearchClusterConfig.InstanceType'` +instance_count=`echo "$cluster_info" | jq -r '.DomainStatus.ElasticsearchClusterConfig.InstanceCount'` +volume_type=`echo "$cluster_info" | jq -r '.DomainStatus.EBSOptions.VolumeType'` +volume_size=`echo "$cluster_info" | jq -r '.DomainStatus.EBSOptions.VolumeSize'` +vpc_name=`echo "$cluster_info" | jq -r '.DomainStatus.VPCOptions.VPCId'` +subnet_ids=`echo "$cluster_info" | jq -r '.DomainStatus.VPCOptions.SubnetIds[]'` +security_groups=`echo "$cluster_info" | jq -r '.DomainStatus.VPCOptions.SecurityGroupIds[]'` +access_policies=`echo "$cluster_info" | jq -r '.DomainStatus.AccessPolicies'` +kms_key_id=`echo "$cluster_info" | jq -r '.DomainStatus.EncryptionAtRestOptions.KmsKeyId'` + +# Check if the new Elasticsearch cluster name already exists +new_cluster=`aws es describe-elasticsearch-domain --domain-name "$new_cluster_name"` + +if [ -n "$new_cluster" ]; then + echo "Cluster $new_cluster_name already exists" +else + echo "Cluster does not exist- creating..." + # Create the new Elasticsearch cluster + aws es create-elasticsearch-domain \ + --domain-name "$new_cluster_name" \ + --elasticsearch-version "7.10" \ + --elasticsearch-cluster-config \ + "InstanceType=$instance_type,InstanceCount=$instance_count" \ + --ebs-options \ + "EBSEnabled=true,VolumeType=$volume_type,VolumeSize=$volume_size" \ + --vpc-options "SubnetIds=${subnet_ids[*]},SecurityGroupIds=${security_groups[*]}" \ + --access-policies "$access_policies" \ + --encryption-at-rest-options "Enabled=true,KmsKeyId=$kms_key_id"\ + --node-to-node-encryption-options "Enabled=true" + > /dev/null 2>&1 + + # Wait for the new cluster to be available + sleep_duration=60 + max_retries=10 + retry_count=0 + + while [ $retry_count -lt $max_retries ]; do + cluster_status=$(aws es describe-elasticsearch-domain --domain-name "$new_cluster_name" | jq -r '.DomainStatus.Processing') + if [ "$cluster_status" != "true" ]; then + echo "New cluster is available." + break + fi + sleep $sleep_duration + ((retry_count++)) + done + + if [ $retry_count -eq $max_retries ]; then + echo "New cluster creation may still be in progress. Please check the AWS Management Console for the status." 
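+ # 10 polls at 60 seconds each (~10 minutes); domain creation can take longer, so this is informational rather than fatal.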
+ fi +fi diff --git a/gen3/bin/dbbackup.sh b/gen3/bin/dbbackup.sh new file mode 100644 index 000000000..eb9611a90 --- /dev/null +++ b/gen3/bin/dbbackup.sh @@ -0,0 +1,212 @@ +#!/bin/bash + +#################################################################################################### +# Script: dbbackup.sh +# +# Description: +# This script facilitates the management of database backups within the gen3 environment. It is +# equipped to establish policies, service accounts, roles, and S3 buckets. Depending on the +# command provided, it will either initiate a database dump or perform a restore. +# +# Usage: +# gen3 dbbackup [dump|restore|va-dump] +# +# dump - Initiates a database dump, creating the essential AWS resources if they are absent. +# The dump operation is intended to be executed from the namespace/commons that requires +# the backup. +# restore - Initiates a database restore, creating the essential AWS resources if they are absent. +# The restore operation is meant to be executed in the target namespace, where the backup +# needs to be restored. +# +# Notes: +# This script extensively utilizes the AWS CLI and the gen3 CLI. Proper functioning demands a +# configured gen3 environment and the availability of the necessary CLI tools. +# +#################################################################################################### + +# Exit on error +#set -e + +# Print commands before executing +#set -x + +#trap 'echo "Error at Line $LINENO"' ERR + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/lib/kube-setup-init" + +policy_name="bucket_reader_writer_gen3_db_backup" +account_id=$(aws sts get-caller-identity --query "Account" --output text) +vpc_name="$(gen3 api environment)" +namespace="$(gen3 db namespace)" +sa_name="dbbackup-sa" +bucket_name="gen3-db-backups-${account_id}" + +gen3_log_info "policy_name: $policy_name" +gen3_log_info "account_id: $account_id" +gen3_log_info "vpc_name: $vpc_name" +gen3_log_info "namespace: $namespace" +gen3_log_info "sa_name: $sa_name" +gen3_log_info "bucket_name: $bucket_name" + + +# Create an S3 access policy if it doesn't exist +create_policy() { + # Check if policy exists + if ! aws iam list-policies --query "Policies[?PolicyName == '$policy_name'] | [0].Arn" --output text | grep -q "arn:aws:iam"; then + # Create the S3 access policy - policy document + access_policy=$(cat <<-EOM +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:CreateBucket", + "s3:PutObject", + "s3:GetObject", + "s3:ListBucket", + "s3:DeleteObject" + ], + "Resource": [ + "arn:aws:s3:::gen3-db-backups-*" + ] + } + ] +} +EOM + ) + + # Create the S3 access policy from the policy document + policy_arn=$(aws iam create-policy --policy-name "$policy_name" --policy-document "$access_policy" --query "Policy.Arn" --output text) + gen3_log_info "policy_arn: $policy_arn" + else + gen3_log_info "Policy $policy_name already exists, skipping policy creation." 
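+ # Look up the ARN of the existing policy so it can still be attached to the backup role below.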
+ policy_arn=$(aws iam list-policies --query "Policies[?PolicyName == '$policy_name'] | [0].Arn" --output text | grep "arn:aws:iam" | head -n 1) + gen3_log_info "policy_arn: $policy_arn" + fi +} + + +# Create or update the Service Account and its corresponding IAM Role +create_service_account_and_role() { + cluster_arn=$(kubectl config current-context) + eks_cluster=$(echo "$cluster_arn" | awk -F'/' '{print $2}') + oidc_url=$(aws eks describe-cluster --name $eks_cluster --query 'cluster.identity.oidc.issuer' --output text | sed -e 's/^https:\/\///') + role_name="${vpc_name}-${namespace}-${sa_name}-role" + role_arn="arn:aws:iam::${account_id}:role/${role_name}" + local trust_policy=$(mktemp -p "$XDG_RUNTIME_DIR" "tmp_policy.XXXXXX") + gen3_log_info "trust_policy: $trust_policy" + gen3_log_info "eks_cluster: $eks_cluster" + gen3_log_info "oidc_url: $oidc_url" + gen3_log_info "role_name: $role_name" + + + cat > ${trust_policy} <<EOF +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Federated": "arn:aws:iam::${account_id}:oidc-provider/${oidc_url}" + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringEquals": { + "${oidc_url}:aud": "sts.amazonaws.com", + "${oidc_url}:sub": "system:serviceaccount:${namespace}:${sa_name}" + } + } + } + ] +} +EOF + + # The trust policy above lets the Kubernetes service account assume this role through the cluster's OIDC provider (IRSA) + if aws iam get-role --role-name $role_name 2>&1; then + gen3_log_info "Updating existing role: $role_name" + aws iam update-assume-role-policy --role-name $role_name --policy-document "file://$trust_policy" + else + gen3_log_info "Creating new role: $role_name" + aws iam create-role --role-name $role_name --assume-role-policy-document "file://$trust_policy" + fi + + # Attach the policy to the IAM role + aws iam attach-role-policy --role-name $role_name --policy-arn $policy_arn + + # Create the Kubernetes service account if it doesn't exist + if ! kubectl get serviceaccount -n $namespace $sa_name 2>&1; then + kubectl create serviceaccount -n $namespace $sa_name + fi + # Annotate the KSA with the IAM role ARN + gen3_log_info "Annotating Service Account with IAM role ARN" + kubectl annotate serviceaccount -n ${namespace} ${sa_name} eks.amazonaws.com/role-arn=${role_arn} --overwrite + +} + +# Create an S3 bucket if it doesn't exist +create_s3_bucket() { + # Check if bucket already exists + if aws s3 ls "s3://$bucket_name" 2>&1 | grep -q 'NoSuchBucket'; then + gen3_log_info "Bucket does not exist, creating..." + aws s3 mb "s3://$bucket_name" + else + gen3_log_info "Bucket $bucket_name already exists, skipping bucket creation." + fi +} + + +# Function to trigger the database backup job +db_dump() { + gen3 job run psql-db-prep-dump +} + + +# Function to trigger the database backup restore job +db_restore() { + gen3 job run psql-db-prep-restore +} + +va_testing_db_dump() { + gen3 job run psql-db-dump-va-testing +} + + +# main function to determine whether dump or restore +main() { + case "$1" in + dump) + gen3_log_info "Triggering database dump..." + create_policy + create_service_account_and_role + create_s3_bucket + db_dump + ;; + restore) + gen3_log_info "Triggering database restore..." + create_policy + create_service_account_and_role + create_s3_bucket + db_restore + ;; + va-dump) + gen3_log_info "Running a va-testing DB dump..." + create_policy + create_service_account_and_role + create_s3_bucket + va_testing_db_dump + ;; + *) + echo "Invalid command. 
Usage: gen3 dbbackup [dump|restore|va-dump]" + return 1 + ;; + esac +} + +main "$1" diff --git a/gen3/bin/ecr.sh b/gen3/bin/ecr.sh index 930202a87..36af791ef 100644 --- a/gen3/bin/ecr.sh +++ b/gen3/bin/ecr.sh @@ -32,6 +32,8 @@ accountList=( 205252583234 885078588865 922467707295 +533267425233 +048463324059 ) principalStr="" diff --git a/gen3/bin/gitops.sh b/gen3/bin/gitops.sh index 48ba6512c..bc0358499 100644 --- a/gen3/bin/gitops.sh +++ b/gen3/bin/gitops.sh @@ -291,9 +291,15 @@ gen3_gitops_sync() { if g3kubectl get configmap manifest-versions; then oldJson=$(g3kubectl get configmap manifest-versions -o=json | jq ".data") fi - newJson=$(g3k_config_lookup ".versions") echo "old JSON is: $oldJson" - echo "new JSON is: $newJson" + newJson=$(g3k_config_lookup ".versions") + # Make sure the script exits if newJSON contains invalid JSON + if [ $? -ne 0 ]; then + echo "Error: g3k_config_lookup command failed- invalid JSON" + exit 1 + else + echo "new JSON is: $newJson" + fi if [[ -z $newJson ]]; then echo "Manifest does not have versions section. Unable to get new versions, skipping version update." elif [[ -z $oldJson ]]; then @@ -439,8 +445,13 @@ gen3_gitops_sync() { echo "DRYRUN flag detected, not rolling" gen3_log_info "dict_roll: $dict_roll; versions_roll: $versions_roll; portal_roll: $portal_roll; etl_roll: $etl_roll; fence_roll: $fence_roll" else - if [[ ( "$dict_roll" = true ) || ( "$versions_roll" = true ) || ( "$portal_roll" = true )|| ( "$etl_roll" = true ) || ( "$covid_cronjob_roll" = true ) || ("fence_roll" = true) ]]; then + if [[ ( "$dict_roll" = true ) || ( "$versions_roll" = true ) || ( "$portal_roll" = true )|| ( "$etl_roll" = true ) || ( "$covid_cronjob_roll" = true ) || ("$fence_roll" = true) ]]; then echo "changes detected, rolling" + tmpHostname=$(gen3 api hostname) + if [[ $slack = true ]]; then + curl -X POST --data-urlencode "payload={\"text\": \"Gitops-sync Cron: Changes detected on ${tmpHostname} - rolling...\"}" "${slackWebHook}" + fi + # run etl job before roll all so guppy can pick up changes if [[ "$etl_roll" = true ]]; then gen3 update_config etl-mapping "$(gen3 gitops folder)/etlMapping.yaml" @@ -466,7 +477,6 @@ gen3_gitops_sync() { rollRes=$? 
# send result to slack if [[ $slack = true ]]; then - tmpHostname=$(gen3 api hostname) resStr="SUCCESS" color="#1FFF00" if [[ $rollRes != 0 ]]; then diff --git a/gen3/bin/healthcheck.sh b/gen3/bin/healthcheck.sh index 149cb1aaa..b658ff033 100644 --- a/gen3/bin/healthcheck.sh +++ b/gen3/bin/healthcheck.sh @@ -137,6 +137,10 @@ gen3_healthcheck() { internetAccessExplicitProxy=false fi + gen3_log_info "Clearing Evicted pods" + sleep 5 + clear_evicted_pods + local healthJson=$(cat - < /dev/null; then + gen3 kube-setup-gen3-discovery-ai & +else + gen3_log_info "not deploying gen3-discovery-ai - no manifest entry for '.versions[\"gen3-discovery-ai\"]'" +fi + +if g3k_manifest_lookup '.versions["ohdsi-atlas"]' && g3k_manifest_lookup '.versions["ohdsi-webapi"]' 2> /dev/null; then + gen3 kube-setup-ohdsi & +else + gen3_log_info "not deploying OHDSI tools - no manifest entry for '.versions[\"ohdsi-atlas\"]' and '.versions[\"ohdsi-webapi\"]'" +fi + +if g3k_manifest_lookup '.versions["cohort-middleware"]' 2> /dev/null; then + gen3 kube-setup-cohort-middleware +else + gen3_log_info "not deploying cohort-middleware - no manifest entry for .versions[\"cohort-middleware\"]" +fi + gen3 kube-setup-revproxy if [[ "$GEN3_ROLL_FAST" != "true" ]]; then @@ -262,7 +280,7 @@ if [[ "$GEN3_ROLL_FAST" != "true" ]]; then else gen3 kube-setup-autoscaler & fi - gen3 kube-setup-kube-dns-autoscaler & + #gen3 kube-setup-kube-dns-autoscaler & gen3 kube-setup-metrics deploy || true gen3 kube-setup-tiller || true # @@ -334,18 +352,6 @@ else gen3_log_info "not deploying argo-wrapper - no manifest entry for '.versions[\"argo-wrapper\"]'" fi -if g3k_manifest_lookup '.versions["cohort-middleware"]' 2> /dev/null; then - gen3 roll cohort-middleware & -else - gen3_log_info "not deploying cohort-middleware - no manifest entry for '.versions[\"cohort-middleware\"]'" -fi - -if g3k_manifest_lookup '.versions["ohdsi-atlas"]' && g3k_manifest_lookup '.versions["ohdsi-webapi"]' 2> /dev/null; then - gen3 kube-setup-ohdsi & -else - gen3_log_info "not deploying OHDSI tools - no manifest entry for '.versions[\"ohdsi-atlas\"]' and '.versions[\"ohdsi-webapi\"]'" -fi - gen3_log_info "enable network policy" gen3 kube-setup-networkpolicy "enable" || true & diff --git a/gen3/bin/kube-setup-ambassador.sh b/gen3/bin/kube-setup-ambassador.sh index 0f4e0be28..5f92af5cc 100644 --- a/gen3/bin/kube-setup-ambassador.sh +++ b/gen3/bin/kube-setup-ambassador.sh @@ -25,7 +25,6 @@ deploy_api_gateway() { return 0 fi gen3 roll ambassador-gen3 - g3k_kv_filter "${GEN3_HOME}/kube/services/ambassador-gen3/ambassador-gen3-service-elb.yaml" GEN3_ARN "$(g3kubectl get configmap global --output=jsonpath='{.data.revproxy_arn}')" | g3kubectl apply -f - local luaYamlTemp="$(mktemp "$XDG_RUNTIME_DIR/lua.yaml.XXXXXX")" cat - > "$luaYamlTemp" < /dev/null 2>&1; then kubectl create namespace argo-events fi +# Check if target configmap exists +if ! kubectl get configmap environment -n argo-events > /dev/null 2>&1; then + + # Get value from source configmap + VALUE=$(kubectl get configmap global -n default -o jsonpath="{.data.environment}") + + # Create target configmap + kubectl create configmap environment -n argo-events --from-literal=environment=$VALUE + +fi + if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" || "$override_namespace" == true ]]; then if (! 
helm status argo -n argo-events > /dev/null 2>&1 ) || [[ "$force" == true ]]; then helm repo add argo https://argoproj.github.io/argo-helm --force-update 2> >(grep -v 'This is insecure' >&2) helm repo update 2> >(grep -v 'This is insecure' >&2) - helm upgrade --install argo argo/argo-events -n argo-events --version "2.1.3" + helm upgrade --install argo-events argo/argo-events -n argo-events --version "2.1.3" else gen3_log_info "argo-events Helm chart already installed. To force reinstall, run with --force" fi @@ -46,7 +57,7 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" || "$override_na kubectl apply -f ${GEN3_HOME}/kube/services/argo-events/eventbus.yaml fi else - gen3_log_info "Not running in default namespace, will not install argo-events helm chart" + gen3_log_info "Not running in default namespace, will not install argo-events helm chart. This behavior can be overridden with the --override-namespace flag" fi if [[ "$create_workflow_resources" == true ]]; then @@ -57,4 +68,5 @@ if [[ "$create_workflow_resources" == true ]]; then #Creating rolebindings to allow Argo Events to create jobs, and allow those jobs to manage Karpenter resources kubectl create rolebinding argo-events-job-admin-binding --role=job-admin --serviceaccount=argo-events:default --namespace=argo-events kubectl create clusterrolebinding karpenter-admin-binding --clusterrole=karpenter-admin --serviceaccount=argo-events:default + kubectl create clusterrolebinding argo-workflows-view-binding --clusterrole=argo-argo-workflows-view --serviceaccount=argo-events:default fi \ No newline at end of file diff --git a/gen3/bin/kube-setup-argo-wrapper.sh b/gen3/bin/kube-setup-argo-wrapper.sh index 5727a703e..9f7cc52ce 100644 --- a/gen3/bin/kube-setup-argo-wrapper.sh +++ b/gen3/bin/kube-setup-argo-wrapper.sh @@ -18,6 +18,26 @@ if [[ -z "$GEN3_SOURCE_ONLY" ]]; then gen3 roll argo-wrapper g3kubectl apply -f "${GEN3_HOME}/kube/services/argo-wrapper/argo-wrapper-service.yaml" + + + if g3k_manifest_lookup .argo.argo_server_service_url 2> /dev/null; then + export ARGO_HOST=$(g3k_manifest_lookup .argo.argo_server_service_url) + else + export ARGO_HOST="http://argo-argo-workflows-server.argo.svc.cluster.local:2746" + fi + + if g3k_config_lookup '.argo_namespace' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json 2> /dev/null; then + export ARGO_NAMESPACE=$(g3k_config_lookup '.argo_namespace' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) + else + export ARGO_NAMESPACE="argo" + fi + + envsubst <"${GEN3_HOME}/kube/services/argo-wrapper/config.ini" > /tmp/config.ini + + g3kubectl delete configmap argo-wrapper-namespace-config + g3kubectl create configmap argo-wrapper-namespace-config --from-file /tmp/config.ini + + rm /tmp/config.ini gen3_log_info "the argo-wrapper service has been deployed onto the kubernetes cluster" -fi \ No newline at end of file +fi diff --git a/gen3/bin/kube-setup-argo.sh b/gen3/bin/kube-setup-argo.sh index c7243d3da..4c6c55eee 100644 --- a/gen3/bin/kube-setup-argo.sh +++ b/gen3/bin/kube-setup-argo.sh @@ -5,10 +5,25 @@ source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/gen3setup" gen3_load "gen3/lib/kube-setup-init" +override_namespace=false +force=false + +for arg in "${@}"; do + if [ "$arg" == "--override-namespace" ]; then + override_namespace=true + elif [ "$arg" == "--force" ]; then + force=true + else + #Print usage info and exit + gen3_log_info "Usage: gen3 kube-setup-argo [--override-namespace] [--force]" + exit 1 + fi +done ctx="$(g3kubectl config 
current-context)" ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | .context.namespace")" +argo_namespace=$(g3k_config_lookup '.argo_namespace' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) function setup_argo_buckets { local accountNumber @@ -28,14 +43,17 @@ function setup_argo_buckets { # try to come up with a unique but composable bucket name bucketName="gen3-argo-${accountNumber}-${environment//_/-}" - userName="gen3-argo-${environment//_/-}-user" - if [[ ! -z $(g3k_config_lookup '."s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! -z $(g3k_config_lookup '.argo."s3-bucket"') ]]; then - if [[ ! -z $(g3k_config_lookup '."s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) ]]; then + nameSpace="$(gen3 db namespace)" + roleName="gen3-argo-${environment//_/-}-role" + bucketPolicy="argo-bucket-policy-${nameSpace}" + internalBucketPolicy="argo-internal-bucket-policy-${nameSpace}" + if [[ ! -z $(g3k_config_lookup '."downloadable-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! -z $(g3k_config_lookup '.argo."downloadable-s3-bucket"') ]]; then + if [[ ! -z $(g3k_config_lookup '."downloadable-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) ]]; then gen3_log_info "Using S3 bucket found in manifest: ${bucketName}" - bucketName=$(g3k_config_lookup '."s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) + bucketName=$(g3k_config_lookup '."downloadable-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) else gen3_log_info "Using S3 bucket found in manifest: ${bucketName}" - bucketName=$(g3k_config_lookup '.argo."s3-bucket"') + bucketName=$(g3k_config_lookup '.argo."downloadable-s3-bucket"') fi fi if [[ ! -z $(g3k_config_lookup '."internal-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! -z $(g3k_config_lookup '.argo."internal-s3-bucket"') ]]; then @@ -114,70 +132,41 @@ EOF ] } EOF - if ! secret="$(g3kubectl get secret argo-s3-creds -n argo 2> /dev/null)"; then - gen3_log_info "setting up bucket $bucketName" - - if aws s3 ls --page-size 1 "s3://${bucketName}" > /dev/null 2>&1; then - gen3_log_info "${bucketName} s3 bucket already exists" - # continue on ... - elif ! aws s3 mb "s3://${bucketName}"; then - gen3_log_err "failed to create bucket ${bucketName}" - fi - - gen3_log_info "Creating IAM user ${userName}" - if ! aws iam get-user --user-name ${userName} > /dev/null 2>&1; then - aws iam create-user --user-name ${userName} || true - else - gen3_log_info "IAM user ${userName} already exits.." - fi - - secret=$(aws iam create-access-key --user-name ${userName}) - if ! g3kubectl get namespace argo > /dev/null 2>&1; then - gen3_log_info "Creating argo namespace" - g3kubectl create namespace argo || true - g3kubectl label namespace argo app=argo || true - g3kubectl create rolebinding argo-admin --clusterrole=admin --serviceaccount=argo:default -n argo || true - fi - else - # Else we want to recreate the argo-s3-creds secret so make a temp file with the current creds and delete argo-s3-creds secret - gen3_log_info "Argo S3 setup already completed" - local secretFile="$XDG_RUNTIME_DIR/temp_key_file_$$.json" - cat > "$secretFile" < /dev/null 2>&1; then + gen3_log_info "${bucketName} s3 bucket already exists" + # continue on ... + elif ! 
aws s3 mb "s3://${bucketName}"; then + gen3_log_err "failed to create bucket ${bucketName}" fi - - gen3_log_info "Creating s3 creds secret in argo namespace" - if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then - if [[ -z $internalBucketName ]]; then - g3kubectl delete secret -n argo argo-s3-creds || true - g3kubectl create secret -n argo generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} || true - g3kubectl create secret generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} || true - else - g3kubectl delete secret -n argo argo-s3-creds || true - g3kubectl create secret -n argo generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} --from-literal=internalbucketname=${internalBucketName} || true - g3kubectl create secret generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} || true - fi + if ! g3kubectl get namespace argo > /dev/null 2>&1; then + gen3_log_info "Creating argo namespace" + g3kubectl create namespace argo || true + g3kubectl label namespace argo app=argo || true + # Grant admin access within the argo namespace to the default SA in the argo namespace + g3kubectl create rolebinding argo-admin --clusterrole=admin --serviceaccount=argo:default -n $argo_namespace || true + fi + gen3_log_info "Creating IAM role ${roleName}" + if aws iam get-role --role-name "${roleName}" > /dev/null 2>&1; then + gen3_log_info "IAM role ${roleName} already exists.." 
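+ # Reuse the existing role: fetch its ARN and re-annotate the service accounts so EKS IRSA maps pods running as them to this role.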
+ roleArn=$(aws iam get-role --role-name "${roleName}" --query 'Role.Arn' --output text) + gen3_log_info "Role annotate" + g3kubectl annotate serviceaccount default eks.amazonaws.com/role-arn=${roleArn} --overwrite -n $argo_namespace + g3kubectl annotate serviceaccount argo eks.amazonaws.com/role-arn=${roleArn} --overwrite -n $nameSpace else - g3kubectl create sa argo || true - # Grant admin access within the current namespace to the argo SA in the current namespace - g3kubectl create rolebinding argo-admin --clusterrole=admin --serviceaccount=$(gen3 db namespace):argo -n $(gen3 db namespace) || true - aws iam put-user-policy --user-name ${userName} --policy-name argo-bucket-policy --policy-document file://$policyFile || true - if [[ -z $internalBucketName ]]; then - aws iam put-user-policy --user-name ${userName} --policy-name argo-internal-bucket-policy --policy-document file://$internalBucketPolicyFile || true - g3kubectl create secret generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} || true - else - g3kubectl create secret generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} --from-literal=internalbucketname=${internalBucketName} || true - - fi + gen3 awsrole create $roleName argo $nameSpace -all_namespaces + roleArn=$(aws iam get-role --role-name "${roleName}" --query 'Role.Arn' --output text) + g3kubectl annotate serviceaccount default eks.amazonaws.com/role-arn=${roleArn} -n $argo_namespace fi + # Grant admin access within the current namespace to the argo SA in the current namespace + g3kubectl create rolebinding argo-admin --clusterrole=admin --serviceaccount=$nameSpace:argo -n $nameSpace || true + aws iam put-role-policy --role-name ${roleName} --policy-name ${bucketPolicy} --policy-document file://$policyFile || true + if [[ -z $internalBucketName ]]; then + aws iam put-role-policy --role-name ${roleName} --policy-name ${internalBucketPolicy} --policy-document file://$internalBucketPolicyFile || true + fi ## if new bucket then do the following # Get the aws keys from secret @@ -189,9 +178,9 @@ EOF aws s3api put-bucket-lifecycle --bucket ${bucketName} --lifecycle-configuration file://$bucketLifecyclePolicyFile # Always update the policy, in case manifest buckets change - aws iam put-user-policy --user-name ${userName} --policy-name argo-bucket-policy --policy-document file://$policyFile + aws iam put-role-policy --role-name ${roleName} --policy-name ${bucketPolicy} --policy-document file://$policyFile if [[ ! -z $internalBucketPolicyFile ]]; then - aws iam put-user-policy --user-name ${userName} --policy-name argo-internal-bucket-policy --policy-document file://$internalBucketPolicyFile + aws iam put-role-policy --role-name ${roleName} --policy-name ${internalBucketPolicy} --policy-document file://$internalBucketPolicyFile fi if [[ ! -z $(g3k_config_lookup '.indexd_admin_user' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! -z $(g3k_config_lookup '.argo.indexd_admin_user') ]]; then if [[ ! 
-z $(g3k_config_lookup '.indexd_admin_user' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) ]]; then @@ -203,39 +192,53 @@ EOF for serviceName in indexd; do secretName="${serviceName}-creds" # Only delete if secret is found to prevent early exits - if [[ ! -z $(g3kubectl get secrets -n argo | grep $secretName) ]]; then - g3kubectl delete secret "$secretName" -n argo > /dev/null 2>&1 + if [[ ! -z $(g3kubectl get secrets -n $argo_namespace | grep $secretName) ]]; then + g3kubectl delete secret "$secretName" -n $argo_namespace > /dev/null 2>&1 fi done sleep 1 # I think delete is async - give backend a second to finish indexdFencePassword=$(cat $(gen3_secrets_folder)/creds.json | jq -r .indexd.user_db.$indexd_admin_user) - g3kubectl create secret generic "indexd-creds" --from-literal=user=$indexd_admin_user --from-literal=password=$indexdFencePassword -n argo + g3kubectl create secret generic "indexd-creds" --from-literal=user=$indexd_admin_user --from-literal=password=$indexdFencePassword -n $argo_namespace fi } function setup_argo_db() { - if ! secret="$(g3kubectl get secret argo-db-creds -n argo 2> /dev/null)"; then + if ! secret="$(g3kubectl get secret argo-db-creds -n $argo_namespace 2> /dev/null)"; then gen3_log_info "Setting up argo db persistence" gen3 db setup argo || true dbCreds=$(gen3 secrets decode argo-g3auto dbcreds.json) - g3kubectl create secret -n argo generic argo-db-creds --from-literal=db_host=$(echo $dbCreds | jq -r .db_host) --from-literal=db_username=$(echo $dbCreds | jq -r .db_username) --from-literal=db_password=$(echo $dbCreds | jq -r .db_password) --from-literal=db_database=$(echo $dbCreds | jq -r .db_database) + g3kubectl create secret -n $argo_namespace generic argo-db-creds --from-literal=db_host=$(echo $dbCreds | jq -r .db_host) --from-literal=db_username=$(echo $dbCreds | jq -r .db_username) --from-literal=db_password=$(echo $dbCreds | jq -r .db_password) --from-literal=db_database=$(echo $dbCreds | jq -r .db_database) else gen3_log_info "Argo DB setup already completed" fi } - setup_argo_buckets +function setup_argo_template_secret() { + gen3_log_info "Started the template secret process" + downloadable_bucket_name=$(g3k_config_lookup '."downloadable-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) + # Check if the secret already exists + if [[ ! -z $(g3kubectl get secret argo-template-values-secret -n $argo_namespace) ]]; then + gen3_log_info "Argo template values secret already exists, assuming it's stale and deleting" + g3kubectl delete secret argo-template-values-secret -n $argo_namespace + fi + gen3_log_info "Creating argo template values secret" + g3kubectl create secret generic argo-template-values-secret --from-literal=DOWNLOADABLE_BUCKET=$downloadable_bucket_name -n $argo_namespace +} + +setup_argo_buckets # only do this if we are running in the default namespace -if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then +if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" || "$override_namespace" == true ]]; then setup_argo_db - if (! 
helm status argo -n argo > /dev/null 2>&1 ) || [[ "$1" == "--force" ]]; then - DBHOST=$(kubectl get secrets -n argo argo-db-creds -o json | jq -r .data.db_host | base64 -d) - DBNAME=$(kubectl get secrets -n argo argo-db-creds -o json | jq -r .data.db_database | base64 -d) - if [[ -z $(kubectl get secrets -n argo argo-s3-creds -o json | jq -r .data.internalbucketname | base64 -d) ]]; then - BUCKET=$(kubectl get secrets -n argo argo-s3-creds -o json | jq -r .data.bucketname | base64 -d) + setup_argo_template_secret + if (! helm status argo -n $argo_namespace > /dev/null 2>&1 ) || [[ "$force" == true ]]; then + DBHOST=$(kubectl get secrets -n $argo_namespace argo-db-creds -o json | jq -r .data.db_host | base64 -d) + DBNAME=$(kubectl get secrets -n $argo_namespace argo-db-creds -o json | jq -r .data.db_database | base64 -d) + if [[ -z $internalBucketName ]]; then + BUCKET=$bucketName else - BUCKET=$(kubectl get secrets -n argo argo-s3-creds -o json | jq -r .data.internalbucketname | base64 -d) + BUCKET=$internalBucketName fi + valuesFile="$XDG_RUNTIME_DIR/values_$$.yaml" valuesTemplate="${GEN3_HOME}/kube/services/argo/values.yaml" @@ -243,7 +246,7 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then helm repo add argo https://argoproj.github.io/argo-helm --force-update 2> >(grep -v 'This is insecure' >&2) helm repo update 2> >(grep -v 'This is insecure' >&2) - helm upgrade --install argo argo/argo-workflows -n argo -f ${valuesFile} --version 0.29.1 + helm upgrade --install argo argo/argo-workflows -n $argo_namespace -f ${valuesFile} --version 0.29.1 else gen3_log_info "kube-setup-argo exiting - argo already deployed, use --force to redeploy" fi diff --git a/gen3/bin/kube-setup-cedar-wrapper.sh b/gen3/bin/kube-setup-cedar-wrapper.sh index 9a899a770..c8f0d03c6 100644 --- a/gen3/bin/kube-setup-cedar-wrapper.sh +++ b/gen3/bin/kube-setup-cedar-wrapper.sh @@ -1,6 +1,58 @@ source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/lib/kube-setup-init" +create_client_and_secret() { + local hostname=$(gen3 api hostname) + local client_name="cedar_ingest_client" + gen3_log_info "kube-setup-cedar-wrapper" "creating fence ${client_name} for $hostname" + # delete any existing fence cedar clients + g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-delete --client ${client_name} > /dev/null 2>&1 + local secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client ${client_name} --grant-types client_credentials | tail -1) + # secrets looks like ('CLIENT_ID', 'CLIENT_SECRET') + if [[ ! $secrets =~ (\'(.*)\', \'(.*)\') ]]; then + gen3_log_err "kube-setup-cedar-wrapper" "Failed generating ${client_name}" + return 1 + else + local client_id="${BASH_REMATCH[2]}" + local client_secret="${BASH_REMATCH[3]}" + gen3_log_info "Create cedar-client secrets file" + cat - < /dev/null 2>&1; then + local have_cedar_client_secret="1" + else + gen3_log_info "No g3auto cedar-client key present in secret" + fi + + local client_name="cedar_ingest_client" + local client_list=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-list) + local client_count=$(echo "$client_list=" | grep -cE "'name':.*'${client_name}'") + gen3_log_info "CEDAR client count = ${client_count}" + + if [[ -z $have_cedar_client_secret ]] || [[ ${client_count} -lt 1 ]]; then + gen3_log_info "Creating new cedar-ingest client and secret" + local credsPath="$(gen3_secrets_folder)/g3auto/cedar/${cedar_creds_file}" + if ! 
create_client_and_secret > $credsPath; then + gen3_log_err "Failed to setup cedar-ingest secret" + return 1 + else + gen3 secrets sync + gen3 job run usersync + fi + fi +} + [[ -z "$GEN3_ROLL_ALL" ]] && gen3 kube-setup-secrets if ! g3kubectl get secrets/cedar-g3auto > /dev/null 2>&1; then @@ -8,6 +60,9 @@ if ! g3kubectl get secrets/cedar-g3auto > /dev/null 2>&1; then return 1 fi +gen3_log_info "Checking cedar-client creds" +setup_creds + if ! gen3 secrets decode cedar-g3auto cedar_api_key.txt > /dev/null 2>&1; then gen3_log_err "No CEDAR api key present in cedar-g3auto secret, not rolling CEDAR wrapper" return 1 diff --git a/gen3/bin/kube-setup-cohort-middleware.sh b/gen3/bin/kube-setup-cohort-middleware.sh index 91b414849..a6a024578 100644 --- a/gen3/bin/kube-setup-cohort-middleware.sh +++ b/gen3/bin/kube-setup-cohort-middleware.sh @@ -7,6 +7,10 @@ gen3_load "gen3/lib/kube-setup-init" setup_secrets() { gen3_log_info "Deploying secrets for cohort-middleware" # subshell + if [[ -n "$JENKINS_HOME" ]]; then + gen3_log_err "skipping secrets setup in non-adminvm environment" + return 0 + fi ( if ! dbcreds="$(gen3 db creds ohdsi)"; then @@ -17,7 +21,7 @@ setup_secrets() { mkdir -p $(gen3_secrets_folder)/g3auto/cohort-middleware credsFile="$(gen3_secrets_folder)/g3auto/cohort-middleware/development.yaml" - if [[ (! -f "$credsFile") && -z "$JENKINS_HOME" ]]; then + if [[ (! -f "$credsFile") ]]; then DB_NAME=$(jq -r ".db_database" <<< "$dbcreds") export DB_NAME DB_USER=$(jq -r ".db_username" <<< "$dbcreds") @@ -46,17 +50,17 @@ EOM fi gen3 secrets sync "initialize cohort-middleware/development.yaml" - - # envsubst <"${GEN3_HOME}/kube/services/cohort-middleware/development.yaml" | g3kubectl create secret generic cohort-middleware-config --from-file=development.yaml=/dev/stdin ) } # main -------------------------------------- -setup_secrets - -gen3 roll cohort-middleware -g3kubectl apply -f "${GEN3_HOME}/kube/services/cohort-middleware/cohort-middleware-service.yaml" -cat < /dev/null 2>&1; then + export DICOM_SERVER_URL="/dicom-server" + gen3_log_info "attaching ohif viewer to old dicom-server (orthanc w/ aurora)" + fi + + if g3k_manifest_lookup .versions["orthanc"] > /dev/null 2>&1; then + export DICOM_SERVER_URL="/orthanc" + gen3_log_info "attaching ohif viewer to new dicom-server (orthanc w/ s3)" + fi + + envsubst <"${GEN3_HOME}/kube/services/ohif-viewer/app-config.js" > "$secretsFolder/app-config.js" + gen3 secrets sync 'setup orthanc-s3-g3auto secrets' } diff --git a/gen3/bin/kube-setup-ecr-access-cronjob.sh b/gen3/bin/kube-setup-ecr-access-cronjob.sh new file mode 100644 index 000000000..5c645ad35 --- /dev/null +++ b/gen3/bin/kube-setup-ecr-access-cronjob.sh @@ -0,0 +1,61 @@ +#!/bin/bash + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/lib/kube-setup-init" + +setup_ecr_access_job() { + if g3kubectl get configmap manifest-global > /dev/null; then + ecrRoleArn=$(g3kubectl get configmap manifest-global -o jsonpath={.data.ecr-access-job-role-arn}) + fi + if [ -z "$ecrRoleArn" ]; then + gen3_log_err "Missing 'global.ecr-access-job-role-arn' configuration in manifest.json" + return 1 + fi + + local saName="ecr-access-job-sa" + if ! 
g3kubectl get sa "$saName" > /dev/null 2>&1; then + tempFile="ecr-access-job-policy.json" + cat - > $tempFile < /dev/null + cat ${GEN3_HOME}/kube/services/fluentd/gen3-1.15.3.conf | tee ${fluentdConfigmap} > /dev/null gen3 update_config fluentd-gen3 "${fluentdConfigmap}" rm ${fluentdConfigmap} else @@ -54,7 +54,7 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then fi # We need this serviceaccount to be in the default namespace for the job and cronjob to properly work g3kubectl apply -f "${GEN3_HOME}/kube/services/fluentd/fluent-jobs-serviceaccount.yaml" -n default - if [ ${fluentdVersion} == "v1.10.2-debian-cloudwatch-1.0" ]; + if [ ${fluentdVersion} == "v1.15.3-debian-cloudwatch-1.0" ]; then ( unset KUBECTL_NAMESPACE diff --git a/gen3/bin/kube-setup-gen3-discovery-ai.sh b/gen3/bin/kube-setup-gen3-discovery-ai.sh new file mode 100644 index 000000000..44a472a74 --- /dev/null +++ b/gen3/bin/kube-setup-gen3-discovery-ai.sh @@ -0,0 +1,154 @@ +#!/bin/bash +# +# Deploy the gen3-discovery-ai service +# + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/gen3setup" + +# NOTE: no db for this service yet, but we'll likely need it in the future +setup_database() { + gen3_log_info "setting up gen3-discovery-ai service ..." + + if g3kubectl describe secret gen3-discovery-ai-g3auto > /dev/null 2>&1; then + gen3_log_info "gen3-discovery-ai-g3auto secret already configured" + return 0 + fi + if [[ -n "$JENKINS_HOME" || ! -f "$(gen3_secrets_folder)/creds.json" ]]; then + gen3_log_err "skipping db setup in non-adminvm environment" + return 0 + fi + # Setup .env file that gen3-discovery-ai service consumes + if [[ ! -f "$secretsFolder/gen3-discovery-ai.env" || ! -f "$secretsFolder/base64Authz.txt" ]]; then + local secretsFolder="$(gen3_secrets_folder)/g3auto/gen3-discovery-ai" + + if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then + if ! gen3 db setup gen3-discovery-ai; then + gen3_log_err "Failed setting up database for gen3-discovery-ai service" + return 1 + fi + fi + if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then + gen3_log_err "dbcreds not present in Gen3Secrets/" + return 1 + fi + + # go ahead and rotate the password whenever we regen this file + local password="$(gen3 random)" + cat - > "$secretsFolder/gen3-discovery-ai.env" < "$secretsFolder/base64Authz.txt" + fi + gen3 secrets sync 'setup gen3-discovery-ai-g3auto secrets' +} + +if ! g3k_manifest_lookup '.versions."gen3-discovery-ai"' 2> /dev/null; then + gen3_log_info "kube-setup-gen3-discovery-ai exiting - gen3-discovery-ai service not in manifest" + exit 0 +fi + +# There's no db for this service *yet* +# +# if ! setup_database; then +# gen3_log_err "kube-setup-gen3-discovery-ai bailing out - database failed setup" +# exit 1 +# fi + +setup_storage() { + local saName="gen3-discovery-ai-sa" + g3kubectl create sa "$saName" > /dev/null 2>&1 || true + + local secret + local secretsFolder="$(gen3_secrets_folder)/g3auto/gen3-discovery-ai" + + secret="$(g3kubectl get secret gen3-discovery-ai-g3auto -o json 2> /dev/null)" + local hasStorageCfg + hasStorageCfg=$(jq -r '.data | has("storage_config.json")' <<< "$secret") + + if [ "$hasStorageCfg" = "false" ]; then + gen3_log_info "setting up storage for gen3-discovery-ai service" + # + # gen3-discovery-ai-g3auto secret still does not exist + # we need to setup an S3 bucket and IAM creds + # let's avoid creating multiple buckets for different + # deployments to the same k8s cluster (dev, etc) + # + local bucketName + local accountNumber + local environment + + if ! 
accountNumber="$(aws sts get-caller-identity --output text --query 'Account')"; then + gen3_log_err "could not determine account numer" + return 1 + fi + + gen3_log_info "accountNumber: ${accountNumber}" + + if ! environment="$(g3kubectl get configmap manifest-global -o json | jq -r .data.environment)"; then + gen3_log_err "could not determine environment from manifest-global - bailing out of gen3-discovery-ai setup" + return 1 + fi + + gen3_log_info "environment: ${environment}" + + # try to come up with a unique but composable bucket name + bucketName="gen3-discovery-ai-${accountNumber}-${environment//_/-}" + + gen3_log_info "bucketName: ${bucketName}" + + if aws s3 ls --page-size 1 "s3://${bucketName}" > /dev/null 2>&1; then + gen3_log_info "${bucketName} s3 bucket already exists - probably in use by another namespace - copy the creds from there to $(gen3_secrets_folder)/g3auto/gen3-discovery-ai" + # continue on ... + elif ! gen3 s3 create "${bucketName}"; then + gen3_log_err "maybe failed to create bucket ${bucketName}, but maybe not, because the terraform script is flaky" + fi + + local hostname + hostname="$(gen3 api hostname)" + jq -r -n --arg bucket "${bucketName}" --arg hostname "${hostname}" '.bucket=$bucket | .prefix=$hostname' > "${secretsFolder}/storage_config.json" + gen3 secrets sync 'setup gen3-discovery-ai credentials' + + local roleName + roleName="$(gen3 api safe-name gen3-discovery-ai)" || return 1 + + if ! gen3 awsrole info "$roleName" > /dev/null; then # setup role + bucketName="$( (gen3 secrets decode 'gen3-discovery-ai-g3auto' 'storage_config.json' || echo ERROR) | jq -r .bucket)" || return 1 + gen3 awsrole create "$roleName" "$saName" || return 1 + gen3 s3 attach-bucket-policy "$bucketName" --read-write --role-name "${roleName}" + # try to give the gitops role read/write permissions on the bucket + local gitopsRoleName + gitopsRoleName="$(gen3 api safe-name gitops)" + gen3 s3 attach-bucket-policy "$bucketName" --read-write --role-name "${gitopsRoleName}" + fi + fi + + return 0 +} + +if ! setup_storage; then + gen3_log_err "kube-setup-gen3-discovery-ai bailing out - storage failed setup" + exit 1 +fi + +gen3_log_info "Setup complete, syncing configuration to bucket" + +bucketName="$( (gen3 secrets decode 'gen3-discovery-ai-g3auto' 'storage_config.json' || echo ERROR) | jq -r .bucket)" || exit 1 +aws s3 sync "$(dirname $(g3k_manifest_path))/gen3-discovery-ai/knowledge" "s3://$bucketName" --delete + +gen3 roll gen3-discovery-ai +g3kubectl apply -f "${GEN3_HOME}/kube/services/gen3-discovery-ai/gen3-discovery-ai-service.yaml" + +if [[ -z "$GEN3_ROLL_ALL" ]]; then + gen3 kube-setup-networkpolicy + gen3 kube-setup-revproxy +fi + +gen3_log_info "The gen3-discovery-ai service has been deployed onto the kubernetes cluster" +gen3_log_info "test with: curl https://commons-host/ai" diff --git a/gen3/bin/kube-setup-hatchery.sh b/gen3/bin/kube-setup-hatchery.sh index 07172aa1e..bdcff8ed0 100644 --- a/gen3/bin/kube-setup-hatchery.sh +++ b/gen3/bin/kube-setup-hatchery.sh @@ -20,11 +20,81 @@ gen3 jupyter j-namespace setup # (g3k_kv_filter ${GEN3_HOME}/kube/services/hatchery/serviceaccount.yaml BINDING_ONE "name: hatchery-binding1-$namespace" BINDING_TWO "name: hatchery-binding2-$namespace" CURRENT_NAMESPACE "namespace: $namespace" | g3kubectl apply -f -) || true +function exists_or_create_gen3_license_table() { + # Create dynamodb table for gen3-license if it does not exist. 
+  TARGET_TABLE="$1"
+  echo "Checking for dynamoDB table: ${TARGET_TABLE}"
 
-# cron job to distribute licenses if using Stata workspaces
-if [ "$(g3kubectl get configmaps/manifest-hatchery -o yaml | grep "\"image\": .*stata.*")" ];
-then
-  gen3 job cron distribute-licenses '* * * * *'
+  FOUND_TABLE=`aws dynamodb list-tables | jq -r .TableNames | jq -c -r '.[]' | grep $TARGET_TABLE`
+  if [ -n "$FOUND_TABLE" ]; then
+    echo "Target table already exists in dynamoDB: $FOUND_TABLE"
+  else
+    echo "Creating table ${TARGET_TABLE}"
+    GSI=`g3kubectl get configmaps/manifest-hatchery -o json | jq -r '.data."license-user-maps-global-secondary-index"'`
+    if [[ -z "$GSI" || "$GSI" == "null" ]]; then
+      echo "Error: No global-secondary-index in configuration"
+      return 0
+    fi
+    aws dynamodb create-table \
+      --no-cli-pager \
+      --table-name "$TARGET_TABLE" \
+      --attribute-definitions AttributeName=itemId,AttributeType=S \
+        AttributeName=environment,AttributeType=S \
+        AttributeName=isActive,AttributeType=S \
+      --key-schema AttributeName=itemId,KeyType=HASH \
+        AttributeName=environment,KeyType=RANGE \
+      --provisioned-throughput ReadCapacityUnits=5,WriteCapacityUnits=5 \
+      --global-secondary-indexes \
+        "[
+          {
+            \"IndexName\": \"$GSI\",
+            \"KeySchema\": [{\"AttributeName\":\"environment\",\"KeyType\":\"HASH\"},
+            {\"AttributeName\":\"isActive\",\"KeyType\":\"RANGE\"}],
+            \"Projection\":{
+              \"ProjectionType\":\"INCLUDE\",
+              \"NonKeyAttributes\":[\"itemId\",\"userId\",\"licenseId\",\"licenseType\"]
+            },
+            \"ProvisionedThroughput\": {
+              \"ReadCapacityUnits\": 5,
+              \"WriteCapacityUnits\": 3
+            }
+          }
+        ]"
+  fi
+}
+
+TARGET_TABLE=`g3kubectl get configmaps/manifest-hatchery -o json | jq -r '.data."license-user-maps-dynamodb-table"'`
+if [[ -z "$TARGET_TABLE" || "$TARGET_TABLE" == "null" ]]; then
+  echo "No gen3-license table in configuration"
+  # cron job to distribute licenses if using Stata workspaces but not using dynamoDB
+  if [ "$(g3kubectl get configmaps/manifest-hatchery -o yaml | grep "\"image\": .*stata.*")" ];
+  then
+    gen3 job cron distribute-licenses '* * * * *'
+  fi
+else
+  echo "Found gen3-license table in configuration: $TARGET_TABLE"
+  exists_or_create_gen3_license_table "$TARGET_TABLE"
+fi
+
+# if `nextflow-global.imagebuilder-reader-role-arn` is set in hatchery config, allow hatchery
+# to assume the configured role
+imagebuilderRoleArn=$(g3kubectl get configmap manifest-hatchery -o jsonpath={.data.nextflow-global} | jq -r '."imagebuilder-reader-role-arn"')
+assumeImageBuilderRolePolicyBlock=""
+if [ -z "$imagebuilderRoleArn" ]; then
+  gen3_log_info "No 'nextflow-global.imagebuilder-reader-role-arn' in Hatchery configuration, not granting AssumeRole"
+else
+  gen3_log_info "Found 'nextflow-global.imagebuilder-reader-role-arn' in Hatchery configuration, granting AssumeRole"
+  assumeImageBuilderRolePolicyBlock=$( cat < /dev/null 2>&1; then
   roleName="$(gen3 api safe-name hatchery-sa)"
   gen3 awsrole create $roleName $saName
   policyName="$(gen3 api safe-name hatchery-policy)"
-  policyInfo=$(gen3_aws_run aws iam create-policy --policy-name "$policyName" --policy-document "$policy" --description "Allow hathcery to assume csoc_adminvm role in other accounts, for multi-account workspaces")
+  policyInfo=$(gen3_aws_run aws iam create-policy --policy-name "$policyName" --policy-document "$policy" --description "Allow hatchery to assume csoc_adminvm role in other accounts and manage dynamodb for multi-account workspaces, and to create resources for nextflow workspaces")
   if [ -n "$policyInfo" ]; then
-    policyArn="$(jq 
-e -r '.["Policy"].Arn' <<< "$policyInfo")" || { echo "Cannot get 'Policy.Arn' from output: $policyInfo"; return 1; } + policyArn="$(jq -e -r '.["Policy"].Arn' <<< "$policyInfo")" || { echo "Cannot get 'Policy.Arn' from output: $policyInfo"; return 1; } else - echo "Unable to create policy $policyName. Assuming it already exists and continuing" + echo "Unable to create policy '$policyName'. Assume it already exists and create a new version to update the permissions..." policyArn=$(gen3_aws_run aws iam list-policies --query "Policies[?PolicyName=='$policyName'].Arn" --output text) - fi + # there can only be up to 5 versions, so delete old versions (except the current default one) + versions="$(gen3_aws_run aws iam list-policy-versions --policy-arn $policyArn | jq -r '.Versions[] | select(.IsDefaultVersion != true) | .VersionId')" + versions=(${versions}) # string to array + for v in "${versions[@]}"; do + echo "Deleting old version '$v'" + gen3_aws_run aws iam delete-policy-version --policy-arn $policyArn --version-id $v + done + + # create the new version + gen3_aws_run aws iam create-policy-version --policy-arn "$policyArn" --policy-document "$policy" --set-as-default + fi gen3_log_info "Attaching policy '${policyName}' to role '${roleName}'" gen3 awsrole attach-policy ${policyArn} --role-name ${roleName} --force-aws-cli || exit 1 gen3 awsrole attach-policy "arn:aws:iam::aws:policy/AWSResourceAccessManagerFullAccess" --role-name ${roleName} --force-aws-cli || exit 1 diff --git a/gen3/bin/kube-setup-ingress.sh b/gen3/bin/kube-setup-ingress.sh index d0bcff9a4..b75470f73 100644 --- a/gen3/bin/kube-setup-ingress.sh +++ b/gen3/bin/kube-setup-ingress.sh @@ -232,6 +232,28 @@ gen3_ingress_setup_role() { } } }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:AddTags" + ], + "Resource": [ + "arn:aws:elasticloadbalancing:*:*:targetgroup/*/*", + "arn:aws:elasticloadbalancing:*:*:loadbalancer/net/*/*", + "arn:aws:elasticloadbalancing:*:*:loadbalancer/app/*/*" + ], + "Condition": { + "StringEquals": { + "elasticloadbalancing:CreateAction": [ + "CreateTargetGroup", + "CreateLoadBalancer" + ] + }, + "Null": { + "aws:RequestTag/elbv2.k8s.aws/cluster": "false" + } + } + }, { "Effect": "Allow", "Action": [ @@ -329,4 +351,4 @@ g3kubectl apply -f "${GEN3_HOME}/kube/services/revproxy/revproxy-service.yaml" envsubst <$scriptDir/ingress.yaml | g3kubectl apply -f - if [ "$deployWaf" = true ]; then gen3_ingress_setup_waf -fi \ No newline at end of file +fi diff --git a/gen3/bin/kube-setup-jenkins2.sh b/gen3/bin/kube-setup-jenkins2.sh new file mode 100644 index 000000000..f5233f978 --- /dev/null +++ b/gen3/bin/kube-setup-jenkins2.sh @@ -0,0 +1,71 @@ +#!/bin/bash +# +# Just a little helper for deploying jenkins onto k8s the first time +# + +set -e + +export WORKSPACE="${WORKSPACE:-$HOME}" +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/gen3setup" + +gen3 kube-setup-secrets + +# +# Assume Jenkins should use 'jenkins' profile credentials in "${WORKSPACE}"/.aws/credentials +# +aws_access_key_id="$(aws configure get jenkins.aws_access_key_id)" +aws_secret_access_key="$(aws configure get jenkins.aws_secret_access_key)" +google_acct1_email="$(jq -r '.jenkins.google_acct1.email' < $(gen3_secrets_folder)/creds.json)" +google_acct1_password="$(jq -r '.jenkins.google_acct1.password' < $(gen3_secrets_folder)/creds.json)" +google_acct2_email="$(jq -r '.jenkins.google_acct2.email' < $(gen3_secrets_folder)/creds.json)" +google_acct2_password="$(jq -r '.jenkins.google_acct2.password' < 
$(gen3_secrets_folder)/creds.json)" + +if [ -z "$aws_access_key_id" -o -z "$aws_secret_access_key" ]; then + gen3_log_err 'not configuring jenkins - could not extract secrets from aws configure' + exit 1 +fi +if [[ -z "$google_acct1_email" || -z "$google_acct1_password" || -z "$google_acct2_email" || -z "$google_acct2_password" ]]; then + gen3_log_err "missing google credentials in '.jenkins' of creds.json" + exit 1 +fi + +if ! g3kubectl get secrets jenkins-secret > /dev/null 2>&1; then + # make it easy to rerun kube-setup-jenkins.sh + g3kubectl create secret generic jenkins-secret "--from-literal=aws_access_key_id=$aws_access_key_id" "--from-literal=aws_secret_access_key=$aws_secret_access_key" +fi +if ! g3kubectl get secrets google-acct1 > /dev/null 2>&1; then + g3kubectl create secret generic google-acct1 "--from-literal=email=${google_acct1_email}" "--from-literal=password=${google_acct1_password}" +fi +if ! g3kubectl get secrets google-acct2 > /dev/null 2>&1; then + g3kubectl create secret generic google-acct2 "--from-literal=email=${google_acct2_email}" "--from-literal=password=${google_acct2_password}" +fi + +if ! g3kubectl get storageclass gp2 > /dev/null 2>&1; then + g3kubectl apply -f "${GEN3_HOME}/kube/services/jenkins/10storageclass.yaml" +fi +if ! g3kubectl get persistentvolumeclaim datadir-jenkins > /dev/null 2>&1; then + g3kubectl apply -f "${GEN3_HOME}/kube/services/jenkins/00pvc.yaml" +fi + +# Note: jenkins service account is configured by `kube-setup-roles` +gen3 kube-setup-roles +# Note: only the 'default' namespace jenkins-service account gets a cluster rolebinding +g3kubectl apply -f "${GEN3_HOME}/kube/services/jenkins/clusterrolebinding-devops.yaml" + +# Note: requires Jenkins entry in cdis-manifest +gen3 roll jenkins2 +gen3 roll jenkins2-worker +gen3 roll jenkins2-ci-worker + +# +# Get the ARN of the SSL certificate for the commons - +# We'll optimistically assume it's a wildcard cert that +# is appropriate to also attach to the jenkins ELB +# +export ARN=$(g3kubectl get configmap global --output=jsonpath='{.data.revproxy_arn}') +if [[ ! 
-z $ARN ]]; then + envsubst <"${GEN3_HOME}/kube/services/jenkins/jenkins-service.yaml" | g3kubectl apply -f - +else + gen3_log_info "Global configmap not configured - not launching service (require SSL cert ARN)" +fi diff --git a/gen3/bin/kube-setup-karpenter.sh b/gen3/bin/kube-setup-karpenter.sh index bada6e69e..949c1ccd1 100644 --- a/gen3/bin/kube-setup-karpenter.sh +++ b/gen3/bin/kube-setup-karpenter.sh @@ -23,8 +23,10 @@ gen3_deploy_karpenter() { if g3k_config_lookup .global.karpenter_version; then karpenter=$(g3k_config_lookup .global.karpenter_version) fi - export clusterversion=`kubectl version --short -o json | jq -r .serverVersion.minor` - if [ "${clusterversion}" = "24+" ]; then + export clusterversion=`kubectl version -o json | jq -r .serverVersion.minor` + if [ "${clusterversion}" = "25+" ]; then + karpenter=${karpenter:-v0.27.0} + elif [ "${clusterversion}" = "24+" ]; then karpenter=${karpenter:-v0.24.0} else karpenter=${karpenter:-v0.22.0} @@ -77,6 +79,14 @@ gen3_deploy_karpenter() { "Effect": "Allow", "Resource": "*", "Sid": "ConditionalEC2Termination" + }, + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": [ + "kms:*" + ], + "Resource": "*" } ], "Version": "2012-10-17" @@ -140,7 +150,11 @@ gen3_deploy_karpenter() { --set serviceAccount.name=karpenter \ --set serviceAccount.create=false \ --set controller.env[0].name=AWS_REGION \ - --set controller.env[0].value=us-east-1 + --set controller.env[0].value=us-east-1 \ + --set controller.resources.requests.memory="2Gi" \ + --set controller.resources.requests.cpu="2" \ + --set controller.resources.limits.memory="2Gi" \ + --set controller.resources.limits.cpu="2" fi gen3 awsrole sa-annotate karpenter "karpenter-controller-role-$vpc_name" karpenter gen3_log_info "Remove cluster-autoscaler" diff --git a/gen3/bin/kube-setup-ohdsi.sh b/gen3/bin/kube-setup-ohdsi.sh index d586570db..3d8165547 100644 --- a/gen3/bin/kube-setup-ohdsi.sh +++ b/gen3/bin/kube-setup-ohdsi.sh @@ -14,13 +14,8 @@ new_client() { local secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client atlas --urls https://${atlas_hostname}/WebAPI/user/oauth/callback?client_name=OidcClient --username atlas --allowed-scopes openid profile email user | tail -1) # secrets looks like ('CLIENT_ID', 'CLIENT_SECRET') if [[ ! $secrets =~ (\'(.*)\', \'(.*)\') ]]; then - # try delete client - g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-delete --client atlas > /dev/null 2>&1 - secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client atlas --urls https://${atlas_hostname}/WebAPI/user/oauth/callback?client_name=OidcClient --username atlas --allowed-scopes openid profile email user | tail -1) - if [[ ! 
$secrets =~ (\'(.*)\', \'(.*)\') ]]; then - gen3_log_err "kube-setup-ohdsi" "Failed generating oidc client for atlas: $secrets" - return 1 - fi + gen3_log_err "kube-setup-ohdsi" "Failed generating oidc client for atlas: $secrets" + return 1 fi local FENCE_CLIENT_ID="${BASH_REMATCH[2]}" local FENCE_CLIENT_SECRET="${BASH_REMATCH[3]}" @@ -87,6 +82,8 @@ setup_secrets() { export DB_HOST=$(jq -r ".db_host" <<< "$dbcreds") export FENCE_URL="https://${hostname}/user/user" + # get arborist_url from manifest.json: + export ARBORIST_URL=$(g3k_manifest_lookup .global.arborist_url) export FENCE_METADATA_URL="https://${hostname}/.well-known/openid-configuration" export FENCE_CLIENT_ID=$(jq -r ".FENCE_CLIENT_ID" <<< "$appcreds") export FENCE_CLIENT_SECRET=$(jq -r ".FENCE_CLIENT_SECRET" <<< "$appcreds") diff --git a/gen3/bin/kube-setup-revproxy.sh b/gen3/bin/kube-setup-revproxy.sh index fcc2ef3b7..fd30b478b 100644 --- a/gen3/bin/kube-setup-revproxy.sh +++ b/gen3/bin/kube-setup-revproxy.sh @@ -111,15 +111,14 @@ for name in $(g3kubectl get services -o json | jq -r '.items[] | .metadata.name' fi done -if g3kubectl get namespace argo > /dev/null 2>&1; -then - for argo in $(g3kubectl get services -n argo -o jsonpath='{.items[*].metadata.name}'); - do - filePath="$scriptDir/gen3.nginx.conf/${argo}.conf" - if [[ -f "$filePath" ]]; then - confFileList+=("--from-file" "$filePath") - fi - done + +if g3k_manifest_lookup .argo.argo_server_service_url 2> /dev/null; then + argo_server_service_url=$(g3k_manifest_lookup .argo.argo_server_service_url) + g3k_kv_filter "${scriptDir}/gen3.nginx.conf/argo-server.conf" SERVICE_URL "${argo_server_service_url}" > /tmp/argo-server-with-url$(gen3 db namespace).conf + filePath="/tmp/argo-server-with-url$(gen3 db namespace).conf" + if [[ -f "$filePath" ]]; then + confFileList+=("--from-file" "$filePath") + fi fi if g3kubectl get namespace argocd > /dev/null 2>&1; diff --git a/gen3/bin/kube-setup-system-services.sh b/gen3/bin/kube-setup-system-services.sh index 609ee01c7..c26a04cb5 100644 --- a/gen3/bin/kube-setup-system-services.sh +++ b/gen3/bin/kube-setup-system-services.sh @@ -19,7 +19,7 @@ gen3_load "gen3/gen3setup" kubeproxy=${kubeproxy:-1.24.7} coredns=${coredns:-1.8.7} kubednsautoscaler=${kubednsautoscaler:-1.8.6} -cni=${cni:-1.12.2} +cni=${cni:-1.14.1} calico=${calico:-1.7.8} @@ -39,7 +39,7 @@ calico_yaml="https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v${calico} g3kubectl set image daemonset.apps/kube-proxy -n kube-system kube-proxy=${kube_proxy_image} g3kubectl set image --namespace kube-system deployment.apps/coredns coredns=${coredns_image} -g3k_kv_filter "${GEN3_HOME}/kube/services/kube-dns-autoscaler/dns-horizontal-autoscaler.yaml" SERVICE "coredns" IMAGE "$kubednsautoscaler_image" | g3kubectl apply -f - +#g3k_kv_filter "${GEN3_HOME}/kube/services/kube-dns-autoscaler/dns-horizontal-autoscaler.yaml" SERVICE "coredns" IMAGE "$kubednsautoscaler_image" | g3kubectl apply -f - g3kubectl apply -f ${cni_image} g3kubectl apply -f ${calico_yaml} diff --git a/gen3/bin/kube-setup-wts.sh b/gen3/bin/kube-setup-wts.sh index b807da2d5..ad8211d03 100644 --- a/gen3/bin/kube-setup-wts.sh +++ b/gen3/bin/kube-setup-wts.sh @@ -42,6 +42,8 @@ new_client() { "oidc_client_id": "$client_id", "oidc_client_secret": "$client_secret", + "aggregate_endpoint_allowlist": ["/authz/mapping"], + "external_oidc": [] } EOM diff --git a/gen3/bin/migrate-to-vpc-cni.sh b/gen3/bin/migrate-to-vpc-cni.sh new file mode 100644 index 000000000..510d9ebef --- /dev/null +++ b/gen3/bin/migrate-to-vpc-cni.sh 
@@ -0,0 +1,138 @@
+#!/bin/bash
+
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/gen3setup"
+
+#Get the K8s NS
+ctx="$(g3kubectl config current-context)"
+ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | .context.namespace")"
+
+# Set the cluster name variable
+CLUSTER_NAME=`gen3 api environment`
+
+# Check if in default ns
+if [[ ("$ctxNamespace" != "default" && "$ctxNamespace" != "null") ]]; then
+  gen3_log_err "Namespace must be default"
+  exit 1
+fi
+
+# Cd into Cloud-automation repo and pull the latest from master
+gen3_log_info "Pulling the latest from Cloud-Auto"
+cd /home/$CLUSTER_NAME/cloud-automation || { gen3_log_err "Cloud-automation repo not found"; exit 1; }
+#### Change to master
+git checkout master || { gen3_log_err "Failed to checkout master branch"; exit 1; }
+git pull || { gen3_log_err "Failed to pull from the repository"; exit 1; }
+
+# Update the Karpenter Node Template
+gen3_log_info "Apply new Karpenter Node Template"
+if [[ -d $(g3k_manifest_init)/$(g3k_hostname)/manifests/karpenter ]]; then
+  gen3_log_info "Karpenter setup in manifest. Open a cdismanifest PR and add this line to aws node templates: https://github.com/uc-cdis/cloud-automation/blob/master/kube/services/karpenter/nodeTemplateDefault.yaml#L40"
+  while true; do
+    read -p "Have you updated your manifest? (yes/no): " yn
+    case $yn in
+      [Yy]* )
+        gen3_log_info "Proceeding with Karpenter deployment..."
+        gen3 kube-setup-karpenter deploy --force || { gen3_log_err "kube-setup-karpenter failed"; exit 1; }
+        break
+        ;;
+      [Nn]* )
+        gen3_log_info "Please update the cdismanifest before proceeding."
+        exit 1
+        ;;
+      * )
+        gen3_log_info "Please answer yes or no."
+        ;;
+    esac
+  done
+else
+  gen3 kube-setup-karpenter deploy --force || { gen3_log_err "kube-setup-karpenter failed"; exit 1; }
+fi
+
+# Cordon all the nodes before running "gen3 roll all"
+gen3_log_info "Cordoning all nodes"
+kubectl get nodes --no-headers -o custom-columns=":metadata.name" | grep -v '^fargate' | xargs -I{} kubectl cordon {}
+
+# Run a "gen3 roll all" so all nodes use the new mounted BPF File System
+gen3_log_info "Cycling all the nodes by running gen3 roll all"
+gen3 roll all --fast || exit 1
+
+# Confirm that all nodes have been rotated
+while true; do
+  read -p "Roll all complete. Have all cordoned nodes been rotated? (yes/no): " yn
+  case $yn in
+    [Yy]* )
+      gen3_log_info "Continuing with script..."
+      break
+      ;;
+    [Nn]* )
+      gen3_log_info "Please drain any remaining nodes with 'kubectl drain <node> --ignore-daemonsets --delete-emptydir-data'"
+      ;;
+    * )
+      gen3_log_info "Please answer yes or no."
+      ;;
+  esac
+done
+
+
+# Delete all existing network policies
+gen3_log_info "Deleting networkpolicies"
+kubectl delete networkpolicies --all
+
+# Delete all Calico related resources from the "kube-system" namespace
+gen3_log_info "Deleting all Calico related resources"
+kubectl get deployments -n kube-system | grep calico | awk '{print $1}' | xargs kubectl delete deployment -n kube-system
+kubectl get daemonsets -n kube-system | grep calico | awk '{print $1}' | xargs kubectl delete daemonset -n kube-system
+kubectl get services -n kube-system | grep calico | awk '{print $1}' | xargs kubectl delete service -n kube-system
+kubectl get replicasets -n kube-system | grep calico | awk '{print $1}' | xargs kubectl delete replicaset -n kube-system
+
+# Backup the current VPC CNI configuration in case of rollback
+gen3_log_info "Backing up current VPC CNI Configuration..."
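+# Note: the backup written below is what a rollback would re-apply. For example
+# (illustrative only, assuming the backup file is still in the working directory):
+#   kubectl apply -f aws-k8s-cni-old.yaml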
+kubectl get daemonset aws-node -n kube-system -o yaml > aws-k8s-cni-old.yaml || { gen3_log_err "Error backing up VPC CNI configuration"; exit 1; }
+
+# Check to ensure we are not using an AWS plugin to manage the VPC CNI Plugin
+if aws eks describe-addon --cluster-name "$CLUSTER_NAME" --addon-name vpc-cni --query addon.addonVersion --output text 2>/dev/null; then
+  gen3_log_err "Error: VPC CNI Plugin is managed by AWS. Please log into the AWS UI and delete the VPC CNI Plugin in Amazon EKS, then re-run this script."
+  exit 1
+else
+  gen3_log_info "No managed VPC CNI Plugin found, proceeding with the script."
+fi
+
+# Apply the new VPC CNI Version
+gen3_log_info "Applying new version of VPC CNI"
+g3kubectl apply -f https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v1.14.1/config/master/aws-k8s-cni.yaml || { gen3_log_err "Failed to apply new VPC CNI version"; exit 1; }
+
+# Check the version to make sure it updated
+NEW_VERSION=$(kubectl describe daemonset aws-node --namespace kube-system | grep amazon-k8s-cni: | cut -d : -f 3)
+gen3_log_info "Current version of aws-k8s-cni is: $NEW_VERSION"
+if [ "$NEW_VERSION" != "v1.14.1" ]; then
+  gen3_log_info "The version of aws-k8s-cni has not been updated correctly."
+  exit 1
+fi
+
+# Edit the amazon-vpc-cni configmap to enable network policy controller
+gen3_log_info "Enabling NetworkPolicies in VPC CNI Configmap"
+kubectl patch configmap -n kube-system amazon-vpc-cni --type merge -p '{"data":{"enable-network-policy-controller":"true"}}' || { gen3_log_err "Configmap patch failed"; exit 1; }
+
+# Edit the aws-node daemonset
+gen3_log_info "Enabling NetworkPolicies in aws-node Daemonset"
+kubectl patch daemonset aws-node -n kube-system --type=json -p='[{"op": "add", "path": "/spec/template/spec/containers/1/args", "value": ["--enable-network-policy=true", "--enable-ipv6=false", "--enable-cloudwatch-logs=false", "--metrics-bind-addr=:8162", "--health-probe-bind-addr=:8163"]}]' || { gen3_log_err "Daemonset edit failed"; exit 1; }
+
+# Ensure all the aws-nodes are running
+kubectl get pods -n kube-system | grep aws
+while true; do
+  read -p "Do all the aws-node pods in the kube-system ns have 2/2 containers running? (yes/no): " yn
+  case $yn in
+    [Yy]* )
+      gen3_log_info "Running kube-setup-networkpolicy..."
+      gen3 kube-setup-networkpolicy || exit 1
+      break
+      ;;
+    [Nn]* )
+      gen3_log_err "Look at aws-node logs to figure out what went wrong. View this document for more details: https://docs.google.com/document/d/1fcBTciQSSwjvHktEnO_7EObY-xR_EvJ2NtgUa70wvL8"
+      gen3_log_info "Rollback instructions are also available in the above document"
+      ;;
+    * )
+      gen3_log_info "Please answer yes or no."
+      ;;
+  esac
+done
\ No newline at end of file
diff --git a/gen3/bin/mutate-guppy-config-for-guppy-test.sh b/gen3/bin/mutate-guppy-config-for-guppy-test.sh
index de7da10d5..151bb7169 100644
--- a/gen3/bin/mutate-guppy-config-for-guppy-test.sh
+++ b/gen3/bin/mutate-guppy-config-for-guppy-test.sh
@@ -16,7 +16,7 @@ sed -i 's/\(.*\)"index": "\(.*\)_etl",$/\1"index": "jenkins_subject_alias",/' or
 # for bloodpac-like envs
 sed -i 's/\(.*\)"index": "\(.*\)_case",$/\1"index": "jenkins_subject_alias",/' original_guppy_config.yaml
 # the pre-defined Canine index works with subject ONLY (never case)
-sed -i 's/\(.*\)"type": "case"$/\1"type": "subject"/' original_guppy_config.yaml
+# sed -i 's/\(.*\)"type": "case"$/\1"type": "subject"/' original_guppy_config.yaml
 sed -i 's/\(.*\)"index": "\(.*\)_file",$/\1"index": "jenkins_file_alias",/' original_guppy_config.yaml
 sed -i 's/\(.*\)"config_index": "\(.*\)_array-config",$/\1"config_index": "jenkins_configs_alias",/' original_guppy_config.yaml
diff --git a/gen3/lib/logs/snapshot.sh b/gen3/lib/logs/snapshot.sh
index 31cb80283..ae769a285 100644
--- a/gen3/lib/logs/snapshot.sh
+++ b/gen3/lib/logs/snapshot.sh
@@ -36,10 +36,11 @@ gen3_logs_snapshot_container() {
 # Snapshot all the pods
 #
 gen3_logs_snapshot_all() {
+  # For each pod for which we can list the containers, get the pod name and get its list of containers
+  # (container names + initContainers names). Display them as lines of "<pod> <cont>".
   g3kubectl get pods -o json | \
-    jq -r '.items | map(select(.status.phase != "Pending" and .status.phase != "Unknown")) | map( {pod: .metadata.name, containers: .spec.containers | map(.name) } ) | map( .pod as $pod | .containers | map( { pod: $pod, cont: .})[]) | map(select(.cont != "pause" and .cont != "jupyterhub"))[] | .pod + " " + .cont' | \
+    jq -r '.items | map(select(.status.phase != "Pending" and .status.phase != "Unknown")) | .[] | .metadata.name as $pod | (.spec.containers + .spec.initContainers) | map(select(.name != "pause" and .name != "jupyterhub")) | .[] | {pod: $pod, cont: .name} | "\(.pod) \(.cont)"' | \
     while read -r line; do
       gen3_logs_snapshot_container $line
     done
 }
-
diff --git a/gen3/lib/testData/default/expectedFenceResult.yaml b/gen3/lib/testData/default/expectedFenceResult.yaml
index f6d76d790..98c360531 100644
--- a/gen3/lib/testData/default/expectedFenceResult.yaml
+++ b/gen3/lib/testData/default/expectedFenceResult.yaml
@@ -44,6 +44,13 @@ spec:
       nodeAffinity:
         preferredDuringSchedulingIgnoredDuringExecution:
         - weight: 100
+          preference:
+            matchExpressions:
+            - key: karpenter.sh/capacity-type
+              operator: In
+              values:
+              - on-demand
+        - weight: 99
           preference:
             matchExpressions:
             - key: eks.amazonaws.com/capacityType
@@ -136,6 +143,7 @@ spec:
           ports:
             - containerPort: 80
             - containerPort: 443
+            - containerPort: 6567
           volumeMounts:
             # -----------------------------------------------------------------------------
             # DEPRECATED! 
Remove when all commons are no longer using local_settings.py diff --git a/gen3/lib/testData/default/expectedSheepdogResult.yaml b/gen3/lib/testData/default/expectedSheepdogResult.yaml index b9db85a36..a2bd3efcc 100644 --- a/gen3/lib/testData/default/expectedSheepdogResult.yaml +++ b/gen3/lib/testData/default/expectedSheepdogResult.yaml @@ -17,6 +17,7 @@ spec: template: metadata: labels: + netnolimit: "yes" app: sheepdog release: production public: "yes" @@ -39,12 +40,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - SPOT automountServiceAccountToken: false volumes: - name: config-volume diff --git a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml index d4196c070..adc35ad2f 100644 --- a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml +++ b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml @@ -47,6 +47,13 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType diff --git a/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml b/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml index f54fd3e03..08407ae52 100644 --- a/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml +++ b/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml @@ -43,6 +43,13 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType diff --git a/gen3/test/ec2Test.sh b/gen3/test/ec2Test.sh index 21310a24c..4981c925c 100644 --- a/gen3/test/ec2Test.sh +++ b/gen3/test/ec2Test.sh @@ -1,6 +1,6 @@ -if ! EC2_TEST_IP="$(g3kubectl get nodes -o json | jq -r -e '.items[0].status.addresses[] | select(.type == "InternalIP") | .address')" || [[ -z "$EC2_TEST_IP" ]]; then +if ! EC2_TEST_IP="$(g3kubectl get nodes -o json | jq -r -e '.items[3].status.addresses[] | select(.type == "InternalIP") | .address')" || [[ -z "$EC2_TEST_IP" ]]; then gen3_log_err "ec2Test failed to acquire IP address of a k8s node to test against" fi diff --git a/gen3/test/ecrTest.sh b/gen3/test/ecrTest.sh index 91edf798b..57847abe5 100644 --- a/gen3/test/ecrTest.sh +++ b/gen3/test/ecrTest.sh @@ -10,8 +10,8 @@ test_ecr_login() { test_ecr_setup() { if [[ -n "$JENKINS_HOME" ]]; then - # give ourselves read/write permissions on /var/run/docker.sock - sudo chmod a+rw /var/run/docker.sock; because $? "ecr_setup modified docker.sock" + # give ourselves permissions on /run/containerd/containerd.sock + sudo chown root:sudo /run/containerd/containerd.sock; because $? 
"ecr_setup modified containerd.sock" fi } diff --git a/gen3/test/jobTest.sh b/gen3/test/jobTest.sh index 84a4d046b..bb37b4f72 100644 --- a/gen3/test/jobTest.sh +++ b/gen3/test/jobTest.sh @@ -6,7 +6,7 @@ excludeJob() { local jobKey="$1" local excludeList=( - /aws-bucket- /bucket- /covid19- /data-ingestion- /google- /nb-etl- /remove-objects-from- /replicate- /s3sync- /fence-cleanup + /aws-bucket- /bucket- /covid19- /data-ingestion- /google- /nb-etl- /remove-objects-from- /replicate- /s3sync- /fence-cleanup /etl- /indexd- /metadata- ) for exclude in "${excludeList[@]}"; do if [[ "$it" =~ $exclude ]]; then return 0; fi diff --git a/gen3/test/jupyterTest.sh b/gen3/test/jupyterTest.sh index f0e327d71..db6a62618 100644 --- a/gen3/test/jupyterTest.sh +++ b/gen3/test/jupyterTest.sh @@ -30,7 +30,7 @@ test_jupyter_metrics() { } shunit_runtest "test_jupyter_idle" "jupyter" -shunit_runtest "test_jupyter_metrics" "jupyter" +# shunit_runtest "test_jupyter_metrics" "jupyter" shunit_runtest "test_jupyter_prepuller" "local,jupyter" shunit_runtest "test_jupyter_namespace" "local,jupyter" shunit_runtest "test_jupyter_setup" "jupyter" diff --git a/gen3/test/terraformTest.sh b/gen3/test/terraformTest.sh deleted file mode 100644 index 17bcc03c2..000000000 --- a/gen3/test/terraformTest.sh +++ /dev/null @@ -1,461 +0,0 @@ -GEN3_TEST_PROFILE="${GEN3_TEST_PROFILE:-cdistest}" -GEN3_TEST_WORKSPACE="gen3test" -GEN3_TEST_ACCOUNT=707767160287 - -# -# TODO - generalize these tests to setup their own test VPC, -# rather than relying on qaplanetv1 or devplanetv1 being there -# - -# -# Little macos/linux stat wrapper -# -file_mode() { - if [[ $(uname -s) == 'Linux' ]]; then - stat -c %a "$1" - else - stat -f %p "$1" - fi -} - -test_workspace() { - gen3 workon $GEN3_TEST_PROFILE $GEN3_TEST_WORKSPACE; because $? "Calling gen3 workon multiple times should be harmless" - [[ $GEN3_PROFILE = $GEN3_TEST_PROFILE ]]; because $? "gen3 workon sets the GEN3_PROFILE env variable: $GEN3_PROFILE" - [[ $GEN3_WORKSPACE = $GEN3_TEST_WORKSPACE ]]; because $? "gen3 workon sets the GEN3_WORKSPACE env variable: $GEN3_WORKSPACE" - [[ $GEN3_FLAVOR = "AWS" || \ - ($GEN3_FLAVOR == "GCP" && $GEN3_PROFILE =~ ^gcp-) || \ - ($GEN3_FLAVOR == "ONPREM" && $GEN3_PROFILE =~ ^onprem-) ]]; because $? "GEN3_FLAVOR is gcp for gcp-* profiles, else AWS" - [[ $GEN3_FLAVOR != "AWS" || $GEN3_S3_BUCKET = "cdis-state-ac${GEN3_TEST_ACCOUNT}-gen3" || $GEN3_S3_BUCKET = "cdis-terraform-state.account-${GEN3_TEST_ACCOUNT}.gen3" ]]; because $? "gen3 workon sets the GEN3_S3_BUCKET env variable: $GEN3_S3_BUCKET" - [[ (! -z $GEN3_WORKDIR) && -d $GEN3_WORKDIR ]]; because $? "gen3 workon sets the GEN3_WORKDIR env variable, and initializes the folder: $GEN3_WORKDIR" - [[ $(file_mode $GEN3_WORKDIR) =~ 700$ ]]; because $? "gen3 workon sets the GEN3_WORKDIR to mode 0700, because secrets are in there" - gen3 cd && [[ $(pwd) = "$GEN3_WORKDIR" ]]; because $? "gen3 cd should take us to the workspace by default: $(pwd) =? $GEN3_WORKDIR" - for fileName in README.md config.tfvars backend.tfvars; do - [[ -f $fileName ]]; because $? "gen3 workon ensures we have a $fileName - local copy || s3 copy || generated from template" - done - [[ ! -z "$MD5" ]]; because $? "commons.sh sets MD5 to $MD5" - - if [[ $GEN3_TEST_WORKSPACE =~ __custom$ ]]; then - [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_WORKDIR" ]]; because $? "a __custom workspace loads from the workspace folder" - elif [[ "$GEN3_TEST_PROFILE" =~ ^gcp- ]]; then - [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/gcp/commons" ]]; because $? 
"a gcp- profile currently only support a commons workspace" - elif [[ "$GEN3_TEST_PROFILE" =~ ^onprem- ]]; then - for fileName in README.md creds.json 00configmap.yaml kube-setup.sh; do - filePath="onprem_scripts/$fileName" - [[ -f $filePath ]]; because $? "gen3 workon ensures we have a $filePath generated from template" - done - else # aws profile - [[ "$GEN3_TFSCRIPT_FOLDER" =~ ^"$GEN3_HOME/tf_files/aws/" ]]; because $? "an aws workspace references the aws/ folder: $GEN3_TFSCRIPT_FOLDER" - fi -} - -workspace_cleanup() { - # try to avoid accidentally erasing the user's data ... - cd /tmp && [[ -n "$GEN3_WORKDIR" && "$GEN3_WORKDIR" =~ /gen3/ && -f "$GEN3_WORKDIR/config.tfvars" ]] && /bin/rm -rf "$GEN3_WORKDIR"; - because $? "was able to cleanup $GEN3_WORKDIR" -} - -test_uservpc_workspace() { - GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}_user" - test_workspace - [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/aws/user_vpc" ]]; because $? "a _user workspace should use the ./aws/user_vpc resources: $GEN3_TFSCRIPT_FOLDER" - workspace_cleanup -} - -test_usergeneric_workspace() { - GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}_usergeneric" - test_workspace - [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/aws/user_generic" ]]; because $? "a _usergeneric workspace should use the ./aws/user_generic resources: $GEN3_TFSCRIPT_FOLDER" - cat << EOF > config.tfvars -username="frickjack" -EOF - gen3 tfplan; because $? "_usergeneric tfplan should work"; - workspace_cleanup -} - -test_snapshot_workspace() { - GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}_snapshot" - test_workspace - [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/aws/rds_snapshot" ]]; because $? "a _snapshot workspace should use the ./aws/rds_snapshot resources: $GEN3_TFSCRIPT_FOLDER" - workspace_cleanup -} - -test_databucket_workspace() { - GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}_databucket" - test_workspace - [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/aws/data_bucket" ]]; because $? "a _databucket workspace should use the ./aws/data_bucket resources: $GEN3_TFSCRIPT_FOLDER" - cat - > config.tfvars < config.tfvars < config.tfvars < @ in password -db_password_fence="whatever" - -db_password_gdcapi="whatever" -db_password_sheepdog="whatever" -db_password_peregrine="whatever" - -db_password_indexd="g6pmYkcoR7qECjGoErzVb5gkX3kum0yo" - -# password for write access to indexd -gdcapi_indexd_password="oYva39mIPV5uXskv7jWnKuVZBUFBQcxd" - -fence_snapshot="" -gdcapi_snapshot="" -indexd_snapshot="" -# mailgun for sending alert e-mails -mailgun_api_key="" -mailgun_api_url="" -mailgun_smtp_host="" - -kube_ssh_key="" -EOM - [[ "$(pwd)" =~ "/$GEN3_WORKSPACE"$ ]]; because $? "commons workspace should have base $GEN3_WORKSPACE - $(pwd)" - gen3 tfplan; because $? "tfplan should run even with some invalid config variables" - [[ -f "$GEN3_WORKDIR/plan.terraform" ]]; because $? "'gen3 tfplan' generates a plan.terraform file used by 'gen3 tfapply'" - workspace_cleanup -} - -test_custom_workspace() { - GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}__custom" - test_workspace - - local sourceFolder="../../../../../cloud-automation/tf_files/aws/modules/s3-bucket" - if [[ ! 
-d "$sourceFolder" ]]; then - # Jenkins has a different relative path setup - sourceFolder="../../../../cloud-automation/tf_files/aws/modules/s3-bucket" - fi - cat - > bucket.tf < config.tfvars < config.tfvars < config.tfvars < config.tfvars < config.tfvars < config.tfvars < config.tfvars < config.tfvars <> /home/ec2-user/.ssh/authorized_keys - aws ec2 create-tags --resources $instanceId --tags 'Key="instanceId",Value='$instanceId'' - curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys + + echo "$(jq '.registryPullQPS=0' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json sysctl -w fs.inotify.max_user_watches=12000 @@ -96,7 +116,7 @@ data: blockDeviceMappings: - deviceName: /dev/xvda ebs: - volumeSize: 50Gi + volumeSize: 100Gi volumeType: gp2 encrypted: true deleteOnTermination: true diff --git a/kube/services/argo-events/workflows/eventsource-created.yaml b/kube/services/argo-events/workflows/eventsource-created.yaml index 9abf78e19..11d7084ca 100644 --- a/kube/services/argo-events/workflows/eventsource-created.yaml +++ b/kube/services/argo-events/workflows/eventsource-created.yaml @@ -15,4 +15,4 @@ spec: eventTypes: - ADD filter: - afterStart: false + afterStart: true diff --git a/kube/services/argo-events/workflows/sensor-completed.yaml b/kube/services/argo-events/workflows/sensor-completed.yaml index e92ad6918..293c0e119 100644 --- a/kube/services/argo-events/workflows/sensor-completed.yaml +++ b/kube/services/argo-events/workflows/sensor-completed.yaml @@ -43,18 +43,22 @@ spec: parallelism: 1 template: spec: - restartPolicy: Never + restartPolicy: OnFailure containers: - name: karpenter-resource-creator image: quay.io/cdis/awshelper command: ["/bin/sh"] - args: + args: - "-c" - | - kubectl delete awsnodetemplate workflow-$WORKFLOW_NAME - kubectl delete provisioners workflow-$WORKFLOW_NAME - env: - - name: WORKFLOW_NAME - value: "" - backoffLimit: 0 + if kubectl get awsnodetemplate workflow-$WORKFLOW_NAME >/dev/null 2>&1; then + kubectl delete awsnodetemplate workflow-$WORKFLOW_NAME + fi + if kubectl get provisioner workflow-$WORKFLOW_NAME >/dev/null 2>&1; then + kubectl delete provisioners workflow-$WORKFLOW_NAME + fi + env: + - name: WORKFLOW_NAME + value: "" + backoffLimit: 20 diff --git a/kube/services/argo-events/workflows/sensor-created.yaml b/kube/services/argo-events/workflows/sensor-created.yaml index 27cbc5643..4221f5742 100644 --- a/kube/services/argo-events/workflows/sensor-created.yaml +++ b/kube/services/argo-events/workflows/sensor-created.yaml @@ -51,20 +51,31 @@ spec: parallelism: 1 template: spec: - restartPolicy: Never + restartPolicy: OnFailure containers: - name: karpenter-resource-creator image: quay.io/cdis/awshelper command: ["/bin/sh"] - args: - - "-c" - - | - for file in /home/manifests/*.yaml; do envsubst < $file | kubectl apply -f -; done + args: + - "-c" + - | + if ! kubectl get awsnodetemplate workflow-$WORKFLOW_NAME >/dev/null 2>&1; then + sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" | kubectl apply -f - + fi + + if ! 
kubectl get provisioner workflow-$WORKFLOW_NAME >/dev/null 2>&1; then + sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" | kubectl apply -f - + fi env: - - name: WORKFLOW_NAME - value: "" - - name: GEN3_USERNAME - value: "" + - name: WORKFLOW_NAME + value: "" + - name: GEN3_USERNAME + value: "" + - name: ENVIRONMENT + valueFrom: + configMapKeyRef: + name: environment + key: environment volumeMounts: - name: karpenter-templates-volume mountPath: /home/manifests @@ -72,5 +83,4 @@ spec: - name: karpenter-templates-volume configMap: name: karpenter-templates - backoffLimit: 0 - + backoffLimit: 20 diff --git a/kube/services/argo-events/workflows/sensor-deleted.yaml b/kube/services/argo-events/workflows/sensor-deleted.yaml index 61e2235d7..c235a820a 100644 --- a/kube/services/argo-events/workflows/sensor-deleted.yaml +++ b/kube/services/argo-events/workflows/sensor-deleted.yaml @@ -39,18 +39,22 @@ spec: parallelism: 1 template: spec: - restartPolicy: Never + restartPolicy: OnFailure containers: - name: karpenter-resource-creator image: quay.io/cdis/awshelper command: ["/bin/sh"] - args: + args: - "-c" - | - kubectl delete awsnodetemplate workflow-$WORKFLOW_NAME - kubectl delete provisioners workflow-$WORKFLOW_NAME - env: - - name: WORKFLOW_NAME - value: "" - backoffLimit: 0 + if kubectl get awsnodetemplate workflow-$WORKFLOW_NAME >/dev/null 2>&1; then + kubectl delete awsnodetemplate workflow-$WORKFLOW_NAME + fi + if kubectl get provisioner workflow-$WORKFLOW_NAME >/dev/null 2>&1; then + kubectl delete provisioners workflow-$WORKFLOW_NAME + fi + env: + - name: WORKFLOW_NAME + value: "" + backoffLimit: 20 diff --git a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml index 65f68d98a..89ec29ecc 100644 --- a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml +++ b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml @@ -58,7 +58,10 @@ spec: configMap: name: manifest-argo optional: true - + - name: argo-wrapper-namespace-config + configMap: + name: argo-wrapper-namespace-config + containers: - name: argo-wrapper GEN3_ARGO-WRAPPER_IMAGE @@ -70,3 +73,7 @@ spec: readOnly: true mountPath: /argo.json subPath: argo.json + - name: argo-wrapper-namespace-config + readOnly: true + mountPath: /argowrapper/config.ini + subPath: config.ini diff --git a/kube/services/argo-wrapper/config.ini b/kube/services/argo-wrapper/config.ini new file mode 100644 index 000000000..0693ee2e2 --- /dev/null +++ b/kube/services/argo-wrapper/config.ini @@ -0,0 +1,6 @@ +[DEFAULT] +ARGO_ACCESS_METHOD = access +ARGO_HOST = $ARGO_HOST +ARGO_NAMESPACE = $ARGO_NAMESPACE +COHORT_DEFINITION_BY_SOURCE_AND_TEAM_PROJECT_URL = http://cohort-middleware-service/cohortdefinition-stats/by-source-id/{}/by-team-project?team-project={} +COHORT_MIDDLEWARE_URL = http://cohort-middleware-service diff --git a/kube/services/argo/values.yaml b/kube/services/argo/values.yaml index e8db62711..23dda4a5a 100644 --- a/kube/services/argo/values.yaml +++ b/kube/services/argo/values.yaml @@ -1,10 +1,17 @@ controller: - parallelism: 3 + parallelism: 10 + namespaceParallelism: 5 metricsConfig: # -- Enables prometheus metrics server enabled: true servicePort: 9090 + resources: + requests: + memory: 8Gi + limits: + memory: 8Gi + podAnnotations: prometheus.io/scrape: "true" prometheus.io/path: /metrics @@ -22,11 +29,11 @@ controller: } ] } - } + } resourceRateLimit: limit: 40 - burst: 4 + burst: 4 # -- enable persistence using postgres persistence: @@ 
-43,7 +50,7 @@ controller: port: 5432 database: GEN3_ARGO_DB_NAME tableName: argo_workflows - # # the database secrets must be in the same namespace of the controller + # # the database secrets must be in the same namespace of the controller userNameSecret: name: argo-db-creds key: db_username @@ -52,7 +59,7 @@ controller: key: db_password nodeStatusOffLoad: true - workflowDefaults: + workflowDefaults: spec: archiveLogs: true @@ -71,11 +78,16 @@ server: baseHref: "/argo/" # -- Extra arguments to provide to the Argo server binary, such as for disabling authentication. extraArgs: - - --auth-mode=server - - --auth-mode=client + - --auth-mode=server + - --auth-mode=client extraEnv: - - name: ARGO_HTTP1 - value: "true" + - name: ARGO_HTTP1 + value: "true" + resources: + requests: + memory: 8Gi + limits: + memory: 8Gi # -- Influences the creation of the ConfigMap for the workflow-controller itself. useDefaultArtifactRepo: true diff --git a/kube/services/argo/workflows/fence-usersync-cron.yaml b/kube/services/argo/workflows/fence-usersync-cron.yaml new file mode 100644 index 000000000..4723ce10f --- /dev/null +++ b/kube/services/argo/workflows/fence-usersync-cron.yaml @@ -0,0 +1,10 @@ +apiVersion: argoproj.io/v1alpha1 +kind: CronWorkflow +metadata: + name: fence-usersync-cron +spec: + serviceAccountName: argo + schedule: "*/30 * * * *" + workflowSpec: + workflowTemplateRef: + name: fence-usersync-workflow diff --git a/kube/services/argo/workflows/fence-usersync-wf.yaml b/kube/services/argo/workflows/fence-usersync-wf.yaml new file mode 100644 index 000000000..d7f56a2ce --- /dev/null +++ b/kube/services/argo/workflows/fence-usersync-wf.yaml @@ -0,0 +1,257 @@ +apiVersion: argoproj.io/v1alpha1 +kind: WorkflowTemplate +metadata: + name: fence-usersync-workflow +spec: + volumeClaimTemplates: + - metadata: + name: shared-data + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 1Gi + serviceAccountName: argo + entrypoint: fence-usersync + arguments: + parameters: + - name: ADD_DBGAP + value: "false" + - name: ONLY_DBGAP + value: "false" + templates: + - name: fence-usersync + steps: + - - name: wait-for-fence + template: wait-for-fence + - - name: awshelper + template: awshelper + - - name: usersyncer + template: usersyncer + + - name: wait-for-fence + container: + image: curlimages/curl:latest + command: ["/bin/sh","-c"] + args: ["while [ $(curl -sw '%{http_code}' http://fence-service -o /dev/null) -ne 200 ]; do sleep 5; echo 'Waiting for fence...'; done"] + + - name: awshelper + container: + image: quay.io/cdis/awshelper:master + imagePullPolicy: Always + securityContext: + runAsUser: 0 + env: + - name: gen3Env + valueFrom: + configMapKeyRef: + name: global + key: hostname + - name: userYamlS3Path + valueFrom: + configMapKeyRef: + name: manifest-global + key: useryaml_s3path + - name: slackWebHook + value: None + volumeMounts: + - name: shared-data + mountPath: /mnt/shared + command: ["/bin/bash"] + args: + - "-c" + - | + GEN3_HOME=/home/ubuntu/cloud-automation + source "${GEN3_HOME}/gen3/lib/utils.sh" + gen3_load "gen3/gen3setup" + + if [ "${userYamlS3Path}" = 'none' ]; then + # echo "using local user.yaml" + # cp /var/www/fence/user.yaml /mnt/shared/user.yaml + echo "s3 yaml not provided - bailing out" + exit 1 + else + # ----------------- + echo "awshelper downloading ${userYamlS3Path} to /mnt/shared/user.yaml" + n=0 + until [ $n -ge 5 ]; do + echo "Download attempt $n" + aws s3 cp "${userYamlS3Path}" /mnt/shared/user.yaml && break + n=$[$n+1] + sleep 2 + done + fi + if 
[[ ! -f /mnt/shared/user.yaml ]]; then + echo "awshelper failed to retrieve /mnt/shared/user.yaml" + exit 1 + fi + #----------- + echo "awshelper updating etl configmap" + if ! gen3 gitops etl-convert < /mnt/shared/user.yaml > /tmp/user.yaml; then + echo "ERROR: failed to generate ETL config" + exit 1 + fi + # kubectl delete configmap fence > /dev/null 2>&1 + # kubectl create configmap fence --from-file=/tmp/user.yaml + if [ "${slackWebHook}" != 'None' ]; then + curl -X POST --data-urlencode "payload={\"text\": \"AWSHelper: Syncing users on ${gen3Env}\"}" "${slackWebHook}" + fi + echo "Helper exit ok" + + - name: usersyncer + volumes: + - name: yaml-merge + configMap: + name: "fence-yaml-merge" + - name: config-volume + secret: + secretName: "fence-config" + - name: creds-volume + secret: + secretName: "fence-creds" + - name: fence-google-app-creds-secret-volume + secret: + secretName: "fence-google-app-creds-secret" + - name: fence-google-storage-creds-secret-volume + secret: + secretName: "fence-google-storage-creds-secret" + - name: fence-ssh-keys + secret: + secretName: "fence-ssh-keys" + defaultMode: 0400 + - name: fence-sshconfig + configMap: + name: "fence-sshconfig" + - name: projects + configMap: + name: "projects" + container: + image: quay.io/cdis/fence:master + imagePullPolicy: Always + env: + - name: PYTHONPATH + value: /var/www/fence + - name: SYNC_FROM_DBGAP + valueFrom: + configMapKeyRef: + name: manifest-global + key: sync_from_dbgap + - name: ADD_DBGAP + value: "{{workflow.parameters.ADD_DBGAP}}" + - name: ONLY_DBGAP + value: "{{workflow.parameters.ONLY_DBGAP}}" + - name: SLACK_SEND_DBGAP + valueFrom: + configMapKeyRef: + name: manifest-global + key: slack_send_dbgap + optional: true + - name: slackWebHook + valueFrom: + configMapKeyRef: + name: global + key: slack_webhook + optional: true + - name: gen3Env + valueFrom: + configMapKeyRef: + name: global + key: hostname + - name: FENCE_PUBLIC_CONFIG + valueFrom: + configMapKeyRef: + name: manifest-fence + key: fence-config-public.yaml + optional: true + volumeMounts: + - name: shared-data + mountPath: /mnt/shared + - name: "config-volume" + readOnly: true + mountPath: "/var/www/fence/fence-config.yaml" + subPath: fence-config.yaml + - name: "creds-volume" + readOnly: true + mountPath: "/var/www/fence/creds.json" + - name: "yaml-merge" + readOnly: true + mountPath: "/var/www/fence/yaml_merge.py" + - name: "fence-google-app-creds-secret-volume" + readOnly: true + mountPath: "/var/www/fence/fence_google_app_creds_secret.json" + subPath: fence_google_app_creds_secret.json + - name: "fence-google-storage-creds-secret-volume" + readOnly: true + mountPath: "/var/www/fence/fence_google_storage_creds_secret.json" + subPath: fence_google_storage_creds_secret.json + - name: "fence-ssh-keys" + mountPath: "/root/.ssh/id_rsa" + subPath: "id_rsa" + - name: "fence-ssh-keys" + mountPath: "/root/.ssh/id_rsa.pub" + subPath: "id_rsa.pub" + - name: "fence-sshconfig" + mountPath: "/root/.ssh/config" + subPath: "config" + - name: "projects" + mountPath: "/var/www/fence/projects.yaml" + subPath: "projects.yaml" + command: ["/bin/bash"] + args: + - "-c" + # Script always succeeds if it runs (echo exits with 0) + - | + echo "${ADD_DBGAP}" + echo "${ONLY_DBGAP}" + echo "${FENCE_PUBLIC_CONFIG:-""}" > "/var/www/fence/fence-config-public.yaml" + python /var/www/fence/yaml_merge.py /var/www/fence/fence-config-public.yaml /var/www/fence/fence-config-secret.yaml > /var/www/fence/fence-config.yaml + echo 'options use-vc' >> /etc/resolv.conf + let count=0 + 
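# wait up to ~100 seconds (50 tries x 2s) for the awshelper step to drop user.yaml on the shared volume +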
while [[ ! -f /mnt/shared/user.yaml && $count -lt 50 ]]; do + echo "fence container waiting for /mnt/shared/user.yaml"; + sleep 2 + let count=$count+1 + done + if [[ "$SYNC_FROM_DBGAP" != True && "$ADD_DBGAP" != "true" ]]; then + if [[ -f /mnt/shared/user.yaml ]]; then + echo "running fence-create" + time fence-create sync --arborist http://arborist-service --yaml /mnt/shared/user.yaml + else + echo "/mnt/shared/user.yaml did not appear within timeout :-(" + false # non-zero exit code + fi + exitcode=$? + else + output=$(mktemp "/tmp/fence-create-output_XXXXXX") + if [[ -f /mnt/shared/user.yaml && "$ONLY_DBGAP" != "true" ]]; then + echo "Running fence-create dbgap-sync with user.yaml - see $output" + time fence-create sync --arborist http://arborist-service --sync_from_dbgap "True" --projects /var/www/fence/projects.yaml --yaml /mnt/shared/user.yaml 2>&1 | tee "$output" + else + echo "Running fence-create dbgap-sync without user.yaml - see $output" + time fence-create sync --arborist http://arborist-service --sync_from_dbgap "True" --projects /var/www/fence/projects.yaml 2>&1 | tee "$output" + fi + exitcode="${PIPESTATUS[0]}" + echo "$output" + # Echo what files we are seeing on dbgap ftp to Slack + # We only do this step every 12 hours and not on weekends to reduce noise + if [[ -n "$SLACK_SEND_DBGAP" && "$SLACK_SEND_DBGAP" = True ]]; then + files=$(grep "Reading file" "$output") + let hour=$(date -u +10#%H) + let dow=$(date -u +10#%u) + if ! (( hour % 12 )) && (( dow < 6 )); then + if [ "${slackWebHook}" != 'None' ]; then + curl -X POST --data-urlencode "payload={\"text\": \"FenceHelper: \n\`\`\`\n${files}\n\`\`\`\"}" "${slackWebHook}" + fi + fi + fi + fi + if [[ $exitcode -ne 0 && "${slackWebHook}" != 'None' ]]; then + emptyfile=$(grep "EnvironmentError:" "$output") + if [ ! -z "$emptyfile" ]; then + curl -X POST --data-urlencode "payload={\"text\": \"JOBSKIPPED: User sync skipped on ${gen3Env} ${emptyfile}\"}" "${slackWebHook}"; + else + curl -X POST --data-urlencode "payload={\"text\": \"JOBFAIL: User sync failed on ${gen3Env}\"}" "${slackWebHook}" + fi + fi + echo "Exit code: $exitcode" + exit "$exitcode" \ No newline at end of file diff --git a/kube/services/datadog/datadog-application.yaml b/kube/services/datadog/datadog-application.yaml index f5a8925e1..19e0e1d86 100644 --- a/kube/services/datadog/datadog-application.yaml +++ b/kube/services/datadog/datadog-application.yaml @@ -5,14 +5,17 @@ metadata: namespace: argocd spec: project: default - source: - chart: datadog + sources: + - chart: datadog repoURL: 'https://helm.datadoghq.com' targetRevision: 3.6.4 helm: - valueFiles: - - https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/kube/services/datadog/values.yaml + valueFiles: + - $values/kube/services/datadog/values.yaml releaseName: datadog + - repoURL: 'https://github.com/uc-cdis/cloud-automation.git' + targetRevision: master + ref: values destination: server: 'https://kubernetes.default.svc' namespace: datadog @@ -21,4 +24,4 @@ spec: prune: true selfHeal: true syncOptions: - - CreateNamespace=true \ No newline at end of file + - CreateNamespace=true diff --git a/kube/services/datadog/values.yaml b/kube/services/datadog/values.yaml index c613bd079..fc0bbab8b 100644 --- a/kube/services/datadog/values.yaml +++ b/kube/services/datadog/values.yaml @@ -20,7 +20,18 @@ datadog: # datadog.apiKeyExistingSecret -- Use existing Secret which stores API key instead of creating a new one. The value should be set with the `api-key` key inside the secret. 
## If set, this parameter takes precedence over "apiKey". - apiKeyExistingSecret: "datadog-agent" + apiKeyExistingSecret: "ddgov-apikey" + + # datadog.site -- The site of the Datadog intake to send Agent data to. + # (documentation: https://docs.datadoghq.com/getting_started/site/) + + ## Set to 'datadoghq.com' to send data to the US1 site (default). + ## Set to 'datadoghq.eu' to send data to the EU site. + ## Set to 'us3.datadoghq.com' to send data to the US3 site. + ## Set to 'us5.datadoghq.com' to send data to the US5 site. + ## Set to 'ddog-gov.com' to send data to the US1-FED site. + ## Set to 'ap1.datadoghq.com' to send data to the AP1 site. + site: ddog-gov.com # datadog.kubeStateMetricsEnabled -- If true, deploys the kube-state-metrics deployment ## ref: https://github.com/kubernetes/kube-state-metrics/tree/kube-state-metrics-helm-chart-2.13.2/charts/kube-state-metrics @@ -59,11 +70,13 @@ datadog: apm: # datadog.apm.socketEnabled -- Enable APM over Socket (Unix Socket or windows named pipe) ## ref: https://docs.datadoghq.com/agent/kubernetes/apm/ - socketEnabled: true + socketEnabled: false # datadog.apm.portEnabled -- Enable APM over TCP communication (port 8126 by default) ## ref: https://docs.datadoghq.com/agent/kubernetes/apm/ - portEnabled: true + portEnabled: false + + enabled: false # datadog.apm.port -- Override the trace Agent port ## Note: Make sure your client is sending to the same UDP port. @@ -80,15 +93,15 @@ datadog: # datadog.processAgent.processCollection -- Set this to true to enable process collection in process monitoring agent ## Requires processAgent.enabled to be set to true to have any effect - processCollection: true + processCollection: false # datadog.processAgent.stripProcessArguments -- Set this to scrub all arguments from collected processes ## Requires processAgent.enabled and processAgent.processCollection to be set to true to have any effect ## ref: https://docs.datadoghq.com/infrastructure/process/?tab=linuxwindows#process-arguments-scrubbing - stripProcessArguments: true + stripProcessArguments: false # datadog.processAgent.processDiscovery -- Enables or disables autodiscovery of integrations - processDiscovery: true + processDiscovery: false ## Enable systemProbe agent and provide custom configs systemProbe: @@ -222,7 +235,7 @@ datadog: # timeout: 5 containerExcludeLogs: "kube_namespace:logging kube_namespace:argo name:pelican-export* name:job-task" - + containerExclude: "kube_namespace:logging kube_namespace:kube-system kube_namespace:kubecost kube_namespace:argo kube_namespace:cortex-xdr kube_namespace:monitoring kube_namespace:datadog" ## This is the Datadog Cluster Agent implementation that handles cluster-wide ## metrics more cleanly, separates concerns for better rbac, and implements ## the external metrics API so you can autoscale HPAs based on datadog metrics @@ -327,4 +340,3 @@ agents: # agents.rbac.serviceAccountAnnotations -- Annotations to add to the ServiceAccount if agents.rbac.create is true serviceAccountAnnotations: {} - diff --git a/kube/services/fenceshib/fenceshib-configmap.yaml b/kube/services/fenceshib/fenceshib-configmap.yaml index 2412518c0..b8e55243d 100644 --- a/kube/services/fenceshib/fenceshib-configmap.yaml +++ b/kube/services/fenceshib/fenceshib-configmap.yaml @@ -231,48 +231,48 @@ data: few exceptions for newer attributes where the name is the same for both versions. You will usually want to uncomment or map the names for both SAML versions as a unit. 
--> - + - + - + - + - + - + - + - + @@ -286,7 +286,7 @@ data: - + @@ -416,47 +416,51 @@ data: - MIIGeDCCBWCgAwIBAgITKwAE3xjJ0BmsXYl8hwAAAATfGDANBgkqhkiG9w0BAQsF - ADBOMRUwEwYKCZImiZPyLGQBGRYFTE9DQUwxHDAaBgoJkiaJk/IsZAEZFgxESEhT - U0VDVVJJVFkxFzAVBgNVBAMTDk5JSC1EUEtJLUNBLTFBMB4XDTIxMDMyMzEwMjMz - MloXDTIzMDMyMzEwMjMzMlowcDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAk1EMREw - DwYDVQQHEwhCZXRoZXNkYTEMMAoGA1UEChMDSEhTMQwwCgYDVQQLEwNOSUgxJTAj - BgNVBAMTHHdhbXNpZ25pbmdmZWRlcmF0aW9uLm5paC5nb3YwggEiMA0GCSqGSIb3 - DQEBAQUAA4IBDwAwggEKAoIBAQDrng8ItLe/PdN7+GT50g0xd4Kc5zVLk5JhHV/M - C0ICo3ulYpNnK8f0vGYvKXhG9B4gyYjjAVgY8dHL1Yi9Vw4OCMHiAhT80qidFhah - xdcz8EaKWueqlMV+SZ8/6luahSmYYjKHAxICMg253gHsG6A64pWBsf58fzOYeEV/ - HIItkthIJ7Rh71gXeZwmcir3fAve1sQXrgXsRb265yFQaxLrRI+QA7k+Tiemlt4+ - 7wBOXdROm0kxGJT6u6+IG8g2Qdbc1JWaAmwROGCByREQzfMNUVpXCXJHhKSrHype - z8Z0o4p2sLXyOysbBAmNoShMhvaaPlsrJt7PyDN5uj6KaXNNAgMBAAGjggMrMIID - JzAdBgNVHQ4EFgQUb/4wTaSXJ6P1tAmI8mWJhMv1VHowHwYDVR0jBBgwFoAUeWw4 - jBnSyRkHcaYQ+YnwrdCDBZMwggESBgNVHR8EggEJMIIBBTCCAQGggf6ggfuGgcFs - ZGFwOi8vL0NOPU5JSC1EUEtJLUNBLTFBLENOPU5JSERQS0lDQVNWQyxDTj1DRFAs - Q049UHVibGljJTIwS2V5JTIwU2VydmljZXMsQ049U2VydmljZXMsQ049Q29uZmln - dXJhdGlvbixEQz1ESEhTU0VDVVJJVFksREM9TE9DQUw/Y2VydGlmaWNhdGVSZXZv - Y2F0aW9uTGlzdD9iYXNlP29iamVjdENsYXNzPWNSTERpc3RyaWJ1dGlvblBvaW50 - hjVodHRwOi8vTklIRFBLSUNSTC5OSUguR09WL0NlcnREYXRhL05JSC1EUEtJLUNB - LTFBLmNybDCCATkGCCsGAQUFBwEBBIIBKzCCAScwgbQGCCsGAQUFBzAChoGnbGRh - cDovLy9DTj1OSUgtRFBLSS1DQS0xQSxDTj1BSUEsQ049UHVibGljJTIwS2V5JTIw - U2VydmljZXMsQ049U2VydmljZXMsQ049Q29uZmlndXJhdGlvbixEQz1ESEhTU0VD - VVJJVFksREM9TE9DQUw/Y0FDZXJ0aWZpY2F0ZT9iYXNlP29iamVjdENsYXNzPWNl - cnRpZmljYXRpb25BdXRob3JpdHkwQQYIKwYBBQUHMAKGNWh0dHA6Ly9OSUhEUEtJ - Q1JMLk5JSC5HT1YvQ2VydERhdGEvTklILURQS0ktQ0EtMUEuY3J0MCsGCCsGAQUF - BzABhh9odHRwOi8vTklIRFBLSU9DU1AuTklILkdPVi9vY3NwMAsGA1UdDwQEAwIF - oDA9BgkrBgEEAYI3FQcEMDAuBiYrBgEEAYI3FQiHscIohpH8F4b5jwiG7rxzgbud - JR2F39lChY/gIQIBZQIBJDAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwEw - JwYJKwYBBAGCNxUKBBowGDAKBggrBgEFBQcDAjAKBggrBgEFBQcDATANBgkqhkiG - 9w0BAQsFAAOCAQEAkgyJY5Pdyz7hF83hu9BsijKHOdMWe8fDyN7GsDR1O0URBuJW - oK7FsemmITwMCiDhH+NDkrRWM27EQhuv4w4yIUIFVqPeJS+Ff3gKyqB/VNcrDbfc - 1RU7Q0qyxwpItm/cEUTTTnfNppf/O6wn/FUbpvPbHMNukqhjtbiYJrmKcO1U0lEu - i7FlnPW6rRmEbhp/bChVJMkxw8sBH4K3Vrx9c15nPuBgv4E1cFLe1rwrt3wEeRlU - OaWMTbLwYBaBo2BC3iDHzWioSl4OtzItEkT5XxNOhViuoty09Tu5zd7byqiV7To3 - YVc+Yi/VBubgB+osvPXPAv0AQCLo88dO7MBWQg== + MIIGrDCCBZSgAwIBAgITKwAL5UokKuFiZ7VPlQAAAAvlSjANBgkqhkiG9w0B + AQsFADBOMRUwEwYKCZImiZPyLGQBGRYFTE9DQUwxHDAaBgoJkiaJk/IsZAEZ + FgxESEhTU0VDVVJJVFkxFzAVBgNVBAMTDk5JSC1EUEtJLUNBLTFBMB4XDTIy + MTIwNjE2NTUzNloXDTI0MTIwNTE2NTUzNlowgaMxCzAJBgNVBAYTAlVTMREw + DwYDVQQIEwhNYXJ5bGFuZDERMA8GA1UEBxMIQmV0aGVzZGExDDAKBgNVBAoT + A05JSDEMMAoGA1UECxMDQ0lUMSUwIwYDVQQDExx3YW1zaWduaW5nZmVkZXJh + dGlvbi5uaWguZ292MSswKQYJKoZIhvcNAQkBFhxuaWhsb2dpbnN1cHBvcnRA + bWFpbC5uaWguZ292MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA + o3aHcoq0SAof+GXCl6aZOw9w8CrWTSxz3hxEvG2RaJ4Bm0+UQEcQHArCiQ+Y + Wjmx8eORRwOblQKmcozpQAOxNRu7fbJn8msdryKdju+nBJg/gn0Ygn44EJEq + pZmBn+FBRgH/lADRdpLM8uO654i1x5Pr8TQtNMevGNot8oiacOZkB1A5N6+l + 4guxToA2ZuNhHRhwrpd1wIyq6sgY3J8XpWlx54HjDc8bZvia0bEhJns/qZpM + mAh5wvIP1I2JngqJ55mpl/btbIXX+uTn3tIomWre3KKjDKh9ZjUQom8VqTzp + oGYHSjTExuopsHnnVpC1HTW0QJoxFa5yR1f2fiUTZwIDAQABo4IDKzCCAycw + HQYDVR0OBBYEFMqGnTB0W0rFy8tD2y6JnApAzRCyMB8GA1UdIwQYMBaAFHls + OIwZ0skZB3GmEPmJ8K3QgwWTMIIBEgYDVR0fBIIBCTCCAQUwggEBoIH+oIH7 + hoHBbGRhcDovLy9DTj1OSUgtRFBLSS1DQS0xQSxDTj1OSUhEUEtJQ0FTVkMs + Q049Q0RQLENOPVB1YmxpYyUyMEtleSUyMFNlcnZpY2VzLENOPVNlcnZpY2Vz + 
LENOPUNvbmZpZ3VyYXRpb24sREM9REhIU1NFQ1VSSVRZLERDPUxPQ0FMP2Nl + cnRpZmljYXRlUmV2b2NhdGlvbkxpc3Q/YmFzZT9vYmplY3RDbGFzcz1jUkxE + aXN0cmlidXRpb25Qb2ludIY1aHR0cDovL05JSERQS0lDUkwuTklILkdPVi9D + ZXJ0RGF0YS9OSUgtRFBLSS1DQS0xQS5jcmwwggE5BggrBgEFBQcBAQSCASsw + ggEnMIG0BggrBgEFBQcwAoaBp2xkYXA6Ly8vQ049TklILURQS0ktQ0EtMUEs + Q049QUlBLENOPVB1YmxpYyUyMEtleSUyMFNlcnZpY2VzLENOPVNlcnZpY2Vz + LENOPUNvbmZpZ3VyYXRpb24sREM9REhIU1NFQ1VSSVRZLERDPUxPQ0FMP2NB + Q2VydGlmaWNhdGU/YmFzZT9vYmplY3RDbGFzcz1jZXJ0aWZpY2F0aW9uQXV0 + aG9yaXR5MEEGCCsGAQUFBzAChjVodHRwOi8vTklIRFBLSUNSTC5OSUguR09W + L0NlcnREYXRhL05JSC1EUEtJLUNBLTFBLmNydDArBggrBgEFBQcwAYYfaHR0 + cDovL05JSERQS0lPQ1NQLk5JSC5HT1Yvb2NzcDALBgNVHQ8EBAMCBaAwPQYJ + KwYBBAGCNxUHBDAwLgYmKwYBBAGCNxUIh7HCKIaR/BeG+Y8Ihu68c4G7nSUd + gZOnCYKOiSECAWQCAUwwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMC + MCcGCSsGAQQBgjcVCgQaMBgwCgYIKwYBBQUHAwEwCgYIKwYBBQUHAwIwDQYJ + KoZIhvcNAQELBQADggEBAGxvrAxX3RUmFXeUa1UewCWfzWCnI3wTMKkqvmI2 + CySFEOniXNXC/hhu0i000QD9mS527u+lGqgN6eaUaEaSDXMszYR753whJ1Wf + xJ50zji2mvUWDyzdRbcvxbVfYe6h6+TzQl0gd8z1DjAxkUWydv9aAFYHNiIY + BbhPqvrlOT+oV8CYI8ghEg7qyxo1mso99aVGCbnBA+6IC+jt8lvwQYFISW8J + lxJbz5P9fyAbQFuMvcvSkx1WWCCK+d3WsLzU2JETjmYNoID5skFaIfrq+rV1 + nBqQfCSKApojRaUMwn83IRcosSu0Y3dhpmxz2oDkOURbwOkuPJRgYnZRLBDn + e50= - + urn:oasis:names:tc:SAML:2.0:nameid-format:persistent - + diff --git a/kube/services/fluentd/gen3-1.15.3.conf b/kube/services/fluentd/gen3-1.15.3.conf new file mode 100644 index 000000000..d9b6bed5d --- /dev/null +++ b/kube/services/fluentd/gen3-1.15.3.conf @@ -0,0 +1,231 @@ +# +# Gen3 customization of fluent config. +# - tries to extract structure from gen3 service logs +# - includes the default conf at the bottom - just adds prefix rules +# +# Deploy by: +# - mount this file into the container at /fluentd/etc/gen3.conf +# - set environment variable FLUENTD_CONF=gen3.conf +# +# https://www.fluentd.org/guides/recipes/docker-logging +# https://docs.fluentd.org/v0.12/articles/config-file#introduction:-the-life-of-a-fluentd-event +# https://docs.fluentd.org/v1.0/articles/out_rewrite_tag_filter + + + + + + @type tail + @id in_tail_container_logs + path /var/log/containers/*.log + pos_file /var/log/fluentd-containers.log.pos + tag "#{ENV['FLUENT_CONTAINER_TAIL_TAG'] || 'kubernetes.*'}" + exclude_path "#{ENV['FLUENT_CONTAINER_TAIL_EXCLUDE_PATH'] || use_default}" + read_from_head true + + @type "#{ENV['FLUENT_CONTAINER_TAIL_PARSER_TYPE'] || 'json'}" + time_format %Y-%m-%dT%H:%M:%S.%NZ + + + + + @type tail + path /var/log/messages + pos_file /var/log/host-messages.log.pos + + @type syslog + + tag host.messages + + + + + @type tail + path /var/log/secure + pos_file /var/log/host-secure.log.pos + + @type syslog + + tag host.secure + + + + @type tail + @id in_tail_docker + path /var/log/docker.log + pos_file /var/log/fluentd-docker.log.pos + tag docker + + @type regexp + expression /^time="(? 
+ + + + + @type tail + @id in_tail_kubelet + multiline_flush_interval 5s + path /var/log/kubelet.log + pos_file /var/log/fluentd-kubelet.log.pos + tag kubelet + + @type kubernetes + + + + + + + @type kubernetes_metadata + @id filter_kube_metadata + kubernetes_url "#{ENV['FLUENT_FILTER_KUBERNETES_URL'] || 'https://' + ENV.fetch('KUBERNETES_SERVICE_HOST') + ':' + ENV.fetch('KUBERNETES_SERVICE_PORT') + '/api'}" + verify_ssl "#{ENV['KUBERNETES_VERIFY_SSL'] || true}" + ca_file "#{ENV['KUBERNETES_CA_FILE']}" + skip_labels "#{ENV['FLUENT_KUBERNETES_METADATA_SKIP_LABELS'] || 'false'}" + skip_container_metadata "#{ENV['FLUENT_KUBERNETES_METADATA_SKIP_CONTAINER_METADATA'] || 'false'}" + skip_master_url "#{ENV['FLUENT_KUBERNETES_METADATA_SKIP_MASTER_URL'] || 'false'}" + skip_namespace_metadata "#{ENV['FLUENT_KUBERNETES_METADATA_SKIP_NAMESPACE_METADATA'] || 'false'}" + + + + @type null + + + + @type null + + + + @type rewrite_tag_filter + + key $._HOSTNAME + pattern ^(.+)$ + tag $1.docker + + + + + @type rewrite_tag_filter + + key $._HOSTNAME + pattern ^(.+)$ + tag $1.kubelet + + + + + @type rewrite_tag_filter + + key $.host + pattern ^(.+)$ + tag $1.messages + + + + + @type rewrite_tag_filter + + key $.host + pattern ^(.+)$ + tag $1.secure + + + + + @type rewrite_tag_filter + + # json structured log - consider adopting a standard json schema: + # https://github.com/timberio/log-event-json-schema + key message + pattern /^\{\s*"gen3log":/ + tag kubernetes.gen3.json.${tag} + + + # combined log format - default Apache and nginx structure + # https://httpd.apache.org/docs/1.3/logs.html#combined + key message + pattern /^(((\d+\.\d+\.\d+\.\d+)|-)\s+){2}\S+\s+\[\d\d?\// + tag kubernetes.gen3.combined.${tag} + + + # unstructured log line + key message + pattern /\S/ + tag kubernetes.gen3.raw.${tag} + + + + + + @type record_transformer + + log_type json + # This one doesn't work for whatever reason: if you use ${record["kubernetes"]} the whole blob gets added, but subobjects can't be accessed + #container_name ${record["kubernetes"]["container_name"]} + + + + + @type record_transformer + + log_type combined + + + + + @type record_transformer + + log_type raw + + + + + @type rewrite_tag_filter + + key $.kubernetes.pod_name + pattern ^(.+)$ + tag "#{Time.now.strftime('%Y-%m-%d')}.$1" + +# +# key $.kubernetes +# pattern ^(.+)$ +# tag $1.container_name +# + + +# +# @type rewrite_tag_filter +# +# key $.kubernetes.container_name +# pattern ^(.+)$ + #tag $1.${tag} +# tag ${tag}.$1 +# +# + +# TODO: +# * python stack traces: "Traceback (most recent call last):" +# https://docs.fluentd.org/v0.12/articles/parser_multiline#formatn +# +# Idea: add `visitor` cookie to revproxy ... + + + + @type cloudwatch_logs + @id out_cloudwatch_logs + log_group_name "#{ENV['LOG_GROUP_NAME']}" + auto_create_stream true + use_tag_as_stream true + retention_in_days "#{ENV['RETENTION_IN_DAYS'] || 'nil'}" + json_handler yajl # To avoid UndefinedConversionError + log_rejected_request "#{ENV['LOG_REJECTED_REQUEST']}" # Log rejected request for missing parts + + + +#@include fluent.conf +#@include conf.d/*.conf diff --git a/kube/services/gen3-discovery-ai/README.md b/kube/services/gen3-discovery-ai/README.md new file mode 100644 index 000000000..4c20678e0 --- /dev/null +++ b/kube/services/gen3-discovery-ai/README.md @@ -0,0 +1,42 @@ +# Gen3 Discovery AI Configuration + +Expects data in a `gen3-discovery-ai` folder relative to +where the `manifest.json` is. 
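+ +For example (hypothetical path): with the manifest at `~/cdis-manifest/example.commons.org/manifest.json`, the knowledge data would live under `~/cdis-manifest/example.commons.org/gen3-discovery-ai/knowledge/`.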
+ +Basic setup: + +`{{dir where manifest.json is}}/gen3-discovery-ai/knowledge/` + +- `tsvs` folder + - tsvs with the topic_name at the beginning of the file name +- `markdown` folder + - {{topic_name_1}} + - markdown file(s) + - {{topic_name_2}} + - markdown file(s) + +The `kube-setup-gen3-discovery-ai` script syncs the above `/knowledge` folder to +an S3 bucket. The service configuration then pulls from the S3 bucket and runs load commands +to get the data into Chromadb. + +> Note: See the `gen3-discovery-ai` service repo docs and README for more details on data load capabilities. + +Check the `gen3-discovery-ai-deploy.yaml` for what commands are being run in the automation. + +Expects secrets set up in the `g3auto/gen3-discovery-ai` folder + - `credentials.json`: Google service account key if using a topic with Google Vertex AI + - `env`: .env file contents for service configuration (see service repo for a default one) + +## Populating Disk for In-Memory Vectordb Chromadb + +In order to set up pre-configured topics, we need to load a bunch of data +into Chromadb (which is an in-memory vectordb with an option to persist to disk). + +To load topics consistently, we set up an S3 bucket to house the persisted +data for the vectordb. + +### Getting data from S3 into memory + +We specify a path for Chromadb to use for persisted data, and when it sees +data there, it loads it in. So the deployment automation: 1. runs `aws s3 sync` against the bucket +and then 2. calls a script to load the files from there into the in-memory vectorstore. diff --git a/kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml b/kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml new file mode 100644 index 000000000..dcfe03248 --- /dev/null +++ b/kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml @@ -0,0 +1,181 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gen3-discovery-ai-deployment +spec: + selector: + # Only select pods based on the 'app' label + matchLabels: + app: gen3-discovery-ai + release: production + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + template: + metadata: + labels: + app: gen3-discovery-ai + release: production + GEN3_DATE_LABEL + spec: + serviceAccountName: gen3-discovery-ai-sa + volumes: + - name: gen3-discovery-ai-g3auto-volume + secret: + secretName: gen3-discovery-ai-g3auto + - name: gen3-discovery-ai-knowledge-library-volume + emptyDir: {} + initContainers: + # chromadb's persisted disk support requires the ability to write. We don't technically need this ability + # since we're populating the entirety of the database from configured files (no live updates). + # + # Solution: utilize emptyDir as a writable space. 
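+ # (emptyDir is node-local scratch space that is discarded with the pod; that is fine here because the data is rebuilt from S3 on every startup.)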
+ # + # Procedure: in init containers, copy files from s3 to writable + # temporary space in emptyDir, use files from writable space + # to load into knowledge library, move final knowledge library + # files into top-level emptyDir and make available in final container - name: gen3-discovery-ai-aws-init + GEN3_AWSHELPER_IMAGE|-image: quay.io/cdis/awshelper:master-| + imagePullPolicy: Always + ports: + - containerPort: 8080 + env: + - name: GEN3_DEBUG + GEN3_DEBUG_FLAG|-value: "False"-| + volumeMounts: + - name: gen3-discovery-ai-g3auto-volume + readOnly: true + mountPath: /gen3discoveryai/.env + subPath: env + - name: gen3-discovery-ai-g3auto-volume + readOnly: true + mountPath: /gen3discoveryai/credentials.json + subPath: credentials.json + - name: gen3-discovery-ai-g3auto-volume + readOnly: true + mountPath: /gen3discoveryai/storage_config.json + subPath: storage_config.json + - name: gen3-discovery-ai-knowledge-library-volume + mountPath: /gen3discoveryai/knowledge + imagePullPolicy: Always + resources: + requests: + cpu: 1 + limits: + cpu: 2 + memory: 512Mi + command: ["/bin/bash"] + args: + - "-c" + - | + bucketName=$(grep -o "\"bucket\": *\"[^\"]*\"" /gen3discoveryai/storage_config.json | awk -F'"' '{print $4}') + echo BUCKET: "$bucketName" + echo + echo BEFORE /gen3discoveryai/knowledge + ls -Ra /gen3discoveryai/knowledge + echo + echo syncing from s3 + aws s3 sync "s3://${bucketName}" "/gen3discoveryai/knowledge/tmp" + echo + echo AFTER /gen3discoveryai/knowledge + ls -Ra /gen3discoveryai/knowledge + - name: gen3-discovery-ai-knowledge-init + GEN3_GEN3-DISCOVERY-AI_IMAGE + imagePullPolicy: Always + ports: + - containerPort: 8080 + env: + - name: GEN3_DEBUG + GEN3_DEBUG_FLAG|-value: "False"-| + - name: ANONYMIZED_TELEMETRY + value: "False" + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /gen3discoveryai/credentials.json + volumeMounts: + - name: gen3-discovery-ai-g3auto-volume + readOnly: true + mountPath: /gen3discoveryai/.env + subPath: env + - name: gen3-discovery-ai-g3auto-volume + readOnly: true + mountPath: /gen3discoveryai/credentials.json + subPath: credentials.json + - name: gen3-discovery-ai-g3auto-volume + readOnly: true + mountPath: /gen3discoveryai/storage_config.json + subPath: storage_config.json + - name: gen3-discovery-ai-knowledge-library-volume + mountPath: /gen3discoveryai/knowledge + imagePullPolicy: Always + resources: + requests: + cpu: 1 + limits: + cpu: 2 + memory: 512Mi + command: ["/bin/bash"] + args: + - "-c" + - | + echo + echo BEFORE /gen3discoveryai/knowledge + ls -Ra /gen3discoveryai/knowledge + echo running load_into_knowledge_store.py + poetry run python /gen3discoveryai/bin/load_into_knowledge_store.py tsvs /gen3discoveryai/knowledge/tmp/tsvs + + if [ -d "/gen3discoveryai/knowledge/tmp/markdown" ]; then + for dir in "/gen3discoveryai/knowledge/tmp/markdown"/*; do + if [ -d "$dir" ]; then + dir_name=$(basename "$dir") + + echo "Processing directory: $dir_name. 
Full path: $dir" + poetry run python /gen3discoveryai/bin/load_into_knowledge_store.py markdown --topic $dir_name $dir + fi + done + else + echo "Not syncing markdown, directory not found: /gen3discoveryai/knowledge/tmp/markdown" + fi + + rm -r /gen3discoveryai/knowledge/tmp/ + echo + echo AFTER /gen3discoveryai/knowledge + ls -Ra /gen3discoveryai/knowledge + containers: + - name: gen3-discovery-ai + GEN3_GEN3-DISCOVERY-AI_IMAGE + imagePullPolicy: Always + ports: + - containerPort: 8080 + env: + - name: GEN3_DEBUG + GEN3_DEBUG_FLAG|-value: "False"-| + - name: ANONYMIZED_TELEMETRY + value: "False" + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /gen3discoveryai/credentials.json + volumeMounts: + - name: gen3-discovery-ai-g3auto-volume + readOnly: true + mountPath: /gen3discoveryai/.env + subPath: env + - name: gen3-discovery-ai-g3auto-volume + readOnly: true + mountPath: /gen3discoveryai/credentials.json + subPath: credentials.json + - name: gen3-discovery-ai-g3auto-volume + readOnly: true + mountPath: /gen3discoveryai/storage_config.json + subPath: storage_config.json + - name: gen3-discovery-ai-knowledge-library-volume + mountPath: /gen3discoveryai/knowledge + imagePullPolicy: Always + resources: + requests: + cpu: 1 + limits: + cpu: 2 + # NOTE: If the configured data for the knowledge library (vector database) is large, you may need to bump this + memory: 512Mi diff --git a/kube/services/gen3-discovery-ai/gen3-discovery-ai-service.yaml b/kube/services/gen3-discovery-ai/gen3-discovery-ai-service.yaml new file mode 100644 index 000000000..b4734c3b8 --- /dev/null +++ b/kube/services/gen3-discovery-ai/gen3-discovery-ai-service.yaml @@ -0,0 +1,21 @@ +kind: Service +apiVersion: v1 +metadata: + name: gen3-discovery-ai-service +spec: + selector: + app: gen3-discovery-ai + release: production + ports: + - protocol: TCP + port: 80 + targetPort: 8089 + name: http + nodePort: null + - protocol: TCP + port: 443 + targetPort: 443 + name: https + nodePort: null + type: ClusterIP + diff --git a/kube/services/guppy/guppy-deploy.yaml b/kube/services/guppy/guppy-deploy.yaml index 01a8905de..c3e8d121c 100644 --- a/kube/services/guppy/guppy-deploy.yaml +++ b/kube/services/guppy/guppy-deploy.yaml @@ -155,6 +155,6 @@ spec: resources: requests: cpu: 100m - memory: 128Mi + memory: 256Mi limits: - memory: 1200Mi + memory: 2000Mi diff --git a/kube/services/ingress/ingress.yaml b/kube/services/ingress/ingress.yaml index 3ceacf608..1db08e8ef 100644 --- a/kube/services/ingress/ingress.yaml +++ b/kube/services/ingress/ingress.yaml @@ -11,7 +11,7 @@ metadata: alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]' alb.ingress.kubernetes.io/load-balancer-attributes: idle_timeout.timeout_seconds=600 alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}' - alb.ingress.kubernetes.io/ssl-policy: "ELBSecurityPolicy-TLS-1-2-2017-01" + alb.ingress.kubernetes.io/ssl-policy: ELBSecurityPolicy-TLS13-1-2-FIPS-2023-04 spec: ingressClassName: alb rules: diff --git a/kube/services/jenkins/jenkins-deploy.yaml b/kube/services/jenkins/jenkins-deploy.yaml index c0eae2040..954e996f2 100644 --- a/kube/services/jenkins/jenkins-deploy.yaml +++ b/kube/services/jenkins/jenkins-deploy.yaml @@ -38,6 +38,10 @@ spec: operator: In values: - on-demand + - key: topology.kubernetes.io/zone + operator: In + values: + - us-east-1a serviceAccountName: jenkins-service securityContext: runAsUser: 1000 diff --git 
a/kube/services/jenkins2-ci-worker/jenkins2-agent-service.yaml b/kube/services/jenkins2-ci-worker/jenkins2-agent-service.yaml new file mode 100644 index 000000000..7f4e58109 --- /dev/null +++ b/kube/services/jenkins2-ci-worker/jenkins2-agent-service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + name: jenkins-agent-service + name: jenkins-agent + namespace: default +spec: + ports: + - name: slavelistener + port: 50000 + protocol: TCP + targetPort: 50000 + selector: + app: jenkins + sessionAffinity: None + type: ClusterIP diff --git a/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml b/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml new file mode 100644 index 000000000..3dea38a5c --- /dev/null +++ b/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml @@ -0,0 +1,149 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: jenkins-ci-worker-deployment +spec: + selector: + # Only select pods based on the 'app' label + matchLabels: + app: jenkins-ci-worker + template: + metadata: + labels: + app: jenkins-ci-worker + # for network policy + netnolimit: "yes" + annotations: + "cluster-autoscaler.kubernetes.io/safe-to-evict": "false" + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND + - matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + serviceAccountName: jenkins-service + securityContext: + runAsUser: 1000 + fsGroup: 1000 + initContainers: + - args: + - -c + - | + # fix permissions for /var/run/docker.sock + chmod 666 /var/run/docker.sock + echo "done" + command: + - /bin/bash + image: quay.io/cdis/awshelper:master + imagePullPolicy: Always + name: awshelper + resources: {} + securityContext: + allowPrivilegeEscalation: false + runAsUser: 0 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/run/docker.sock + name: dockersock + containers: + # + # See for details on running docker in a pod: + # https://estl.tech/accessing-docker-from-a-kubernetes-pod-68996709c04b + # + - name: jenkins-worker + image: "quay.io/cdis/gen3-ci-worker:master" + ports: + - containerPort: 8080 + env: + - name: JENKINS_URL + value: "https://jenkins2.planx-pla.net" + - name: JENKINS_SECRET + valueFrom: + secretKeyRef: + name: jenkins-ci-worker-g3auto + key: jenkins-jnlp-agent-secret + - name: JENKINS_AGENT_NAME + value: "gen3-ci-worker" + - name: JENKINS_TUNNEL + value: "jenkins-agent:50000" + - name: AWS_DEFAULT_REGION + value: us-east-1 + - name: JAVA_OPTS + value: "-Xmx3072m" + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: jenkins-secret + key: aws_access_key_id + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: jenkins-secret + key: aws_secret_access_key + - name: GOOGLE_EMAIL_AUX1 + valueFrom: + secretKeyRef: + name: google-acct1 + key: email + - name: GOOGLE_PASSWORD_AUX1 + valueFrom: + secretKeyRef: + name: google-acct1 + key: password + - name: GOOGLE_EMAIL_AUX2 + valueFrom: + secretKeyRef: + name: google-acct2 + key: email + - name: GOOGLE_PASSWORD_AUX2 + valueFrom: + secretKeyRef: + name: google-acct2 + key: password + - name: GOOGLE_APP_CREDS_JSON + valueFrom: + secretKeyRef: + name: jenkins-g3auto + key: google_app_creds.json + resources: + limits: + cpu: 0.9 + memory: 4096Mi + ephemeral-storage: 500Mi + imagePullPolicy: Always + volumeMounts: 
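+ # TLS cert/key and the CDIS CA bundle for talking to in-cluster services, plus the host docker socket fixed up by the init container above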
+ - name: "cert-volume" + readOnly: true + mountPath: "/mnt/ssl/service.crt" + subPath: "service.crt" + - name: "cert-volume" + readOnly: true + mountPath: "/mnt/ssl/service.key" + subPath: "service.key" + - name: "ca-volume" + readOnly: true + mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt" + subPath: "ca.pem" + - name: dockersock + mountPath: "/var/run/docker.sock" + imagePullPolicy: Always + volumes: + - name: cert-volume + secret: + secretName: "cert-jenkins-service" + - name: ca-volume + secret: + secretName: "service-ca" + - name: dockersock + hostPath: + path: /var/run/docker.sock diff --git a/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-pvc.yaml b/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-pvc.yaml new file mode 100644 index 000000000..047e4e966 --- /dev/null +++ b/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-pvc.yaml @@ -0,0 +1,12 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: datadir-jenkins-ci + annotations: + volume.beta.kubernetes.io/storage-class: gp2 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 200Gi diff --git a/kube/services/jenkins2/jenkins2-deploy.yaml b/kube/services/jenkins2/jenkins2-deploy.yaml index ee838bae6..08365f811 100644 --- a/kube/services/jenkins2/jenkins2-deploy.yaml +++ b/kube/services/jenkins2/jenkins2-deploy.yaml @@ -48,7 +48,7 @@ spec: # https://estl.tech/accessing-docker-from-a-kubernetes-pod-68996709c04b # - name: jenkins - GEN3_JENKINS_IMAGE + GEN3_JENKINS2_IMAGE ports: - containerPort: 8080 name: http diff --git a/kube/services/jobs/cedar-ingestion-job.yaml b/kube/services/jobs/cedar-ingestion-job.yaml index ecc83335c..f6be4dd23 100644 --- a/kube/services/jobs/cedar-ingestion-job.yaml +++ b/kube/services/jobs/cedar-ingestion-job.yaml @@ -1,19 +1,22 @@ # # run with: -# gen3 job run cedar-ingestion \ -# SUBMISSION_USER $submission_user \ -# CEDAR_DIRECTORY_ID $cedar_directory_id \ -# -# SUBMISSION_USER(optional) -# e-mail of user-account to submit the data to MDS, must have MDS admin and CEDAR polices granted. Default: "cdis.autotest@gmail.com" +# gen3 job run cedar-ingestion [CEDAR_DIRECTORY_ID $cedar_directory_id] # # CEDAR_DIRECTORY_ID -# ID of CEDAR directory where instances will be pulled from, only needs its UUID part. For example: "123e4567-e89b-12d3-a456-426614174000" +# The directory id will be read from 'directory_id.txt' in the +# 'cedar-g3auto' secret. +# You can override the secret value with an optional command line argument. +# # The deployed CEDAR wrapper services must be able to read from this directory. # -# Example -# gen3 job run cedar-ingestion CEDAR_DIRECTORY_ID 123e4567-e89b-12d3-a456-426614174000 SUBMISSION_USER cdis.autotest@gmail.com +# ACCESS TOKENS +# Access tokens will be generated for an existing fence-client, cedar_ingest_client. +# The client_id and client_secret will be read from +# 'cedar_client_credentials.json' in the 'cedar-g3auto' secret. +# +# The fence-client must have MDS admin and CEDAR polices granted. # + apiVersion: batch/v1 kind: Job metadata: @@ -44,36 +47,13 @@ spec: - ONDEMAND serviceAccountName: useryaml-job volumes: - - name: yaml-merge - configMap: - name: "fence-yaml-merge" - name: shared-data emptyDir: {} -# ----------------------------------------------------------------------------- -# DEPRECATED! Remove when all commons are no longer using local_settings.py -# for fence. 
-# ----------------------------------------------------------------------------- - - name: old-config-volume - secret: - secretName: "fence-secret" - - name: creds-volume - secret: - secretName: "fence-creds" - - name: config-helper - configMap: - name: config-helper - - name: json-secret-volume + - name: cedar-client-volume-g3auto secret: - secretName: "fence-json-secret" -# ----------------------------------------------------------------------------- - - name: config-volume - secret: - secretName: "fence-config" - - name: fence-jwt-keys - secret: - secretName: "fence-jwt-keys" - containers: - - name: awshelper + secretName: cedar-g3auto # the secret name in kube + initContainers: + - name: cedar image: quay.io/cdis/awshelper:master imagePullPolicy: Always ports: @@ -84,10 +64,18 @@ spec: configMapKeyRef: name: global key: hostname - - name: SUBMISSION_USER - GEN3_SUBMISSION_USER|-value: "cdis.autotest@gmail.com"-| - name: CEDAR_DIRECTORY_ID GEN3_CEDAR_DIRECTORY_ID|-value: ""-| + - name: CEDAR_DIRECTORY_ID_SECRET + valueFrom: + secretKeyRef: + name: cedar-g3auto + key: "directory_id.txt" + - name: CEDAR_CLIENT_CREDENTIALS + valueFrom: + secretKeyRef: + name: cedar-g3auto + key: "cedar_client_credentials.json" volumeMounts: - name: shared-data mountPath: /mnt/shared @@ -95,117 +83,75 @@ spec: limits: cpu: 1 memory: 5Gi + command: ["/bin/bash" ] args: - "-c" - | if [[ -z "$CEDAR_DIRECTORY_ID" ]]; then - echo -e "CEDAR_DIRECTORY_ID is required" 1>&2 - exit 1 + if [[ ! -z "$CEDAR_DIRECTORY_ID_SECRET" ]]; then + echo "CEDAR_DIRECTORY_ID is from g3auto secret" + export CEDAR_DIRECTORY_ID=$CEDAR_DIRECTORY_ID_SECRET + else + echo -e "ERROR: CEDAR_DIRECTORY_ID must be in secret or on command line" 1>&2 + exit 0 + fi + else + echo "CEDAR_DIRECTORY_ID is from command line parameter" + fi + + if [[ ! -z "$CEDAR_CLIENT_CREDENTIALS" ]]; then + export CEDAR_CLIENT_ID=$(echo $CEDAR_CLIENT_CREDENTIALS | jq -r .client_id) + export CEDAR_CLIENT_SECRET=$(echo $CEDAR_CLIENT_CREDENTIALS | jq -r .client_secret) + else + echo -e "Could not read cedar-client credentials" 1>&2 + exit 0 fi - let count=0 - while [[ ! -f /mnt/shared/access_token.txt && $count -lt 50 ]]; do - echo "Waiting for /mnt/shared/access_token.txt"; - sleep 2 - let count=$count+1 - done + pip install pydash export GEN3_HOME="$HOME/cloud-automation" - export ACCESS_TOKEN="$(cat /mnt/shared/access_token.txt)" - python ${GEN3_HOME}/files/scripts/healdata/heal-cedar-data-ingest.py --access_token $ACCESS_TOKEN --directory $CEDAR_DIRECTORY_ID --hostname $HOSTNAME - echo "All done - exit status $?" - - name: fence - GEN3_FENCE_IMAGE - imagePullPolicy: Always - env: - - name: PYTHONPATH - value: /var/www/fence - - name: SUBMISSION_USER - GEN3_SUBMISSION_USER|-value: "cdis.autotest@gmail.com"-| - - name: TOKEN_EXPIRATION - value: "3600" - - name: FENCE_PUBLIC_CONFIG - valueFrom: - configMapKeyRef: - name: manifest-fence - key: fence-config-public.yaml - optional: true - volumeMounts: -# ----------------------------------------------------------------------------- -# DEPRECATED! Remove when all commons are no longer using local_settings.py -# for fence. 
-# ----------------------------------------------------------------------------- - - name: "old-config-volume" - readOnly: true - mountPath: "/var/www/fence/local_settings.py" - subPath: local_settings.py - - name: "creds-volume" - readOnly: true - mountPath: "/var/www/fence/creds.json" - subPath: creds.json - - name: "config-helper" - readOnly: true - mountPath: "/var/www/fence/config_helper.py" - subPath: config_helper.py - - name: "json-secret-volume" - readOnly: true - mountPath: "/var/www/fence/fence_credentials.json" - subPath: fence_credentials.json -# ----------------------------------------------------------------------------- - - name: "config-volume" - readOnly: true - mountPath: "/var/www/fence/fence-config-secret.yaml" - subPath: fence-config.yaml - - name: "yaml-merge" - readOnly: true - mountPath: "/var/www/fence/yaml_merge.py" - subPath: yaml_merge.py - - name: "fence-jwt-keys" - readOnly: true - mountPath: "/fence/jwt-keys.tar" - subPath: "jwt-keys.tar" - - name: shared-data - mountPath: /mnt/shared - command: ["/bin/bash" ] - args: + python ${GEN3_HOME}/files/scripts/healdata/heal-cedar-data-ingest.py --directory $CEDAR_DIRECTORY_ID --cedar_client_id $CEDAR_CLIENT_ID --cedar_client_secret $CEDAR_CLIENT_SECRET --hostname $HOSTNAME + status=$? + if [[ $status -ne 0 ]]; then + echo "WARNING: non zero exit code: $status" + else + echo "All done - exit code: $status" + touch /mnt/shared/success + fi + containers: + - name: awshelper + env: + - name: slackWebHook + valueFrom: + configMapKeyRef: + name: global + key: slack_webhook + - name: gen3Env + valueFrom: + configMapKeyRef: + name: manifest-global + key: hostname + GEN3_AWSHELPER_IMAGE|-image: quay.io/cdis/awshelper:master-| + volumeMounts: + - name: shared-data + mountPath: /mnt/shared + command: ["/bin/bash"] + args: - "-c" - | - echo "${FENCE_PUBLIC_CONFIG:-""}" > "/var/www/fence/fence-config-public.yaml" - python /var/www/fence/yaml_merge.py /var/www/fence/fence-config-public.yaml /var/www/fence/fence-config-secret.yaml > /var/www/fence/fence-config.yaml - if [ -f /fence/jwt-keys.tar ]; then - cd /fence - tar xvf jwt-keys.tar - if [ -d jwt-keys ]; then - mkdir -p keys - mv jwt-keys/* keys/ - fi + if [[ ! "$slackWebHook" =~ ^http ]]; then + echo "Slack webhook not set" + exit 0 fi - echo "generate access token" - echo "fence-create --path fence token-create --type access_token --username $SUBMISSION_USER --scopes openid,user,test-client --exp $TOKEN_EXPIRATION" - tempFile="$(mktemp -p /tmp token.txt_XXXXXX)" - success=false - count=0 - sleepTime=10 - # retry loop - while [[ $count -lt 3 && $success == false ]]; do - if fence-create --path fence token-create --type access_token --username $SUBMISSION_USER --scopes openid,user,test-client --exp $TOKEN_EXPIRATION > "$tempFile"; then - echo "fence-create success!" - tail -1 "$tempFile" > /mnt/shared/access_token.txt - # base64 --decode complains about invalid characters - don't know why - awk -F . '{ print $2 }' /mnt/shared/access_token.txt | base64 --decode 2> /dev/null - success=true - else - echo "fence-create failed!" - cat "$tempFile" - echo "sleep for $sleepTime, then retry" - sleep "$sleepTime" - let sleepTime=$sleepTime+$sleepTime - fi - let count=$count+1 - done - if [[ $success != true ]]; then - echo "Giving up on fence-create after $count retries - failed to create valid access token" + if ! 
[ -f /mnt/shared/success ]; then + success="FAILED" + color="ff0000" + else + success="SUCCESS" + color="2EB67D" fi - echo "" - echo "All Done - always succeed to avoid k8s retries" + echo "Sending ${success} message to slack..." + payload="{\"attachments\": [{\"fallback\": \"JOB ${success}: cedar-ingest cronjob on ${gen3Env}\",\"color\": \"#${color}\",\"title\": \"JOB ${success}: cedar-ingest cronjob on ${gen3Env}\",\"text\": \"Pod name: ${HOSTNAME}\",\"ts\": \"$(date +%s)\"}]}" + echo "Payload=${payload}" + curl -X POST --data-urlencode "payload=${payload}" "${slackWebHook}" restartPolicy: Never diff --git a/kube/services/jobs/distribute-licenses-job.yaml b/kube/services/jobs/distribute-licenses-job.yaml index aef52c75c..1c2ad4284 100644 --- a/kube/services/jobs/distribute-licenses-job.yaml +++ b/kube/services/jobs/distribute-licenses-job.yaml @@ -48,10 +48,11 @@ spec: configMapKeyRef: name: manifest-hatchery key: "user-namespace" - - name: GEN3_LICENSE_SECRET_NAME - value: stata-workspace-gen3-license - - name: GEN3_LICENSE_KEY - value: licenseSecrets + - name: GEN3_STATA_LICENSE + valueFrom: + secretKeyRef: + name: stata-workspace-gen3-license-g3auto + key: "stata_license.txt" command: ["python"] args: - "-c" @@ -100,19 +101,10 @@ spec: used_licenses.sort() print(f"Licenses currently in use: {used_licenses}") - # The license keys should be stored in a kubernetes secret. + # The Gen3 Stata license strings should be stored in a kubernetes secret using g3auto. # The format of the secret is one license string per line. # The license strings are generated with 'stinit' using the information in a license PDF. - # The secret can be generated from a temporary file with a kubectl command, eg - # kubectl create secret generic GEN3_LICENSE_SECRET_NAME --from-file=GEN3_LICENSE_KEY=/path/to/file.lic - - # Get license from kubernetes secret - print("Ready to read secret") - secret_name = os.environ['GEN3_LICENSE_SECRET_NAME'] - secret_key = os.environ['GEN3_LICENSE_KEY'] - license_secrets = os.popen( - f"kubectl get secret {secret_name} --template={{{{.data.{secret_key}}}}} | base64 -d" - ).read() + license_secrets = os.environ['GEN3_STATA_LICENSE'] license_secrets = license_secrets.strip() licenses = license_secrets.split("\n") diff --git a/kube/services/jobs/ecr-access-job.yaml b/kube/services/jobs/ecr-access-job.yaml new file mode 100644 index 000000000..89bb49d6d --- /dev/null +++ b/kube/services/jobs/ecr-access-job.yaml @@ -0,0 +1,83 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: ecr-access +spec: + template: + metadata: + labels: + app: gen3job + spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND + restartPolicy: Never + serviceAccountName: ecr-access-job-sa + securityContext: + fsGroup: 1000 + containers: + - name: awshelper + GEN3_AWSHELPER_IMAGE|-image: quay.io/cdis/awshelper:master-| + imagePullPolicy: Always + resources: + limits: + cpu: 0.5 + memory: 1Gi + env: + - name: SLACK_WEBHOOK + valueFrom: + configMapKeyRef: + name: global + key: slack_webhook + optional: true + - name: HOSTNAME + valueFrom: + configMapKeyRef: + name: global + key: hostname + - name: PAY_MODELS_DYNAMODB_TABLE + valueFrom: + configMapKeyRef: + name: manifest-hatchery + key: pay-models-dynamodb-table + optional: true + - name: 
ECR_ACCESS_JOB_ARN + valueFrom: + configMapKeyRef: + name: manifest-global + key: ecr-access-job-role-arn + optional: true + command: ["/bin/bash"] + args: + - "-c" + - | + cd cloud-automation/files/scripts/ + echo Installing requirements... + pip3 install -r ecr-access-job-requirements.txt + python3 ecr-access-job.py + exitcode=$? + + if [[ "${SLACK_WEBHOOK}" != 'None' ]]; then + if [[ $exitcode == 1 ]]; then + curl -X POST --data-urlencode "payload={\"text\": \"JOBFAIL: ECR access job on ${HOSTNAME}\"}" "${SLACK_WEBHOOK}" + else + curl -X POST --data-urlencode "payload={\"text\": \"SUCCESS: ECR access job on ${HOSTNAME}\"}" "${SLACK_WEBHOOK}" + fi + fi + + echo "Exit code: $exitcode" + exit "$exitcode" diff --git a/kube/services/jobs/etl-job.yaml b/kube/services/jobs/etl-job.yaml index fa201c99a..6b9b887ec 100644 --- a/kube/services/jobs/etl-job.yaml +++ b/kube/services/jobs/etl-job.yaml @@ -2,6 +2,8 @@ apiVersion: batch/v1 kind: Job metadata: + annotations: + karpenter.sh/do-not-evict: "true" name: etl spec: backoffLimit: 0 diff --git a/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml b/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml index 74d7fc9a4..93eaf7652 100644 --- a/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml +++ b/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml @@ -1,5 +1,5 @@ --- -apiVersion: batch/v1beta1 +apiVersion: batch/v1 kind: CronJob metadata: name: fence-cleanup-expired-ga4gh-info diff --git a/kube/services/jobs/psql-db-dump-va-testing-job.yaml b/kube/services/jobs/psql-db-dump-va-testing-job.yaml new file mode 100644 index 000000000..8a8037e16 --- /dev/null +++ b/kube/services/jobs/psql-db-dump-va-testing-job.yaml @@ -0,0 +1,80 @@ +--- +# NOTE: This job was created specifically to dump all the databases in va-testing, in preparation for a move to a second cluster +# If you aren't doing that, this probably is not the job you're looking for +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-db-dump-va-testing +spec: + template: + metadata: + labels: + app: gen3job + spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND + serviceAccountName: dbbackup-sa + containers: + - name: pgdump + image: quay.io/cdis/awshelper:master + imagePullPolicy: Always + env: + - name: gen3Env + valueFrom: + configMapKeyRef: + name: global + key: environment + - name: JENKINS_HOME + value: "devterm" + - name: GEN3_HOME + value: /home/ubuntu/cloud-automation + command: ["/bin/bash"] + args: + - "-c" + - | + source "${GEN3_HOME}/gen3/lib/utils.sh" + gen3_load "gen3/gen3setup" + account_id=$(aws sts get-caller-identity --query "Account" --output text) + default_bucket_name="gen3-db-backups-${account_id}" + default_databases=("fence" "indexd" "sheepdog" "peregrine" "arborist" "argo" "atlas" "metadata" "ohdsi" "omop-data" "wts") + s3_dir="va-testing-$(date +"%Y-%m-%d-%H-%M-%S")" + databases=("${default_databases[@]}") + bucket_name=$default_bucket_name + + for database in "${databases[@]}"; do + gen3_log_info "Starting database backup for ${database}" + gen3 db backup "${database}" > "${database}.sql" + + if [ $? 
-eq 0 ] && [ -f "${database}.sql" ]; then + gen3_log_info "Uploading backup file ${database}.sql to s3://${bucket_name}/${s3_dir}/${database}.sql" + aws s3 cp "${database}.sql" "s3://${bucket_name}/${s3_dir}/${database}.sql" + + if [ $? -eq 0 ]; then + gen3_log_info "Successfully uploaded ${database}.sql to S3" + else + gen3_log_err "Failed to upload ${database}.sql to S3" + fi + gen3_log_info "Deleting temporary backup file ${database}.sql" + rm -f "${database}.sql" + else + gen3_log_err "Backup operation failed for ${database}" + rm -f "${database}.sql" + fi + done + sleep 600 + restartPolicy: Never diff --git a/kube/services/jobs/psql-db-prep-dump-job.yaml b/kube/services/jobs/psql-db-prep-dump-job.yaml new file mode 100644 index 000000000..86c513b78 --- /dev/null +++ b/kube/services/jobs/psql-db-prep-dump-job.yaml @@ -0,0 +1,79 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-db-prep-dump +spec: + template: + metadata: + labels: + app: gen3job + spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND + serviceAccountName: dbbackup-sa + containers: + - name: pgdump + image: quay.io/cdis/awshelper:master + imagePullPolicy: Always + env: + - name: gen3Env + valueFrom: + configMapKeyRef: + name: global + key: environment + - name: JENKINS_HOME + value: "devterm" + - name: GEN3_HOME + value: /home/ubuntu/cloud-automation + command: [ "/bin/bash" ] + args: + - "-c" + - | + source "${GEN3_HOME}/gen3/lib/utils.sh" + gen3_load "gen3/gen3setup" + account_id=$(aws sts get-caller-identity --query "Account" --output text) + default_bucket_name="gen3-db-backups-${account_id}" + default_databases=("indexd" "sheepdog" "metadata") + s3_dir="$(date +"%Y-%m-%d-%H-%M-%S")" + databases=("${default_databases[@]}") + bucket_name=$default_bucket_name + + for database in "${databases[@]}"; do + gen3_log_info "Starting database backup for ${database}" + gen3 db backup "${database}" > "${database}.sql" + + if [ $? -eq 0 ] && [ -f "${database}.sql" ]; then + gen3_log_info "Uploading backup file ${database}.sql to s3://${bucket_name}/${s3_dir}/${database}.sql" + aws s3 cp "${database}.sql" "s3://${bucket_name}/${s3_dir}/${database}.sql" + + if [ $? 
-eq 0 ]; then + gen3_log_info "Successfully uploaded ${database}.sql to S3" + else + gen3_log_err "Failed to upload ${database}.sql to S3" + fi + gen3_log_info "Deleting temporary backup file ${database}.sql" + rm -f "${database}.sql" + else + gen3_log_err "Backup operation failed for ${database}" + rm -f "${database}.sql" + fi + done + sleep 600 + restartPolicy: Never + diff --git a/kube/services/jobs/psql-db-prep-restore-job.yaml b/kube/services/jobs/psql-db-prep-restore-job.yaml new file mode 100644 index 000000000..710e6f4f1 --- /dev/null +++ b/kube/services/jobs/psql-db-prep-restore-job.yaml @@ -0,0 +1,90 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-db-prep-restore +spec: + template: + metadata: + labels: + app: gen3job + spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND + serviceAccountName: dbbackup-sa + containers: + - name: pgrestore + image: quay.io/cdis/awshelper:master + imagePullPolicy: Always + env: + - name: gen3Env + valueFrom: + configMapKeyRef: + name: global + key: environment + - name: JENKINS_HOME + value: "devterm" + - name: GEN3_HOME + value: /home/ubuntu/cloud-automation + command: [ "/bin/bash" ] + args: + - "-c" + - | + source "${GEN3_HOME}/gen3/lib/utils.sh" + gen3_load "gen3/gen3setup" + account_id=$(aws sts get-caller-identity --query "Account" --output text) + default_bucket_name="gen3-db-backups-${account_id}" + default_databases=("indexd" "sheepdog" "metadata") + backup_directories=$(aws s3 ls "s3://${default_bucket_name}/") + newest_directory=$(echo "$backup_directories" | awk '/PRE/ {if ($2 > max) max = $2} END {print max}') + databases=("${default_databases[@]}") + bucket_name=$default_bucket_name + namespace=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace) + date_str=$(date -u +%y%m%d_%H%M%S) + gen3_log_info "Database backup location in S3: ${bucket_name}/${newest_directory}" + gen3_log_info "namespace: $namespace \n\n" + + for database in "${databases[@]}"; do + gen3_log_info "Downloading database backup file s3://${default_bucket_name}/${newest_directory}${database}.sql" + aws s3 cp "s3://${default_bucket_name}/${newest_directory}${database}.sql" "${database}.sql" + server=$(gen3 db creds "$database" | jq -r '.g3FarmServer') + username=$(gen3 db creds "$database" | jq -r '.db_username') + db_name="${namespace}_${database}_${date_str}" + if [[ -z "$server" || -z "$username" ]]; then + gen3_log_err "Error: Unable to extract server name or username." + exit 1 + fi + gen3 psql $database -c "create database $db_name;" 2>&1 | grep -q "permission denied" + if [ $? -eq 0 ]; then + gen3_log_info "User does not have permission to create database. Granting required permission..." + gen3 psql $server -c "alter user $username createdb;" + gen3 psql $database -c "create database $db_name;" + if [ $? -eq 0 ]; then + gen3_log_info "Database $db_name created successfully!" + else + gen3_log_err "Error creating database $db_name after granting permission." + fi + else + gen3_log_info "Database $db_name created successfully!" 
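+ # NOTE: the grep above only matches "permission denied", so any other create-database failure also lands in this success branch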
+ fi + gen3_log_info "Starting database restore for ${database} to database $db_name" + gen3 psql "$database" -d "$db_name" -f "${database}.sql" 1>&2 + gen3_log_info "cleanup temporary backup file ${database}.sql \n\n\n" + done + sleep 600 + restartPolicy: Never diff --git a/kube/services/jobs/usersync-job.yaml b/kube/services/jobs/usersync-job.yaml index 8f148a3b0..8a5471a20 100644 --- a/kube/services/jobs/usersync-job.yaml +++ b/kube/services/jobs/usersync-job.yaml @@ -260,7 +260,7 @@ spec: exit 1 fi #----------------- - echo "awshelper downloading ${userYamlS3Path} to /mnt/shared/useryaml"; + echo "awshelper downloading ${userYamlS3Path} to /mnt/shared/user.yaml"; n=0 until [ $n -ge 5 ]; do echo "Download attempt $n" diff --git a/kube/services/karpenter-reconciler/application.yaml b/kube/services/karpenter-reconciler/application.yaml new file mode 100644 index 000000000..fb0fab871 --- /dev/null +++ b/kube/services/karpenter-reconciler/application.yaml @@ -0,0 +1,22 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: karpenter-reconciler-application + namespace: argocd +spec: + destination: + namespace: kube-system + server: https://kubernetes.default.svc + project: default + source: + repoURL: https://github.com/uc-cdis/cloud-automation.git + targetRevision: master + path: kube/services/karpenter-reconciler + directory: + exclude: "application.yaml" + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true diff --git a/kube/services/karpenter-reconciler/auth.yaml b/kube/services/karpenter-reconciler/auth.yaml new file mode 100644 index 000000000..c159028ab --- /dev/null +++ b/kube/services/karpenter-reconciler/auth.yaml @@ -0,0 +1,44 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: karpenter-reconciler + namespace: argo-events +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: karpenter-admin-binding-reconciler +subjects: + - kind: ServiceAccount + name: karpenter-reconciler + namespace: argo-events +roleRef: + kind: ClusterRole + name: karpenter-admin + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: workflow-viewer-reconciler +subjects: + - kind: ServiceAccount + name: karpenter-reconciler + namespace: argo-events +roleRef: + kind: ClusterRole + name: argo-argo-workflows-view + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: viewer-reconciler +subjects: + - kind: ServiceAccount + name: karpenter-reconciler + namespace: argo-events +roleRef: + kind: ClusterRole + name: system:aggregate-to-view + apiGroup: rbac.authorization.k8s.io diff --git a/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml new file mode 100644 index 000000000..4f82e9d43 --- /dev/null +++ b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml @@ -0,0 +1,72 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: karpenter-reconciler-cronjob + namespace: argo-events +spec: + schedule: "*/5 * * * *" + jobTemplate: + spec: + template: + metadata: + labels: + app: gen3job + spec: + serviceAccount: karpenter-reconciler + volumes: + - name: karpenter-templates-volume + configMap: + name: karpenter-templates + containers: + - name: karpenter-reconciler + image: quay.io/cdis/awshelper + volumeMounts: + - name: karpenter-templates-volume + mountPath: 
diff --git a/kube/services/karpenter/nodeTemplateDefault.yaml b/kube/services/karpenter/nodeTemplateDefault.yaml
index 0f76a392f..6ba8b3a0f 100644
--- a/kube/services/karpenter/nodeTemplateDefault.yaml
+++ b/kube/services/karpenter/nodeTemplateDefault.yaml
@@ -11,6 +11,7 @@ spec:
     karpenter.sh/discovery: VPC_NAME
     Environment: VPC_NAME
     Name: eks-VPC_NAME-karpenter
+    purpose: default
   metadataOptions:
     httpEndpoint: enabled
     httpProtocolIPv6: disabled
@@ -23,11 +24,11 @@ spec:
       --BOUNDARY
       Content-Type: text/x-shellscript; charset="us-ascii"

-      #!/bin/bash -xe
+      #!/bin/bash -x
       instanceId=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .instanceId)
       curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys
-      aws ec2 create-tags --resources $instanceId --tags 'Key="instanceId",Value='$instanceId''
-      curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys
+
+      echo "$(jq '.registryPullQPS=0' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json

       sysctl -w fs.inotify.max_user_watches=12000
@@ -37,7 +38,14 @@ spec:
       # configure grub
       sudo /sbin/grubby --update-kernel=ALL --args="fips=1"

+      # --BOUNDARY
+      # Content-Type: text/cloud-config; charset="us-ascii"
+
+      # mounts:
+      #   - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime']
+
       --BOUNDARY
       Content-Type: text/cloud-config; charset="us-ascii"

       power_state:
diff --git a/kube/services/karpenter/nodeTemplateGPU.yaml b/kube/services/karpenter/nodeTemplateGPU.yaml
index b41e6441c..925e7a9a0 100644
--- a/kube/services/karpenter/nodeTemplateGPU.yaml
+++ b/kube/services/karpenter/nodeTemplateGPU.yaml
@@ -11,6 +11,7 @@ spec:
     Environment: VPC_NAME
     Name: eks-VPC_NAME-gpu-karpenter
    karpenter.sh/discovery: VPC_NAME
+    purpose: gpu
   metadataOptions:
     httpEndpoint: enabled
     httpProtocolIPv6: disabled
@@ -23,11 +24,11 @@ spec:
       --BOUNDARY
       Content-Type: text/x-shellscript; charset="us-ascii"

-      #!/bin/bash -xe
+      #!/bin/bash -x
       instanceId=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .instanceId)
       curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys
-      aws ec2 create-tags --resources $instanceId --tags 'Key="instanceId",Value='$instanceId''
-      curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys
+
+      echo "$(jq '.registryPullQPS=0' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json

       sysctl -w fs.inotify.max_user_watches=12000
@@ -37,6 +38,12 @@ spec:
       # configure grub
       sudo /sbin/grubby --update-kernel=ALL --args="fips=1"

+      # --BOUNDARY
+      # Content-Type: text/cloud-config; charset="us-ascii"
+
+      # mounts:
+      #   - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime']
+
       --BOUNDARY
       Content-Type: text/cloud-config; charset="us-ascii"
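A note on the kubelet edit in the user data above: `echo "$(jq ...)" > file` works because the command substitution is evaluated before the redirection truncates the file, but it silently writes an empty file if jq fails. A more defensive equivalent, under the same assumptions about the kubelet config path:

# Write to a temp file first; only replace the config if jq succeeded.
cfg=/etc/kubernetes/kubelet/kubelet-config.json
tmp=$(mktemp)
jq '.registryPullQPS=0' "$cfg" > "$tmp" && mv "$tmp" "$cfg"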
diff --git a/kube/services/karpenter/nodeTemplateJupyter.yaml b/kube/services/karpenter/nodeTemplateJupyter.yaml
index 579ac1aa3..1c8970ad6 100644
--- a/kube/services/karpenter/nodeTemplateJupyter.yaml
+++ b/kube/services/karpenter/nodeTemplateJupyter.yaml
@@ -11,6 +11,7 @@ spec:
     Environment: VPC_NAME
     Name: eks-VPC_NAME-jupyter-karpenter
     karpenter.sh/discovery: VPC_NAME
+    purpose: jupyter
   metadataOptions:
     httpEndpoint: enabled
     httpProtocolIPv6: disabled
@@ -23,11 +24,11 @@ spec:
       --BOUNDARY
       Content-Type: text/x-shellscript; charset="us-ascii"

-      #!/bin/bash -xe
+      #!/bin/bash -x
       instanceId=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .instanceId)
       curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys
-      aws ec2 create-tags --resources $instanceId --tags 'Key="instanceId",Value='$instanceId''
-      curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys
+
+      echo "$(jq '.registryPullQPS=0' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json

       sysctl -w fs.inotify.max_user_watches=12000
@@ -37,6 +38,12 @@ spec:
       # configure grub
       sudo /sbin/grubby --update-kernel=ALL --args="fips=1"

+      # --BOUNDARY
+      # Content-Type: text/cloud-config; charset="us-ascii"
+
+      # mounts:
+      #   - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime']
+
       --BOUNDARY
       Content-Type: text/cloud-config; charset="us-ascii"
diff --git a/kube/services/karpenter/nodeTemplateWorkflow.yaml b/kube/services/karpenter/nodeTemplateWorkflow.yaml
index 60481b4fc..6e47b22f9 100644
--- a/kube/services/karpenter/nodeTemplateWorkflow.yaml
+++ b/kube/services/karpenter/nodeTemplateWorkflow.yaml
@@ -11,6 +11,7 @@ spec:
     Environment: VPC_NAME
     Name: eks-VPC_NAME-workflow-karpenter
     karpenter.sh/discovery: VPC_NAME
+    purpose: workflow
   metadataOptions:
     httpEndpoint: enabled
     httpProtocolIPv6: disabled
@@ -23,11 +24,11 @@ spec:
      --BOUNDARY
       Content-Type: text/x-shellscript; charset="us-ascii"

-      #!/bin/bash -xe
+      #!/bin/bash -x
       instanceId=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .instanceId)
       curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys
-      aws ec2 create-tags --resources $instanceId --tags 'Key="instanceId",Value='$instanceId''
-      curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys
+
+      echo "$(jq '.registryPullQPS=0' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json

       sysctl -w fs.inotify.max_user_watches=12000
@@ -37,6 +38,12 @@ spec:
       # configure grub
       sudo /sbin/grubby --update-kernel=ALL --args="fips=1"

+      # --BOUNDARY
+      # Content-Type: text/cloud-config; charset="us-ascii"
+
+      # mounts:
+      #   - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime']
+
       --BOUNDARY
       Content-Type: text/cloud-config; charset="us-ascii"
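Each node template now tags its EC2 instances with a `purpose` value (default, gpu, jupyter, workflow). Assuming those tags propagate to the launched instances as written and default AWS CLI credentials/region, inventory or cost queries can slice by workload type:

# List running instances launched for Jupyter workloads by the new tag.
aws ec2 describe-instances \
  --filters "Name=tag:purpose,Values=jupyter" "Name=instance-state-name,Values=running" \
  --query 'Reservations[].Instances[].InstanceId' --output text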
diff --git a/kube/services/monitoring/values.yaml b/kube/services/monitoring/values.yaml
index ffdf92bd9..d93e5098a 100644
--- a/kube/services/monitoring/values.yaml
+++ b/kube/services/monitoring/values.yaml
@@ -1540,6 +1540,15 @@ prometheus-node-exporter:
     - --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$
   service:
     portName: http-metrics
+  affinity:
+    nodeAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        nodeSelectorTerms:
+          - matchExpressions:
+              - key: "eks.amazonaws.com/compute-type"
+                operator: NotIn
+                values:
+                  - fargate
   prometheus:
     monitor:
       enabled: true
diff --git a/kube/services/node-monitors/application.yaml b/kube/services/node-monitors/application.yaml
new file mode 100644
index 000000000..0748f7c35
--- /dev/null
+++ b/kube/services/node-monitors/application.yaml
@@ -0,0 +1,22 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: node-monitor-application
+  namespace: argocd
+spec:
+  destination:
+    namespace: default
+    server: https://kubernetes.default.svc
+  project: default
+  source:
+    repoURL: https://github.com/uc-cdis/cloud-automation.git
+    targetRevision: master
+    path: kube/services/node-monitors/
+    directory:
+      exclude: "application.yaml"
+  syncPolicy:
+    automated:
+      prune: true
+      selfHeal: true
+    syncOptions:
+      - CreateNamespace=true
diff --git a/kube/services/node-monitors/argo-monitors/application.yaml b/kube/services/node-monitors/argo-monitors/application.yaml
new file mode 100644
index 000000000..fca4ace86
--- /dev/null
+++ b/kube/services/node-monitors/argo-monitors/application.yaml
@@ -0,0 +1,22 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: node-monitor-argo-application
+  namespace: argocd
+spec:
+  destination:
+    namespace: default
+    server: https://kubernetes.default.svc
+  project: default
+  source:
+    repoURL: https://github.com/uc-cdis/cloud-automation.git
+    targetRevision: master
+    path: kube/services/node-monitors/argo-monitors/
+    directory:
+      exclude: "application.yaml"
+  syncPolicy:
+    automated:
+      prune: true
+      selfHeal: true
+    syncOptions:
+      - CreateNamespace=true
diff --git a/kube/services/node-monitors/argo-monitors/argo-node-age.yaml b/kube/services/node-monitors/argo-monitors/argo-node-age.yaml
new file mode 100644
index 000000000..890495ee0
--- /dev/null
+++ b/kube/services/node-monitors/argo-monitors/argo-node-age.yaml
@@ -0,0 +1,58 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: argo-node-age
+  namespace: default
+spec:
+  schedule: "*/5 * * * *"
+  jobTemplate:
+    spec:
+      template:
+        metadata:
+          labels:
+            app: gen3job
+        spec:
+          serviceAccountName: node-monitor
+          containers:
+            - name: kubectl
+              image: quay.io/cdis/awshelper
+              env:
+                # This is the label we want to monitor, probably will never need to change
+                - name: NODE_LABEL
+                  value: purpose=workflow
+                # This is 3 * 3600, or 3 hours
+                - name: THRESHOLD_TIME
+                  value: "10800"
+                - name: SLACK_WEBHOOK_URL
+                  valueFrom:
+                    configMapKeyRef:
+                      name: global
+                      key: slack_webhook
+
+              command: ["/bin/bash"]
+              args:
+                - "-c"
+                - |
+                  #!/bin/bash
+                  # Get all nodes with specific label and check their age
+                  kubectl get nodes -l "$NODE_LABEL" -o json | jq -c '.items[] | {name: .metadata.name, creationTimestamp: .metadata.creationTimestamp}' | while read node_info; do
+                    NODE_NAME=$(echo $node_info | jq -r '.name')
+                    CREATION_TIMESTAMP=$(echo $node_info | jq -r '.creationTimestamp')
+
+                    # Convert creation timestamp to Unix Epoch time
+                    CREATION_EPOCH=$(date -d "$CREATION_TIMESTAMP" +%s)
+
+                    # Get current Unix Epoch time
+                    CURRENT_EPOCH=$(date +%s)
+
+                    # Calculate node age in seconds
+                    NODE_AGE=$(($CURRENT_EPOCH - $CREATION_EPOCH))
+
+                    # Check if node age is greater than threshold
+                    if [ "$NODE_AGE" -gt "$THRESHOLD_TIME" ]; then
+                      echo "Node $NODE_NAME has been around too long, sending an alert"
+                      # Send alert to Slack
+                      curl -X POST -H 'Content-type: application/json' --data "{\"text\":\"WARNING: Node \`${NODE_NAME}\` is older than 3 hours!\"}" $SLACK_WEBHOOK_URL
+                    fi
+                  done
+          restartPolicy: OnFailure
\ No newline at end of file
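A worked example of the age arithmetic in the cronjob above, with fixed timestamps instead of live cluster data:

# A node created at 08:00 UTC, checked at 12:00 UTC, is 14400 s old,
# which exceeds THRESHOLD_TIME=10800 (3 hours), so the alert fires.
CREATION_EPOCH=$(date -d "2024-01-01T08:00:00Z" +%s)
CURRENT_EPOCH=$(date -d "2024-01-01T12:00:00Z" +%s)
NODE_AGE=$((CURRENT_EPOCH - CREATION_EPOCH))   # 14400
[ "$NODE_AGE" -gt 10800 ] && echo "older than 3 hours, alerting"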
diff --git a/kube/services/node-monitors/auth.yaml b/kube/services/node-monitors/auth.yaml
new file mode 100644
index 000000000..72560cddc
--- /dev/null
+++ b/kube/services/node-monitors/auth.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: node-monitor
+  namespace: default
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: node-monitor-binding
+subjects:
+  - kind: ServiceAccount
+    name: node-monitor
+    namespace: default
+roleRef:
+  kind: ClusterRole
+  name: system:node
+  apiGroup: rbac.authorization.k8s.io
diff --git a/kube/services/node-monitors/node-not-ready.yaml b/kube/services/node-monitors/node-not-ready.yaml
new file mode 100644
index 000000000..500832fc3
--- /dev/null
+++ b/kube/services/node-monitors/node-not-ready.yaml
@@ -0,0 +1,48 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: node-not-ready-cron
+  namespace: default
+spec:
+  schedule: "*/30 * * * *"
+  jobTemplate:
+    spec:
+      template:
+        metadata:
+          labels:
+            app: gen3job
+        spec:
+          serviceAccountName: node-monitor
+          containers:
+            - name: kubectl
+              image: quay.io/cdis/awshelper
+              env:
+                - name: SLACK_WEBHOOK_URL
+                  valueFrom:
+                    configMapKeyRef:
+                      name: global
+                      key: slack_webhook
+                - name: ENVIRONMENT
+                  valueFrom:
+                    configMapKeyRef:
+                      name: global
+                      key: environment
+
+              command: ["/bin/bash"]
+              args:
+                - "-c"
+                - |
+                  #!/bin/bash
+
+                  # Get nodes that show "NodeStatusNeverUpdated"
+                  NODES=$(kubectl get nodes -o json | jq -r '.items[] | select(.status.conditions[] | select(.type == "Ready" and .status == "Unknown")) | .metadata.name')
+
+                  if [ -n "$NODES" ]; then
+                    echo "Nodes reporting 'NodeStatusNeverUpdated', sending an alert:"
+                    echo "$NODES"
+                    # Send alert to Slack
+                    curl -X POST -H 'Content-type: application/json' --data "{\"text\":\"WARNING: Node \`${NODES}\` is stuck in \\\"NotReady\\\" in \`${ENVIRONMENT}\`!\"}" $SLACK_WEBHOOK_URL
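Hand-escaping JSON inside a shell string is fragile; the original payload above left the inner "NotReady" quotes unescaped, which both ended the shell string early and produced invalid JSON (fixed above). A sturdier pattern, assuming the same NODES/ENVIRONMENT/SLACK_WEBHOOK_URL variables, is to let jq build the payload:

# jq handles all JSON quoting; \(...) interpolates the shell-supplied args.
payload=$(jq -n --arg nodes "$NODES" --arg env "$ENVIRONMENT" \
  '{text: "WARNING: Node \($nodes) is stuck in \"NotReady\" in \($env)!"}')
curl -X POST -H 'Content-type: application/json' --data "$payload" "$SLACK_WEBHOOK_URL"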
\"}" $SLACK_WEBHOOK_URL + else + echo "No nodes reporting 'NodeStatusNeverUpdated'" + fi + restartPolicy: OnFailure diff --git a/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml b/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml index bf128920e..62265503e 100644 --- a/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml +++ b/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml @@ -72,7 +72,7 @@ spec: volumeMounts: - name: ohdsi-atlas-config-local readOnly: true - mountPath: /usr/share/nginx/html/atlas/js/config-local.js + mountPath: /etc/atlas/config-local.js subPath: config-local.js imagePullPolicy: Always resources: @@ -80,4 +80,4 @@ spec: cpu: 100m memory: 100Mi limits: - memory: 500Mi + memory: 500Mi diff --git a/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml index 5cd46edd9..8eb01ec08 100644 --- a/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml +++ b/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml @@ -55,6 +55,9 @@ stringData: security_oauth_callback_api: https://atlas.$hostname/WebAPI/user/oauth/callback security_oauth_callback_urlResolver: query + security_ohdsi_custom_authorization_mode: teamproject + security_ohdsi_custom_authorization_url: $ARBORIST_URL/auth/mapping + logging_level_root: info logging_level_org_ohdsi: info logging_level_org_apache_shiro: info diff --git a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml index 65d6ed38c..a729ae7c4 100644 --- a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml +++ b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml @@ -83,7 +83,7 @@ spec: limits: memory: 4Gi - name: ohdsi-webapi-reverse-proxy - image: nginx:1.23 + image: 707767160287.dkr.ecr.us-east-1.amazonaws.com/gen3/nginx:1.23 ports: - containerPort: 80 volumeMounts: @@ -97,4 +97,4 @@ spec: cpu: 100m memory: 100Mi limits: - memory: 500Mi \ No newline at end of file + memory: 500Mi diff --git a/kube/services/ohif-viewer/app-config.js b/kube/services/ohif-viewer/app-config.js new file mode 100644 index 000000000..6768726f4 --- /dev/null +++ b/kube/services/ohif-viewer/app-config.js @@ -0,0 +1,209 @@ +window.config = { + routerBasename: '/ohif-viewer/', + // whiteLabeling: {}, + extensions: [], + modes: [], + customizationService: { + // Shows a custom route -access via http://localhost:3000/custom + // helloPage: '@ohif/extension-default.customizationModule.helloPage', + }, + showStudyList: true, + // some windows systems have issues with more than 3 web workers + maxNumberOfWebWorkers: 3, + // below flag is for performance reasons, but it might not work for all servers + omitQuotationForMultipartRequest: true, + showWarningMessageForCrossOrigin: true, + showCPUFallbackMessage: true, + showLoadingIndicator: true, + strictZSpacingForVolumeViewport: true, + maxNumRequests: { + interaction: 100, + thumbnail: 75, + // Prefetch number is dependent on the http protocol. For http 2 or + // above, the number of requests can be go a lot higher. + prefetch: 25, + }, + // filterQueryParam: false, + defaultDataSourceName: 'dicomweb', + /* Dynamic config allows user to pass "configUrl" query string this allows to load config without recompiling application. The regex will ensure valid configuration source */ + // dangerouslyUseDynamicConfig: { + // enabled: true, + // // regex will ensure valid configuration source and default is /.*/ which matches any character. To use this, setup your own regex to choose a specific source of configuration only. 
diff --git a/kube/services/ohif-viewer/app-config.js b/kube/services/ohif-viewer/app-config.js
new file mode 100644
index 000000000..6768726f4
--- /dev/null
+++ b/kube/services/ohif-viewer/app-config.js
@@ -0,0 +1,209 @@
+window.config = {
+  routerBasename: '/ohif-viewer/',
+  // whiteLabeling: {},
+  extensions: [],
+  modes: [],
+  customizationService: {
+    // Shows a custom route - access via http://localhost:3000/custom
+    // helloPage: '@ohif/extension-default.customizationModule.helloPage',
+  },
+  showStudyList: true,
+  // some windows systems have issues with more than 3 web workers
+  maxNumberOfWebWorkers: 3,
+  // below flag is for performance reasons, but it might not work for all servers
+  omitQuotationForMultipartRequest: true,
+  showWarningMessageForCrossOrigin: true,
+  showCPUFallbackMessage: true,
+  showLoadingIndicator: true,
+  strictZSpacingForVolumeViewport: true,
+  maxNumRequests: {
+    interaction: 100,
+    thumbnail: 75,
+    // Prefetch number is dependent on the http protocol. For http 2 or
+    // above, the number of requests can go a lot higher.
+    prefetch: 25,
+  },
+  // filterQueryParam: false,
+  defaultDataSourceName: 'dicomweb',
+  /* Dynamic config allows the user to pass a "configUrl" query string; this allows loading config without recompiling the application. The regex will ensure a valid configuration source */
+  // dangerouslyUseDynamicConfig: {
+  //   enabled: true,
+  //   // regex will ensure valid configuration source and default is /.*/ which matches any character. To use this, set up your own regex to choose a specific source of configuration only.
+  //   // Example 1, to allow numbers and letters in an absolute or sub-path only.
+  //   // regex: /([0-9A-Za-z.]+)(\/[0-9A-Za-z.]+)*/
+  //   // Example 2, to restrict to either hospital.com or othersite.com.
+  //   // regex: /(https:\/\/hospital.com(\/[0-9A-Za-z.]+)*)|(https:\/\/othersite.com(\/[0-9A-Za-z.]+)*)/
+  //   regex: /.*/,
+  // },
+  dataSources: [
+    {
+      friendlyName: 'dcmjs DICOMWeb Server',
+      namespace: '@ohif/extension-default.dataSourcesModule.dicomweb',
+      sourceName: 'dicomweb',
+      configuration: {
+        name: 'dicomweb',
+        wadoUriRoot: '$DICOM_SERVER_URL/wado',
+        qidoRoot: '$DICOM_SERVER_URL/dicom-web',
+        wadoRoot: '$DICOM_SERVER_URL/dicom-web',
+
+        qidoSupportsIncludeField: false,
+        supportsReject: false,
+        imageRendering: 'wadors',
+        thumbnailRendering: 'wadors',
+        enableStudyLazyLoad: true,
+        supportsFuzzyMatching: false,
+        supportsWildcard: true,
+        staticWado: true,
+        singlepart: 'bulkdata,video',
+        // whether the data source should use retrieveBulkData to grab metadata,
+        // and in case of relative path, what would it be relative to, options
+        // are in the series level or study level (some servers like series some study)
+        bulkDataURI: {
+          enabled: true,
+          relativeResolution: 'studies',
+        },
+      },
+    },
+    {
+      friendlyName: 'dicomweb delegating proxy',
+      namespace: '@ohif/extension-default.dataSourcesModule.dicomwebproxy',
+      sourceName: 'dicomwebproxy',
+      configuration: {
+        name: 'dicomwebproxy',
+      },
+    },
+    {
+      friendlyName: 'dicom json',
+      namespace: '@ohif/extension-default.dataSourcesModule.dicomjson',
+      sourceName: 'dicomjson',
+      configuration: {
+        name: 'json',
+      },
+    },
+    {
+      friendlyName: 'dicom local',
+      namespace: '@ohif/extension-default.dataSourcesModule.dicomlocal',
+      sourceName: 'dicomlocal',
+      configuration: {},
+    },
+  ],
+  httpErrorHandler: error => {
+    // This is 429 when rejected from the public idc sandbox too often.
+    console.warn(error.status);
+
+    // Could use services manager here to bring up a dialog/modal if needed.
+    console.warn('test, navigate to https://ohif.org/');
+  },
+  // whiteLabeling: {
+  //   /* Optional: Should return a React component to be rendered in the "Logo" section of the application's Top Navigation bar */
+  //   createLogoComponentFn: function (React) {
+  //     return React.createElement(
+  //       'a',
+  //       {
+  //         target: '_self',
+  //         rel: 'noopener noreferrer',
+  //         className: 'text-purple-600 line-through',
+  //         href: '/',
+  //       },
+  //       React.createElement('img',
+  //         {
+  //           src: './assets/customLogo.svg',
+  //           className: 'w-8 h-8',
+  //         }
+  //       ))
+  //   },
+  // },
+  hotkeys: [
+    {
+      commandName: 'incrementActiveViewport',
+      label: 'Next Viewport',
+      keys: ['right'],
+    },
+    {
+      commandName: 'decrementActiveViewport',
+      label: 'Previous Viewport',
+      keys: ['left'],
+    },
+    { commandName: 'rotateViewportCW', label: 'Rotate Right', keys: ['r'] },
+    { commandName: 'rotateViewportCCW', label: 'Rotate Left', keys: ['l'] },
+    { commandName: 'invertViewport', label: 'Invert', keys: ['i'] },
+    {
+      commandName: 'flipViewportHorizontal',
+      label: 'Flip Horizontally',
+      keys: ['h'],
+    },
+    {
+      commandName: 'flipViewportVertical',
+      label: 'Flip Vertically',
+      keys: ['v'],
+    },
+    { commandName: 'scaleUpViewport', label: 'Zoom In', keys: ['+'] },
+    { commandName: 'scaleDownViewport', label: 'Zoom Out', keys: ['-'] },
+    { commandName: 'fitViewportToWindow', label: 'Zoom to Fit', keys: ['='] },
+    { commandName: 'resetViewport', label: 'Reset', keys: ['space'] },
+    { commandName: 'nextImage', label: 'Next Image', keys: ['down'] },
+    { commandName: 'previousImage', label: 'Previous Image', keys: ['up'] },
+    // {
+    //   commandName: 'previousViewportDisplaySet',
+    //   label: 'Previous Series',
+    //   keys: ['pagedown'],
+    // },
+    // {
+    //   commandName: 'nextViewportDisplaySet',
+    //   label: 'Next Series',
+    //   keys: ['pageup'],
+    // },
+    {
+      commandName: 'setToolActive',
+      commandOptions: { toolName: 'Zoom' },
+      label: 'Zoom',
+      keys: ['z'],
+    },
+    // ~ Window level presets
+    {
+      commandName: 'windowLevelPreset1',
+      label: 'W/L Preset 1',
+      keys: ['1'],
+    },
+    {
+      commandName: 'windowLevelPreset2',
+      label: 'W/L Preset 2',
+      keys: ['2'],
+    },
+    {
+      commandName: 'windowLevelPreset3',
+      label: 'W/L Preset 3',
+      keys: ['3'],
+    },
+    {
+      commandName: 'windowLevelPreset4',
+      label: 'W/L Preset 4',
+      keys: ['4'],
+    },
+    {
+      commandName: 'windowLevelPreset5',
+      label: 'W/L Preset 5',
+      keys: ['5'],
+    },
+    {
+      commandName: 'windowLevelPreset6',
+      label: 'W/L Preset 6',
+      keys: ['6'],
+    },
+    {
+      commandName: 'windowLevelPreset7',
+      label: 'W/L Preset 7',
+      keys: ['7'],
+    },
+    {
+      commandName: 'windowLevelPreset8',
+      label: 'W/L Preset 8',
+      keys: ['8'],
+    },
+    {
+      commandName: 'windowLevelPreset9',
+      label: 'W/L Preset 9',
+      keys: ['9'],
+    },
+  ],
+};
diff --git a/kube/services/revproxy/gen3.nginx.conf/argo-argo-workflows-server.conf b/kube/services/revproxy/gen3.nginx.conf/argo-server.conf
similarity index 86%
rename from kube/services/revproxy/gen3.nginx.conf/argo-argo-workflows-server.conf
rename to kube/services/revproxy/gen3.nginx.conf/argo-server.conf
index cb8def3aa..1cdd4608c 100644
--- a/kube/services/revproxy/gen3.nginx.conf/argo-argo-workflows-server.conf
+++ b/kube/services/revproxy/gen3.nginx.conf/argo-server.conf
@@ -7,7 +7,7 @@
   auth_request /gen3-authz;

   set $proxy_service "argo";
-  set $upstream http://argo-argo-workflows-server.argo.svc.cluster.local:2746;
+  set $upstream SERVICE_URL;

   rewrite ^/argo/(.*) /$1 break;
diff --git a/kube/services/revproxy/gen3.nginx.conf/gen3-discovery-ai-service.conf b/kube/services/revproxy/gen3.nginx.conf/gen3-discovery-ai-service.conf
new file mode 100644
index 000000000..42e9a3758
--- /dev/null
+++ b/kube/services/revproxy/gen3.nginx.conf/gen3-discovery-ai-service.conf
@@ -0,0 +1,12 @@
+  location /ai {
+    if ($csrf_check !~ ^ok-\S.+$) {
+      return 403 "failed csrf check";
+    }
+
+    set $proxy_service "gen3-discovery-ai-service";
+    set $upstream http://gen3-discovery-ai-service$des_domain;
+    rewrite ^/ai/(.*) /$1 break;
+    proxy_pass $upstream;
+    proxy_redirect http://$host/ https://$host/ai/;
+    client_max_body_size 0;
+  }
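The new /ai location strips its prefix before proxying, so a request for /ai/docs reaches the service as /docs. A quick check against a deployed commons (the hostname is a placeholder, and the service exposing /docs is an assumption):

# Expect a 2xx from the discovery-AI service if routing and CSRF pass.
curl -s -o /dev/null -w '%{http_code}\n' "https://example-commons.org/ai/docs"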
diff --git a/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/frontend-framework-service.conf b/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/frontend-framework-service.conf
index ac2cb75f6..37e7623de 100644
--- a/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/frontend-framework-service.conf
+++ b/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/frontend-framework-service.conf
@@ -2,6 +2,10 @@
   if ($csrf_check !~ ^ok-\S.+$) {
     return 403 "failed csrf check";
   }
+
+  # added to avoid click-jacking attacks
+  add_header X-Frame-Options "SAMEORIGIN";
+
   set $proxy_service "frontend-framework";
   set $upstream http://frontend-framework-service.$namespace.svc.cluster.local;
   proxy_pass $upstream;
diff --git a/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/portal-service.conf b/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/portal-service.conf
index 58f0851d6..75d69c185 100644
--- a/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/portal-service.conf
+++ b/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/portal-service.conf
@@ -21,5 +21,8 @@
     rewrite ^/(.*)$ /dashboard/Public/maintenance-page/index.html redirect;
   }

+  # added to avoid click-jacking attacks
+  add_header X-Frame-Options "SAMEORIGIN";
+
   proxy_pass $upstream;
 }
diff --git a/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf b/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf
index db2de5886..e6d66ec12 100644
--- a/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf
+++ b/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf
@@ -1,4 +1,8 @@
 location /guppy/ {
+  if ($csrf_check !~ ^ok-\S.+$) {
+    return 403 "failed csrf check, make sure data-portal version >= 2023.12 or >= 5.19.0";
+  }
+
   proxy_connect_timeout 600s;
   proxy_send_timeout 600s;
   proxy_read_timeout 600s;
diff --git a/kube/services/revproxy/gen3.nginx.conf/ohif-viewer-service.conf b/kube/services/revproxy/gen3.nginx.conf/ohif-viewer-service.conf
index 9a20bc832..22926bcf0 100644
--- a/kube/services/revproxy/gen3.nginx.conf/ohif-viewer-service.conf
+++ b/kube/services/revproxy/gen3.nginx.conf/ohif-viewer-service.conf
@@ -3,11 +3,12 @@ location /ohif-viewer/ {
   #   return 403 "failed csrf check";
   # }

-  set $authz_resource "/services/ohif-viewer";
-  set $authz_method "read";
-  set $authz_service "ohif-viewer";
+  # see if this can be fixed in the future for anonymous access
+  # set $authz_resource "/services/ohif-viewer";
+  # set $authz_method "read";
+  # set $authz_service "ohif-viewer";

-  auth_request /gen3-authz;
+  # auth_request /gen3-authz;

   set $proxy_service "ohif-viewer";
   set $upstream http://ohif-viewer-service.$namespace.svc.cluster.local;
diff --git a/kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf b/kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf
index 2eb77b179..ed736189c 100644
--- a/kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf
+++ b/kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf
@@ -7,11 +7,6 @@ location /orthanc/ {
   set $authz_method "create";
   set $authz_service "orthanc";

-  if ($request_uri ~ "^/orthanc/dicom-web/studies/") {
-    set $authz_method "read";
-    set $authz_resource "/services/orthanc/studies";
-  }
-
   auth_request /gen3-authz;

   proxy_set_header Authorization "Basic cHVibGljOmhlbGxv";
@@ -24,3 +19,23 @@ location /orthanc/ {
   # no limit to payload size so we can upload large DICOM files
   client_max_body_size 0;
 }
+
+location /orthanc/dicom-web/studies/ {
+  set $authz_method "read";
+  set $authz_resource "/services/orthanc/studies";
+  set $authz_service "orthanc";
+
+  auth_request /gen3-authz;
+  if ($request_method = POST) {
+    return 403;
+  }
+  proxy_set_header Authorization "Basic cHVibGljOmhlbGxv";
+
+  set $proxy_service "orthanc";
+  set $upstream http://orthanc-service.$namespace.svc.cluster.local;
+  rewrite ^/orthanc/(.*) /$1 break;
+  proxy_pass $upstream;
+
+  # no limit to payload size so we can upload large DICOM files
+  client_max_body_size 0;
+}
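The orthanc change moves the studies path into its own location block: reads there only need "read" authz, while POST (uploads) is rejected outright. A behavior check against a deployed commons, with placeholder hostname and assuming the caller has read access:

# Reads proxy through; uploads to the studies path are blocked.
curl -s -o /dev/null -w '%{http_code}\n' \
  "https://example-commons.org/orthanc/dicom-web/studies/"            # expect 200
curl -s -o /dev/null -w '%{http_code}\n' -X POST \
  "https://example-commons.org/orthanc/dicom-web/studies/"            # expect 403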
diff --git a/kube/services/revproxy/gen3.nginx.conf/portal-as-root/frontend-framework-service.conf b/kube/services/revproxy/gen3.nginx.conf/portal-as-root/frontend-framework-service.conf
index dbb24e4b2..f3686d1a6 100644
--- a/kube/services/revproxy/gen3.nginx.conf/portal-as-root/frontend-framework-service.conf
+++ b/kube/services/revproxy/gen3.nginx.conf/portal-as-root/frontend-framework-service.conf
@@ -6,6 +6,10 @@
   if ($csrf_check !~ ^ok-\S.+$) {
     return 403 "failed csrf check";
   }
+
+  # added to avoid click-jacking attacks
+  add_header X-Frame-Options "SAMEORIGIN";
+
   set $proxy_service "frontend-framework";
   # frontend framework service expects the /ff/ prefix, so no path rewrite
   set $upstream http://frontend-framework-service.$namespace.svc.cluster.local;
diff --git a/kube/services/revproxy/nginx.conf b/kube/services/revproxy/nginx.conf
index 2e3a3b151..d0e14f49b 100644
--- a/kube/services/revproxy/nginx.conf
+++ b/kube/services/revproxy/nginx.conf
@@ -236,7 +236,7 @@ server {
   # This overrides the individual services
   # set $allow_origin "*";

-  if ($http_origin) {
+  if ($http_origin = "https://$host") {
     set $allow_origin "$http_origin";
   }
diff --git a/kube/services/revproxy/revproxy-deploy.yaml b/kube/services/revproxy/revproxy-deploy.yaml
index 9d5caab1b..9f10ce90b 100644
--- a/kube/services/revproxy/revproxy-deploy.yaml
+++ b/kube/services/revproxy/revproxy-deploy.yaml
@@ -21,6 +21,7 @@ spec:
         app: revproxy
         # allow access from workspaces
         userhelper: "yes"
+        internet: "yes"
         GEN3_DATE_LABEL
     spec:
       affinity:
diff --git a/packer/configs/web_wildcard_whitelist b/packer/configs/web_wildcard_whitelist
index c58eeefe8..621dec3d5 100644
--- a/packer/configs/web_wildcard_whitelist
+++ b/packer/configs/web_wildcard_whitelist
@@ -44,4 +44,5 @@
 .yahooapis.com
 .cloudfront.net
 .docker.io
+.blob.core.windows.net
 .googleapis.com
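The nginx.conf change tightens CORS: the Origin header is only echoed back when it matches the host being served, so cross-site origins no longer receive an allow header. A quick verification, with a placeholder hostname:

# Same-host Origin: the CORS header should be present.
curl -s -o /dev/null -D - -H "Origin: https://example-commons.org" \
  "https://example-commons.org/" | grep -i access-control-allow-origin
# A third-party Origin (e.g. https://evil.example) should get no such header.
curl -s -o /dev/null -D - -H "Origin: https://evil.example" \
  "https://example-commons.org/" | grep -ic access-control-allow-origin   # expect 0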