diff --git a/.ansible-lint b/.ansible-lint
index 0c9cf06f..b922c78a 100644
--- a/.ansible-lint
+++ b/.ansible-lint
@@ -1,17 +1,53 @@
-var_naming_pattern: "^[a-zA-Z0-9_]*$"
+---
+# .ansible-lint
-parseable: true
+profile: moderate
+verbosity: 1
+strict: true
+# Enforce variable names to follow the pattern below, in addition to Ansible's own
+# requirements, like avoiding python identifiers. To disable, add `var-naming`
+# to skip_list.
+var_naming_pattern: ^[a-zA-Z0-9_]*$
+
+use_default_rules: true
+
+# Ansible-lint is able to recognize and load skip rules stored inside
+# `.ansible-lint-ignore` (or `.config/ansible-lint-ignore.txt`) files.
+# To skip a rule, enter the filename and tag, like "playbook.yml package-latest",
+# on a new line.
+skip_list:
+  - role-name # DAC role names contain dashes; can be ignored
+  - yaml[line-length] # it's easier to understand/debug the underlying command when it's not broken up
+  - name[template] # task name uses a Jinja template; this can be ignored
+  - var-naming
+
+# Ansible-lint does not automatically load rules that have the 'opt-in' tag.
+# You must enable opt-in rules by listing each rule 'id' below.
+enable_list:
+  - args
+  - empty-string-compare
+  - no-log-password
+  - no-same-owner
+  - yaml
+
+# exclude_paths included in this file are parsed relative to this file's location
+# and not relative to the CWD of execution. CLI arguments passed to the --exclude
+# option are parsed relative to the CWD of execution.
 exclude_paths:
   - .git/
   - .gitignore
   - .cache/
   - roles/istio
+  - roles/vdm/tasks/deploy.yaml # TODO schema[tasks] error for a docker 'Deploy BLT - Deploy SAS Viya' task
+  - .github/workflows # non-Ansible files
-skip_list:
-  - unnamed-task
-  - role-name
-  - var-naming
+# Offline mode disables installation of requirements.yml and schema refreshing
+offline: false
+
+# Define the Ansible variables required to satisfy the syntax check
+extra_vars:
+  deployment_type: vsphere

 warn_list:
   - experimental
diff --git a/.github/workflows/linter-analysis.yaml b/.github/workflows/linter-analysis.yaml
index 742e3f4b..0f78ffeb 100644
--- a/.github/workflows/linter-analysis.yaml
+++ b/.github/workflows/linter-analysis.yaml
@@ -10,7 +10,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Repo
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Run Hadolint Action
         uses: jbergstroem/hadolint-gh-action@v1.11.0
@@ -25,7 +25,7 @@
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Repo
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       # .shellcheckrc is read from the current dir
       - name: Copy Config to Parent Level Directory
@@ -42,7 +42,7 @@
     runs-on: ubuntu-latest
     steps:
      - name: Checkout Repo
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4

      # The latest ansible/ansible-lint-action removed the
     # ability to specify configs from other dirs
diff --git a/.gitignore b/.gitignore
index a8470946..5ace483b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,6 +5,7 @@
 .galaxy_install_info

 ## ignore ansible-vars.yml
+.pre-commit-config.yaml
 ansible-vars.yml
 ansible-vars.yaml
diff --git a/linting-configs/.ansible-lint b/linting-configs/.ansible-lint
index b922c78a..a8fddcbd 100644
--- a/linting-configs/.ansible-lint
+++ b/linting-configs/.ansible-lint
@@ -41,6 +41,7 @@ exclude_paths:
   - roles/istio
   - roles/vdm/tasks/deploy.yaml # TODO schema[tasks] error for a docker 'Deploy BLT - Deploy SAS Viya' task
   - .github/workflows # non-Ansible files
+  - viya4-deployment-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml # dark site ansible-vars.yaml file template

 # Offline mode disables installation of requirements.yml and schema refreshing
 offline: false
diff --git a/viya4-deployment-darksite/baseline-to-ecr/00_vars.sh b/viya4-deployment-darksite/baseline-to-ecr/00_vars.sh
new file mode 100644
index 00000000..9493e01e
--- /dev/null
+++ b/viya4-deployment-darksite/baseline-to-ecr/00_vars.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+## set variables
+AWS_ACCT_ID=
+AWS_REGION=
+
+K8S_minor_version=25 # K8s minor version only, e.g., 25 for v1.25.X or 22 for v1.22.X. This must match your deployment!
+DEPLOYMENT_VERSION=main # main pulls the latest release of viya4-deployment; set to a specific version if needed, e.g., 5.2.0
+
+DOCKER_SUDO= # set to sudo if your docker commands require sudo; otherwise leave blank
diff --git a/viya4-deployment-darksite/baseline-to-ecr/01_run_all.sh b/viya4-deployment-darksite/baseline-to-ecr/01_run_all.sh
new file mode 100644
index 00000000..d4458ad2
--- /dev/null
+++ b/viya4-deployment-darksite/baseline-to-ecr/01_run_all.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+source 00_vars.sh
+
+. auto_scaler.sh
+. cert_manager.sh
+. ingress_nginx.sh
+. metrics_server.sh
+. nfs_subdir_external_provisioner.sh
+. openldap.sh
+. ebs_driver.sh
diff --git a/viya4-deployment-darksite/baseline-to-ecr/README.md b/viya4-deployment-darksite/baseline-to-ecr/README.md
new file mode 100644
index 00000000..c3dc4f20
--- /dev/null
+++ b/viya4-deployment-darksite/baseline-to-ecr/README.md
@@ -0,0 +1,13 @@
+These scripts assume your aws cli and your kubeconfig are already configured!
+
+Notes:
+- requires helm, yq, and aws cli
+- these scripts install the helm charts and corresponding container images to ECR for each baseline item.
+- the chart version is set automatically based on the version of DAC you specify.
+
+## Step 1: Set your variables
+- Set your variables in 00_vars.sh
+
+## Step 2: Run script(s)
+- Option 1: run 01_run_all.sh (runs all scripts)
+- Option 2: run scripts individually
\ No newline at end of file
diff --git a/viya4-deployment-darksite/baseline-to-ecr/auto_scaler.sh b/viya4-deployment-darksite/baseline-to-ecr/auto_scaler.sh
new file mode 100644
index 00000000..2c33d497
--- /dev/null
+++ b/viya4-deployment-darksite/baseline-to-ecr/auto_scaler.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
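+#
+# Overview: resolves the cluster-autoscaler helm chart version from the
+# viya4-deployment defaults for your DAC/K8s versions, pulls the matching
+# image, and mirrors the chart (as an OCI artifact) and the image into ECR.
+# A darksite install can then pull the chart from a reference like
+# (hypothetical account/region):
+#   oci://123456789012.dkr.ecr.us-east-1.amazonaws.com/cluster-autoscaler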
+# SPDX-License-Identifier: Apache-2.0
+
+source 00_vars.sh
+
+# account for v6.3.0+ changes - autoscaler now supports k8s 1.25
+DV=$(echo $DEPLOYMENT_VERSION | sed 's/\.//g')
+if [ $DEPLOYMENT_VERSION == "main" ] && [ $K8S_minor_version -ge 25 ]; then
+  CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.autoscalerVersions.PDBv1Support.api.chartVersion')
+elif [ $DEPLOYMENT_VERSION == "main" ] && [ $K8S_minor_version -le 24 ]; then
+  CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.autoscalerVersions.PDBv1beta1Support.api.chartVersion')
+elif [ $DV -ge 630 ] && [ $K8S_minor_version -ge 25 ]; then
+  CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.autoscalerVersions.PDBv1Support.api.chartVersion')
+elif [ $DV -ge 630 ] && [ $K8S_minor_version -le 24 ]; then
+  CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.autoscalerVersions.PDBv1beta1Support.api.chartVersion')
+elif [ $DV -le 620 ]; then
+  CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.CLUSTER_AUTOSCALER_CHART_VERSION')
+fi
+
+## report the chart version resolved from the viya4-deployment repo
+echo "**** cluster-autoscaler ****"
+echo "Helm chart version: $CHART_VERSION"
+## Get helm chart info
+helm repo add autoscaling https://kubernetes.github.io/autoscaler
+helm repo update
+IMG_REPO=$(helm show values autoscaling/cluster-autoscaler --version=$CHART_VERSION | yq '.image.repository')
+TAG=$(helm show values autoscaling/cluster-autoscaler --version=$CHART_VERSION | yq '.image.tag')
+echo "Image repo: $IMG_REPO" && echo "Image tag: $TAG"
+echo "*********************"
+
+## pull the image
+$DOCKER_SUDO docker pull $IMG_REPO:$TAG
+
+# create ECR repo
+aws ecr create-repository --no-cli-pager --repository-name cluster-autoscaler
+
+# push the helm chart to the ECR repo
+helm pull autoscaling/cluster-autoscaler --version=$CHART_VERSION
+aws ecr get-login-password \
+  --region $AWS_REGION | helm registry login \
+  --username AWS \
+  --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+helm push cluster-autoscaler-$CHART_VERSION.tgz oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/
+rm cluster-autoscaler-$CHART_VERSION.tgz
+
+## update local image tag appropriately
+$DOCKER_SUDO docker tag $IMG_REPO:$TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/cluster-autoscaler:$TAG
+
+## auth local docker to ecr
+aws ecr get-login-password --region $AWS_REGION | $DOCKER_SUDO docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+
+## push local image to ecr
+$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/cluster-autoscaler:$TAG
diff --git a/viya4-deployment-darksite/baseline-to-ecr/cert_manager.sh b/viya4-deployment-darksite/baseline-to-ecr/cert_manager.sh
new file mode 100644
index 00000000..cead0bce
--- /dev/null
+++ b/viya4-deployment-darksite/baseline-to-ecr/cert_manager.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
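+#
+# Overview: mirrors the cert-manager chart and its four images (controller,
+# webhook, cainjector, startupapicheck) into ECR. The images are pulled as
+# v$CHART_VERSION because cert-manager tags images with the chart version
+# prefixed by "v".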
+# SPDX-License-Identifier: Apache-2.0
+
+source 00_vars.sh
+
+## get chart version from viya4-deployment repo
+echo "**** cert-manager ****"
+CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.CERT_MANAGER_CHART_VERSION')
+echo "Helm chart version: $CHART_VERSION"
+## Get helm chart info
+helm repo add jetstack https://charts.jetstack.io/
+helm repo update
+IMG_CONTROLLER=$(helm show values jetstack/cert-manager --version=$CHART_VERSION | yq '.image.repository')
+IMG_WEBHOOK=$(helm show values jetstack/cert-manager --version=$CHART_VERSION | yq '.webhook.image.repository')
+IMG_CAINJECTOR=$(helm show values jetstack/cert-manager --version=$CHART_VERSION | yq '.cainjector.image.repository')
+IMG_STARTUP=$(helm show values jetstack/cert-manager --version=$CHART_VERSION | yq '.startupapicheck.image.repository')
+echo "controller repo: $IMG_CONTROLLER" && echo "webhook repo: $IMG_WEBHOOK" && echo "cainjector repo: $IMG_CAINJECTOR" && echo "startupapicheck repo: $IMG_STARTUP"
+echo "*********************"
+
+## pull the images
+$DOCKER_SUDO docker pull $IMG_CONTROLLER:v$CHART_VERSION
+$DOCKER_SUDO docker pull $IMG_WEBHOOK:v$CHART_VERSION
+$DOCKER_SUDO docker pull $IMG_CAINJECTOR:v$CHART_VERSION
+$DOCKER_SUDO docker pull $IMG_STARTUP:v$CHART_VERSION
+
+# create ECR repos
+aws ecr create-repository --no-cli-pager --repository-name cert-manager # this repo is used to store the helm chart
+aws ecr create-repository --no-cli-pager --repository-name $IMG_CONTROLLER
+aws ecr create-repository --no-cli-pager --repository-name $IMG_WEBHOOK
+aws ecr create-repository --no-cli-pager --repository-name $IMG_CAINJECTOR
+aws ecr create-repository --no-cli-pager --repository-name $IMG_STARTUP
+
+# push the helm chart to the ECR repo
+helm pull jetstack/cert-manager --version=$CHART_VERSION
+aws ecr get-login-password \
+  --region $AWS_REGION | helm registry login \
+  --username AWS \
+  --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+helm push cert-manager-v$CHART_VERSION.tgz oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/
+rm cert-manager-v$CHART_VERSION.tgz
+
+## update local image tags appropriately
+$DOCKER_SUDO docker tag $IMG_CONTROLLER:v$CHART_VERSION $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_CONTROLLER:v$CHART_VERSION
+$DOCKER_SUDO docker tag $IMG_WEBHOOK:v$CHART_VERSION $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_WEBHOOK:v$CHART_VERSION
+$DOCKER_SUDO docker tag $IMG_CAINJECTOR:v$CHART_VERSION $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_CAINJECTOR:v$CHART_VERSION
+$DOCKER_SUDO docker tag $IMG_STARTUP:v$CHART_VERSION $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_STARTUP:v$CHART_VERSION
+
+## auth local docker to ecr
+aws ecr get-login-password --region $AWS_REGION | $DOCKER_SUDO docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+
+## push local images to ecr
+$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_CONTROLLER:v$CHART_VERSION
+$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_WEBHOOK:v$CHART_VERSION
+$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_CAINJECTOR:v$CHART_VERSION
+$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_STARTUP:v$CHART_VERSION
diff --git a/viya4-deployment-darksite/baseline-to-ecr/ebs_driver.sh b/viya4-deployment-darksite/baseline-to-ecr/ebs_driver.sh
new file mode 100644 index 00000000..92af7851 --- /dev/null +++ b/viya4-deployment-darksite/baseline-to-ecr/ebs_driver.sh @@ -0,0 +1,88 @@ +#!/bin/bash + +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +source 00_vars.sh + +## get chart version from viya4-deployment repo +echo -e "\n**** aws-ebs-csi-driver ****" +CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.EBS_CSI_DRIVER_CHART_VERSION') +echo "Helm chart version: $CHART_VERSION" +## Get helm chart info +helm repo add aws-ebs-csi-driver https://kubernetes-sigs.github.io/aws-ebs-csi-driver +helm repo update +HELM_CHART=$(helm show all aws-ebs-csi-driver/aws-ebs-csi-driver --version=$CHART_VERSION) +# echo "$HELM_CHART" +IMG_REPO=$(echo "$HELM_CHART" | yq -N '.image.repository | select(. != null)') +IMG_TAG=$(echo "$HELM_CHART" | yq -N '.appVersion | select(. != null)') +PROVISIONER_REPO=$(echo "$HELM_CHART" | yq -N '.sidecars.provisioner.image.repository | select(. != null)') +PROVISIONER_TAG=$(echo "$HELM_CHART" | yq -N '.sidecars.provisioner.image.tag | select(. != null)') +ATTACHER_REPO=$(echo "$HELM_CHART" | yq -N '.sidecars.attacher.image.repository | select(. != null)') +ATTACHER_TAG=$(echo "$HELM_CHART" | yq -N '.sidecars.attacher.image.tag | select(. != null)') +SNAPSHOTTER_REPO=$(echo "$HELM_CHART" | yq -N '.sidecars.snapshotter.image.repository | select(. != null)') +SNAPSHOTTER_TAG=$(echo "$HELM_CHART" | yq -N '.sidecars.snapshotter.image.tag | select(. != null)') +LIVENESS_REPO=$(echo "$HELM_CHART" | yq -N '.sidecars.livenessProbe.image.repository | select(. != null)') +LIVENESS_TAG=$(echo "$HELM_CHART" | yq -N '.sidecars.livenessProbe.image.tag | select(. != null)') +RESIZER_REPO=$(echo "$HELM_CHART" | yq -N '.sidecars.resizer.image.repository | select(. != null)') +RESIZER_TAG=$(echo "$HELM_CHART" | yq -N '.sidecars.resizer.image.tag | select(. != null)') +NODEREG_REPO=$(echo "$HELM_CHART" | yq -N '.sidecars.nodeDriverRegistrar.image.repository | select(. != null)') +NODEREG_TAG=$(echo "$HELM_CHART" | yq -N '.sidecars.nodeDriverRegistrar.image.tag | select(. 
!= null)')
+echo "Driver image repo: $IMG_REPO" && echo "Image tag: v$IMG_TAG"
+echo "Provisioner image repo: $PROVISIONER_REPO" && echo "Image tag: $PROVISIONER_TAG"
+echo "Attacher image repo: $ATTACHER_REPO" && echo "Image tag: $ATTACHER_TAG"
+echo "Snapshotter image repo: $SNAPSHOTTER_REPO" && echo "Image tag: $SNAPSHOTTER_TAG"
+echo "Liveness image repo: $LIVENESS_REPO" && echo "Image tag: $LIVENESS_TAG"
+echo "Resizer image repo: $RESIZER_REPO" && echo "Image tag: $RESIZER_TAG"
+echo "NodeDriverRegistrar image repo: $NODEREG_REPO" && echo "Image tag: $NODEREG_TAG"
+echo "*********************"
+
+## pull the images
+$DOCKER_SUDO docker pull $IMG_REPO:v$IMG_TAG
+$DOCKER_SUDO docker pull $PROVISIONER_REPO:$PROVISIONER_TAG
+$DOCKER_SUDO docker pull $ATTACHER_REPO:$ATTACHER_TAG
+$DOCKER_SUDO docker pull $SNAPSHOTTER_REPO:$SNAPSHOTTER_TAG
+$DOCKER_SUDO docker pull $LIVENESS_REPO:$LIVENESS_TAG
+$DOCKER_SUDO docker pull $RESIZER_REPO:$RESIZER_TAG
+$DOCKER_SUDO docker pull $NODEREG_REPO:$NODEREG_TAG
+
+# create ECR repos
+aws ecr create-repository --no-cli-pager --repository-name aws-ebs-csi-driver # this repo is used to store the helm chart
+aws ecr create-repository --no-cli-pager --repository-name $IMG_REPO
+aws ecr create-repository --no-cli-pager --repository-name $PROVISIONER_REPO
+aws ecr create-repository --no-cli-pager --repository-name $ATTACHER_REPO
+aws ecr create-repository --no-cli-pager --repository-name $SNAPSHOTTER_REPO
+aws ecr create-repository --no-cli-pager --repository-name $LIVENESS_REPO
+aws ecr create-repository --no-cli-pager --repository-name $RESIZER_REPO
+aws ecr create-repository --no-cli-pager --repository-name $NODEREG_REPO
+
+# push the helm chart to the ECR repo
+helm pull aws-ebs-csi-driver/aws-ebs-csi-driver --version=$CHART_VERSION
+aws ecr get-login-password \
+  --no-cli-pager \
+  --region $AWS_REGION | helm registry login \
+  --username AWS \
+  --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+helm push aws-ebs-csi-driver-$CHART_VERSION.tgz oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/
+rm aws-ebs-csi-driver-$CHART_VERSION.tgz
+
+# update local image tags appropriately
+$DOCKER_SUDO docker tag $IMG_REPO:v$IMG_TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_REPO:v$IMG_TAG
+$DOCKER_SUDO docker tag $PROVISIONER_REPO:$PROVISIONER_TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$PROVISIONER_REPO:$PROVISIONER_TAG
+$DOCKER_SUDO docker tag $ATTACHER_REPO:$ATTACHER_TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$ATTACHER_REPO:$ATTACHER_TAG
+$DOCKER_SUDO docker tag $SNAPSHOTTER_REPO:$SNAPSHOTTER_TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$SNAPSHOTTER_REPO:$SNAPSHOTTER_TAG
+$DOCKER_SUDO docker tag $LIVENESS_REPO:$LIVENESS_TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$LIVENESS_REPO:$LIVENESS_TAG
+$DOCKER_SUDO docker tag $RESIZER_REPO:$RESIZER_TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$RESIZER_REPO:$RESIZER_TAG
+$DOCKER_SUDO docker tag $NODEREG_REPO:$NODEREG_TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$NODEREG_REPO:$NODEREG_TAG
+
+# auth local docker to ecr
+aws ecr get-login-password --region $AWS_REGION | $DOCKER_SUDO docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+
+# push local images to ecr
+$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMG_REPO:v$IMG_TAG
+$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$PROVISIONER_REPO:$PROVISIONER_TAG
+$DOCKER_SUDO docker push
$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$ATTACHER_REPO:$ATTACHER_TAG +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$SNAPSHOTTER_REPO:$SNAPSHOTTER_TAG +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$LIVENESS_REPO:$LIVENESS_TAG +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$RESIZER_REPO:$RESIZER_TAG +$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$NODEREG_REPO:$NODEREG_TAG diff --git a/viya4-deployment-darksite/baseline-to-ecr/ingress_nginx.sh b/viya4-deployment-darksite/baseline-to-ecr/ingress_nginx.sh new file mode 100644 index 00000000..3773861e --- /dev/null +++ b/viya4-deployment-darksite/baseline-to-ecr/ingress_nginx.sh @@ -0,0 +1,65 @@ +#!/bin/bash + +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +source 00_vars.sh + +# determine chart version to use +V_CEILING=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.ingressVersions.k8sMinorVersionCeiling.value') +V_FLOOR=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.ingressVersions.k8sMinorVersionFloor.value') + +if [ $K8S_minor_version -ge $V_FLOOR ]; then + CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.ingressVersions.k8sMinorVersionFloor.api.chartVersion') + echo "Helm chart version: $CHART_VERSION" +elif [ $K8S_minor_version -le $V_CEILING ]; then + CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.ingressVersions.k8sMinorVersionCeiling.api.chartVersion') + echo "Helm chart version: $CHART_VERSION" +else + echo "Error with your minor version! Exiting..." 
+  exit 1
+fi
+
+## Get helm chart info
+helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
+helm repo update
+CONTROLLER_REGISTRY=$(helm show values ingress-nginx/ingress-nginx --version=$CHART_VERSION | yq '.controller.image.registry')
+CONTROLLER_IMAGE=$(helm show values ingress-nginx/ingress-nginx --version=$CHART_VERSION | yq '.controller.image.image')
+CONTROLLER_TAG=$(helm show values ingress-nginx/ingress-nginx --version=$CHART_VERSION | yq '.controller.image.tag')
+WEBHOOKS_REGISTRY=$(helm show values ingress-nginx/ingress-nginx --version=$CHART_VERSION | yq '.controller.admissionWebhooks.patch.image.registry')
+WEBHOOKS_TAG=$(helm show values ingress-nginx/ingress-nginx --version=$CHART_VERSION | yq '.controller.admissionWebhooks.patch.image.tag')
+WEBHOOKS_IMAGE=$(helm show values ingress-nginx/ingress-nginx --version=$CHART_VERSION | yq '.controller.admissionWebhooks.patch.image.image')
+echo "controller repo: $CONTROLLER_REGISTRY/$CONTROLLER_IMAGE:$CONTROLLER_TAG" && echo "webhook repo: $WEBHOOKS_REGISTRY/$WEBHOOKS_IMAGE:$WEBHOOKS_TAG"
+echo "*********************"
+
+## pull the images
+$DOCKER_SUDO docker pull $CONTROLLER_REGISTRY/$CONTROLLER_IMAGE:$CONTROLLER_TAG
+$DOCKER_SUDO docker pull $WEBHOOKS_REGISTRY/$WEBHOOKS_IMAGE:$WEBHOOKS_TAG
+
+# create ECR repos
+aws ecr create-repository --no-cli-pager --repository-name ingress-nginx # this repo is used to store the helm chart
+aws ecr create-repository --no-cli-pager --repository-name $CONTROLLER_IMAGE
+aws ecr create-repository --no-cli-pager --repository-name $WEBHOOKS_IMAGE
+
+# push the helm chart to the ECR repo
+helm pull ingress-nginx/ingress-nginx --version=$CHART_VERSION
+aws ecr get-login-password \
+  --region $AWS_REGION | helm registry login \
+  --username AWS \
+  --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+helm push ingress-nginx-$CHART_VERSION.tgz oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/
+rm ingress-nginx-$CHART_VERSION.tgz
+
+## update local image tags appropriately
+$DOCKER_SUDO docker tag $CONTROLLER_REGISTRY/$CONTROLLER_IMAGE:$CONTROLLER_TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$CONTROLLER_IMAGE:$CONTROLLER_TAG
+$DOCKER_SUDO docker tag $WEBHOOKS_REGISTRY/$WEBHOOKS_IMAGE:$WEBHOOKS_TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$WEBHOOKS_IMAGE:$WEBHOOKS_TAG
+
+## auth local docker to ecr
+aws ecr get-login-password --region $AWS_REGION | $DOCKER_SUDO docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+
+## push local images to ecr
+$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$CONTROLLER_IMAGE:$CONTROLLER_TAG
+$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$WEBHOOKS_IMAGE:$WEBHOOKS_TAG
diff --git a/viya4-deployment-darksite/baseline-to-ecr/metrics_server.sh b/viya4-deployment-darksite/baseline-to-ecr/metrics_server.sh
new file mode 100644
index 00000000..b97f1f5c
--- /dev/null
+++ b/viya4-deployment-darksite/baseline-to-ecr/metrics_server.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
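+#
+# Overview: mirrors the bitnami metrics-server chart and image into ECR.
+# The resulting image reference looks like (hypothetical account/region):
+#   123456789012.dkr.ecr.us-east-1.amazonaws.com/metrics-server:<tag>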
+# SPDX-License-Identifier: Apache-2.0
+
+source 00_vars.sh
+
+echo "**** metrics-server ****"
+CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.METRICS_SERVER_CHART_VERSION')
+echo "Helm chart version: $CHART_VERSION"
+helm repo add bitnami https://charts.bitnami.com/bitnami
+helm repo update
+REGISTRY=$(helm show values bitnami/metrics-server --version=$CHART_VERSION | yq '.image.registry')
+TAG=$(helm show values bitnami/metrics-server --version=$CHART_VERSION | yq '.image.tag')
+IMAGE=$(helm show values bitnami/metrics-server --version=$CHART_VERSION | yq '.image.repository')
+echo "Image repo: $REGISTRY/$IMAGE:$TAG"
+echo "*********************"
+
+## pull the image
+$DOCKER_SUDO docker pull $REGISTRY/$IMAGE:$TAG
+
+# create ECR repo
+aws ecr create-repository --no-cli-pager --repository-name metrics-server
+
+# push the helm chart to the ECR repo
+helm pull bitnami/metrics-server --version=$CHART_VERSION
+aws ecr get-login-password \
+  --region $AWS_REGION | helm registry login \
+  --username AWS \
+  --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+helm push metrics-server-$CHART_VERSION.tgz oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/
+rm metrics-server-$CHART_VERSION.tgz
+
+## update local image tag appropriately
+$DOCKER_SUDO docker tag $REGISTRY/$IMAGE:$TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/metrics-server:$TAG
+
+## auth local docker to ecr
+aws ecr get-login-password --region $AWS_REGION | $DOCKER_SUDO docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+
+## push local image to ecr
+$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/metrics-server:$TAG
diff --git a/viya4-deployment-darksite/baseline-to-ecr/nfs_subdir_external_provisioner.sh b/viya4-deployment-darksite/baseline-to-ecr/nfs_subdir_external_provisioner.sh
new file mode 100644
index 00000000..c584a645
--- /dev/null
+++ b/viya4-deployment-darksite/baseline-to-ecr/nfs_subdir_external_provisioner.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
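+#
+# Overview: mirrors the nfs-subdir-external-provisioner chart and image into
+# ECR; the chart is then installable darksite-side from a reference like
+#   oci://<acct>.dkr.ecr.<region>.amazonaws.com/nfs-subdir-external-provisioner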
+# SPDX-License-Identifier: Apache-2.0
+
+source 00_vars.sh
+
+echo "**** nfs-subdir-external-provisioner ****"
+CHART_VERSION=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/baseline/defaults/main.yml | yq '.NFS_CLIENT_CHART_VERSION')
+echo "Helm chart version: $CHART_VERSION"
+helm repo add nfs-subdir-external-provisioner https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
+helm repo update
+REPOSITORY=$(helm show values nfs-subdir-external-provisioner/nfs-subdir-external-provisioner --version=$CHART_VERSION | yq '.image.repository')
+TAG=$(helm show values nfs-subdir-external-provisioner/nfs-subdir-external-provisioner --version=$CHART_VERSION | yq '.image.tag')
+echo "Image repo: $REPOSITORY:$TAG"
+echo "*****************************************"
+
+## pull the image
+$DOCKER_SUDO docker pull $REPOSITORY:$TAG
+
+# create ECR repo
+aws ecr create-repository --no-cli-pager --repository-name nfs-subdir-external-provisioner
+
+# push the helm chart to the ECR repo
+helm pull nfs-subdir-external-provisioner/nfs-subdir-external-provisioner --version=$CHART_VERSION
+aws ecr get-login-password \
+  --region $AWS_REGION | helm registry login \
+  --username AWS \
+  --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+helm push nfs-subdir-external-provisioner-$CHART_VERSION.tgz oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/
+rm nfs-subdir-external-provisioner-$CHART_VERSION.tgz
+
+## update local image tag appropriately
+$DOCKER_SUDO docker tag $REPOSITORY:$TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/nfs-subdir-external-provisioner:$TAG
+
+## auth local docker to ecr
+aws ecr get-login-password --region $AWS_REGION | $DOCKER_SUDO docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+
+## push local image to ecr
+$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/nfs-subdir-external-provisioner:$TAG
diff --git a/viya4-deployment-darksite/baseline-to-ecr/openldap.sh b/viya4-deployment-darksite/baseline-to-ecr/openldap.sh
new file mode 100644
index 00000000..d7341149
--- /dev/null
+++ b/viya4-deployment-darksite/baseline-to-ecr/openldap.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+source 00_vars.sh
+
+echo "**** openldap ****"
+IMAGE=$(curl -s https://raw.githubusercontent.com/sassoftware/viya4-deployment/$DEPLOYMENT_VERSION/roles/vdm/templates/resources/openldap.yaml | yq -N '.spec.template.spec.containers[0].image | select(. != null)')
+echo "Image: $IMAGE"
+echo "******************"
+
+## pull the image
+$DOCKER_SUDO docker pull $IMAGE
+
+# create ECR repo
+aws ecr create-repository --no-cli-pager --repository-name osixia/openldap
+
+## update local image tag appropriately
+$DOCKER_SUDO docker tag $IMAGE $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMAGE
+
+## auth local docker to ecr
+aws ecr get-login-password --region $AWS_REGION | $DOCKER_SUDO docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+
+## push local image to ecr
+$DOCKER_SUDO docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$IMAGE
diff --git a/viya4-deployment-darksite/darksite-openldap-mod/README.md b/viya4-deployment-darksite/darksite-openldap-mod/README.md
new file mode 100644
index 00000000..8912dab2
--- /dev/null
+++ b/viya4-deployment-darksite/darksite-openldap-mod/README.md
@@ -0,0 +1,6 @@
+
+## Mod roles/vdm/templates/resources/openldap.yaml
+
+- Only required if using an internal OpenLDAP server. By default the cluster reaches out to Docker Hub to pull this image, which isn't possible in a darksite.
+- Run the darksite-openldap-mod.sh script.
+- Build the modded container using the script, or manually if you'd like.
\ No newline at end of file
diff --git a/viya4-deployment-darksite/darksite-openldap-mod/darksite-openldap-mod.sh b/viya4-deployment-darksite/darksite-openldap-mod/darksite-openldap-mod.sh
new file mode 100644
index 00000000..cd87defe
--- /dev/null
+++ b/viya4-deployment-darksite/darksite-openldap-mod/darksite-openldap-mod.sh
@@ -0,0 +1,189 @@
+#!/bin/bash
+
+# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+# helper script to easily mod viya4-deployment when using openldap in a darksite
+
+## check that viya4-deployment/ exists in this folder
+if [ ! -d "viya4-deployment/" ]
+then
+  echo -e "\nError: Directory viya4-deployment/ does not exist!\n"
+  read -p "Would you like to locally clone the viya4-deployment github repo to fix (y/n)? " -n 1 -r REPLY
+  echo # (optional) move to a new line
+  if [[ ! $REPLY =~ ^[Yy]$ ]]; then
+    exit 1
+  fi
+  ## Get desired DAC version
+  read -p "What release version of DAC do you want to use? " -r IAC_VERSION
+  git clone --branch $IAC_VERSION https://github.com/sassoftware/viya4-deployment.git
+fi
+
+echo
+read -p "What is your aws account id? " -r AWS_ACCT_ID
+read -p "What is your aws region? " -r AWS_REGION
+
+echo -e "\n+++Modding viya4-deployment/roles/vdm/templates/resources/openldap.yaml ..."
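+
+# The heredoc below overwrites the stock template so that both the main
+# container and the init container pull osixia/openldap:1.3.0 from your ECR
+# mirror instead of Docker Hub.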
+ +tee viya4-deployment/roles/vdm/templates/resources/openldap.yaml > /dev/null << EOF +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: openldap +spec: + replicas: 1 + selector: + matchLabels: + app: openldap + template: + metadata: + labels: + app: openldap + spec: + hostname: ldap-svc + imagePullSecrets: [] + containers: + - image: "${AWS_ACCT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/osixia/openldap:1.3.0" + imagePullPolicy: IfNotPresent + name: openldap + ports: + - containerPort: 389 + args: + - --copy-service + env: + - name: LDAP_TLS + valueFrom: + configMapKeyRef: + name: openldap-bootstrap-config + key: LDAP_TLS + - name: LDAP_ADMIN_PASSWORD + valueFrom: + configMapKeyRef: + name: openldap-bootstrap-config + key: LDAP_ADMIN_PASSWORD + - name: LDAP_DOMAIN + valueFrom: + configMapKeyRef: + name: openldap-bootstrap-config + key: LDAP_DOMAIN + - name: LDAP_REMOVE_CONFIG_AFTER_SETUP + valueFrom: + configMapKeyRef: + name: openldap-bootstrap-config + key: LDAP_REMOVE_CONFIG_AFTER_SETUP + - name: DISABLE_CHOWN + valueFrom: + configMapKeyRef: + name: openldap-bootstrap-config + key: DISABLE_CHOWN + volumeMounts: + - name: bootstrap-custom + mountPath: "/container/service/slapd/assets/config/bootstrap/ldif/custom" + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: workload.sas.com/class + operator: In + values: + - stateless + matchFields: [] + weight: 100 + - preference: + matchExpressions: + - key: workload.sas.com/class + operator: NotIn + values: + - compute + - cas + - stateful + - connect + matchFields: [] + weight: 50 + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.azure.com/mode + operator: NotIn + values: + - system + matchFields: [] + tolerations: + - effect: NoSchedule + key: workload.sas.com/class + operator: Equal + value: stateful + - effect: NoSchedule + key: workload.sas.com/class + operator: Equal + value: stateless + volumes: + - name: bootstrap-custom + emptyDir: {} + - name: ldap-bootstrap-config + configMap: + name: openldap-bootstrap-config + items: + - key: LDAP_USERS_CONF + path: 07-testUsers.ldif + mode: 0664 + - key: LDAP_GROUPS_CONF + path: 06-testGroups.ldif + mode: 0664 + initContainers: + - name: ldap-init + image: "${AWS_ACCT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/osixia/openldap:1.3.0" + command: + - bash + - -c + - "cp -avRL /tmp/ldif/custom/* /container/service/slapd/assets/config/bootstrap/ldif/custom/" + volumeMounts: + - name: bootstrap-custom + mountPath: "/container/service/slapd/assets/config/bootstrap/ldif/custom" + - name: ldap-bootstrap-config + mountPath: "/tmp/ldif/custom" +--- +apiVersion: v1 +kind: Service +metadata: + name: ldap-svc +spec: + ports: + - port: 389 + protocol: TCP + targetPort: 389 + name: ldap + selector: + app: openldap +EOF + +echo -e "\n+++Mod complete!" + +# build modded viya4-deployment docker container? +echo +read -p "Would you like to build the modded viya4-deployment docker container (y/n)? " -n 1 -r REPLY +echo # (optional) move to a new line +if [[ $REPLY =~ ^[Yy]$ ]]; then + read -p " What tag would you like to use for the modded container? " -r TAG + docker build -t viya4-deployment:$TAG viya4-deployment/ + echo -e "\n+++Modded docker container is: viya4-deployment:${TAG}" +fi + +# push modded docker container to ECR +echo +read -p "Would you like to push the viya4-deployment:${TAG} docker container to ECR (y/n)? 
" -n 1 -r REPLY +echo # (optional) move to a new line +if [[ ! $REPLY =~ ^[Yy]$ ]]; then + exit 1 +fi + +aws ecr create-repository --no-cli-pager --repository-name viya4-deployment + +docker tag viya4-deployment:$TAG $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/viya4-deployment:$TAG + +aws ecr get-login-password --no-cli-pager --region $AWS_REGION | $DOCKER_SUDO docker login --username AWS --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com + +docker push $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/viya4-deployment:$TAG diff --git a/viya4-deployment-darksite/deployment-machine-assets/01_iac_deploy.sh b/viya4-deployment-darksite/deployment-machine-assets/01_iac_deploy.sh new file mode 100755 index 00000000..900c229e --- /dev/null +++ b/viya4-deployment-darksite/deployment-machine-assets/01_iac_deploy.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# what is the tag +read -p "What is the tag for your viya4-iac-aws container? " -r TAG +# what is the job +read -p "What type of IaC job: plan, apply, or destroy? " -r REPLY + +# preview job +if [ $REPLY == "plan" ]; then + echo -e "\n+++Starting plan job ...\n" + docker run --rm \ + --group-add root \ + --user "$(id -u):$(id -g)" \ + --volume=$(pwd)/infrastructure:/workspace \ + viya4-iac-aws:$TAG \ + plan -var-file=/workspace/terraform.tfvars \ + -state=/workspace/terraform.tfstate +fi + +# apply job +if [ $REPLY == "apply" ]; then + echo -e "\n+++Starting apply job ...\n" + docker run --rm \ + --group-add root \ + --user "$(id -u):$(id -g)" \ + --volume=$(pwd)/infrastructure:/workspace \ + viya4-iac-aws:$TAG \ + apply -auto-approve -var-file=/workspace/terraform.tfvars \ + -state=/workspace/terraform.tfstate + + # Update the kubeconfig using aws cli and place here on deploy machine: ~/.kube/config + aws eks update-kubeconfig --name darksite-lab-eks + rm /home/$USER/viya/infrastructure/darksite-lab-eks-kubeconfig.conf +fi + +# destroy job +if [ $REPLY == "destroy" ]; then + read -p "Are you sure you want to continue (y/n)? " -n 1 -r REPLY + echo # (optional) move to a new line + if [[ ! $REPLY =~ ^[Yy]$ ]]; then + exit 1 + fi + echo -e "\n+++Starting destroy job ...\n" + docker run --rm \ + --group-add root \ + --user "$(id -u):$(id -g)" \ + --volume=$(pwd)/infrastructure:/workspace \ + viya4-iac-aws:$TAG \ + destroy -auto-approve -var-file=/workspace/terraform.tfvars \ + -state=/workspace/terraform.tfstate +fi diff --git a/viya4-deployment-darksite/deployment-machine-assets/02_dac_deploy.sh b/viya4-deployment-darksite/deployment-machine-assets/02_dac_deploy.sh new file mode 100755 index 00000000..eab6ae34 --- /dev/null +++ b/viya4-deployment-darksite/deployment-machine-assets/02_dac_deploy.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# get viya4-deployment container tag +echo -e "\n" +read -p "What is your viya4-deployment container tag? " -r DOCKER_TAG + +TASKS=("baseline" "viya" "cluster-logging" "cluster-monitoring" "viya-monitoring" "install" "uninstall") + +##### FUNCTIONS ##### +function docker_run() { + echo "starting $tags job..." 
+  docker run --rm \
+    --group-add root \
+    --user "$(id -u)":"$(id -g)" \
+    --volume "$(pwd)"/infrastructure/ssh/id_rsa:/config/jump_svr_private_key \
+    --volume "$(pwd)"/infrastructure/terraform.tfstate:/config/tfstate \
+    --volume /home/ec2-user/.kube/config:/.kube/config \
+    --volume "$(pwd)"/software/deployments:/data \
+    --volume "$(pwd)"/software/viya_order_assets:/viya_order_assets \
+    --volume "$(pwd)"/software/ansible-vars-iac.yaml:/config/config \
+    --volume "$(pwd)"/software/ingress:/ingress \
+    --volume "$(pwd)"/software/sitedefault.yaml:/sitedefault/sitedefault.yaml \
+    viya4-deployment:$DOCKER_TAG --tags "$tags"
+}
+
+function join_by {
+  local d=${1-} f=${2-}
+  if shift 2; then
+    printf %s "$f" "${@/#/$d}"
+  fi
+}
+
+##### MAIN SCRIPT #####
+if [ $# -eq 0 ]
+then
+  # what are the deploy tags
+  echo
+  echo "You didn't provide deployment tags!"
+  echo
+  echo "Tasks: baseline viya cluster-logging cluster-monitoring viya-monitoring"
+  echo "Actions: install uninstall"
+  echo
+  echo ' -All tasks and actions must be separated by "," '
+  echo " -At least one task must be supplied. Multiple tasks are allowed. "
+  echo " -An action is required and must be the last and ONLY action provided."
+  echo
+  echo "Examples: baseline,viya,install"
+  echo "          viya,uninstall "
+  echo
+  echo -n "What are your deployment tags? "
+  read -r REPLY
+else
+  REPLY=$*
+fi
+
+# split REPLY into an array
+IFS=',' read -r -a array <<< "$REPLY"
+# remove spaces in array elements
+clean=()
+for i in "${array[@]}"; do
+  i=${i// /}
+  clean+=("$i")
+done
+
+# check that the provided tasks are valid
+for i in "${clean[@]}"; do
+  inarray=$(echo "${TASKS[@]}" | grep -ow "$i" | wc -w)
+  if [ $inarray == 0 ]; then
+    echo "$i is not a valid input."
+    exit 1
+  fi
+done
+
+# check that more than one tag is provided
+len=${#clean[@]}
+if [ $len -lt 2 ]; then
+  echo "Not enough tags provided!"
+  exit 1
+fi
+
+# check that the install/uninstall action is provided correctly
+count=0
+for i in "${clean[@]}"; do
+  if [ $i == "install" ] || [ $i == "uninstall" ]; then
+    (( count++ ))
+  fi
+done
+if [ $count == 0 ]; then
+  echo "You didn't provide an install or uninstall action!"
+  exit 1
+elif [ $count -gt 1 ]; then
+  echo "You can only have one action: install or uninstall!"
+  exit 1
+fi
+# check that install/uninstall is the last value
+last="${clean[-1]}"
+if [ "$last" != "install" ] && [ "$last" != "uninstall" ]; then
+  echo "install or uninstall must be the last tag value!"
+  exit 1
+fi
+
+# if uninstall job, double check before continuing!
+if [ "$last" == "uninstall" ]; then
+  read -p "Are you really sure you want to continue; this action is destructive!! (y/n)? " -n 1 -r REPLY
+  echo # (optional) move to a new line
+  if [[ ! $REPLY =~ ^[Yy]$ ]]; then
+    exit 1
+  fi
+fi
+
+# all checks passed, so build the tags string
+tags=$(join_by , "${clean[@]}")
+
+# run the function
+docker_run
+
+# remove downloaded assets
+for f in software/deployments/darksite-lab-eks/viya/SASViyaV4*.tgz; do
+  ## Check if the glob gets expanded to existing files.
+  ## If not, f here will be exactly the pattern above
+  ## and the exists test will evaluate to false.
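+  ## (shopt -s nullglob would be a stricter alternative; the [ -f ] test
+  ## avoids changing shell options.)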
+ [ -f "$f" ] && rm software/deployments/darksite-lab-eks/viya/SASViyaV4*.tgz || echo "SASViyaV4.tgz files do not exist" + + ## If one more files exist, they are all removed at once, so we can break after the first iteration + break +done + + diff --git a/viya4-deployment-darksite/deployment-machine-assets/infrastructure/terraform.tfvars b/viya4-deployment-darksite/deployment-machine-assets/infrastructure/terraform.tfvars new file mode 100755 index 00000000..856f683a --- /dev/null +++ b/viya4-deployment-darksite/deployment-machine-assets/infrastructure/terraform.tfvars @@ -0,0 +1,111 @@ +# !NOTE! - These are only a subset of the variables in CONFIG-VARS.md provided +# as examples. Customize this file to add any variables from CONFIG-VARS.md whose +# default values you want to change. + +# **************** REQUIRED VARIABLES **************** +# These required variables' values MUST be provided by the User +prefix = "darksite-lab" +location = "" # e.g., "us-east-1" +# **************** REQUIRED VARIABLES **************** + +# Bring your own existing resources - get values from AWS console or VPC/Subnet provisioning script outputs +vpc_id = "PrivateVPCId" +subnet_ids = { # only needed if using pre-existing subnets + "public" : ["PrivateSubnetAId", "PrivateSubnetBId"], + "private" : ["PrivateSubnetAId", "PrivateSubnetBId"], + "control_plane" : ["ControlPlaneSubnetAId", "ControlPlaneSubnetBId"], + "database" : ["PrivateSubnetAId", "PrivateSubnetBId"] # only when 'create_postgres=true' +} + +security_group_id = "PrivateVpcSGId" +cluster_security_group_id = "PrivateClusterControlSGId" +workers_security_group_id = "PrivateClusterWorkersSGId" + +# !NOTE! - Without specifying your CIDR block access rules, ingress traffic +# to your cluster will be blocked by default. + +# ************** RECOMMENDED VARIABLES *************** +default_public_access_cidrs = [] # e.g., ["123.45.6.89/32"] # not required in a darksite +ssh_public_key = "/workspace/ssh/id_rsa.pub" # container path to ssh public key used for jumpserver +# ************** RECOMMENDED VARIABLES *************** + +# Tags for all tagable items in your cluster. +tags = { } # e.g., { "key1" = "value1", "key2" = "value2" } + +# Postgres config - By having this entry a database server is created. If you do not +# need an external database server remove the 'postgres_servers' +# block below. 
+# postgres_servers = { +# default = {}, +# } + +## Cluster config +cluster_api_mode = "private" +kubernetes_version = "1.26" +default_nodepool_node_count = 1 +default_nodepool_vm_type = "m5.2xlarge" + +## General +storage_type = "standard" +nfs_raid_disk_type = "gp3" +nfs_raid_disk_iops = "3000" + +## Cluster Node Pools config +node_pools = { + cas = { + "vm_type" = "m5.2xlarge" + "cpu_type" = "AL2_x86_64" + "os_disk_type" = "gp3" + "os_disk_size" = 200 + "os_disk_iops" = 3000 + "min_nodes" = 1 + "max_nodes" = 5 + "node_taints" = ["workload.sas.com/class=cas:NoSchedule"] + "node_labels" = { + "workload.sas.com/class" = "cas" + } + "custom_data" = "" + "metadata_http_endpoint" = "enabled" + "metadata_http_tokens" = "required" + "metadata_http_put_response_hop_limit" = 1 + }, + compute = { + "vm_type" = "m5.8xlarge" + "cpu_type" = "AL2_x86_64" + "os_disk_type" = "gp3" + "os_disk_size" = 200 + "os_disk_iops" = 3000 + "min_nodes" = 1 + "max_nodes" = 5 + "node_taints" = ["workload.sas.com/class=compute:NoSchedule"] + "node_labels" = { + "workload.sas.com/class" = "compute" + "launcher.sas.com/prepullImage" = "sas-programming-environment" + } + "custom_data" = "" + "metadata_http_endpoint" = "enabled" + "metadata_http_tokens" = "required" + "metadata_http_put_response_hop_limit" = 1 + }, + services = { + "vm_type" = "m5.4xlarge" + "cpu_type" = "AL2_x86_64" + "os_disk_type" = "gp3" + "os_disk_size" = 200 + "os_disk_iops" = 3000 + "min_nodes" = 0 + "max_nodes" = 5 + "node_taints" = ["workload.sas.com/class=stateful:NoSchedule"] + "node_labels" = { + "workload.sas.com/class" = "stateful" + } + "custom_data" = "" + "metadata_http_endpoint" = "enabled" + "metadata_http_tokens" = "required" + "metadata_http_put_response_hop_limit" = 1 + } +} + +# Jump Server +create_jump_vm = true +create_jump_public_ip = false diff --git a/viya4-deployment-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml b/viya4-deployment-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml new file mode 100755 index 00000000..ee7cc7d0 --- /dev/null +++ b/viya4-deployment-darksite/deployment-machine-assets/software/ansible-vars-iac.yaml @@ -0,0 +1,200 @@ +## Cluster +NAMESPACE: viya + +## MISC +DEPLOY: true # Set to false to stop at generating the manifest +LOADBALANCER_SOURCE_RANGES: ['192.168.8.0/24'] +KUBECONFIG: /.kube/config +V4_DEPLOYMENT_OPERATOR_ENABLED: false # sas-orchestration does not phone home for entitlements (set to false for darksite) + +## Storage +V4_CFG_MANAGE_STORAGE: true +#V4_CFG_RWX_FILESTORE_PATH: "/" # NOTE: EFS is "/" but NFS is "/export" (for NFS) + +## SAS Software Order +V4_CFG_ORDER_NUMBER: # order number +V4_CFG_CADENCE_NAME: # stable or lts +V4_CFG_CADENCE_VERSION: # cadence version +## Providing the following three variables will bypass DAC using SAS Viya API (DAC 6.2.0+): +V4_CFG_DEPLOYMENT_ASSETS: /viya_order_assets/ # container path to deployment assets +V4_CFG_LICENSE: /viya_order_assets/ # container path to license file (.jwt) +V4_CFG_CERTS: /viya_order_assets/ # container path to viya certs + +## Path to sitedefault.yaml +V4_CFG_SITEDEFAULT: /sitedefault/sitedefault.yaml # container path to sitedefault.yaml + +## CR Access +V4_CFG_CR_URL: "{{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/viya" # update this for your account and region + +## Ingress +V4_CFG_INGRESS_TYPE: ingress +V4_CFG_INGRESS_MODE: "private" +# if no FQDN dns registration, use the DNS of the private NLB, here is a way to get that automatically: +# V4_CFG_INGRESS_FQDN: $(kubectl get 
service ingress-nginx-controller -n ingress-nginx -o jsonpath={'.status.loadBalancer.ingress[0].ip'}) +V4_CFG_INGRESS_FQDN: +V4_CFG_TLS_MODE: "full-stack" # [full-stack|front-door|ingress-only|disabled] + +## Postgres +V4_CFG_POSTGRES_SERVERS: + default: + internal: true + postgres_pvc_storage_size: 10Gi + postgres_pvc_access_mode: ReadWriteOnce + postgres_storage_class: sas + backrest_storage_class: sas + +## LDAP +V4_CFG_EMBEDDED_LDAP_ENABLE: true # Note: will require the DaC tool (openldap deployment) to be modded to point to ECR for openldap container image + +## Baseline configs are specifically for repos that use OCI for helm charts (like ECR) + +## Cert-manager config +CERT_MANAGER_CHART_URL: "" # yes we want this blank because of how the ansible helm module expects OCI to be passed +CERT_MANAGER_CHART_NAME: oci://{{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/cert-manager +CERT_MANAGER_CONFIG: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/quay.io/jetstack/cert-manager-controller + webhook: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/quay.io/jetstack/cert-manager-webhook + cainjector: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/quay.io/jetstack/cert-manager-cainjector + startupapicheck: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/quay.io/jetstack/cert-manager-ctl + installCRDs: "true" + extraArgs: + - --enable-certificate-owner-ref=true + +## Metrics-server config +METRICS_SERVER_CHART_URL: "" # yes we want this blank because of how the ansible helm module expects OCI to be passed +METRICS_SERVER_CHART_NAME: oci://{{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/metrics-server +METRICS_SERVER_CONFIG: + image: + registry: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com + repository: metrics-server + apiService: + create: true + +## NGINX config +INGRESS_NGINX_CHART_URL: "" # yes we want this blank because of how the ansible helm module expects OCI to be passed +INGRESS_NGINX_CHART_NAME: oci://{{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/ingress-nginx +INGRESS_NGINX_CONFIG: + controller: + image: + registry: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com + image: ingress-nginx/controller + digest: {{ CONTROLLER_ECR_IMAGE_DIGEST }} + admissionWebhooks: + patch: + image: + registry: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com + image: ingress-nginx/kube-webhook-certgen + digest: {{ WEBHOOK_ECR_IMAGE_DIGEST }} + service: + externalTrafficPolicy: Local + sessionAffinity: None + loadBalancerSourceRanges: "{{ LOADBALANCER_SOURCE_RANGES |default(['0.0.0.0/0'], -1) }}" + config: + use-forwarded-headers: "true" + hsts-max-age: "63072000" + tcp: {} + udp: {} + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "sleep 5; /usr/local/nginx/sbin/nginx -c /etc/nginx/nginx.conf -s quit; while pgrep -x nginx; do sleep 1; done"] + terminationGracePeriodSeconds: 600 + +# nfs client config +NFS_CLIENT_CHART_URL: "" # yes we want this blank because of how the ansible helm module expects OCI to be passed +NFS_CLIENT_CHART_NAME: oci://{{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/nfs-subdir-external-provisioner +NFS_CLIENT_CONFIG: + image: + repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/nfs-subdir-external-provisioner + nfs: + server: "{{ V4_CFG_RWX_FILESTORE_ENDPOINT }}" + path: "{{ V4_CFG_RWX_FILESTORE_PATH | replace('/$', '') }}/pvs" + mountOptions: + - noatime + 
- nodiratime
+    - 'rsize=262144'
+    - 'wsize=262144'
+  storageClass:
+    archiveOnDelete: "false"
+    name: sas
+
+# pg-storage class config
+PG_NFS_CLIENT_CHART_URL: "" # yes we want this blank because of how the ansible helm module expects OCI to be passed
+PG_NFS_CLIENT_CHART_NAME: oci://{{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/nfs-subdir-external-provisioner
+PG_NFS_CLIENT_CONFIG:
+  image:
+    repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/nfs-subdir-external-provisioner
+  nfs:
+    server: "{{ V4_CFG_RWX_FILESTORE_ENDPOINT }}"
+    path: "{{ V4_CFG_RWX_FILESTORE_PATH | replace('/$', '') }}/pvs"
+    mountOptions:
+      - noatime
+      - nodiratime
+      - 'rsize=262144'
+      - 'wsize=262144'
+  storageClass:
+    archiveOnDelete: "false"
+    reclaimPolicy: "Retain"
+    name: pg-storage
+
+# auto-scaler
+CLUSTER_AUTOSCALER_CHART_URL: "" # yes we want this blank because of how the ansible helm module expects OCI to be passed
+CLUSTER_AUTOSCALER_CHART_NAME: oci://{{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/cluster-autoscaler
+CLUSTER_AUTOSCALER_LOCATION: {{ AWS_REGION }}
+CLUSTER_AUTOSCALER_CONFIG:
+  image:
+    repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/cluster-autoscaler
+  awsRegion: "{{ CLUSTER_AUTOSCALER_LOCATION }}"
+  autoDiscovery:
+    clusterName: "{{ CLUSTER_NAME }}"
+  rbac:
+    serviceAccount:
+      name: cluster-autoscaler
+      annotations:
+        "eks.amazonaws.com/role-arn": "{{ CLUSTER_AUTOSCALER_ACCOUNT }}"
+        "eks.amazonaws.com/sts-regional-endpoints": "true"
+  extraEnv:
+    AWS_STS_REGIONAL_ENDPOINTS: regional
+  extraArgs:
+    aws-use-static-instance-list: true # keeps the autoscaler from fetching the EC2 instance list from the internet on init; without this, the autoscaler fails in a darksite
+
+# EBS CSI DRIVER
+EBS_CSI_DRIVER_CHART_URL: "" # yes we want this blank because of how the ansible helm module expects OCI to be passed
+EBS_CSI_DRIVER_CHART_NAME: oci://{{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/aws-ebs-csi-driver
+EBS_CSI_DRIVER_LOCATION: {{ AWS_REGION }}
+EBS_CSI_DRIVER_CONFIG:
+  image:
+    repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/public.ecr.aws/ebs-csi-driver/aws-ebs-csi-driver
+  sidecars:
+    provisioner:
+      image:
+        repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/k8s.gcr.io/sig-storage/csi-provisioner
+    attacher:
+      image:
+        repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/k8s.gcr.io/sig-storage/csi-attacher
+    snapshotter:
+      image:
+        repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/k8s.gcr.io/sig-storage/csi-snapshotter
+    livenessProbe:
+      image:
+        repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/k8s.gcr.io/sig-storage/livenessprobe
+    resizer:
+      image:
+        repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/k8s.gcr.io/sig-storage/csi-resizer
+    nodeDriverRegistrar:
+      image:
+        repository: {{ AWS_ACCT_ID }}.dkr.ecr.{{ AWS_REGION }}.amazonaws.com/k8s.gcr.io/sig-storage/csi-node-driver-registrar
+  controller:
+    region: "{{ EBS_CSI_DRIVER_LOCATION }}"
+    serviceAccount:
+      create: true
+      name: ebs-csi-controller-sa
+      annotations:
+        "eks.amazonaws.com/role-arn": "{{ EBS_CSI_DRIVER_ACCOUNT }}"
diff --git a/viya4-deployment-darksite/deployment-machine-assets/software/sitedefault.yaml b/viya4-deployment-darksite/deployment-machine-assets/software/sitedefault.yaml
new file mode 100755
index 00000000..9c717d0a
--- /dev/null
+++ b/viya4-deployment-darksite/deployment-machine-assets/software/sitedefault.yaml
@@ -0,0 +1,26 @@
+cacerts:
+config:
+  application:
+    sas.identities.providers.ldap.connection:
+      host: ldap-svc
+      password: Password123
+      port: 389
+      url: ldap://${sas.identities.providers.ldap.connection.host}:${sas.identities.providers.ldap.connection.port}
+      userDN: cn=admin,dc=example,dc=com
+    sas.identities.providers.ldap.group:
+      baseDN: ou=groups,dc=example,dc=com
+      accountId: cn
+      member: uniqueMember
+      memberOf: memberOf
+      objectClass: groupOfUniqueNames
+      objectFilter: (objectClass=groupOfUniqueNames)
+      searchFilter: cn={0}
+    sas.identities.providers.ldap.user:
+      baseDN: ou=people,dc=example,dc=com
+      accountId: uid
+      memberOf: memberOf
+      objectClass: inetOrgPerson
+      objectFilter: (objectClass=inetOrgPerson)
+      searchFilter: uid={0}
+  sas.logon.initial.password: Password123
+config/identities/sas.identities/administrator: viya_admin
diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/00_vars.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/00_vars.sh
new file mode 100644
index 00000000..7e72bca2
--- /dev/null
+++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/00_vars.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+AWS_ACCT_ID=
+AWS_REGION=
+
+K8S_minor_version=24 # K8s minor version only, e.g., 24 for v1.24.X or 22 for v1.22.X. This must match your deployment!
diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/README.md b/viya4-deployment-darksite/install-baseline-helm-from-ecr/README.md
new file mode 100644
index 00000000..3c75be84
--- /dev/null
+++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/README.md
@@ -0,0 +1,37 @@
+# Gather some facts before running these scripts:
+
+## Global Variables (00_vars.sh)
+1. AWS Account ID
+2. AWS Region
+3. K8s minor version
+
+## metrics-server
+1. helm chart version
+
+## auto-scaler
+1. helm chart version
+2. Cluster name
+3. Autoscaler ARN
+
+## ingress-nginx
+1. helm chart version
+2. controller image digest (sha256) - get this from your ECR
+3. webhook image digest (sha256) - get this from your ECR
+4. load balancer source ranges (must be a list, example: ["0.0.0.0/0"])
+
+## nfs-subdir-external-provisioner
+1. nfs-subdir-external-provisioner helm chart version
+2. RWX filestore endpoint (IP or DNS of the endpoint)
+3. RWX filestore path (don't include ../pvs as it is already appended in the script)
+
+## pg-nfs-provisioner
+1. helm chart version
+2. RWX filestore endpoint (IP or DNS of the endpoint)
+3. RWX filestore path (don't include ../pvs as it is already appended in the script)
+
+## cert-manager
+1. helm chart version (don't include the preceding v, it is already appended in the script)
+
+## ebs-csi-driver
+1. helm chart version
+2. eks.amazonaws.com/role-arn for EBS_CSI_DRIVER
\ No newline at end of file
diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/auto_scaler_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/auto_scaler_install.sh
new file mode 100644
index 00000000..4227ce3b
--- /dev/null
+++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/auto_scaler_install.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
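+#
+# Overview: logs helm in to the ECR registry, prompts for the cluster name,
+# autoscaler role ARN, and chart version, renders a tmp.yaml values file, and
+# installs cluster-autoscaler from the oci:// chart pushed by the
+# baseline-to-ecr scripts.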
diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/auto_scaler_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/auto_scaler_install.sh
new file mode 100644
index 00000000..4227ce3b
--- /dev/null
+++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/auto_scaler_install.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+source 00_vars.sh
+
+# helm registry login
+aws ecr get-login-password \
+  --region $AWS_REGION | helm registry login \
+  --username AWS \
+  --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+
+read -p "cluster name: " cluster_name
+read -p "autoscaler ARN: " autoscaler_arn
+read -p "cluster-autoscaler helm chart version: " CHART_VERSION
+
+# output tmp.yaml
+read -r -d '' TMP_YAML < tmp.yaml
+
+# helm install
+echo -e "Installing auto-scaler...\n\n"
+helm upgrade --cleanup-on-fail \
+  --install cluster-autoscaler oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/cluster-autoscaler \
+  --version=$CHART_VERSION \
+  --values tmp.yaml
+
+# cleanup
+unset TMP_YAML
+rm tmp.yaml
diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/cert_manager_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/cert_manager_install.sh
new file mode 100644
index 00000000..79f87639
--- /dev/null
+++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/cert_manager_install.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+source 00_vars.sh
+
+# helm registry login
+aws ecr get-login-password \
+  --region $AWS_REGION | helm registry login \
+  --username AWS \
+  --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+
+read -p 'cert-manager helm chart version (do not include the preceding "v"): ' CHART_VERSION
+
+# output tmp.yaml
+read -r -d '' TMP_YAML < tmp.yaml
+
+echo -e "Installing cert-manager...\n\n"
+
+helm upgrade --cleanup-on-fail \
+  --install cert-manager oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/cert-manager \
+  --version=v$CHART_VERSION \
+  --values tmp.yaml \
+  --namespace cert-manager \
+  --create-namespace
+
+# cleanup
+unset TMP_YAML
+rm tmp.yaml
diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/ebs-csi-driver.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/ebs-csi-driver.sh
new file mode 100644
index 00000000..e863682e
--- /dev/null
+++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/ebs-csi-driver.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+## installs ebs-csi-driver via helm
+
+source 00_vars.sh
+
+# helm registry login
+aws ecr get-login-password \
+  --region $AWS_REGION | helm registry login \
+  --username AWS \
+  --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+
+read -p "What is the aws-ebs-csi-driver helm chart version? " CHART_VERSION
+read -p "What is the eks.amazonaws.com/role-arn for EBS_CSI_DRIVER? " ARN
+
+read -r -d '' TMP_YAML < tmp.yaml
+
+# helm install
+echo -e "\nInstalling aws-ebs-csi-driver...\n"
+helm upgrade --cleanup-on-fail \
+  --install aws-ebs-csi-driver oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/aws-ebs-csi-driver --version=$CHART_VERSION \
+  --values tmp.yaml \
+  --namespace kube-system
+
+# cleanup
+unset TMP_YAML
+rm tmp.yaml
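Each installer above writes its Helm values to a throwaway tmp.yaml before the upgrade; the heredoc bodies are collapsed in the hunks shown. The pattern presumably looks like the sketch below, using the auto-scaler as the example. The values are an assumption reconstructed from CLUSTER_AUTOSCALER_CONFIG in ansible-vars-iac.yaml above, not copied from the script.

# presumed shape of the "output tmp.yaml" step (auto-scaler values assumed)
# note: read returns nonzero at end of heredoc; harmless without set -e
read -r -d '' TMP_YAML << EOF
image:
  repository: $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/cluster-autoscaler
awsRegion: "$AWS_REGION"
autoDiscovery:
  clusterName: "$cluster_name"
rbac:
  serviceAccount:
    name: cluster-autoscaler
    annotations:
      "eks.amazonaws.com/role-arn": "$autoscaler_arn"
      "eks.amazonaws.com/sts-regional-endpoints": "true"
extraArgs:
  aws-use-static-instance-list: true
EOF
echo "$TMP_YAML" > tmp.yaml   # consumed by helm upgrade --values tmp.yaml, removed in cleanup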
diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/ingress_nginx_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/ingress_nginx_install.sh
new file mode 100644
index 00000000..2f9efe51
--- /dev/null
+++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/ingress_nginx_install.sh
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+## installs the following by default:
+# - INGRESS_NGINX_CVE_2021_25742_PATCH
+# - ingress-nginx private ingress
+
+source 00_vars.sh
+
+# helm registry login
+aws ecr get-login-password \
+  --region $AWS_REGION | helm registry login \
+  --username AWS \
+  --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+
+read -p "ingress-nginx helm chart version: " CHART_VERSION
+read -p "controller image digest (sha256): " CONTROLLER_DIGEST
+read -p "webhook image digest (sha256): " WEBHOOK_DIGEST
+read -p 'load balancer source ranges (must be a list, example: ["0.0.0.0/0"]): ' LB
+
+# handle version differences with the webhook image path
+CHART_VERSION_INT=${CHART_VERSION//./} # strip the dots: 4.1.1 -> 411
+if [ $CHART_VERSION_INT -lt 411 ]; then
+  WEBHOOK_PATH=jettech
+elif [ $CHART_VERSION_INT -ge 411 ]; then
+  WEBHOOK_PATH=ingress-nginx
+else
+  echo "Error parsing your helm chart version! Exiting..."
+  exit 1
+fi
+
+read -r -d '' TMP_YAML < tmp.yaml
+
+# helm install
+echo -e "\nInstalling ingress-nginx...\n"
+helm upgrade --cleanup-on-fail \
+  --install ingress-nginx oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/ingress-nginx --version=$CHART_VERSION \
+  --values tmp.yaml \
+  --namespace ingress-nginx \
+  --create-namespace
+
+# cleanup
+unset TMP_YAML
+rm tmp.yaml
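One caveat in ingress_nginx_install.sh above: stripping the dots turns chart 3.40.0 into 3400, which passes the -ge 411 test even though 3.40.0 predates chart 4.1.1. If older 3.x charts are in play, a version-sort comparison is safer; a minimal sketch using the same variables:

# compare CHART_VERSION against 4.1.1 with version sort instead of digit-collapsing;
# sort -V orders the two versions and head -1 yields the smaller one
if [ "$(printf '%s\n' "$CHART_VERSION" 4.1.1 | sort -V | head -1)" = "4.1.1" ]; then
  WEBHOOK_PATH=ingress-nginx   # CHART_VERSION >= 4.1.1
else
  WEBHOOK_PATH=jettech         # CHART_VERSION < 4.1.1
fi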
diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/metrics_server_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/metrics_server_install.sh
new file mode 100644
index 00000000..5459ac63
--- /dev/null
+++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/metrics_server_install.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+source 00_vars.sh
+
+# helm registry login
+aws ecr get-login-password \
+  --region $AWS_REGION | helm registry login \
+  --username AWS \
+  --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+
+read -p "metrics-server helm chart version: " CHART_VERSION
+
+echo -e "Installing metrics-server...\n\n"
+
+helm upgrade --cleanup-on-fail \
+  --install metrics-server oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/metrics-server --version=$CHART_VERSION \
+  --set image.registry=$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com \
+  --set image.repository=metrics-server \
+  --set apiService.create=true
diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/nfs_provisioner_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/nfs_provisioner_install.sh
new file mode 100644
index 00000000..10a0fcd8
--- /dev/null
+++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/nfs_provisioner_install.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+source 00_vars.sh
+
+# helm registry login
+aws ecr get-login-password \
+  --region $AWS_REGION | helm registry login \
+  --username AWS \
+  --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+
+read -p "nfs-subdir-external-provisioner helm chart version: " CHART_VERSION
+read -p "RWX filestore endpoint: " ENDPOINT
+read -p "RWX filestore path (don't include ../pvs): " ENDPOINT_PATH
+
+# output tmp.yaml
+read -r -d '' TMP_YAML < tmp.yaml
+
+echo -e "Installing nfs-subdir-external-provisioner...\n\n"
+
+helm upgrade --cleanup-on-fail \
+  --install nfs-subdir-external-provisioner oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/nfs-subdir-external-provisioner \
+  --version=$CHART_VERSION \
+  --values tmp.yaml \
+  --namespace nfs-client \
+  --create-namespace
+
+# cleanup
+unset TMP_YAML
+rm tmp.yaml
diff --git a/viya4-deployment-darksite/install-baseline-helm-from-ecr/pg_nfs_provisioner_install.sh b/viya4-deployment-darksite/install-baseline-helm-from-ecr/pg_nfs_provisioner_install.sh
new file mode 100644
index 00000000..51a8115c
--- /dev/null
+++ b/viya4-deployment-darksite/install-baseline-helm-from-ecr/pg_nfs_provisioner_install.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+source 00_vars.sh
+
+# helm registry login
+aws ecr get-login-password \
+  --region $AWS_REGION | helm registry login \
+  --username AWS \
+  --password-stdin $AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+
+read -p "nfs-subdir-external-provisioner helm chart version: " CHART_VERSION
+read -p "RWX filestore endpoint: " ENDPOINT
+read -p "RWX filestore path (don't include ../pvs): " ENDPOINT_PATH
+
+# output tmp.yaml
+read -r -d '' TMP_YAML < tmp.yaml
+
+echo -e "Installing nfs-subdir-external-provisioner (pg-storage)...\n\n"
+
+helm upgrade --cleanup-on-fail \
+  --install nfs-subdir-external-provisioner-pg-storage oci://$AWS_ACCT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/nfs-subdir-external-provisioner \
+  --version=$CHART_VERSION \
+  --values tmp.yaml \
+  --namespace nfs-client \
+  --create-namespace
+
+# cleanup
+unset TMP_YAML
+rm tmp.yaml
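After both provisioner installs, the cluster should expose the two NFS-backed storage classes. A quick hedged check: the class names sas and pg-storage are taken from the storageClass values in ansible-vars-iac.yaml earlier, on the assumption the elided tmp.yaml values mirror them.

# verify the storage classes the two releases should have created
kubectl get storageclass sas pg-storage

# confirm both provisioner pods are running
kubectl get pods --namespace nfs-client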
diff --git a/viya4-deployment-darksite/mirrormgr-to-ecr/00_vars.sh b/viya4-deployment-darksite/mirrormgr-to-ecr/00_vars.sh
new file mode 100644
index 00000000..0eab8247
--- /dev/null
+++ b/viya4-deployment-darksite/mirrormgr-to-ecr/00_vars.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+NAMESPACE= # namespace used for your viya install
+AWS_ACCT_ID= # your aws account ID
+REGION= # your aws region
+CERTS=~/viya/software/viya_order_assets/SASViyaV4_XXXX_certs.zip # path to the _certs.zip file
+ASSETS=~/viya/software/viya_order_assets/SASViyaV4_XXX_XXXX-XXXX_deploymentAssets.tgz # path to the tgz assets file
diff --git a/viya4-deployment-darksite/mirrormgr-to-ecr/01_mirrormgr-ecr.sh b/viya4-deployment-darksite/mirrormgr-to-ecr/01_mirrormgr-ecr.sh
new file mode 100644
index 00000000..16db9717
--- /dev/null
+++ b/viya4-deployment-darksite/mirrormgr-to-ecr/01_mirrormgr-ecr.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+## mirrormgr must be installed and in $PATH prior to running this script
+## aws cli should be configured prior to running this script
+## place your downloaded assets in the assets/ folder
+
+### source variables from 00_vars.sh
+source 00_vars.sh
+
+
+# create repositories?
+echo
+read -p "Do you need to create the ECR repositories? (y/n)? " -n 1 -r REPLY
+echo # (optional) move to a new line
+if [[ $REPLY =~ ^[Yy]$ ]]; then
+  # check whether the ECR repositories exist and create them
+  for repo in $(mirrormgr list target docker repos --deployment-data $CERTS --destination $NAMESPACE) ; do
+    aws ecr create-repository --repository-name $repo --region $REGION
+  done
+fi
+
+
+# proceed with mirroring images?
+echo
+read -p "Proceed with mirroring images? This will take some time... (y/n)? " -n 1 -r REPLY
+echo # (optional) move to a new line
+if [[ $REPLY =~ ^[Yy]$ ]]; then
+  # populate the repositories... this will take some time!
+  mirrormgr mirror registry -p ./sas_repos \
+    --deployment-data $CERTS \
+    --deployment-assets $ASSETS \
+    --destination https://$AWS_ACCT_ID.dkr.ecr.$REGION.amazonaws.com/$NAMESPACE \
+    --username 'AWS' \
+    --password $(aws ecr get-login-password --region $REGION)
+fi
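Once 01_mirrormgr-ecr.sh finishes, a hedged way to confirm the push completed is to compare mirrormgr's target repo list against what ECR actually reports. This sketch reuses the 00_vars.sh variables and assumes the Viya repos are the only ones carrying the namespace in their names.

#!/bin/bash
source 00_vars.sh

# repos mirrormgr expects to exist
EXPECTED=$(mirrormgr list target docker repos --deployment-data $CERTS --destination $NAMESPACE | wc -l)

# repos ECR actually has whose names contain the namespace
ACTUAL=$(aws ecr describe-repositories --region $REGION \
  --query "repositories[?contains(repositoryName, '$NAMESPACE')].repositoryName" \
  --output text | wc -w)

echo "mirrormgr expects $EXPECTED repos; ECR reports $ACTUAL matching '$NAMESPACE'"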
diff --git a/viya4-deployment-darksite/mirrormgr-to-ecr/02_cleanup-ecr.sh b/viya4-deployment-darksite/mirrormgr-to-ecr/02_cleanup-ecr.sh
new file mode 100644
index 00000000..d8f659ea
--- /dev/null
+++ b/viya4-deployment-darksite/mirrormgr-to-ecr/02_cleanup-ecr.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+# this script helps you quickly clean up Viya-related ECR repos
+
+### source variables from 00_vars.sh
+source 00_vars.sh
+
+# get all the repos within the aws account
+REPOS=$(aws ecr describe-repositories --region $REGION)
+
+# delete the SAS Viya repos
+read -p "Are you sure you'd like to delete all SAS Viya repos and images (y/n)? " -n 1 -r REPLY
+echo # (optional) move to a new line
+if [[ $REPLY =~ ^[Yy]$ ]]; then
+  echo $REPOS | jq -r --arg keyword $NAMESPACE '.repositories[].repositoryName | select(. | contains($keyword))' | while read -r repo; do aws ecr delete-repository --repository-name $repo --force --no-cli-pager; done
+fi
+
+# delete the 3rd party repos
+read -p "Are you sure you'd like to delete all 3rd party SAS Viya related repos and images (y/n)? " -n 1 -r REPLY
+echo # (optional) move to a new line
+if [[ $REPLY =~ ^[Yy]$ ]]; then
+  echo $REPOS | jq -r --arg keyword cert-manager '.repositories[].repositoryName | select(. | contains($keyword))' | while read -r repo; do aws ecr delete-repository --repository-name $repo --force --no-cli-pager; done
+  echo $REPOS | jq -r --arg keyword cluster-autoscaler '.repositories[].repositoryName | select(. | contains($keyword))' | while read -r repo; do aws ecr delete-repository --repository-name $repo --force --no-cli-pager; done
+  echo $REPOS | jq -r --arg keyword ingress-nginx '.repositories[].repositoryName | select(. | contains($keyword))' | while read -r repo; do aws ecr delete-repository --repository-name $repo --force --no-cli-pager; done
+  echo $REPOS | jq -r --arg keyword nfs-subdir-external-provisioner '.repositories[].repositoryName | select(. | contains($keyword))' | while read -r repo; do aws ecr delete-repository --repository-name $repo --force --no-cli-pager; done
+  echo $REPOS | jq -r --arg keyword metrics-server '.repositories[].repositoryName | select(. | contains($keyword))' | while read -r repo; do aws ecr delete-repository --repository-name $repo --force --no-cli-pager; done
+  echo $REPOS | jq -r --arg keyword openldap '.repositories[].repositoryName | select(. | contains($keyword))' | while read -r repo; do aws ecr delete-repository --repository-name $repo --force --no-cli-pager; done
+fi
diff --git a/viya4-deployment-darksite/mirrormgr-to-ecr/README.md b/viya4-deployment-darksite/mirrormgr-to-ecr/README.md
new file mode 100644
index 00000000..c53de24f
--- /dev/null
+++ b/viya4-deployment-darksite/mirrormgr-to-ecr/README.md
@@ -0,0 +1,23 @@
+## Helper scripts for mirrormgr
+
+SAS documentation on using mirrormgr with AWS ECR is located [here](https://go.documentation.sas.com/doc/en/itopscdc/v_029/dplyml0phy0dkr/p0lexw9inr33ofn1tbo69twarhlx.htm).
+
+## Step 1: Download Order Assets
+- Download order assets [here](https://my.sas.com/en/my-orders.html). Check all under "order assets".
+
+## Step 2: Unzip to assets/ folder
+- Unzip the multipleAssets zip to the assets/ folder. If following the darksite-lab, place it in /home/ec2-user/viya/software/viya_order_assets
+
+## Step 3: Install mirrormgr
+- Download [here](https://support.sas.com/en/documentation/install-center/viya/deployment-tools/4/mirror-manager.html).
+
+## Step 4: Update variables in 00_vars.sh
+
+## Step 5: Run 01_mirrormgr-ecr.sh
+- The script assumes your AWS CLI is already configured.
+- This script uses `mirrormgr` to create AWS ECR repos for each viya4 image (an AWS requirement).
+- This script downloads the viya4 images locally, then uses `mirrormgr` to push them automatically to the appropriate ECR repo.
+  - This will take some time depending on your local bandwidth. Note: the images total roughly 120GiB.
+
+## Helper script to clean up ECR: 02_cleanup-ecr.sh
+- This script uses the AWS CLI to delete all the SAS Viya and 3rd party repositories and images. This makes life easier when you need to clean up AWS ECR.
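The six delete loops in 02_cleanup-ecr.sh above differ only in the jq keyword; if you extend the list of third-party repos, a single consolidated loop is easier to maintain. A behaviorally equivalent sketch:

# same behavior as the per-keyword loops above, consolidated into one loop
for keyword in cert-manager cluster-autoscaler ingress-nginx \
               nfs-subdir-external-provisioner metrics-server openldap; do
  echo $REPOS | jq -r --arg keyword $keyword \
    '.repositories[].repositoryName | select(. | contains($keyword))' |
    while read -r repo; do
      aws ecr delete-repository --repository-name $repo --force --no-cli-pager
    done
done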