diff --git a/.gitignore b/.gitignore
index 91c834a..cb6c43f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -167,3 +167,4 @@ features.conf
 # Kubernetes default generated dir
 /kubernetes/generated
+/kubernetes/logs/*
diff --git a/kubernetes/full-create-and-install-gke.sh b/kubernetes/full-create-and-install-gke.sh
index 6f4cb27..ce4b5e7 100755
--- a/kubernetes/full-create-and-install-gke.sh
+++ b/kubernetes/full-create-and-install-gke.sh
@@ -12,10 +12,9 @@ WORKSPACE="$(pwd)"
 PROJECT_ID="$(gcloud config get-value project)"
 # Prepend the current username to the cluster name
 USERNAME=$(whoami)
-
+CHART_VERSION="0.7.0"
 # Default values
 DEFAULT_CLUSTER_NAME_SUFFIX="avs"
-RUN_INSECURE=1 # Default value for insecure mode (false meaning secure with auth + tls)
 
 # Function to display the script usage
 usage() {
@@ -23,7 +22,7 @@ usage() {
   echo "Options:"
   echo "  --chart-location, -l  If specified expects a local directory for AVS Helm chart (default: official repo)"
   echo "  --cluster-name, -c    Override the default cluster name (default: ${USERNAME}-${PROJECT_ID}-${DEFAULT_CLUSTER_NAME_SUFFIX})"
-  echo "  --run-insecure, -r    Run setup cluster without auth or tls. No argument required."
+  echo "  --run-insecure, -i    Run setup cluster without auth or tls. No argument required."
   echo "  --help, -h            Show this help message"
   exit 1
 }
@@ -33,7 +32,7 @@ while [[ "$#" -gt 0 ]]; do
   case $1 in
     --chart-location|-l) CHART_LOCATION="$2"; shift 2 ;;
     --cluster-name|-c) CLUSTER_NAME_OVERRIDE="$2"; shift 2 ;;
-    --run-insecure|-r) RUN_INSECURE=1; shift ;; # just flag no argument
+    --run-insecure|-i) RUN_INSECURE=1; shift ;; # flag only; takes no argument
     --help|-h) usage ;; # Display the help/usage if --help or -h is passed
     *) echo "Unknown parameter passed: $1"; usage ;; # Unknown parameter triggers usage
   esac
@@ -77,12 +76,12 @@ reset_build() {
     mv -f "$BUILD_DIR" "$temp_dir"
   fi
   mkdir -p "$BUILD_DIR/input" "$BUILD_DIR/output" "$BUILD_DIR/secrets" "$BUILD_DIR/certs" "$BUILD_DIR/manifests"
-  cp "$FEATURES_CONF" "$BUILD_DIR/secrets/features.conf"
-  if [[ "${RUN_INSECURE}" == 1 ]]; then
-    cp $WORKSPACE/manifests/avs-values.yaml $BUILD_DIR/manifests/avs-values.yaml
-    cp $WORKSPACE/manifests/aerospike-cr.yaml $BUILD_DIR/manifests/aerospike-cr.yaml
-  else
-    cp $WORKSPACE/manifests/avs-values-auth.yaml $BUILD_DIR/manifests/avs-values.yaml
+  cp "$FEATURES_CONF" "$BUILD_DIR/secrets/features.conf"
+  cp "$WORKSPACE/manifests/avs-values.yaml" "$BUILD_DIR/manifests/avs-values.yaml"
+  cp "$WORKSPACE/manifests/aerospike-cr.yaml" "$BUILD_DIR/manifests/aerospike-cr.yaml"
+
+  # Override aerospike-cr.yaml with the auth-enabled version unless --run-insecure was given
+  if [[ "${RUN_INSECURE}" != 1 ]]; then
     cp $WORKSPACE/manifests/aerospike-cr-auth.yaml $BUILD_DIR/manifests/aerospike-cr.yaml
   fi
 }
@@ -281,6 +280,12 @@ generate_certs() {
 
 # Function to create GKE cluster
 create_gke_cluster() {
+  if ! gcloud container clusters describe "$CLUSTER_NAME" --zone "$ZONE" &> /dev/null; then
+    echo "Cluster $CLUSTER_NAME does not exist. Creating..."
+  else
+    echo "Cluster $CLUSTER_NAME already exists. Skipping creation."
+    return
+  fi
   echo "$(date '+%Y-%m-%d %H:%M:%S') - Starting GKE cluster creation..."
   if ! gcloud container clusters create "$CLUSTER_NAME" \
     --project "$PROJECT_ID" \
@@ -334,16 +339,29 @@ create_gke_cluster() {
     xargs -I {} kubectl label {} aerospike.com/node-pool=avs --overwrite
 
   echo "Setting up namespaces..."
-  kubectl create namespace aerospike
-  kubectl create namespace avs
 }
 
-# Function to create Aerospike node pool and deploy AKO
+
 setup_aerospike() {
+  kubectl create namespace aerospike || true # Idempotent namespace creation
 
   echo "Deploying Aerospike Kubernetes Operator (AKO)..."
-  curl -sL https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.25.0/install.sh | bash -s v0.25.0
-  kubectl create -f https://operatorhub.io/install/aerospike-kubernetes-operator.yaml
+
+  if ! kubectl get ns olm &> /dev/null; then
+    echo "Installing OLM..."
+    curl -sL https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.25.0/install.sh | bash -s v0.25.0
+  else
+    echo "OLM is already installed in olm namespace. Skipping installation."
+  fi
+
+  # Check if the subscription already exists
+  if ! kubectl get subscription my-aerospike-kubernetes-operator --namespace operators &> /dev/null; then
+    echo "Installing AKO subscription..."
+    kubectl create -f https://operatorhub.io/install/aerospike-kubernetes-operator.yaml
+  else
+    echo "AKO subscription already exists. Skipping installation."
+  fi
+
   echo "Waiting for AKO to be ready..."
   while true; do
@@ -378,7 +396,7 @@ setup_aerospike() {
 
 # Function to set up the AVS node pool and namespace
 setup_avs() {
-
+  kubectl create namespace avs || true # Idempotent namespace creation
   echo "Setting secrets for AVS cluster..."
   kubectl --namespace avs create secret generic auth-secret --from-literal=password='admin123'
@@ -386,6 +404,7 @@ setup_avs() {
     --from-file="$BUILD_DIR/certs"
   kubectl --namespace avs create secret generic aerospike-secret \
     --from-file="$BUILD_DIR/secrets"
+
 }
 
 # Function to optionally deploy Istio
@@ -404,23 +423,35 @@ deploy_istio() {
 
   kubectl apply -f manifests/istio/gateway.yaml
   kubectl apply -f manifests/istio/avs-virtual-service.yaml
-  }
+}
 
 get_reverse_dns() {
   INGRESS_IP=$(kubectl get svc istio-ingress -n istio-ingress -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
   REVERSE_DNS_AVS=$(dig +short -x $INGRESS_IP)
   echo "Reverse DNS: $REVERSE_DNS_AVS"
 }
+
 # Function to deploy AVS Helm chart
 deploy_avs_helm_chart() {
   echo "Deploying AVS Helm chart..."
   helm repo add aerospike-helm https://artifact.aerospike.io/artifactory/api/helm/aerospike-helm
   helm repo update
-  if [ -z "$CHART_LOCATION" ]; then
-    helm install avs-app --values $BUILD_DIR/manifests/avs-values.yaml --namespace avs aerospike-helm/aerospike-vector-search --version 0.6.0 --wait
-  else
-    helm install avs-app --values $BUILD_DIR/manifests/avs-values.yaml --namespace avs "$CHART_LOCATION" --wait
-  fi
+  # Install the AVS query nodes
+  helm install avs-app aerospike-helm/aerospike-vector-search \
+    --set replicaCount=2 \
+    --set "aerospikeVectorSearchConfig.cluster.node-roles[0]=query" \
+    --values "$BUILD_DIR/manifests/avs-values.yaml" \
+    --namespace avs \
+    --version "$CHART_VERSION" \
+    --atomic --wait
+  # Install the AVS index-update node
+  helm install avs-app-update aerospike-helm/aerospike-vector-search \
+    --set replicaCount=1 \
+    --set "aerospikeVectorSearchConfig.cluster.node-roles[0]=index-update" \
+    --values "$BUILD_DIR/manifests/avs-values.yaml" \
+    --namespace avs \
+    --version "$CHART_VERSION" \
+    --atomic --wait
 }
 
 # Function to setup monitoring
@@ -461,12 +492,12 @@ main() {
   print_env
   reset_build
   create_gke_cluster
+  setup_aerospike
   deploy_istio
   get_reverse_dns
   if [[ "${RUN_INSECURE}" != 1 ]]; then
     generate_certs
   fi
-  setup_aerospike
   setup_avs
   deploy_avs_helm_chart
   setup_monitoring
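Since both releases are installed from the same values file and differ only in the `replicaCount` and `node-roles[0]` overrides, it is worth remembering that Helm gives `--set` higher precedence than `--values`, so the role override wins even if `avs-values.yaml` also defines cluster roles. A quick, non-authoritative way to confirm what each release actually received (release names as installed above; output shape depends on the chart):

```bash
# Print the user-supplied overrides for both AVS releases.
# "helm get values" shows only values set at install time via --set/--values,
# so the per-release node-roles override should be visible here.
for release in avs-app avs-app-update; do
  echo "=== $release ==="
  helm get values "$release" --namespace avs
done
```

The committed logs below capture two full runs of the install script.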
diff --git a/kubernetes/logs/avs-insecure b/kubernetes/logs/avs-insecure new file mode 100644 index 0000000..e2b266f --- /dev/null +++ b/kubernetes/logs/avs-insecure @@ -0,0 +1,379 @@ ++ trap 'echo "Error: $? at line $LINENO" >&2' ERR +++ pwd ++ WORKSPACE=/home/joem/src/aerospike-vector/kubernetes +++ gcloud config get-value project ++ PROJECT_ID=performance-eco +++ whoami ++ USERNAME=joem ++ CHART_VERSION=0.7.0 ++ DEFAULT_CLUSTER_NAME_SUFFIX=avs ++ [[ 3 -gt 0 ]] ++ case $1 in ++ RUN_INSECURE=1 ++ shift ++ [[ 2 -gt 0 ]] ++ case $1 in ++ CLUSTER_NAME_OVERRIDE=avs-insecure2 ++ shift 2 ++ [[ 0 -gt 0 ]] ++ main ++ set_env_variables ++ '[' -n avs-insecure2 ']' ++ export CLUSTER_NAME=joem-avs-insecure2 ++ CLUSTER_NAME=joem-avs-insecure2 ++ export NODE_POOL_NAME_AEROSPIKE=aerospike-pool ++ NODE_POOL_NAME_AEROSPIKE=aerospike-pool ++ export NODE_POOL_NAME_AVS=avs-pool ++ NODE_POOL_NAME_AVS=avs-pool ++ export ZONE=us-central1-c ++ ZONE=us-central1-c ++ export FEATURES_CONF=/home/joem/src/aerospike-vector/kubernetes/features.conf ++ FEATURES_CONF=/home/joem/src/aerospike-vector/kubernetes/features.conf ++ export BUILD_DIR=/home/joem/src/aerospike-vector/kubernetes/generated ++ BUILD_DIR=/home/joem/src/aerospike-vector/kubernetes/generated ++ export REVERSE_DNS_AVS ++ print_env ++ echo 'Environment Variables:' +Environment Variables: ++ echo 'export PROJECT_ID=performance-eco' +export PROJECT_ID=performance-eco ++ echo 'export CLUSTER_NAME=joem-avs-insecure2' +export CLUSTER_NAME=joem-avs-insecure2 ++ echo 'export NODE_POOL_NAME_AEROSPIKE=aerospike-pool' +export NODE_POOL_NAME_AEROSPIKE=aerospike-pool ++ echo 'export NODE_POOL_NAME_AVS=avs-pool' +export NODE_POOL_NAME_AVS=avs-pool ++ echo 'export ZONE=us-central1-c' +export ZONE=us-central1-c ++ echo 'export FEATURES_CONF=/home/joem/src/aerospike-vector/kubernetes/features.conf' +export FEATURES_CONF=/home/joem/src/aerospike-vector/kubernetes/features.conf ++ echo 'export CHART_LOCATION=' +export CHART_LOCATION= ++ echo 'export RUN_INSECURE=1' +export RUN_INSECURE=1 ++ reset_build ++ '[' -d /home/joem/src/aerospike-vector/kubernetes/generated ']' +++ mktemp -d /tmp/avs-deploy-previous.XXXXXX ++ temp_dir=/tmp/avs-deploy-previous.seR081 ++ mv -f /home/joem/src/aerospike-vector/kubernetes/generated /tmp/avs-deploy-previous.seR081 ++ mkdir -p /home/joem/src/aerospike-vector/kubernetes/generated/input /home/joem/src/aerospike-vector/kubernetes/generated/output /home/joem/src/aerospike-vector/kubernetes/generated/secrets /home/joem/src/aerospike-vector/kubernetes/generated/certs /home/joem/src/aerospike-vector/kubernetes/generated/manifests ++ cp /home/joem/src/aerospike-vector/kubernetes/features.conf /home/joem/src/aerospike-vector/kubernetes/generated/secrets/features.conf ++ docker run --rm -v /home/joem/src/aerospike-vector/kubernetes:/workdir -w /workdir mikefarah/yq e '.aerospikeVectorSearchConfig.cluster *= (load("manifests/avs-values-role-query.yaml"))' /workdir/manifests/avs-values.yaml ++ docker run --rm -v /home/joem/src/aerospike-vector/kubernetes:/workdir -w /workdir mikefarah/yq e '.aerospikeVectorSearchConfig.cluster *= (load("manifests/avs-values-role-update.yaml"))' /workdir/manifests/avs-values.yaml ++ cp /home/joem/src/aerospike-vector/kubernetes/manifests/aerospike-cr.yaml /home/joem/src/aerospike-vector/kubernetes/generated/manifests/ ++ [[ 1 != 1 ]] ++ create_gke_cluster ++ gcloud container clusters describe joem-avs-insecure2 --zone us-central1-c ++ echo 'Cluster joem-avs-insecure2 does not exist. Creating...' +Cluster joem-avs-insecure2 does not exist. Creating... +++ date '+%Y-%m-%d %H:%M:%S' ++ echo '2024-12-05 16:57:32 - Starting GKE cluster creation...' +2024-12-05 16:57:32 - Starting GKE cluster creation... ++ gcloud container clusters create joem-avs-insecure2 --project performance-eco --zone us-central1-c --num-nodes 1 --disk-type pd-standard --disk-size 100 +Note: The Kubelet readonly port (10255) is now deprecated. Please update your workloads to use the recommended alternatives. See https://cloud.google.com/kubernetes-engine/docs/how-to/disable-kubelet-readonly-port for ways to check usage and for migration instructions. +Note: Your Pod address range (`--cluster-ipv4-cidr`) can accommodate at most 1008 node(s). +Creating cluster joem-avs-insecure2 in us-central1-c...
+.........................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................done. +Created [https://container.googleapis.com/v1/projects/performance-eco/zones/us-central1-c/clusters/joem-avs-insecure2]. +To inspect the contents of your cluster, go to: https://console.cloud.google.com/kubernetes/workload_/gcloud/us-central1-c/joem-avs-insecure2?project=performance-eco +kubeconfig entry generated for joem-avs-insecure2. +NAME LOCATION MASTER_VERSION MASTER_IP MACHINE_TYPE NODE_VERSION NUM_NODES STATUS +joem-avs-insecure2 us-central1-c 1.30.5-gke.1699000 35.188.115.25 e2-medium 1.30.5-gke.1699000 1 RUNNING ++ echo 'GKE cluster created successfully.' +GKE cluster created successfully. ++ echo 'Creating Aerospike node pool...' +Creating Aerospike node pool... ++ gcloud container node-pools create aerospike-pool --cluster joem-avs-insecure2 --project performance-eco --zone us-central1-c --num-nodes 3 --local-ssd-count 2 --disk-type pd-standard --disk-size 100 --machine-type n2d-standard-32 +Creating node pool aerospike-pool... +.......................................................................................................................................................................................................................................................................................................................................................................................................................done. +Created [https://container.googleapis.com/v1/projects/performance-eco/zones/us-central1-c/clusters/joem-avs-insecure2/nodePools/aerospike-pool]. +NAME MACHINE_TYPE DISK_SIZE_GB NODE_VERSION +aerospike-pool n2d-standard-32 100 1.30.5-gke.1699000 ++ echo 'Aerospike node pool added successfully.' +Aerospike node pool added successfully. ++ echo 'Labeling Aerospike nodes...' +Labeling Aerospike nodes... 
++ xargs -I '{}' kubectl label '{}' aerospike.com/node-pool=default-rack --overwrite ++ kubectl get nodes -l cloud.google.com/gke-nodepool=aerospike-pool -o name +node/gke-joem-avs-insecure2-aerospike-pool-e5f993df-c6nd labeled +node/gke-joem-avs-insecure2-aerospike-pool-e5f993df-g3np labeled +node/gke-joem-avs-insecure2-aerospike-pool-e5f993df-pxzw labeled ++ echo 'Adding AVS node pool...' +Adding AVS node pool... ++ gcloud container node-pools create avs-pool --cluster joem-avs-insecure2 --project performance-eco --zone us-central1-c --num-nodes 3 --disk-type pd-standard --disk-size 100 --machine-type n2d-standard-32 +Creating node pool avs-pool... +.............................................................................................................................................................................................................................................................................................................................................................................................done. +Created [https://container.googleapis.com/v1/projects/performance-eco/zones/us-central1-c/clusters/joem-avs-insecure2/nodePools/avs-pool]. +NAME MACHINE_TYPE DISK_SIZE_GB NODE_VERSION +avs-pool n2d-standard-32 100 1.30.5-gke.1699000 ++ echo 'AVS node pool added successfully.' +AVS node pool added successfully. ++ echo 'Labeling AVS nodes...' +Labeling AVS nodes... ++ kubectl get nodes -l cloud.google.com/gke-nodepool=avs-pool -o name ++ xargs -I '{}' kubectl label '{}' aerospike.com/node-pool=avs --overwrite +node/gke-joem-avs-insecure2-avs-pool-30c30d84-5jmx labeled +node/gke-joem-avs-insecure2-avs-pool-30c30d84-5xhh labeled +node/gke-joem-avs-insecure2-avs-pool-30c30d84-lkrf labeled ++ echo 'Setting up namespaces...' +Setting up namespaces... ++ setup_aerospike ++ kubectl create namespace aerospike +namespace/aerospike created ++ echo 'Deploying Aerospike Kubernetes Operator (AKO)...' +Deploying Aerospike Kubernetes Operator (AKO)... ++ kubectl get ns olm ++ echo 'Installing OLM...' +Installing OLM... 
++ curl -sL https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.25.0/install.sh ++ bash -s v0.25.0 +customresourcedefinition.apiextensions.k8s.io/catalogsources.operators.coreos.com created +customresourcedefinition.apiextensions.k8s.io/clusterserviceversions.operators.coreos.com created +customresourcedefinition.apiextensions.k8s.io/installplans.operators.coreos.com created +customresourcedefinition.apiextensions.k8s.io/olmconfigs.operators.coreos.com created +customresourcedefinition.apiextensions.k8s.io/operatorconditions.operators.coreos.com created +customresourcedefinition.apiextensions.k8s.io/operatorgroups.operators.coreos.com created +customresourcedefinition.apiextensions.k8s.io/operators.operators.coreos.com created +customresourcedefinition.apiextensions.k8s.io/subscriptions.operators.coreos.com created +customresourcedefinition.apiextensions.k8s.io/catalogsources.operators.coreos.com condition met +customresourcedefinition.apiextensions.k8s.io/clusterserviceversions.operators.coreos.com condition met +customresourcedefinition.apiextensions.k8s.io/installplans.operators.coreos.com condition met +customresourcedefinition.apiextensions.k8s.io/olmconfigs.operators.coreos.com condition met +customresourcedefinition.apiextensions.k8s.io/operatorconditions.operators.coreos.com condition met +customresourcedefinition.apiextensions.k8s.io/operatorgroups.operators.coreos.com condition met +customresourcedefinition.apiextensions.k8s.io/operators.operators.coreos.com condition met +customresourcedefinition.apiextensions.k8s.io/subscriptions.operators.coreos.com condition met +namespace/olm created +namespace/operators created +serviceaccount/olm-operator-serviceaccount created +clusterrole.rbac.authorization.k8s.io/system:controller:operator-lifecycle-manager created +clusterrolebinding.rbac.authorization.k8s.io/olm-operator-binding-olm created +olmconfig.operators.coreos.com/cluster created +deployment.apps/olm-operator created +deployment.apps/catalog-operator created +clusterrole.rbac.authorization.k8s.io/aggregate-olm-edit created +clusterrole.rbac.authorization.k8s.io/aggregate-olm-view created +operatorgroup.operators.coreos.com/global-operators created +operatorgroup.operators.coreos.com/olm-operators created +clusterserviceversion.operators.coreos.com/packageserver created +catalogsource.operators.coreos.com/operatorhubio-catalog created +Waiting for deployment "olm-operator" rollout to finish: 0 of 1 updated replicas are available... +deployment "olm-operator" successfully rolled out +deployment "catalog-operator" successfully rolled out +Package server phase: Installing +Package server phase: Succeeded +deployment "packageserver" successfully rolled out ++ kubectl get subscription my-aerospike-kubernetes-operator --namespace operators ++ echo 'Installing AKO subscription...' +Installing AKO subscription... ++ kubectl create -f https://operatorhub.io/install/aerospike-kubernetes-operator.yaml +subscription.operators.coreos.com/my-aerospike-kubernetes-operator created ++ echo 'Waiting for AKO to be ready...' +Waiting for AKO to be ready... ++ true ++ kubectl --namespace operators get deployment/aerospike-operator-controller-manager ++ echo 'AKO setup is still in progress...' +AKO setup is still in progress... ++ sleep 10 ++ true ++ kubectl --namespace operators get deployment/aerospike-operator-controller-manager ++ echo 'AKO setup is still in progress...' +AKO setup is still in progress... 
++ sleep 10 ++ true ++ kubectl --namespace operators get deployment/aerospike-operator-controller-manager ++ echo 'AKO setup is still in progress...' +AKO setup is still in progress... ++ sleep 10 ++ true ++ kubectl --namespace operators get deployment/aerospike-operator-controller-manager ++ echo 'AKO is ready.' +AKO is ready. ++ kubectl --namespace operators wait --for=condition=available --timeout=180s deployment/aerospike-operator-controller-manager +deployment.apps/aerospike-operator-controller-manager condition met ++ break ++ echo 'Granting permissions to the target namespace...' +Granting permissions to the target namespace... ++ kubectl --namespace aerospike create serviceaccount aerospike-operator-controller-manager +serviceaccount/aerospike-operator-controller-manager created ++ kubectl create clusterrolebinding aerospike-cluster --clusterrole=aerospike-cluster --serviceaccount=aerospike:aerospike-operator-controller-manager +clusterrolebinding.rbac.authorization.k8s.io/aerospike-cluster created ++ echo 'Setting secrets for Aerospike cluster...' +Setting secrets for Aerospike cluster... ++ kubectl --namespace aerospike create secret generic aerospike-secret --from-file=/home/joem/src/aerospike-vector/kubernetes/generated/secrets +secret/aerospike-secret created ++ kubectl --namespace aerospike create secret generic auth-secret --from-literal=password=admin123 +secret/auth-secret created ++ kubectl --namespace aerospike create secret generic aerospike-tls --from-file=/home/joem/src/aerospike-vector/kubernetes/generated/certs +secret/aerospike-tls created ++ echo 'Adding storage class...' +Adding storage class... ++ kubectl apply -f https://raw.githubusercontent.com/aerospike/aerospike-kubernetes-operator/master/config/samples/storage/gce_ssd_storage_class.yaml +storageclass.storage.k8s.io/ssd created ++ echo 'Deploying Aerospike cluster...' +Deploying Aerospike cluster... ++ kubectl apply -f /home/joem/src/aerospike-vector/kubernetes/generated/manifests/aerospike-cr.yaml +aerospikecluster.asdb.aerospike.com/aerocluster created ++ deploy_istio ++ echo 'Deploying Istio' +Deploying Istio ++ helm repo add istio https://istio-release.storage.googleapis.com/charts +"istio" has been added to your repositories ++ helm repo update +Hang tight while we grab the latest from your chart repositories... +...Successfully got an update from the "jetstack" chart repository +...Successfully got an update from the "istio" chart repository +...Successfully got an update from the "aerospike-io" chart repository +...Successfully got an update from the "stable" chart repository +Update Complete. ⎈Happy Helming!⎈ ++ helm install istio-base istio/base --namespace istio-system --set defaultRevision=default --create-namespace --wait +NAME: istio-base +LAST DEPLOYED: Thu Dec 5 17:07:34 2024 +NAMESPACE: istio-system +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: +Istio base successfully installed! + +To learn more about the release, try: + $ helm status istio-base -n istio-system + $ helm get all istio-base -n istio-system ++ helm install istiod istio/istiod --namespace istio-system --create-namespace --wait +NAME: istiod +LAST DEPLOYED: Thu Dec 5 17:07:46 2024 +NAMESPACE: istio-system +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: +"istiod" successfully installed! 
+ +To learn more about the release, try: + $ helm status istiod -n istio-system + $ helm get all istiod -n istio-system + +Next steps: + * Deploy a Gateway: https://istio.io/latest/docs/setup/additional-setup/gateway/ + * Try out our tasks to get started on common configurations: + * https://istio.io/latest/docs/tasks/traffic-management + * https://istio.io/latest/docs/tasks/security/ + * https://istio.io/latest/docs/tasks/policy-enforcement/ + * Review the list of actively supported releases, CVE publications and our hardening guide: + * https://istio.io/latest/docs/releases/supported-releases/ + * https://istio.io/latest/news/security/ + * https://istio.io/latest/docs/ops/best-practices/security/ + +For further documentation see https://istio.io website ++ helm install istio-ingress istio/gateway --values ./manifests/istio/istio-ingressgateway-values.yaml --namespace istio-ingress --create-namespace --wait +NAME: istio-ingress +LAST DEPLOYED: Thu Dec 5 17:07:59 2024 +NAMESPACE: istio-ingress +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: +"istio-ingress" successfully installed! + +To learn more about the release, try: + $ helm status istio-ingress -n istio-ingress + $ helm get all istio-ingress -n istio-ingress + +Next steps: + * Deploy an HTTP Gateway: https://istio.io/latest/docs/tasks/traffic-management/ingress/ingress-control/ + * Deploy an HTTPS Gateway: https://istio.io/latest/docs/tasks/traffic-management/ingress/secure-ingress/ ++ kubectl apply -f manifests/istio/gateway.yaml +gateway.networking.istio.io/avs-gw created ++ kubectl apply -f manifests/istio/avs-virtual-service.yaml +virtualservice.networking.istio.io/avs-vs created ++ get_reverse_dns +++ kubectl get svc istio-ingress -n istio-ingress -o 'jsonpath={.status.loadBalancer.ingress[0].ip}' ++ INGRESS_IP=34.28.28.145 +++ dig +short -x 34.28.28.145 ++ REVERSE_DNS_AVS=145.28.28.34.bc.googleusercontent.com. ++ echo 'Reverse DNS: 145.28.28.34.bc.googleusercontent.com.' +Reverse DNS: 145.28.28.34.bc.googleusercontent.com. ++ [[ 1 != 1 ]] ++ setup_avs ++ kubectl create namespace avs +namespace/avs created ++ echo 'Setting secrets for AVS cluster...' +Setting secrets for AVS cluster... ++ kubectl --namespace avs create secret generic auth-secret --from-literal=password=admin123 +secret/auth-secret created ++ kubectl --namespace avs create secret generic aerospike-tls --from-file=/home/joem/src/aerospike-vector/kubernetes/generated/certs +secret/aerospike-tls created ++ kubectl --namespace avs create secret generic aerospike-secret --from-file=/home/joem/src/aerospike-vector/kubernetes/generated/secrets +secret/aerospike-secret created ++ deploy_avs_helm_chart ++ echo 'Deploying AVS Helm chart...' +Deploying AVS Helm chart... ++ helm repo add aerospike-helm https://artifact.aerospike.io/artifactory/api/helm/aerospike-helm +"aerospike-helm" has been added to your repositories ++ helm repo update +Hang tight while we grab the latest from your chart repositories... +...Successfully got an update from the "istio" chart repository +...Successfully got an update from the "jetstack" chart repository +...Successfully got an update from the "aerospike-io" chart repository +...Successfully got an update from the "aerospike-helm" chart repository +...Successfully got an update from the "stable" chart repository +Update Complete. 
⎈Happy Helming!⎈ ++ helm install avs-app-query --set replicaCount=2 --values /home/joem/src/aerospike-vector/kubernetes/generated/manifests/avs-values-update.yaml --namespace avs aerospike-helm/aerospike-vector-search --version 0.7.0 --atomic --wait +NAME: avs-app-query +LAST DEPLOYED: Thu Dec 5 17:09:16 2024 +NAMESPACE: avs +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: + ++ helm install avs-app-update --set replicaCount=1 --values /home/joem/src/aerospike-vector/kubernetes/generated/manifests/avs-values-query.yaml --namespace avs aerospike-helm/aerospike-vector-search --version 0.7.0 --atomic --wait +NAME: avs-app-update +LAST DEPLOYED: Thu Dec 5 17:09:41 2024 +NAMESPACE: avs +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: + ++ setup_monitoring ++ echo 'Adding monitoring setup...' +Adding monitoring setup... ++ helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +"prometheus-community" has been added to your repositories ++ helm repo update +Hang tight while we grab the latest from your chart repositories... +...Successfully got an update from the "istio" chart repository +...Successfully got an update from the "jetstack" chart repository +...Successfully got an update from the "aerospike-helm" chart repository +...Successfully got an update from the "aerospike-io" chart repository +...Successfully got an update from the "prometheus-community" chart repository +...Successfully got an update from the "stable" chart repository +Update Complete. ⎈Happy Helming!⎈ ++ helm install monitoring-stack prometheus-community/kube-prometheus-stack --namespace monitoring --create-namespace +NAME: monitoring-stack +LAST DEPLOYED: Thu Dec 5 17:10:10 2024 +NAMESPACE: monitoring +STATUS: deployed +REVISION: 1 +NOTES: +kube-prometheus-stack has been installed. Check its status by running: + kubectl --namespace monitoring get pods -l "release=monitoring-stack" + +Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator. ++ echo 'Applying additional monitoring manifests...' +Applying additional monitoring manifests... ++ kubectl apply -f manifests/monitoring/aerospike-exporter-service.yaml +service/aerospike-exporter created ++ kubectl apply -f manifests/monitoring/aerospike-servicemonitor.yaml +servicemonitor.monitoring.coreos.com/aerospike-monitor created ++ kubectl apply -f manifests/monitoring/avs-servicemonitor.yaml +servicemonitor.monitoring.coreos.com/avs-monitor created ++ print_final_instructions ++ echo Your new deployment is available at 145.28.28.34.bc.googleusercontent.com.. +Your new deployment is available at 145.28.28.34.bc.googleusercontent.com.. ++ echo Check your deployment using our command line tool asvec available at https://github.com/aerospike/asvec. +Check your deployment using our command line tool asvec available at https://github.com/aerospike/asvec. ++ [[ 1 != 1 ]] ++ echo 'Setup Complete!' +Setup Complete! diff --git a/kubernetes/logs/avs-real-insecure b/kubernetes/logs/avs-real-insecure new file mode 100644 index 0000000..d929231 --- /dev/null +++ b/kubernetes/logs/avs-real-insecure @@ -0,0 +1,276 @@ ++ trap 'echo "Error: $? 
at line $LINENO" >&2' ERR +++ pwd ++ WORKSPACE=/home/joem/src/aerospike-vector/kubernetes +++ gcloud config get-value project ++ PROJECT_ID=performance-eco +++ whoami ++ USERNAME=joem ++ CHART_VERSION=0.7.0 ++ DEFAULT_CLUSTER_NAME_SUFFIX=avs ++ [[ 3 -gt 0 ]] ++ case $1 in ++ RUN_INSECURE=1 ++ shift ++ [[ 2 -gt 0 ]] ++ case $1 in ++ CLUSTER_NAME_OVERRIDE=avs-real-insecure ++ shift 2 ++ [[ 0 -gt 0 ]] ++ main ++ set_env_variables ++ '[' -n avs-real-insecure ']' ++ export CLUSTER_NAME=joem-avs-real-insecure ++ CLUSTER_NAME=joem-avs-real-insecure ++ export NODE_POOL_NAME_AEROSPIKE=aerospike-pool ++ NODE_POOL_NAME_AEROSPIKE=aerospike-pool ++ export NODE_POOL_NAME_AVS=avs-pool ++ NODE_POOL_NAME_AVS=avs-pool ++ export ZONE=us-central1-c ++ ZONE=us-central1-c ++ export FEATURES_CONF=/home/joem/src/aerospike-vector/kubernetes/features.conf ++ FEATURES_CONF=/home/joem/src/aerospike-vector/kubernetes/features.conf ++ export BUILD_DIR=/home/joem/src/aerospike-vector/kubernetes/generated ++ BUILD_DIR=/home/joem/src/aerospike-vector/kubernetes/generated ++ export REVERSE_DNS_AVS ++ print_env ++ echo 'Environment Variables:' +Environment Variables: ++ echo 'export PROJECT_ID=performance-eco' +export PROJECT_ID=performance-eco ++ echo 'export CLUSTER_NAME=joem-avs-real-insecure' +export CLUSTER_NAME=joem-avs-real-insecure ++ echo 'export NODE_POOL_NAME_AEROSPIKE=aerospike-pool' +export NODE_POOL_NAME_AEROSPIKE=aerospike-pool ++ echo 'export NODE_POOL_NAME_AVS=avs-pool' +export NODE_POOL_NAME_AVS=avs-pool ++ echo 'export ZONE=us-central1-c' +export ZONE=us-central1-c ++ echo 'export FEATURES_CONF=/home/joem/src/aerospike-vector/kubernetes/features.conf' +export FEATURES_CONF=/home/joem/src/aerospike-vector/kubernetes/features.conf ++ echo 'export CHART_LOCATION=' +export CHART_LOCATION= ++ echo 'export RUN_INSECURE=1' +export RUN_INSECURE=1 ++ reset_build ++ '[' -d /home/joem/src/aerospike-vector/kubernetes/generated ']' +++ mktemp -d /tmp/avs-deploy-previous.XXXXXX ++ temp_dir=/tmp/avs-deploy-previous.9tWhiW ++ mv -f /home/joem/src/aerospike-vector/kubernetes/generated /tmp/avs-deploy-previous.9tWhiW ++ mkdir -p /home/joem/src/aerospike-vector/kubernetes/generated/input /home/joem/src/aerospike-vector/kubernetes/generated/output /home/joem/src/aerospike-vector/kubernetes/generated/secrets /home/joem/src/aerospike-vector/kubernetes/generated/certs /home/joem/src/aerospike-vector/kubernetes/generated/manifests ++ cp /home/joem/src/aerospike-vector/kubernetes/features.conf /home/joem/src/aerospike-vector/kubernetes/generated/secrets/features.conf ++ cp /home/joem/src/aerospike-vector/kubernetes/manifests/avs-values-auth.yaml /home/joem/src/aerospike-vector/kubernetes/manifests/avs-values-role-query.yaml /home/joem/src/aerospike-vector/kubernetes/manifests/avs-values-role-update.yaml /home/joem/src/aerospike-vector/kubernetes/manifests/avs-values.yaml /home/joem/src/aerospike-vector/kubernetes/generated/manifests/ ++ cp /home/joem/src/aerospike-vector/kubernetes/manifests/aerospike-cr.yaml /home/joem/src/aerospike-vector/kubernetes/generated/manifests/ ++ [[ 1 != 1 ]] ++ create_gke_cluster ++ gcloud container clusters describe joem-avs-real-insecure --zone us-central1-c ++ echo 'Cluster joem-avs-real-insecure already exists. Skipping creation.' +Cluster joem-avs-real-insecure already exists. Skipping creation. ++ return ++ setup_aerospike ++ kubectl create namespace aerospike +namespace/aerospike created ++ echo 'Deploying Aerospike Kubernetes Operator (AKO)...' 
+Deploying Aerospike Kubernetes Operator (AKO)... ++ kubectl get ns olm ++ echo 'OLM is already installed in olm namespace. Skipping installation.' +OLM is already installed in olm namespace. Skipping installation. ++ kubectl get subscription my-aerospike-kubernetes-operator --namespace operators ++ echo 'Installing AKO subscription...' +Installing AKO subscription... ++ kubectl create -f https://operatorhub.io/install/aerospike-kubernetes-operator.yaml +subscription.operators.coreos.com/my-aerospike-kubernetes-operator created ++ echo 'Waiting for AKO to be ready...' +Waiting for AKO to be ready... ++ true ++ kubectl --namespace operators get deployment/aerospike-operator-controller-manager ++ echo 'AKO is ready.' +AKO is ready. ++ kubectl --namespace operators wait --for=condition=available --timeout=180s deployment/aerospike-operator-controller-manager +deployment.apps/aerospike-operator-controller-manager condition met ++ break ++ echo 'Granting permissions to the target namespace...' +Granting permissions to the target namespace... ++ kubectl --namespace aerospike create serviceaccount aerospike-operator-controller-manager +serviceaccount/aerospike-operator-controller-manager created ++ kubectl create clusterrolebinding aerospike-cluster --clusterrole=aerospike-cluster --serviceaccount=aerospike:aerospike-operator-controller-manager +clusterrolebinding.rbac.authorization.k8s.io/aerospike-cluster created ++ echo 'Setting secrets for Aerospike cluster...' +Setting secrets for Aerospike cluster... ++ kubectl --namespace aerospike create secret generic aerospike-secret --from-file=/home/joem/src/aerospike-vector/kubernetes/generated/secrets +secret/aerospike-secret created ++ kubectl --namespace aerospike create secret generic auth-secret --from-literal=password=admin123 +secret/auth-secret created ++ kubectl --namespace aerospike create secret generic aerospike-tls --from-file=/home/joem/src/aerospike-vector/kubernetes/generated/certs +secret/aerospike-tls created ++ echo 'Adding storage class...' +Adding storage class... ++ kubectl apply -f https://raw.githubusercontent.com/aerospike/aerospike-kubernetes-operator/master/config/samples/storage/gce_ssd_storage_class.yaml +storageclass.storage.k8s.io/ssd created ++ echo 'Deploying Aerospike cluster...' +Deploying Aerospike cluster... ++ kubectl apply -f /home/joem/src/aerospike-vector/kubernetes/generated/manifests/aerospike-cr.yaml +aerospikecluster.asdb.aerospike.com/aerocluster created ++ deploy_istio ++ echo 'Deploying Istio' +Deploying Istio ++ helm repo add istio https://istio-release.storage.googleapis.com/charts +"istio" has been added to your repositories ++ helm repo update +Hang tight while we grab the latest from your chart repositories... +...Successfully got an update from the "istio" chart repository +...Successfully got an update from the "jetstack" chart repository +...Successfully got an update from the "aerospike-io" chart repository +...Successfully got an update from the "stable" chart repository +Update Complete. ⎈Happy Helming!⎈ ++ helm install istio-base istio/base --namespace istio-system --set defaultRevision=default --create-namespace --wait +NAME: istio-base +LAST DEPLOYED: Wed Dec 4 21:02:03 2024 +NAMESPACE: istio-system +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: +Istio base successfully installed! 
+ +To learn more about the release, try: + $ helm status istio-base -n istio-system + $ helm get all istio-base -n istio-system ++ helm install istiod istio/istiod --namespace istio-system --create-namespace --wait +NAME: istiod +LAST DEPLOYED: Wed Dec 4 21:02:12 2024 +NAMESPACE: istio-system +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: +"istiod" successfully installed! + +To learn more about the release, try: + $ helm status istiod -n istio-system + $ helm get all istiod -n istio-system + +Next steps: + * Deploy a Gateway: https://istio.io/latest/docs/setup/additional-setup/gateway/ + * Try out our tasks to get started on common configurations: + * https://istio.io/latest/docs/tasks/traffic-management + * https://istio.io/latest/docs/tasks/security/ + * https://istio.io/latest/docs/tasks/policy-enforcement/ + * Review the list of actively supported releases, CVE publications and our hardening guide: + * https://istio.io/latest/docs/releases/supported-releases/ + * https://istio.io/latest/news/security/ + * https://istio.io/latest/docs/ops/best-practices/security/ + +For further documentation see https://istio.io website ++ helm install istio-ingress istio/gateway --values ./manifests/istio/istio-ingressgateway-values.yaml --namespace istio-ingress --create-namespace --wait +NAME: istio-ingress +LAST DEPLOYED: Wed Dec 4 21:02:22 2024 +NAMESPACE: istio-ingress +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: +"istio-ingress" successfully installed! + +To learn more about the release, try: + $ helm status istio-ingress -n istio-ingress + $ helm get all istio-ingress -n istio-ingress + +Next steps: + * Deploy an HTTP Gateway: https://istio.io/latest/docs/tasks/traffic-management/ingress/ingress-control/ + * Deploy an HTTPS Gateway: https://istio.io/latest/docs/tasks/traffic-management/ingress/secure-ingress/ ++ kubectl apply -f manifests/istio/gateway.yaml +gateway.networking.istio.io/avs-gw created ++ kubectl apply -f manifests/istio/avs-virtual-service.yaml +virtualservice.networking.istio.io/avs-vs created ++ get_reverse_dns +++ kubectl get svc istio-ingress -n istio-ingress -o 'jsonpath={.status.loadBalancer.ingress[0].ip}' ++ INGRESS_IP=34.173.147.65 +++ dig +short -x 34.173.147.65 ++ REVERSE_DNS_AVS=65.147.173.34.bc.googleusercontent.com. ++ echo 'Reverse DNS: 65.147.173.34.bc.googleusercontent.com.' +Reverse DNS: 65.147.173.34.bc.googleusercontent.com. ++ [[ 1 != 1 ]] ++ setup_avs ++ kubectl create namespace avs +namespace/avs created ++ echo 'Setting secrets for AVS cluster...' +Setting secrets for AVS cluster... ++ kubectl --namespace avs create secret generic auth-secret --from-literal=password=admin123 +secret/auth-secret created ++ kubectl --namespace avs create secret generic aerospike-tls --from-file=/home/joem/src/aerospike-vector/kubernetes/generated/certs +secret/aerospike-tls created ++ kubectl --namespace avs create secret generic aerospike-secret --from-file=/home/joem/src/aerospike-vector/kubernetes/generated/secrets +secret/aerospike-secret created ++ deploy_avs_helm_chart ++ echo 'Deploying AVS Helm chart...' +Deploying AVS Helm chart... ++ helm repo add aerospike-helm https://artifact.aerospike.io/artifactory/api/helm/aerospike-helm +"aerospike-helm" has been added to your repositories ++ helm repo update +Hang tight while we grab the latest from your chart repositories... 
+...Successfully got an update from the "jetstack" chart repository +...Successfully got an update from the "istio" chart repository +...Successfully got an update from the "aerospike-io" chart repository +...Successfully got an update from the "aerospike-helm" chart repository +...Successfully got an update from the "stable" chart repository +Update Complete. ⎈Happy Helming!⎈ ++ '[' -z '' ']' ++ helm install avs-app-query --set replicaCount=2 --values /home/joem/src/aerospike-vector/kubernetes/generated/manifests/avs-values.yaml --values /home/joem/src/aerospike-vector/kubernetes/generated/manifests/avs-values-role-query.yaml --namespace avs aerospike-helm/aerospike-vector-search --version 0.7.0 --atomic --wait +NAME: avs-app-query +LAST DEPLOYED: Wed Dec 4 21:03:39 2024 +NAMESPACE: avs +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: + ++ helm install avs-app-update --set replicaCount=1 --values /home/joem/src/aerospike-vector/kubernetes/generated/manifests/avs-values.yaml --values /home/joem/src/aerospike-vector/kubernetes/generated/manifests/avs-values-role-update.yaml --namespace avs aerospike-helm/aerospike-vector-search --version 0.7.0 --atomic --wait +NAME: avs-app-update +LAST DEPLOYED: Wed Dec 4 21:04:03 2024 +NAMESPACE: avs +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: + ++ setup_monitoring ++ echo 'Adding monitoring setup...' +Adding monitoring setup... ++ helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +"prometheus-community" has been added to your repositories ++ helm repo update +Hang tight while we grab the latest from your chart repositories... +...Successfully got an update from the "jetstack" chart repository +...Successfully got an update from the "istio" chart repository +...Successfully got an update from the "aerospike-io" chart repository +...Successfully got an update from the "aerospike-helm" chart repository +...Successfully got an update from the "prometheus-community" chart repository +...Successfully got an update from the "stable" chart repository +Update Complete. ⎈Happy Helming!⎈ ++ helm install monitoring-stack prometheus-community/kube-prometheus-stack --namespace monitoring --create-namespace +NAME: monitoring-stack +LAST DEPLOYED: Wed Dec 4 21:04:33 2024 +NAMESPACE: monitoring +STATUS: deployed +REVISION: 1 +NOTES: +kube-prometheus-stack has been installed. Check its status by running: + kubectl --namespace monitoring get pods -l "release=monitoring-stack" + +Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator. ++ echo 'Applying additional monitoring manifests...' +Applying additional monitoring manifests... ++ kubectl apply -f manifests/monitoring/aerospike-exporter-service.yaml +service/aerospike-exporter created ++ kubectl apply -f manifests/monitoring/aerospike-servicemonitor.yaml +servicemonitor.monitoring.coreos.com/aerospike-monitor created ++ kubectl apply -f manifests/monitoring/avs-servicemonitor.yaml +servicemonitor.monitoring.coreos.com/avs-monitor created ++ print_final_instructions ++ echo Your new deployment is available at 65.147.173.34.bc.googleusercontent.com.. +Your new deployment is available at 65.147.173.34.bc.googleusercontent.com.. ++ echo Check your deployment using our command line tool asvec available at https://github.com/aerospike/asvec. +Check your deployment using our command line tool asvec available at https://github.com/aerospike/asvec. 
++ [[ 1 != 1 ]] ++ echo 'Setup Complete!' +Setup Complete!
diff --git a/kubernetes/manifests/monitoring/avs-servicemonitor.yaml b/kubernetes/manifests/monitoring/avs-servicemonitor.yaml
index 92cf271..64950de 100644
--- a/kubernetes/manifests/monitoring/avs-servicemonitor.yaml
+++ b/kubernetes/manifests/monitoring/avs-servicemonitor.yaml
@@ -7,12 +7,16 @@ metadata:
     release: monitoring-stack # Ensure this matches the Helm release name
 spec:
   selector:
-    matchLabels:
-      app: avs-app-aerospike-vector-search
+    matchExpressions:
+      - key: app
+        operator: In
+        values:
+          - avs-app-aerospike-vector-search
+          - avs-app-update-aerospike-vector-search
   namespaceSelector:
     matchNames:
       - avs
   endpoints:
     - port: manage-5040
       path: /manage/rest/v1/prometheus
-      interval: 15s
+      interval: 15s
\ No newline at end of file
diff --git a/kubernetes/uninstall-gke.sh b/kubernetes/uninstall-gke.sh
index fc21b73..f70e9b1 100755
--- a/kubernetes/uninstall-gke.sh
+++ b/kubernetes/uninstall-gke.sh
@@ -1,5 +1,8 @@
 #!/bin/bash
 
+# This script uninstalls the resources created by the full-create-and-install-gke.sh script.
+# It handles the removal of deployments, services, namespaces, node pools, and optionally the GKE cluster itself.
+
 set -eo pipefail
 if [ -n "$DEBUG" ]; then set -x; fi
 trap 'echo "Error: $? at line $LINENO" >&2' ERR
@@ -11,7 +14,27 @@ USERNAME=$(whoami)
 
 # Default values
 DEFAULT_CLUSTER_NAME_SUFFIX="avs"
-RUN_INSECURE=1 # Default value for insecure mode (false meaning secure with auth + tls)
+DESTROY_CLUSTER=1 # Default is to destroy the cluster
+
+# Function to display the script usage
+usage() {
+  echo "Usage: $0 [options]"
+  echo "Options:"
+  echo "  --cluster-name, -c  Override the default cluster name (default: ${USERNAME}-${PROJECT_ID}-${DEFAULT_CLUSTER_NAME_SUFFIX})"
+  echo "  --keep-cluster, -k  Do not destroy the GKE cluster. No argument required."
+  echo "  --help, -h          Show this help message"
+  exit 1
+}
+
+# Parse command line arguments
+while [[ "$#" -gt 0 ]]; do
+  case $1 in
+    --cluster-name|-c) CLUSTER_NAME_OVERRIDE="$2"; shift 2 ;;
+    --keep-cluster|-k) DESTROY_CLUSTER=0; shift ;;
+    --help|-h) usage ;;
+    *) echo "Unknown parameter passed: $1"; usage ;;
+  esac
+done
 
 # Function to print environment variables for verification
 print_env() {
@@ -21,15 +44,10 @@ print_env() {
   echo "export NODE_POOL_NAME_AEROSPIKE=$NODE_POOL_NAME_AEROSPIKE"
   echo "export NODE_POOL_NAME_AVS=$NODE_POOL_NAME_AVS"
   echo "export ZONE=$ZONE"
-  echo "export FEATURES_CONF=$FEATURES_CONF"
-  echo "export CHART_LOCATION=$CHART_LOCATION"
-  echo "export RUN_INSECURE=$RUN_INSECURE"
 }
-
 # Function to set environment variables
 set_env_variables() {
-
   # Use provided cluster name or fallback to the default
   if [ -n "$CLUSTER_NAME_OVERRIDE" ]; then
     export CLUSTER_NAME="${USERNAME}-${CLUSTER_NAME_OVERRIDE}"
@@ -40,98 +58,93 @@ set_env_variables() {
   export NODE_POOL_NAME_AEROSPIKE="aerospike-pool"
   export NODE_POOL_NAME_AVS="avs-pool"
   export ZONE="us-central1-c"
-  export FEATURES_CONF="$WORKSPACE/features.conf"
   export BUILD_DIR="$WORKSPACE/generated"
-  export REVERSE_DNS_AVS
 }
+
 destroy_monitoring() {
-  echo "Removing monitoring setup..."
-  kubectl delete -f manifests/monitoring/avs-servicemonitor.yaml
-  kubectl delete -f manifests/monitoring/aerospike-servicemonitor.yaml
-  kubectl delete -f manifests/monitoring/aerospike-exporter-service.yaml
-
-  echo "Uninstalling monitoring stack..."
-  helm uninstall monitoring-stack --namespace monitoring
-  kubectl delete namespace monitoring
-  helm repo remove prometheus-community
+  if kubectl get ns monitoring &> /dev/null; then
+    kubectl delete -f manifests/monitoring/avs-servicemonitor.yaml --namespace monitoring || true
+    kubectl delete -f manifests/monitoring/aerospike-servicemonitor.yaml --namespace monitoring || true
+    kubectl delete -f manifests/monitoring/aerospike-exporter-service.yaml --namespace monitoring || true
+    helm uninstall monitoring-stack --namespace monitoring || true
+    kubectl delete ns monitoring || true
+  fi
+  helm repo remove prometheus-community || true
 }
+
 destroy_avs_helm_chart() {
-  echo "Destroying AVS Helm chart..."
-  helm uninstall avs-app --namespace avs
-  helm repo remove aerospike-helm
+  helm uninstall avs-app --namespace avs || true
+  helm uninstall avs-app-query --namespace avs || true
+  helm uninstall avs-app-update --namespace avs || true
+  helm uninstall avs-gke --namespace avs || true # For backwards compatibility
+  helm repo remove aerospike-helm || true
 }
 
 destroy_istio() {
-  echo "Destroying Istio setup..."
-
-  kubectl delete -f manifests/istio/avs-virtual-service.yaml
-  kubectl delete -f manifests/istio/gateway.yaml
-
-  helm uninstall istio-ingress --namespace istio-ingress
-  helm uninstall istiod --namespace istio-system
-  helm uninstall istio-base --namespace istio-system
+  kubectl delete -f manifests/istio/avs-virtual-service.yaml --namespace istio-ingress || true
+  kubectl delete -f manifests/istio/gateway.yaml || true
 
-  kubectl delete namespace istio-ingress
-  kubectl delete namespace istio-system
+  helm uninstall istio-ingress --namespace istio-ingress || true
+  helm uninstall istiod --namespace istio-system || true
+  helm uninstall istio-base --namespace istio-system || true
 
-  helm repo remove istio
+  kubectl delete ns istio-ingress || true
+  kubectl delete ns istio-system || true
+  helm repo remove istio || true
 }
 
 destroy_avs() {
-  echo "Destroying AVS secrets..."
-
-  kubectl delete secret auth-secret --namespace avs
-  kubectl delete secret aerospike-tls --namespace avs
-  kubectl delete secret aerospike-secret --namespace avs
-  kubectl delete namespace avs
+  kubectl delete secret auth-secret --namespace avs || true
+  kubectl delete secret aerospike-tls --namespace avs || true
+  kubectl delete secret aerospike-secret --namespace avs || true
+  kubectl delete ns avs || true
 }
 
 destroy_aerospike() {
-  echo "Destroying Aerospike setup..."
-
-  kubectl delete -f $BUILD_DIR/manifests/aerospike-cr.yaml
-
-  kubectl delete -f https://raw.githubusercontent.com/aerospike/aerospike-kubernetes-operator/refs/heads/master/config/samples/storage/eks_ssd_storage_class.yaml
+  kubectl delete -f "$BUILD_DIR/manifests/aerospike-cr.yaml" --namespace aerospike || true
+  kubectl delete -f "https://raw.githubusercontent.com/aerospike/aerospike-kubernetes-operator/master/config/samples/storage/gce_ssd_storage_class.yaml" || true
 
-  kubectl delete secret aerospike-secret --namespace aerospike
-  kubectl delete secret auth-secret --namespace aerospike
-  kubectl delete secret aerospike-tls --namespace aerospike
+  kubectl delete secret aerospike-secret --namespace aerospike || true
+  kubectl delete secret auth-secret --namespace aerospike || true
+  kubectl delete secret aerospike-tls --namespace aerospike || true
 
-  kubectl delete serviceaccount aerospike-operator-controller-manager --namespace aerospike
-  kubectl delete clusterrolebinding aerospike-cluster
+  kubectl delete serviceaccount aerospike-operator-controller-manager --namespace aerospike || true
+  kubectl delete clusterrolebinding aerospike-cluster || true
 
-  kubectl delete -f https://operatorhub.io/install/aerospike-kubernetes-operator.yaml
-
-  kubectl delete namespace aerospike
+  kubectl delete -f "https://operatorhub.io/install/aerospike-kubernetes-operator.yaml" || true
+  kubectl delete ns aerospike || true
 }
 
 destroy_gke_cluster() {
-  echo "GKE cluster destruction..."
-
-  gcloud container node-pools delete "$NODE_POOL_NAME_AVS" \
-    --cluster "$CLUSTER_NAME" \
-    --project "$PROJECT_ID" \
-    --zone "$ZONE" \
-    --quiet
-
-  gcloud container node-pools delete "$NODE_POOL_NAME_AEROSPIKE" \
-    --cluster "$CLUSTER_NAME" \
-    --project "$PROJECT_ID" \
-    --zone "$ZONE" \
-    --quiet
-
-  gcloud container clusters delete "$CLUSTER_NAME" \
-    --project "$PROJECT_ID" \
-    --zone "$ZONE" \
-    --quiet
+  if [[ "$DESTROY_CLUSTER" -eq 1 ]]; then
+    echo "GKE cluster destruction..."
+
+    gcloud container node-pools delete "$NODE_POOL_NAME_AVS" \
+      --cluster "$CLUSTER_NAME" \
+      --project "$PROJECT_ID" \
+      --zone "$ZONE" \
+      --quiet || true
+
+    gcloud container node-pools delete "$NODE_POOL_NAME_AEROSPIKE" \
+      --cluster "$CLUSTER_NAME" \
+      --project "$PROJECT_ID" \
+      --zone "$ZONE" \
+      --quiet || true
+
+    gcloud container clusters delete "$CLUSTER_NAME" \
+      --project "$PROJECT_ID" \
+      --zone "$ZONE" \
+      --quiet || true
+  else
+    echo "Skipping GKE cluster destruction due to --keep-cluster flag."
+  fi
 }
-
 main() {
   set_env_variables
   print_env
+
   destroy_monitoring
   destroy_avs_helm_chart
   destroy_istio
@@ -140,4 +153,5 @@ main() {
   destroy_gke_cluster
 }
 
-main
\ No newline at end of file
+main
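The idiom this change applies throughout (existence checks before create, `|| true` on delete, `--atomic --wait` on install) can also be factored into small helpers so each step stays re-runnable by construction. A minimal sketch under the same assumptions as the scripts above; the `ensure_*` helpers are illustrative, not part of this repo:

```bash
#!/bin/bash
# Sketch of idempotent create/install helpers in the style this change adopts.
# Assumes kubectl and helm already point at the target cluster.

ensure_namespace() {
  local ns="$1"
  # "create --dry-run | apply" succeeds whether or not the namespace exists,
  # unlike bare "kubectl create namespace", which fails on re-runs.
  kubectl create namespace "$ns" --dry-run=client -o yaml | kubectl apply -f -
}

ensure_helm_release() {
  local release="$1" chart="$2" ns="$3"; shift 3
  if helm status "$release" --namespace "$ns" &> /dev/null; then
    echo "Release $release already present in namespace $ns. Skipping."
  else
    # --atomic rolls back automatically if any resource fails to become ready.
    helm install "$release" "$chart" --namespace "$ns" --atomic --wait "$@"
  fi
}

# Example usage mirroring the install script:
ensure_namespace avs
ensure_helm_release avs-app aerospike-helm/aerospike-vector-search avs \
  --version "${CHART_VERSION:-0.7.0}" --set replicaCount=2
```

Re-run against an existing deployment, both helpers are no-ops, which is what makes resuming after a failed step safe.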