Skip to content

Commit

Permalink
Updates for 0.4.0 k8s (#35)
Browse files Browse the repository at this point in the history
* Updates to 0.4.0 along with an attempt at keeping L7 Istio changes separate.

* Fixed issue with load balancer never being set to true

* Service monitor updates needed to detect Proximus nodes from Prometheus
  • Loading branch information
arrowplum authored May 28, 2024
1 parent 843ccc0 commit 0cb12fa
Show file tree
Hide file tree
Showing 11 changed files with 163 additions and 46 deletions.
67 changes: 46 additions & 21 deletions kubernetes/full-create-and-install.sh
Original file line number Diff line number Diff line change
@@ -1,27 +1,33 @@
#!/bin/bash

# This script sets up a GKE cluster with specific configurations for Aerospike and Proximus node pools.
# This script sets up a GKE cluster with specific configurations for Aerospike and AVS node pools.
# It handles the creation of the cluster, node pools, labeling, tainting of nodes, and deployment of necessary operators and configurations.
# Additionally, it sets up monitoring using Prometheus and deploys a specific Helm chart for Proximus.
# Additionally, it sets up monitoring using Prometheus and deploys a specific Helm chart for AVS.

# Function to print environment variables for verification
set -eo pipefail
if [ -n "$DEBUG" ]; then set -x; fi
trap 'echo "Error: $? at line $LINENO" >&2' ERR

print_env() {
echo "Environment Variables:"
echo "export PROJECT_ID=$PROJECT_ID"
echo "export CLUSTER_NAME=$CLUSTER_NAME"
echo "export NODE_POOL_NAME_AEROSPIKE=$NODE_POOL_NAME_AEROSPIKE"
echo "export NODE_POOL_NAME_PROXIMUS=$NODE_POOL_NAME_PROXIMUS"
echo "export NODE_POOL_NAME_AVS=$NODE_POOL_NAME_AVS"
echo "export ZONE=$ZONE"
echo "export FEATURES_CONF=$FEATURES_CONF"
echo "export AEROSPIKE_CR=$AEROSPIKE_CR"
}

# Set environment variables for the GKE cluster setup
export PROJECT_ID="performance-eco"
export CLUSTER_NAME="my-world-eco"
export PROJECT_ID="$(gcloud config get-value project)"
export CLUSTER_NAME="${PROJECT_ID}-modern-world"
export NODE_POOL_NAME_AEROSPIKE="aerospike-pool"
export NODE_POOL_NAME_PROXIMUS="proximus-pool"
export NODE_POOL_NAME_AVS="avs-pool"
export ZONE="us-central1-c"
#export HELM_CHART="aerospike/aerospike-avs"
export HELM_CHART="/home/joem/src/helm-charts/aerospike-vector-search"
export FEATURES_CONF="./features.conf"
export AEROSPIKE_CR="./manifests/ssd_storage_cluster_cr.yaml"

Expand Down Expand Up @@ -97,40 +103,59 @@ kubectl apply -f https://raw.githubusercontent.com/aerospike/aerospike-kubernete

echo "Deploying Aerospike cluster..."
kubectl apply -f "$AEROSPIKE_CR"

##############################################
# Proximus name space
# AVS name space
##############################################

echo "Adding Proximus node pool..."
if ! gcloud container node-pools create "$NODE_POOL_NAME_PROXIMUS" \
echo "Adding avs node pool..."
if ! gcloud container node-pools create "$NODE_POOL_NAME_AVS" \
--cluster "$CLUSTER_NAME" \
--project "$PROJECT_ID" \
--zone "$ZONE" \
--num-nodes 3 \
--disk-type "pd-standard" \
--disk-size "100" \
--machine-type "e2-highmem-4"; then
echo "Failed to create Proximus node pool"
echo "Failed to create avs node pool"
exit 1
else
echo "Proximus node pool added successfully."
echo "avs node pool added successfully."
fi

echo "Labeling Proximus nodes..."
kubectl get nodes -l cloud.google.com/gke-nodepool="$NODE_POOL_NAME_PROXIMUS" -o name | \
xargs -I {} kubectl label {} aerospike.com/node-pool=proximus --overwrite
echo "Labeling avs nodes..."
kubectl get nodes -l cloud.google.com/gke-nodepool="$NODE_POOL_NAME_AVS" -o name | \
xargs -I {} kubectl label {} aerospike.com/node-pool=avs --overwrite



echo "Setup complete. Cluster and node pools are configured."

kubectl create namespace proximus
kubectl create namespace avs

echo "Setting secrets for avs cluster..."
kubectl --namespace avs create secret generic aerospike-secret --from-file=features.conf="$FEATURES_CONF"
kubectl --namespace avs create secret generic auth-secret --from-literal=password='admin123'


# echo "Deploying Istio"
# helm repo add istio https://istio-release.storage.googleapis.com/charts
# helm repo update

# helm install istio-base istio/base --namespace istio-system --set defaultRevision=default --create-namespace --wait
# helm install istiod istio/istiod --namespace istio-system --create-namespace --wait
# helm install istio-ingress istio/gateway \
# --values "manifests/istio-ingressgateway-values.yaml" \
# --namespace istio-ingress \
# --create-namespace \
# --wait

# kubectl apply -f "manifests/gateway.yaml"
# kubectl apply -f "manifests/virtual-service-vector-search.yaml"


echo "Setting secrets for proximus cluster..."
kubectl --namespace proximus create secret generic aerospike-secret --from-file=features.conf="$FEATURES_CONF"
kubectl --namespace proximus create secret generic auth-secret --from-literal=password='admin123'

helm install proximus-gke --values "manifests/proximus-gke-values.yaml" --namespace proximus aerospike/aerospike-proximus --wait
helm install avs-gke --values "manifests/avs-gke-values.yaml" --namespace avs $HELM_CHART --wait


##############################################
Expand All @@ -152,6 +177,6 @@ echo "To expose grafana ports publically 'kubectl apply -f helpers/EXPOSE-GRAFAN
echo "To find the exposed port with 'kubectl get svc -n monitoring' "


#To run the quote search sample app on your new cluster you can use
# helm install sematic-search-app aerospike/quote-semantic-search --namespace proximus --values manifests/sematic-search-values.yaml --wait
echo To run the quote search sample app on your new cluster you can use
echo helm install sematic-search-app aerospike/quote-semantic-search --namespace avs --values manifests/sematic-search-values.yaml --wait

Original file line number Diff line number Diff line change
@@ -1,23 +1,23 @@

replicaCount: 3

proximusConfig:
aerospikeVectorSearchConfig:
heartbeat:
seeds:
- address: proximus-gke-aerospike-proximus-0.proximus-gke-aerospike-proximus.proximus.svc.cluster.local
- address: avs-gke-aerospike-vector-search-0.avs-gke-aerospike-vector-search.avs.svc.cluster.local
port: 5001
- address: proximus-gke-aerospike-proximus-1.proximus-gke-aerospike-proximus.proximus.svc.cluster.local
- address: avs-gke-aerospike-vector-search-1.avs-gke-aerospike-vector-search.avs.svc.cluster.local
port: 5001
- address: proximus-gke-aerospike-proximus-2.proximus-gke-aerospike-proximus.proximus.svc.cluster.local
- address: avs-gke-aerospike-vector-search-2.avs-gke-aerospike-vector-search.avs.svc.cluster.local
port: 5001

interconnect:
ports:
5001:
addresses:
0.0.0.0
aerospike:
metadata-namespace: "proximus-meta"
metadata-namespace: "avs-meta"
seeds:
- aerocluster-0-0.aerocluster.aerospike.svc.cluster.local:
port: 3000
Expand All @@ -30,16 +30,27 @@ proximusConfig:
metrics-ticker: info
root: info
com.aerospike.vector.embedded.client: debug
client: debug
ticker-interval: 10

service:
enabled: true
annotations:
networking.gke.io/load-balancer-type: "Internal"
networking.gke.io/load-balancer-type: "External"
ports:
- name: "svc-port"
port: 5000
targetPort: 5000
# service:
# enabled: false
# type: LoadBalancer
# annotations:
# cloud.google.com/l4-rbs: "enabled"
# # networking.gke.io/load-balancer-type: "Internal"
# ports:
# - name: "svc-port"
# port: 5000
# targetPort: 5000

# schedule proximus nodes
affinity:
Expand All @@ -50,7 +61,7 @@ service:
- key: aerospike.com/node-pool
operator: In
values:
- "proximus"
- "aerospike-vector-search"
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: "kubernetes.io/hostname"
Expand All @@ -59,4 +70,4 @@ service:
- key: "app.kubernetes.io/name"
operator: In
values:
- "aerospike-proximus"
- "aerospike-vector-search"
23 changes: 23 additions & 0 deletions kubernetes/manifests/gateway.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Istio Gateway exposing two entry points for the AVS stack:
# plain HTTP on 80 (sample app) and gRPC on 5000 (vector-search API).
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: avs-gw
  namespace: aerospike
spec:
  selector:
    istio: ingress   # binds to the pods of the istio-ingress gateway release
  servers:
    - port:
        number: 80
        name: http
        protocol: HTTP
      hosts:
        - "*"        # accept any host header
    - port:
        number: 5000
        name: grpc
        protocol: GRPC
      hosts:
        - "*"
      # NOTE(review): `tls: mode: PASSTHROUGH` is normally paired with
      # `protocol: TLS`; on a plaintext GRPC server it is unusual — confirm
      # whether TLS passthrough of client connections is actually intended.
      tls:
        mode: PASSTHROUGH
17 changes: 17 additions & 0 deletions kubernetes/manifests/istio-ingressgateway-values.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
# Helm values for the istio/gateway chart: publish the ingress gateway through
# an external LoadBalancer Service carrying HTTP, HTTPS, the Istio status
# probe port, and the AVS gRPC port.
service:
  type: LoadBalancer
  annotations:
    cloud.google.com/l4-rbs: "enabled"   # GKE L4 regional backend service LB
  ports:
    - name: http
      port: 80
      targetPort: 80
    - name: https
      port: 443
      targetPort: 443
    - name: status-port    # Istio gateway readiness/health endpoint
      port: 15021
      targetPort: 15021
    - name: grpc           # AVS vector-search traffic (matches gateway.yaml)
      port: 5000
      targetPort: 5000
Original file line number Diff line number Diff line change
@@ -1,17 +1,17 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: proximus-monitor
name: avs-monitor
namespace: monitoring
labels:
release: monitoring-stack # Ensure this matches the Helm release name
spec:
selector:
matchLabels:
app: proximus-gke-aerospike-proximus
app: avs-gke-aerospike-vector-search
namespaceSelector:
matchNames:
- aerospike
- avs
endpoints:
- port: manage-5040
path: /manage/rest/v1/prometheus
Expand Down
5 changes: 5 additions & 0 deletions kubernetes/manifests/quote-search-gke-values.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# Helm values for the quote-semantic-search sample app, pointing it at the
# AVS load-balancer service created by the avs-gke Helm release.
quoteSearchConfig:
  # In-cluster service DNS is <service>.<namespace>.svc.cluster.local. The AVS
  # chart is installed into the "avs" namespace, so the namespace segment must
  # appear in the FQDN — the original value omitted ".avs" and would not
  # resolve from another namespace.
  avsHost: "avs-gke-aerospike-vector-search-lb.avs.svc.cluster.local"
  avsIsLoadbalancer: "True"
  avsNamespace: "avs-meta"   # Aerospike namespace used for AVS metadata
5 changes: 0 additions & 5 deletions kubernetes/manifests/sematic-search-values.yaml

This file was deleted.

14 changes: 14 additions & 0 deletions kubernetes/manifests/servicemonitor.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
---
# Prometheus Operator ServiceMonitor that scrapes AVS metrics from the
# management listener of every matching Service.
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: avs-service-monitor
  # NOTE(review): with no spec.namespaceSelector, a ServiceMonitor only matches
  # Services in its own namespace ("aerospike"), but the AVS release is
  # installed into the "avs" namespace elsewhere in this commit — confirm this
  # monitor actually discovers the AVS Services.
  namespace: aerospike
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: aerospike-vector-search
  endpoints:
    - port: "manage-5040"                 # named management port on the Service
      interval: 10s                       # scrape every 10 seconds
      path: "/manage/rest/v1/prometheus"  # AVS Prometheus exposition endpoint
8 changes: 4 additions & 4 deletions kubernetes/manifests/ssd_storage_cluster_cr.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -22,9 +22,9 @@ spec:
storageClass: ssd
volumeMode: Filesystem
size: 1Gi
- name: proximus-meta
- name: avs-meta
aerospike:
path: /proximus/dev/xvdf
path: /avs/dev/xvdf
source:
persistentVolume:
storageClass: ssd
Expand Down Expand Up @@ -91,12 +91,12 @@ spec:
devices:
- /test/dev/xvdf

- name: proximus-meta
- name: avs-meta
nsup-period: 600
nsup-threads: 2
evict-tenths-pct: 5
replication-factor: 2
storage-engine:
type: device
devices:
- /proximus/dev/xvdf
- /avs/dev/xvdf
29 changes: 29 additions & 0 deletions kubernetes/manifests/virtual-service-vector-search.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: avs-vs
namespace: aerospike
spec:
hosts:
- "*"
gateways:
- avs-gw
http:
- match:
- uri:
prefix: /
port: 80
route:
- destination:
port:
number: 8080
host: quote-search-quote-semantic-search.aerospike.svc.cluster.local
- match:
- uri:
prefix: /
port: 5000
route:
- destination:
port:
number: 5000
host: avs-gke-aerospike-vector-search.aerospike.svc.cluster.local
6 changes: 2 additions & 4 deletions quote-semantic-search/quote-search/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,13 +3,11 @@

def get_bool_env(name, default):
env = os.environ.get(name)

if env is None:
return default
env = env.lower()

env = env.lower()

if os.environ.get(name) in ["true", "1"]:
if env in ["true", "1"]:
return True
else:
return False
Expand Down

0 comments on commit 0cb12fa

Please sign in to comment.