From f9b1a14c4aba40f0189427977aabc510e827bb67 Mon Sep 17 00:00:00 2001
From: Shanshan
Date: Wed, 6 Mar 2024 10:56:19 +0800
Subject: [PATCH 1/7] chore: update mogdb role probe (#350)

---
 addons/mogdb/templates/clusterdefinition.yaml | 52 ++++++++++++-------
 1 file changed, 32 insertions(+), 20 deletions(-)

diff --git a/addons/mogdb/templates/clusterdefinition.yaml b/addons/mogdb/templates/clusterdefinition.yaml
index 6f3bd8693..e326e29ea 100644
--- a/addons/mogdb/templates/clusterdefinition.yaml
+++ b/addons/mogdb/templates/clusterdefinition.yaml
@@ -17,25 +17,25 @@ spec:
       workloadType: Stateful
       rsmSpec:
         roles:
-          - name: primary
+          - name: Primary
             accessMode: ReadWrite
             isLeader: true
-          - name: standby
+          - name: Standby
             accessMode: Readonly
             isLeader: false
         roleProbe:
           customHandler:
-            image: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}
             command:
+              - /bin/bash
+              - -c
               - |
-                original_role=$(su -l omm -c "gsql -h 127.0.0.1 -p 26000 -U ${KB_RSM_USERNAME} -W \"${KB_RSM_PASSWORD}\" -dpostgres -t -c 'select local_role from pg_stat_get_wal_senders() limit 1'")
-                if [ -z "$original_role" ]; then
-                  original_role=$(su -l omm -c "gsql -h 127.0.0.1 -p 26000 -U ${KB_RSM_USERNAME} -W \"${KB_RSM_PASSWORD}\" -dpostgres -t -c 'select local_role from pg_stat_get_wal_receiver() limit 1'")
+                role=$(gsql -h 127.0.0.1 -p 26000 -U ${MOGDB_USER} -W "${MOGDB_PASSWORD}" -dpostgres -t -c "select local_role from pg_stat_get_stream_replications() limit 1" | xargs echo -n)
+                if [ "$role"x == "Normal"x ]; then
+                  echo -n "Primary"
+                else
+                  echo -n $role
                 fi
-
-                role=$(echo ${original_role} | tr 'A-Z' 'a-z')
-                echo -n "$role"
 
       characterType: mogdb
       configSpecs:
        - name: mogdb-configuration
@@ -98,47 +98,47 @@ spec:
           set -ex
           MogDB_Role=
           REPL_CONN_INFO=
-          
+
           cat >>/home/omm/.profile <<-EOF
           export OG_SUBNET="0.0.0.0/0"
           export PGHOST="/var/lib/mogdb/tmp"
           export PGPORT="$PGPORT"
           EOF
           [[ -d "$PGHOST" ]] || (mkdir -p $PGHOST && chown omm $PGHOST)
-          
+
           hostname=`hostname`
           [[ "$hostname" =~ -([0-9]+)$ ]] || exit 1
-          ordinal=${BASH_REMATCH[1]} 
+          ordinal=${BASH_REMATCH[1]}
           if [[ $ordinal -eq 0 ]];then
             MogDB_Role="primary"
           else
             MogDB_Role="standby"
-          
+
             if [ -n "$PGPORT" ];then
               ha_port=$(expr $PGPORT + 1)
               ha_service_port=$(expr $PGPORT + 2)
             else
               ha_port=$(expr 5432 + 1)
-              ha_service_port=$(expr 5432 + 2) 
+              ha_service_port=$(expr 5432 + 2)
             fi
-          
+
             cat >/tmp/replconninfo.conf
             for ((i=0; i<$ordinal; i++)); do
              echo "$hostname $PodIP" |ncat --send-only ${KB_CLUSTER_COMP_NAME}-${i}.${KB_CLUSTER_COMP_NAME}-headless 6543
              remote_ip=`ping ${KB_CLUSTER_COMP_NAME}-${i}.${KB_CLUSTER_COMP_NAME}-headless -c 1 | sed '1{s/[^(]*(//;s/).*//;q}'`
-          
+
              echo "replconninfo$((i+1)) = 'localhost=$PodIP localport=${ha_port} localservice=${ha_service_port} remotehost=$remote_ip remoteport=${ha_port} remoteservice=${ha_service_port}'" >> /tmp/replconninfo.conf
             done
-          
+
            #echo "$hostname $PodIP" |ncat --send-only ${KB_CLUSTER_COMP_NAME}-0.${KB_CLUSTER_COMP_NAME}-headless 6543
            #remote_ip=`ping ${KB_CLUSTER_COMP_NAME}-0.${KB_CLUSTER_COMP_NAME}-headless -c 1 | sed '1{s/[^(]*(//;s/).*//;q}'`
-          
+
            # REPL_CONN_INFO="replconninfo${ordinal} = 'localhost=$PodIP localport=${ha_port} localservice=${ha_service_port} remotehost=$remote_ip remoteport=${ha_port} remoteservice=${ha_service_port}'"
           fi
-          
+
           [[ -n "$REPL_CONN_INFO" ]] && export REPL_CONN_INFO
           source /home/omm/.profile
-          
+
           cp /home/omm/conf/* /tmp/
           chmod 777 /tmp/postgresql.conf /tmp/pg_hba.conf
           exec bash /kb-scripts/setup.sh -M "$MogDB_Role"
@@ -177,6 +177,18 @@ spec:
             fieldRef:
               apiVersion: v1
               fieldPath: status.podIP
+        - name: MOGDB_USER
+          valueFrom:
+            secretKeyRef:
+              name: $(CONN_CREDENTIAL_SECRET_NAME)
+              key: username
+              optional: false
+        - name: MOGDB_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: $(CONN_CREDENTIAL_SECRET_NAME)
+              key: password
+              optional: false
       volumeMounts:
         - name: data
           mountPath: /var/lib/mogdb
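The probe's output must match the role names declared under rsmSpec.roles exactly, which is why the roles and the probe output are capitalized together in this patch. A minimal sketch of the mapping the new handler implements, assuming gsql reports MogDB's local_role as "Normal" on a standalone primary and as "Primary"/"Standby" in a replication pair (the stubbed value below is illustrative, not probe output):

    role="Normal"                       # stand-in for the gsql query result
    if [ "$role"x == "Normal"x ]; then  # trailing "x" guards an empty result
      echo -n "Primary"                 # a standalone "Normal" node acts as primary
    else
      echo -n $role                     # already "Primary" or "Standby"
    fi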
From ec9fe1878b3c8fc20a7ac4258a21d9b47c7475c9 Mon Sep 17 00:00:00 2001
From: zhangtao <111836083+sophon-zt@users.noreply.github.com>
Date: Wed, 6 Mar 2024 15:05:01 +0800
Subject: [PATCH 2/7] fix: dynamic parameters reconfigure may not take effect
 (#345) (#346)

---
 addons/postgresql/templates/configconstraint-12.yaml | 2 ++
 addons/postgresql/templates/configconstraint-14.yaml | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/addons/postgresql/templates/configconstraint-12.yaml b/addons/postgresql/templates/configconstraint-12.yaml
index f5a6a0f1b..77ed610fb 100644
--- a/addons/postgresql/templates/configconstraint-12.yaml
+++ b/addons/postgresql/templates/configconstraint-12.yaml
@@ -12,6 +12,8 @@ spec:
       scriptConfigMapRef: patroni-reload-script
       namespace: {{ .Release.Namespace }}
 
+  dynamicActionCanBeMerged: false
+
   # update patroni master
   selector:
     matchLabels:
diff --git a/addons/postgresql/templates/configconstraint-14.yaml b/addons/postgresql/templates/configconstraint-14.yaml
index 1c2dd87c3..4bbcb904a 100644
--- a/addons/postgresql/templates/configconstraint-14.yaml
+++ b/addons/postgresql/templates/configconstraint-14.yaml
@@ -12,6 +12,8 @@ spec:
       scriptConfigMapRef: patroni-reload-script
       namespace: {{ .Release.Namespace }}
 
+  dynamicActionCanBeMerged: false
+
   # update patroni master
   selector:
     matchLabels:

From 8a02472294950f8eb0e0ab830f365f2a389b6b7d Mon Sep 17 00:00:00 2001
From: yabinji <109273041+yabinji@users.noreply.github.com>
Date: Thu, 7 Mar 2024 10:42:26 +0800
Subject: [PATCH 3/7] fix: add optional role string (#355)

---
 addons/mogdb/scripts/switchover.sh | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/addons/mogdb/scripts/switchover.sh b/addons/mogdb/scripts/switchover.sh
index 41f8a9109..b7ce64471 100644
--- a/addons/mogdb/scripts/switchover.sh
+++ b/addons/mogdb/scripts/switchover.sh
@@ -5,7 +5,7 @@ set -x
 # do switchover
 echo "INFO: doing switchover.."
 echo "INFO: candidate: ${candidate}"
-kubectl exec -it mc-mogdb-1 -c mogdb -- gosu omm gs_ctl switchover
+kubectl exec -it ${candidate} -c mogdb -- gosu omm gs_ctl switchover
 
 # check if switchover successfully.
 echo "INFO: start to check if switchover successfully, timeout is 60s"
@@ -15,7 +15,7 @@ while true; do
   if [ ! -z ${candidate} ]; then
     # if candidate specified, only check it
     role=$(kubectl get pod ${candidate} -ojson | jq -r '.metadata.labels["kubeblocks.io/role"]')
-    if [ "$role" == "primary" ] || [ "$role" == "leader" ] || [ "$role" == "master" ]; then
+    if [ "$role" == "Primary" ] || [ "$role" == "primary" ] || [ "$role" == "leader" ] || [ "$role" == "master" ]; then
       echo "INFO: switchover successfully, ${candidate} is ${role}"
       exit 0
     fi
@@ -25,7 +25,7 @@ while true; do
   for podName in ${pods}; do
     if [ "${podName}" != "${primary}" ];then
       role=$(kubectl get pod ${podName} -ojson | jq -r '.metadata.labels["kubeblocks.io/role"]')
-      if [ "$role" == "primary" ] || [ "$role" == "leader" ] || [ "$role" == "master" ]; then
+      if [ "$role" == "Primary" ] || [ "$role" == "primary" ] || [ "$role" == "leader" ] || [ "$role" == "master" ]; then
        echo "INFO: switchover successfully, ${podName} is ${role}"
        exit 0
      fi
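Both branches of the check read the same kubeblocks.io/role pod label, so the comparison can be exercised directly; a sketch with a hypothetical pod name (the jq expression is the one the script uses):

    kubectl get pod mogdb-sample-mogdb-0 -ojson \
      | jq -r '.metadata.labels["kubeblocks.io/role"]'
    # prints e.g. "Primary" after patch 1's rename; keeping the lowercase
    # "primary"/"leader"/"master" comparisons preserves older role spellings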
From e882f496298d38a9de1072bdb531e40c7cf8f07f Mon Sep 17 00:00:00 2001
From: Shanshan
Date: Thu, 7 Mar 2024 17:43:25 +0800
Subject: [PATCH 4/7] chore: update mogdb switchover with scripts (#356)

---
 addons/mogdb-cluster/templates/rbac.yaml | 23 +++++++---
 addons/mogdb-cluster/values.schema.json  |  8 ++--
 addons/mogdb-cluster/values.yaml         | 12 +++---
 addons/mogdb/templates/swithover.yaml    | 53 +++++++++++++++++++-----
 4 files changed, 69 insertions(+), 27 deletions(-)

diff --git a/addons/mogdb-cluster/templates/rbac.yaml b/addons/mogdb-cluster/templates/rbac.yaml
index a4602eb4e..082fc24f6 100644
--- a/addons/mogdb-cluster/templates/rbac.yaml
+++ b/addons/mogdb-cluster/templates/rbac.yaml
@@ -1,15 +1,28 @@
+{{- include "kblib.rbac" . }}
 ---
 apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
+kind: Role
 metadata:
-  name: kubeblocks-switchover-pod-role
+  name: {{ printf "%s-switchover-role" (include "kblib.clusterName" .) }}
   labels:
-    app.kubernetes.io/instance: kubeblocks
-    app.kubernetes.io/name: kubeblocks
+    {{- include "kblib.clusterLabels" . | nindent 4 }}
     app.kubernetes.io/required-by: pod
 rules:
   - apiGroups: [""]
     resources: ["pods/exec"]
     verbs: ["create"]
 ---
-{{- include "kblib.rbac" . }}
\ No newline at end of file
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: {{ printf "%s-switchover" (include "kblib.clusterName" .) }}
+  labels:
+    {{- include "kblib.clusterLabels" . | nindent 4 }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ printf "%s-switchover-role" (include "kblib.clusterName" .) }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ printf "kb-%s" (include "kblib.clusterName" .) }}
+    namespace: {{ .Release.Namespace }}
\ No newline at end of file
diff --git a/addons/mogdb-cluster/values.schema.json b/addons/mogdb-cluster/values.schema.json
index e4c5c672a..92230cc5b 100644
--- a/addons/mogdb-cluster/values.schema.json
+++ b/addons/mogdb-cluster/values.schema.json
@@ -27,8 +27,8 @@
         "number",
         "string"
       ],
-      "default": 0.5,
-      "minimum": 0.5,
+      "default": 1,
+      "minimum": 1,
       "maximum": 64,
       "multipleOf": 0.5
     },
@@ -39,8 +39,8 @@
         "number",
         "string"
       ],
-      "default": 0.5,
-      "minimum": 0.5,
+      "default": 1,
+      "minimum": 1,
       "maximum": 1000
     },
     "storage": {
diff --git a/addons/mogdb-cluster/values.yaml b/addons/mogdb-cluster/values.yaml
index 14059a4e9..d43b6d8ed 100644
--- a/addons/mogdb-cluster/values.yaml
+++ b/addons/mogdb-cluster/values.yaml
@@ -8,19 +8,19 @@ version: mogdb-5.0.5
 
 ## @param mode postgresql cluster topology mode, standalone, replication
 ##
-mode: standalone
+mode: replication
 
 ## @param replicas specify cluster replicas
 ##
-replicas: 1
+replicas: 2
 
 ## @param cpu
 ##
-cpu: 0.5
+cpu: 1
 
 ## @param memory, the unit is Gi
 ##
-memory: 0.5
+memory: 1
 
 ## @param requests.cpu if not set, use cpu
 ## @param requests.memory, if not set, use memory
@@ -31,6 +31,4 @@ requests:
 
 ## @param storage size, the unit is Gi
 ##
-storage: 20
-
-customRBAC: true
\ No newline at end of file
+storage: 20
\ No newline at end of file
diff --git a/addons/mogdb/templates/swithover.yaml b/addons/mogdb/templates/swithover.yaml
index 3431f9f5c..f913e8915 100644
--- a/addons/mogdb/templates/swithover.yaml
+++ b/addons/mogdb/templates/swithover.yaml
@@ -18,7 +18,7 @@ spec:
         type: string
       candidate:
         description: |
-          candidate instance name(pod Name). if candidate is not empty, will promote it to primary. 
+          candidate instance name(pod Name). if candidate is not empty, will promote it to primary.
           otherwise promote a randomly selected pod to primary.
         type: string
     type: object
@@ -31,13 +31,44 @@ spec:
           image: docker.io/apecloud/kubeblocks-tools:latest
           imagePullPolicy: IfNotPresent
           command:
-            - sh
-            - /scripts/switchover.sh
-          volumeMounts:
-            - name: scripts
-              mountPath: /scripts
-      volumes:
-        - name: scripts
-          configMap:
-            name: mogdb-scripts
-            defaultMode: 0777
\ No newline at end of file
+            - /bin/sh
+            - -c
+            - |
+              set -x
+              # do switchover
+              echo "INFO: doing switchover.."
+              echo "INFO: candidate: ${candidate}"
+              kubectl exec -it ${candidate} -c mogdb -- gosu omm gs_ctl switchover
+
+              # check if switchover successfully.
+              echo "INFO: start to check if switchover successfully, timeout is 60s"
+              executedUnix=$(date +%s)
+              while true; do
+                sleep 5
+                if [ ! -z ${candidate} ]; then
+                  # if candidate specified, only check it
+                  role=$(kubectl get pod ${candidate} -ojson | jq -r '.metadata.labels["kubeblocks.io/role"]')
+                  if [ "$role" == "Primary" ] || [ "$role" == "primary" ] || [ "$role" == "leader" ] || [ "$role" == "master" ]; then
+                    echo "INFO: switchover successfully, ${candidate} is ${role}"
+                    exit 0
+                  fi
+                else
+                  # check if the candidate instance has been promote to primary
+                  pods=$(kubectl get pod -l apps.kubeblocks.io/component-name=${KB_COMP_NAME},app.kubernetes.io/instance=${KB_CLUSTER_NAME} | awk 'NR > 1 {print $1}')
+                  for podName in ${pods}; do
+                    if [ "${podName}" != "${primary}" ];then
+                      role=$(kubectl get pod ${podName} -ojson | jq -r '.metadata.labels["kubeblocks.io/role"]')
+                      if [ "$role" == "Primary" ] || [ "$role" == "primary" ] || [ "$role" == "leader" ] || [ "$role" == "master" ]; then
+                        echo "INFO: switchover successfully, ${podName} is ${role}"
+                        exit 0
+                      fi
+                    fi
+                  done
+                fi
+                currentUnix=$(date +%s)
+                diff_time=$((${currentUnix}-${executedUnix}))
+                if [ ${diff_time} -ge 60 ]; then
+                  echo "ERROR: switchover failed."
+                  exit 1
+                fi
+              done
\ No newline at end of file
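Because the switchover Job now execs into the MogDB container, the per-cluster RoleBinding introduced in rbac.yaml must grant create on pods/exec to the kb-<cluster> ServiceAccount. One way to confirm the grant took effect, with illustrative names (a cluster named "mc" in namespace "demo"):

    kubectl auth can-i create pods/exec -n demo \
      --as system:serviceaccount:demo:kb-mc
    # expected output: yes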
From 687e18731fb2aafc23ba0c2749f32d92550cbf94 Mon Sep 17 00:00:00 2001
From: xuriwuyun
Date: Fri, 8 Mar 2024 11:18:37 +0800
Subject: [PATCH 5/7] feat: mysql replication support audit log (#357)

---
 addons/mysql/Chart.yaml                       |  2 +-
 addons/mysql/config/mysql5.7-config.tpl       |  9 ++++++++
 addons/mysql/config/mysql8.0-config.tpl       | 14 +++++++++++
 addons/mysql/templates/clusterdefinition.yaml |  4 +++-
 addons/mysql/templates/clusterversion.yaml    | 23 +++++++++++++++----
 addons/mysql/values.yaml                      |  2 +-
 6 files changed, 47 insertions(+), 7 deletions(-)

diff --git a/addons/mysql/Chart.yaml b/addons/mysql/Chart.yaml
index cac3c2bab..9cabe854f 100644
--- a/addons/mysql/Chart.yaml
+++ b/addons/mysql/Chart.yaml
@@ -11,7 +11,7 @@ version: 0.9.0
 # From a user's perspective, the version number of the frontend
 # proxy of the MySQL being used is not relevant.
 # appVersion: "8.0.33"
-appVersion: "5.7.42"
+appVersion: "5.7.44"
 
 home: https://kubeblocks.io/
 icon: https://kubeblocks.io/img/logo.png
diff --git a/addons/mysql/config/mysql5.7-config.tpl b/addons/mysql/config/mysql5.7-config.tpl
index 334139e95..65eeb2978 100644
--- a/addons/mysql/config/mysql5.7-config.tpl
+++ b/addons/mysql/config/mysql5.7-config.tpl
@@ -127,6 +127,15 @@ max_binlog_size=134217728
 # binlog_transaction_dependency_tracking=WRITESET #Default Commit Order, Aws not set
 log_slave_updates=ON
 
+# audit log
+loose_audit_log_handler=FILE # FILE, SYSLOG
+loose_audit_log_file={{ $data_root }}/auditlog/audit.log
+loose_audit_log_buffer_size=1Mb
+loose_audit_log_policy=ALL # ALL, LOGINS, QUERIES, NONE
+loose_audit_log_strategy=ASYNCHRONOUS
+loose_audit_log_rotate_on_size=10485760
+loose_audit_log_rotations=5
+
 # replay log
 # relay_log_info_repository=TABLE # From mysql8.0.23 is deprecated.
 
diff --git a/addons/mysql/config/mysql8.0-config.tpl b/addons/mysql/config/mysql8.0-config.tpl
index 13c991f8e..378e72113 100644
--- a/addons/mysql/config/mysql8.0-config.tpl
+++ b/addons/mysql/config/mysql8.0-config.tpl
@@ -166,6 +166,20 @@ relay_log_recovery=ON
 relay_log=relay-bin
 relay_log_index=relay-bin.index
 
+# audit log
+loose_audit_log_handler=FILE # FILE, SYSLOG
+loose_audit_log_file={{ $data_root }}/auditlog/audit.log
+loose_audit_log_buffer_size=1Mb
+loose_audit_log_policy=ALL # ALL, LOGINS, QUERIES, NONE
+loose_audit_log_strategy=ASYNCHRONOUS
+loose_audit_log_rotate_on_size=10485760
+loose_audit_log_rotations=5
+
+# semi sync, it works
+# loose_rpl-semi-sync-source-enabled = 1
+# loose_rpl_semi_sync_source_timeout = 0
+# loose_rpl-semi-sync-replica-enabled = 1
+
 pid-file=/var/run/mysqld/mysqld.pid
 socket=/var/run/mysqld/mysqld.sock
 
diff --git a/addons/mysql/templates/clusterdefinition.yaml b/addons/mysql/templates/clusterdefinition.yaml
index 4eb163607..2d7b2d20c 100644
--- a/addons/mysql/templates/clusterdefinition.yaml
+++ b/addons/mysql/templates/clusterdefinition.yaml
@@ -64,7 +64,9 @@ spec:
         - command:
             - bash
            - -c
-            - "mkdir -p /var/lib/mysql/{log,binlog}; chown -R mysql:root /var/lib/mysql;"
+            - |
+              mkdir -p {{ .Values.dataMountPath }}/{log,binlog,auditlog}
+              cp /usr/lib/mysql/plugin/ {{ .Values.dataMountPath }}/plugin -r
           imagePullPolicy: Always
           name: init-data
           volumeMounts:
diff --git a/addons/mysql/templates/clusterversion.yaml b/addons/mysql/templates/clusterversion.yaml
index 9e0f6fb1c..1080e041a 100644
--- a/addons/mysql/templates/clusterversion.yaml
+++ b/addons/mysql/templates/clusterversion.yaml
@@ -21,7 +21,7 @@ spec:
           defaultMode: 0444
       versionsContext:
         initContainers:
-          - image: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}
+          - image: {{ .Values.image.registry | default "docker.io" }}/apecloud/mysql_audit_log:{{ .Values.image.tag }}
             name: init-data
         containers:
           - name: mysql
@@ -30,7 +30,15 @@ spec:
             command:
               - bash
              - -c
-              - "/scripts/docker-entrypoint.sh mysqld --server-id $(( ${KB_POD_NAME##*-} + 1)) --ignore-db-dir=lost+found --plugin-load-add=rpl_semi_sync_master=semisync_master.so --plugin-load-add=rpl_semi_sync_slave=semisync_slave.so"
+              - |
+                mv {{ .Values.dataMountPath }}/plugin/audit_log.so /usr/lib64/mysql/plugin/
+                rm -rf {{ .Values.dataMountPath }}/plugin
+                chown -R mysql:root {{ .Values.dataMountPath }}
+                /scripts/docker-entrypoint.sh mysqld --server-id $(( ${KB_POD_NAME##*-} + 1)) \
+                --ignore-db-dir=lost+found \
+                --plugin-load-add=rpl_semi_sync_master=semisync_master.so \
+                --plugin-load-add=rpl_semi_sync_slave=semisync_slave.so \
+                --plugin-load-add=audit_log=audit_log.so
   systemAccountSpec:
     cmdExecutorConfig:
       image: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}
@@ -50,7 +58,7 @@ spec:
     - componentDefRef: mysql
       versionsContext:
         initContainers:
-          - image: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.repository }}:8.0.33
+          - image: {{ .Values.image.registry | default "docker.io" }}/apecloud/mysql_audit_log:8.0.33
             name: init-data
         containers:
           - name: mysql
@@ -59,7 +67,14 @@ spec:
             command:
              - bash
              - -c
-              - "docker-entrypoint.sh mysqld --server-id $(( ${KB_POD_NAME##*-} + 1)) --plugin-load-add=rpl_semi_sync_source=semisync_source.so --plugin-load-add=rpl_semi_sync_replica=semisync_replica.so"
+              - |
+                mv {{ .Values.dataMountPath }}/plugin/audit_log.so /usr/lib64/mysql/plugin/
+                rm -rf {{ .Values.dataMountPath }}/plugin
+                chown -R mysql:root {{ .Values.dataMountPath }}
+                docker-entrypoint.sh mysqld --server-id $(( ${KB_POD_NAME##*-} + 1)) \
+                --plugin-load-add=rpl_semi_sync_source=semisync_source.so \
+                --plugin-load-add=rpl_semi_sync_replica=semisync_replica.so \
+                --plugin-load-add=audit_log=audit_log.so
   systemAccountSpec:
     cmdExecutorConfig:
       image: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.repository }}:8.0.33
diff --git a/addons/mysql/values.yaml b/addons/mysql/values.yaml
index 775c2a429..82d7a0746 100644
--- a/addons/mysql/values.yaml
+++ b/addons/mysql/values.yaml
@@ -8,7 +8,7 @@ image:
   pullPolicy: IfNotPresent
   # Overrides the image tag whose default is the chart appVersion.
   #tag: 8.0.33
-  tag: 5.7.42
+  tag: 5.7.44
 
 xtraBackupRepository: perconalab/percona-xtrabackup
 
 ## MySQL Cluster parameters
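The loose_ prefix is what makes this ordering safe: mysqld only warns about an unknown loose_-prefixed option, so the audit settings can sit in the config before --plugin-load-add=audit_log=audit_log.so loads the plugin the init container copied in. A quick verification sketch once a pod is running (host and credentials are illustrative; the variable names assume a Percona-style audit_log plugin, which the apecloud/mysql_audit_log image appears to provide):

    mysql -h127.0.0.1 -uroot -p"$MYSQL_ROOT_PASSWORD" -e "
      SELECT PLUGIN_NAME, PLUGIN_STATUS FROM INFORMATION_SCHEMA.PLUGINS
       WHERE PLUGIN_NAME = 'audit_log';
      SHOW GLOBAL VARIABLES LIKE 'audit_log%';"
    # audit_log should show ACTIVE, with audit_log_file under .../auditlog/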
From d808afb3ed37ff032a64e80f1f39fdeef023e1d7 Mon Sep 17 00:00:00 2001
From: wangyelei
Date: Mon, 11 Mar 2024 09:30:40 +0800
Subject: [PATCH 6/7] chore: support pg pitr (#361)

---
 .../dataprotection/common-scripts.sh          | 119 ++++++++++++
 .../postgresql-fetch-wal-log.sh               |  54 ++++++
 .../dataprotection/postgresql-pitr-backup.sh  | 154 ++++++++++++++++++
 .../dataprotection/postgresql-pitr-restore.sh |  35 ++++
 .../templates/actionset-postgresql-pitr.yaml  |  67 ++++++++
 .../templates/backuppolicytemplate.yaml       |  34 ++++
 6 files changed, 463 insertions(+)
 create mode 100644 addons/postgresql/dataprotection/common-scripts.sh
 create mode 100644 addons/postgresql/dataprotection/postgresql-fetch-wal-log.sh
 create mode 100644 addons/postgresql/dataprotection/postgresql-pitr-backup.sh
 create mode 100644 addons/postgresql/dataprotection/postgresql-pitr-restore.sh
 create mode 100644 addons/postgresql/templates/actionset-postgresql-pitr.yaml

diff --git a/addons/postgresql/dataprotection/common-scripts.sh b/addons/postgresql/dataprotection/common-scripts.sh
new file mode 100644
index 000000000..9e1009e92
--- /dev/null
+++ b/addons/postgresql/dataprotection/common-scripts.sh
@@ -0,0 +1,119 @@
+# log info file
+function DP_log() {
+  msg=$1
+  local curr_date=$(date -u '+%Y-%m-%d %H:%M:%S')
+  echo "${curr_date} INFO: $msg"
+}
+
+# log error info
+function DP_error_log() {
+  msg=$1
+  local curr_date=$(date -u '+%Y-%m-%d %H:%M:%S')
+  echo "${curr_date} ERROR: $msg"
+}
+
+# Get file names without extensions based on the incoming file path
+function DP_get_file_name_without_ext() {
+  local fileName=$1
+  local file_without_ext=${fileName%.*}
+  echo $(basename ${file_without_ext})
+}
+
+# Save backup status info file for syncing progress.
+# timeFormat: %Y-%m-%dT%H:%M:%SZ
+function DP_save_backup_status_info() {
+  local totalSize=$1
+  local startTime=$2
+  local stopTime=$3
+  local timeZone=$4
+  local extras=$5
+  local timeZoneStr=""
+  if [ ! -z ${timeZone} ]; then
+    timeZoneStr=",\"timeZone\":\"${timeZone}\""
+  fi
+  if [ -z "${stopTime}" ];then
+    echo "{\"totalSize\":\"${totalSize}\"}" > ${DP_BACKUP_INFO_FILE}
+  elif [ -z "${startTime}" ];then
+    echo "{\"totalSize\":\"${totalSize}\",\"extras\":[${extras}],\"timeRange\":{\"end\":\"${stopTime}\"${timeZoneStr}}}" > ${DP_BACKUP_INFO_FILE}
+  else
+    echo "{\"totalSize\":\"${totalSize}\",\"extras\":[${extras}],\"timeRange\":{\"start\":\"${startTime}\",\"end\":\"${stopTime}\"${timeZoneStr}}}" > ${DP_BACKUP_INFO_FILE}
+  fi
+}
+
+
+# Clean up expired logfiles.
+# Default interval is 60s
+# Default rootPath is /
+function DP_purge_expired_files() {
+  local currentUnix="${1:?missing current unix}"
+  local last_purge_time="${2:?missing last_purge_time}"
+  local root_path=${3:-"/"}
+  local interval_seconds=${4:-60}
+  local diff_time=$((${currentUnix}-${last_purge_time}))
+  if [[ -z ${DP_TTL_SECONDS} || ${diff_time} -lt ${interval_seconds} ]]; then
+    return
+  fi
+  expiredUnix=$((${currentUnix}-${DP_TTL_SECONDS}))
+  files=$(datasafed list -f --recursive --older-than ${expiredUnix} ${root_path} )
+  for file in ${files[@]}
+  do
+    datasafed rm ${file}
+    echo ${file}
+  done
+}
+
+# analyze the start time of the earliest file from the datasafed backend.
+# Then record the file name into dp_oldest_file.info.
+# If the oldest file is no changed, exit the process.
+# This can save traffic consumption.
+function DP_analyze_start_time_from_datasafed() {
+  local oldest_file="${1:?missing oldest file}"
+  local get_start_time_from_file="${2:?missing get_start_time_from_file function}"
+  local datasafed_pull="${3:?missing datasafed_pull function}"
+  local info_file="${KB_BACKUP_WORKDIR}/dp_oldest_file.info"
+  mkdir -p ${KB_BACKUP_WORKDIR} && cd ${KB_BACKUP_WORKDIR}
+  if [ -f ${info_file} ]; then
+    last_oldest_file=$(cat ${info_file})
+    last_oldest_file_name=$(DP_get_file_name_without_ext ${last_oldest_file})
+    if [ "$last_oldest_file" == "${oldest_file}" ]; then
+      # oldest file no changed.
+      ${get_start_time_from_file} $last_oldest_file_name
+      return
+    fi
+    # remove last oldest file
+    if [ -f ${last_oldest_file_name} ];then
+      rm -rf ${last_oldest_file_name}
+    fi
+  fi
+  # pull file
+  ${datasafed_pull} ${oldest_file}
+  # record last oldest file
+  echo ${oldest_file} > ${info_file}
+  oldest_file_name=$(DP_get_file_name_without_ext ${oldest_file})
+  ${get_start_time_from_file} ${oldest_file_name}
+}
+
+# get the timeZone offset for location, such as Asia/Shanghai
+function getTimeZoneOffset() {
+  local timeZone=${1:?missing time zone}
+  if [[ $timeZone == "+"* ]] || [[ $timeZone == "-"* ]] ; then
+    echo ${timeZone}
+    return
+  fi
+  local currTime=$(TZ=UTC date)
+  local utcHour=$(TZ=UTC date -d "${currTime}" +"%H")
+  local zoneHour=$(TZ=${timeZone} date -d "${currTime}" +"%H")
+  local offset=$((${zoneHour}-${utcHour}))
+  if [ $offset -eq 0 ]; then
+    return
+  fi
+  symbol="+"
+  if [ $offset -lt 0 ]; then
+    symbol="-" && offset=${offset:1}
+  fi
+  if [ $offset -lt 10 ];then
+    offset="0${offset}"
+  fi
+  echo "${symbol}${offset}:00"
+}
+
diff --git a/addons/postgresql/dataprotection/postgresql-fetch-wal-log.sh b/addons/postgresql/dataprotection/postgresql-fetch-wal-log.sh
new file mode 100644
index 000000000..ab6c19a11
--- /dev/null
+++ b/addons/postgresql/dataprotection/postgresql-fetch-wal-log.sh
@@ -0,0 +1,54 @@
+
+function get_wal_name() {
+  local fileName=$1
+  local file_without_ext=${fileName%.*}
+  echo $(basename $file_without_ext)
+}
+
+function fetch-wal-log(){
+  wal_destination_dir=$1
+  start_wal_name=$2
+  restore_time=`date -d "$3" +%s`
+  pitr=$4
+  DP_log "PITR: $pitr"
+
+  exit_fetch_wal=0 && mkdir -p $wal_destination_dir
+  for dir_name in $(datasafed list /) ; do
+    if [[ $exit_fetch_wal -eq 1 ]]; then
+      exit 0
+    fi
+
+    # check if the latest_wal_log after the start_wal_log
+    latest_wal=$(datasafed list ${dir_name} | tail -n 1)
+    latest_wal_name=$(get_wal_name ${latest_wal})
+    if [[ ${latest_wal_name} < $start_wal_name ]]; then
+      continue
+    fi
+
+    DP_log "start to fetch wal logs from ${dir_name}"
+    for file in $(datasafed list ${dir_name} | grep ".zst"); do
+      wal_name=$(get_wal_name ${file})
+      if [[ $wal_name < $start_wal_name ]]; then
+        continue
+      fi
+      if [[ $pitr != "true" && $file =~ ".history" ]]; then
+        # if not restored for pitr, only fetch the current timeline log
+        DP_log "exit for new timeline."
+        exit_fetch_wal=1
+        break
+      fi
+      DP_log "copying $wal_name"
+      # pull and decompress
+      datasafed pull -d zstd $file ${wal_destination_dir}/$wal_name
+
+      # check if the wal_log contains the restore_time logs. if ture, stop fetching
+      latest_commit_time=$(pg_waldump ${wal_destination_dir}/$wal_name --rmgr=Transaction 2>/dev/null |tail -n 1|awk -F ' COMMIT ' '{print $2}'|awk -F ';' '{print $1}')
+      timestamp=`date -d "$latest_commit_time" +%s`
+      if [[ $latest_commit_time != "" && $timestamp > $restore_time ]]; then
+        DP_log "exit when reaching the target time log."
+        exit_fetch_wal=1
+        break
+      fi
+    done
+  done
+}
\ No newline at end of file
diff --git a/addons/postgresql/dataprotection/postgresql-pitr-backup.sh b/addons/postgresql/dataprotection/postgresql-pitr-backup.sh
new file mode 100644
index 000000000..525cedde2
--- /dev/null
+++ b/addons/postgresql/dataprotection/postgresql-pitr-backup.sh
@@ -0,0 +1,154 @@
+export PGPASSWORD=${DP_DB_PASSWORD}
+# use datasafed and default config
+export WALG_DATASAFED_CONFIG=""
+export PATH="$PATH:$DP_DATASAFED_BIN_PATH"
+export DATASAFED_BACKEND_BASE_PATH="$DP_BACKUP_BASE_PATH"
+export KB_BACKUP_WORKDIR=${VOLUME_DATA_DIR}/kb-backup
+
+PSQL="psql -h ${DP_DB_HOST} -U ${DP_DB_USER} -d postgres"
+global_last_switch_wal_time=$(date +%s)
+global_last_purge_time=$(date +%s)
+global_switch_wal_interval=300
+global_stop_time=
+global_old_size=0
+
+if [[ ${SWITCH_WAL_INTERVAL_SECONDS} =~ ^[0-9]+$ ]];then
+  global_switch_wal_interval=${SWITCH_WAL_INTERVAL_SECONDS}
+fi
+
+global_backup_in_secondary=
+if [ "${TARGET_POD_ROLE}" == "primary" ]; then
+  global_backup_in_secondary=f
+elif [ "${TARGET_POD_ROLE}" == "secondary" ]; then
+  global_backup_in_secondary=t
+fi
+
+# clean up expired logfiles, interval is 600s
+function purge_expired_files() {
+  local currentUnix=$(date +%s)
+  info=$(DP_purge_expired_files ${currentUnix} ${global_last_purge_time} / 600)
+  if [ ! -z "${info}" ]; then
+    global_last_purge_time=${currentUnix}
+    DP_log "cleanup expired wal-log files: ${info}"
+    local TOTAL_SIZE=$(datasafed stat / | grep TotalSize | awk '{print $2}')
+    DP_save_backup_status_info "${TOTAL_SIZE}"
+  fi
+}
+
+# switch wal log
+function switch_wal_log() {
+  local curr_time=$(date +%s)
+  local diff_time=$((${curr_time}-${global_last_switch_wal_time}))
+  if [[ ${diff_time} -lt ${global_switch_wal_interval} ]]; then
+    return
+  fi
+  LAST_TRANS=$(pg_waldump $(${PSQL} -Atc "select pg_walfile_name(pg_current_wal_lsn())") --rmgr=Transaction 2>/dev/null |tail -n 1)
+  if [ "${LAST_TRANS}" != "" ] && [ "$(find ${LOG_DIR}/archive_status/ -name '*.ready')" = "" ]; then
+    DP_log "start to switch wal file"
+    ${PSQL} -c "select pg_switch_wal()"
+    for i in $(seq 1 60); do
+      if [ "$(find ${LOG_DIR}/archive_status/ -name '*.ready')" != "" ]; then
+        DP_log "switch wal file successfully"
+        break;
+      fi
+      sleep 1
+    done
+  fi
+  global_last_switch_wal_time=${curr_time}
+}
+
+# upload wal log
+function upload_wal_log() {
+  local TODAY_INCR_LOG=$(date +%Y%m%d);
+  cd ${LOG_DIR}
+  for i in $(ls -tr ./archive_status/ | grep .ready); do
+    wal_name=${i%.*}
+    LOG_STOP_TIME=$(pg_waldump ${wal_name} --rmgr=Transaction 2>/dev/null | grep 'desc: COMMIT' |tail -n 1|awk -F ' COMMIT ' '{print $2}'|awk -F ';' '{print $1}')
+    if [[ ! -z $LOG_STOP_TIME ]];then
+      global_stop_time=$(date -d "${LOG_STOP_TIME}" -u '+%Y-%m-%dT%H:%M:%SZ')
+    fi
+    if [ -f ${wal_name} ]; then
+      DP_log "upload ${wal_name}"
+      datasafed push -z zstd ${wal_name} "/${TODAY_INCR_LOG}/${wal_name}.zst"
+      mv -f ./archive_status/${i} ./archive_status/${wal_name}.done;
+    fi
+  done
+}
+
+# get start time of the wal log
+function get_wal_log_start_time() {
+  local file="${1:?missing wal log name to analyze}"
+  local START_TIME=$(pg_waldump $file --rmgr=Transaction 2>/dev/null | grep 'desc: COMMIT' |head -n 1|awk -F ' COMMIT ' '{print $2}'|awk -F ';' '{print $1}')
+  if [[ ! -z ${START_TIME} ]];then
+    START_TIME=$(date -d "${START_TIME}" -u '+%Y-%m-%dT%H:%M:%SZ')
+    echo $START_TIME
+  fi
+}
+
+# pull wal log and decompress to KB_BACKUP_WORKDIR dir
+function pull_wal_log() {
+  file="${1:?missing file name to pull}"
+  # pull and decompress
+  fileName=$(basename ${file})
+  datasafed pull -d zstd ${file} "$(DP_get_file_name_without_ext ${fileName})"
+}
+
+# get the start time for backup.status.timeRange
+function get_start_time_for_range() {
+  local OLDEST_FILE=$(datasafed list -f --recursive / -o json | jq -s -r '.[] | sort_by(.mtime) | .[] | .path' |head -n 1)
+  if [ ! -z ${OLDEST_FILE} ]; then
+    START_TIME=$(DP_analyze_start_time_from_datasafed ${OLDEST_FILE} get_wal_log_start_time pull_wal_log)
+    echo ${START_TIME}
+  fi
+}
+
+# save backup status info to sync file
+function save_backup_status() {
+  local TOTAL_SIZE=$(datasafed stat / | grep TotalSize | awk '{print $2}')
+  # if no size changes, return
+  if [[ -z ${TOTAL_SIZE} || ${TOTAL_SIZE} -eq 0 || ${TOTAL_SIZE} == ${global_old_size} ]];then
+    return
+  fi
+  global_old_size=${TOTAL_SIZE}
+  local START_TIME=$(get_start_time_for_range)
+  DP_save_backup_status_info "${TOTAL_SIZE}" "${START_TIME}" "${global_stop_time}"
+}
+
+function check_pg_process() {
+  local is_ok=false
+  for ((i=1;i<4;i++));do
+    is_secondary=$(${PSQL} -Atc "select pg_is_in_recovery()")
+    if [[ $? -eq 0 && (-z ${global_backup_in_secondary} || "${global_backup_in_secondary}" == "${is_secondary}") ]]; then
+      is_ok=true
+      break
+    fi
+    DP_error_log "target backup pod/${DP_TARGET_POD_NAME} is not OK, target role: ${TARGET_POD_ROLE}, pg_is_in_recovery: ${is_secondary}, retry detection!"
+    sleep 1
+  done
+  if [[ ${is_ok} == "false" ]];then
+    DP_error_log "target backup pod/${DP_TARGET_POD_NAME} is not OK, target role: ${TARGET_POD_ROLE}, pg_is_in_recovery: ${is_secondary}!"
+    exit 1
+  fi
+}
+
+# trap term signal
+trap "echo 'Terminating...' && sync && exit 0" TERM
+DP_log "start to archive wal logs"
+while true; do
+
+  # check if pg process is ok
+  check_pg_process
+
+  # switch wal log
+  switch_wal_log
+
+  # upload wal log
+  upload_wal_log
+
+  # save backup status which will be updated to `backup` CR by the sidecar
+  save_backup_status
+
+  # purge the expired wal logs
+  purge_expired_files
+  sleep ${LOG_ARCHIVE_SECONDS}
+done
\ No newline at end of file
diff --git a/addons/postgresql/dataprotection/postgresql-pitr-restore.sh b/addons/postgresql/dataprotection/postgresql-pitr-restore.sh
new file mode 100644
index 000000000..ba5ec38e7
--- /dev/null
+++ b/addons/postgresql/dataprotection/postgresql-pitr-restore.sh
@@ -0,0 +1,35 @@
+# use datasafed and default config
+export WALG_DATASAFED_CONFIG=""
+export PATH="$PATH:$DP_DATASAFED_BIN_PATH"
+export DATASAFED_BACKEND_BASE_PATH="$DP_BACKUP_BASE_PATH"
+
+if [[ -d ${DATA_DIR}.old ]] && [[ ! -d ${DATA_DIR} ]]; then
+  # if dataDir.old exists but dataDir not exits, retry it
+  mv ${DATA_DIR}.old ${DATA_DIR}
+  exit 0;
+fi
+
+mkdir -p ${PITR_DIR};
+
+latest_wal=$(ls ${DATA_DIR}/pg_wal -lI "*.history" | grep ^- | awk '{print $9}' | sort | tail -n 1)
+start_wal_log=`basename $latest_wal`
+
+DP_log "fetch-wal-log ${PITR_DIR} ${start_wal_log} \"${DP_RESTORE_TIME}\" true"
+fetch-wal-log ${PITR_DIR} ${start_wal_log} "${DP_RESTORE_TIME}" true
+
+chmod 777 -R ${PITR_DIR};
+touch ${DATA_DIR}/recovery.signal;
+mkdir -p ${CONF_DIR};
+chmod 777 -R ${CONF_DIR};
+mkdir -p ${RESTORE_SCRIPT_DIR};
+echo "#!/bin/bash" > ${RESTORE_SCRIPT_DIR}/kb_restore.sh;
+echo "[[ -d '${DATA_DIR}.old' ]] && mv -f ${DATA_DIR}.old/* ${DATA_DIR}/;" >> ${RESTORE_SCRIPT_DIR}/kb_restore.sh;
+echo "sync;" >> ${RESTORE_SCRIPT_DIR}/kb_restore.sh;
+chmod +x ${RESTORE_SCRIPT_DIR}/kb_restore.sh;
+echo "restore_command='case "%f" in *history) cp ${PITR_DIR}/%f %p ;; *) mv ${PITR_DIR}/%f %p ;; esac'" > ${CONF_DIR}/recovery.conf;
+echo "recovery_target_time='${DP_RESTORE_TIME}'" >> ${CONF_DIR}/recovery.conf;
+echo "recovery_target_action='promote'" >> ${CONF_DIR}/recovery.conf;
+echo "recovery_target_timeline='latest'" >> ${CONF_DIR}/recovery.conf;
+mv ${DATA_DIR} ${DATA_DIR}.old;
+DP_log "done.";
+sync;
\ No newline at end of file
diff --git a/addons/postgresql/templates/actionset-postgresql-pitr.yaml b/addons/postgresql/templates/actionset-postgresql-pitr.yaml
new file mode 100644
index 000000000..40dcd0bc5
--- /dev/null
+++ b/addons/postgresql/templates/actionset-postgresql-pitr.yaml
@@ -0,0 +1,67 @@
+apiVersion: dataprotection.kubeblocks.io/v1alpha1
+kind: ActionSet
+metadata:
+  labels:
+    clusterdefinition.kubeblocks.io/name: postgresql
+  name: postgresql-pitr
+spec:
+  backupType: Continuous
+  env:
+    - name: VOLUME_DATA_DIR
+      value: /home/postgres/pgdata
+    - name: RESTORE_SCRIPT_DIR
+      value: "$(VOLUME_DATA_DIR)/kb_restore"
+    - name: PITR_DIR
+      value: "$(VOLUME_DATA_DIR)/pitr"
+    - name: DATA_DIR
+      value: "$(VOLUME_DATA_DIR)/pgroot/data"
+    - name: CONF_DIR
+      value: "$(VOLUME_DATA_DIR)/conf"
+    - name: DP_TIME_FORMAT
+      value: 2006-01-02 15:04:05 MST
+    - name: LOG_DIR
+      value: $(VOLUME_DATA_DIR)/pgroot/data/pg_wal
+    - name: TARGET_POD_ROLE
+      # TODO input by backup policy
+      value: primary
+    - name: LOG_ARCHIVE_SECONDS
+      value: "3"
+    - name: SWITCH_WAL_INTERVAL_SECONDS
+      value: "600"
+    - name: IMAGE_TAG
+      value: 14.8.0-pgvector-v0.5.0
+  restore:
+    prepareData:
+      image: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.repository }}:$(IMAGE_TAG)
+      command:
+        - bash
+        - -c
+        - |
+          #!/bin/bash
+          set -e;
+          {{- .Files.Get "dataprotection/common-scripts.sh" | nindent 8 }}
+          {{- .Files.Get "dataprotection/postgresql-fetch-wal-log.sh" | nindent 8 }}
+          {{- .Files.Get "dataprotection/postgresql-pitr-restore.sh" | nindent 8 }}
+    postReady:
+      - exec:
+          command:
+            - sh
+            - -c
+            - |
+              rm -rf /home/postgres/pgdata/conf/recovery.conf;
+              rm -rf /home/postgres/pgdata/pitr;
+  backup:
+    backupData:
+      image: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.repository }}:$(IMAGE_TAG)
+      runOnTargetPodNode: true
+      syncProgress:
+        enabled: true
+        intervalSeconds: 5
+      command:
+        - bash
+        - -c
+        - |
+          #!/bin/bash
+          set -e;
+          {{- .Files.Get "dataprotection/common-scripts.sh" | nindent 8 }}
+          {{- .Files.Get "dataprotection/postgresql-pitr-backup.sh" | nindent 8 }}
\ No newline at end of file
diff --git a/addons/postgresql/templates/backuppolicytemplate.yaml b/addons/postgresql/templates/backuppolicytemplate.yaml
index 361c692b7..73a1846b8 100644
--- a/addons/postgresql/templates/backuppolicytemplate.yaml
+++ b/addons/postgresql/templates/backuppolicytemplate.yaml
@@ -5,6 +5,18 @@ metadata:
   labels:
     clusterdefinition.kubeblocks.io/name: postgresql
     {{- include "postgresql.labels" . | nindent 4 }}
+  annotations:
+    dataprotection.kubeblocks.io/reconfigure-ref: |
+      {
+        "name": "postgresql-configuration",
+        "key": "postgresql.conf",
+        "enable": {
+          "archive-wal": [{"key": "archive_command","value": "''"}]
+        },
+        "disable": {
+          "archive-wal": [{ "key": "archive_command","value": "'/bin/true'"}]
+        }
+      }
 spec:
   clusterDefinitionRef: postgresql
   backupPolicies:
@@ -51,6 +63,15 @@ spec:
         volumeMounts:
           - name: data
            mountPath: {{ .Values.dataMountPath }}
+    - name: archive-wal
+      target:
+        role: primary
+      actionSetName: postgresql-pitr
+      snapshotVolumes: false
+      targetVolumes:
+        volumeMounts:
+          - name: data
+            mountPath: {{ .Values.dataMountPath }}
     schedules: &backupschedules
       - backupMethod: pg-basebackup
        enabled: false
@@ -64,6 +85,10 @@ spec:
         enabled: false
         cronExpression: "0 18 * * *"
         retentionPeriod: 7d
+      - backupMethod: archive-wal
+        enabled: false
+        cronExpression: "*/5 * * * *"
+        retentionPeriod: 8d
   - componentDefs: [postgresql-14, postgresql-12]
     target:
       role: secondary
@@ -108,4 +133,13 @@ spec:
         volumeMounts:
          - name: data
            mountPath: {{ .Values.dataMountPath }}
+    - name: archive-wal
+      target:
+        role: primary
+      actionSetName: postgresql-pitr
+      snapshotVolumes: false
+      targetVolumes:
+        volumeMounts:
+          - name: data
+            mountPath: {{ .Values.dataMountPath }}
     schedules: *backupschedules
\ No newline at end of file
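Both the archiver's stop time and the restore cutoff hinge on the awk pipeline that extracts a commit timestamp from pg_waldump. A worked example against a made-up record (real output varies by PostgreSQL version; only the "desc: COMMIT ...;" shape matters):

    line="rmgr: Transaction len (rec/tot): 46/46, tx: 735, lsn: 0/030000A8, desc: COMMIT 2024-03-11 02:13:27.123456 UTC; inval msgs: catcache 55"
    echo "$line" | awk -F ' COMMIT ' '{print $2}' | awk -F ';' '{print $1}'
    # -> 2024-03-11 02:13:27.123456 UTC, which the scripts then normalize with
    #    date -u '+%Y-%m-%dT%H:%M:%SZ' before writing backup.status.timeRange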
From cdf8a2b3090f386903ab3f86fa4b43c36f7a34a0 Mon Sep 17 00:00:00 2001
From: Qinyu Cai
Date: Mon, 11 Mar 2024 09:53:17 +0800
Subject: [PATCH 7/7] feat: pulsar support nodeport (#358)

---
 addons/kafka-cluster/Chart.yaml               |   2 +-
 addons/kafka-cluster/templates/cluster.yaml   |   6 +-
 addons/pulsar-cluster/Chart.yaml              |   2 +-
 addons/pulsar-cluster/templates/_helpers.tpl  |  21 +-
 .../templates/cluster-zookeeper-separate.yaml |   2 +-
 addons/pulsar-cluster/templates/cluster.yaml  |  75 ++++-
 .../templates/tests/benchmark.yaml            |   2 +-
 addons/pulsar-cluster/templates/validate.yaml |  10 +-
 addons/pulsar-cluster/values.yaml             |   5 +-
 addons/pulsar/Chart.yaml                      |   2 +-
 addons/pulsar/config/bookies-env.tpl          |   4 +-
 addons/pulsar/config/bookies-recovery-env.tpl |   4 +-
 addons/pulsar/config/broker-env.tpl           |   4 +-
 addons/pulsar/config/proxy-env.tpl            |  34 ++-
 addons/pulsar/scripts/init-proxy.sh           |  18 +-
 addons/pulsar/scripts/start-broker.sh         |  32 +++
 addons/pulsar/templates/_helpers.tpl          |   2 +-
 .../pulsar/templates/clusterdefinition.yaml   |  16 +-
 .../templates/componentdef-bkrecovery.yaml    | 143 ++++++++++
 .../templates/componentdef-bookkeeper.yaml    | 174 ++++++++++++
 .../pulsar/templates/componentdef-broker.yaml | 261 ++++++++++++++++++
 .../pulsar/templates/componentdef-proxy.yaml  | 182 ++++++++++++
 .../templates/componentdef-zookeeper.yaml     | 138 +++++++++
 23 files changed, 1104 insertions(+), 35 deletions(-)
 create mode 100644 addons/pulsar/scripts/start-broker.sh
 create mode 100644 addons/pulsar/templates/componentdef-bkrecovery.yaml
 create mode 100644 addons/pulsar/templates/componentdef-bookkeeper.yaml
 create mode 100644 addons/pulsar/templates/componentdef-broker.yaml
 create mode 100644 addons/pulsar/templates/componentdef-proxy.yaml
 create mode 100644 addons/pulsar/templates/componentdef-zookeeper.yaml

diff --git a/addons/kafka-cluster/Chart.yaml b/addons/kafka-cluster/Chart.yaml
index 550d9bf7b..56e5cc193 100644
--- a/addons/kafka-cluster/Chart.yaml
+++ b/addons/kafka-cluster/Chart.yaml
@@ -8,7 +8,7 @@ dependencies:
     alias: extra
 
 type: application
-version: 0.8.0
+version: 0.8.1
 appVersion: 3.3.2
 
 home: https://github.com/apecloud/kubeblocks/tree/main/deploy/kafka-cluster
diff --git a/addons/kafka-cluster/templates/cluster.yaml b/addons/kafka-cluster/templates/cluster.yaml
index ec2f769c9..731babac9 100644
--- a/addons/kafka-cluster/templates/cluster.yaml
+++ b/addons/kafka-cluster/templates/cluster.yaml
@@ -5,7 +5,7 @@ metadata:
   labels: {{ include "kblib.clusterLabels" . | nindent 4 }}
   annotations:
     "kubeblocks.io/extra-env": '{"KB_KAFKA_ENABLE_SASL":"{{ $.Values.saslEnable }}","KB_KAFKA_BROKER_HEAP":"{{ $.Values.brokerHeap }}","KB_KAFKA_CONTROLLER_HEAP":"{{ $.Values.controllerHeap }}","KB_KAFKA_PUBLIC_ACCESS":"{{ $.Values.extra.publiclyAccessible }}", "KB_KAFKA_BROKER_NODEPORT": "{{ $.Values.nodePortEnabled }}"}'
-    {{ include "kafka-cluster.brokerAddrFeatureGate" . | nindent 4 }}
+    {{- include "kafka-cluster.brokerAddrFeatureGate" . | nindent 4 }}
 spec:
   clusterDefinitionRef: kafka # ref clusterdefinition.name
   clusterVersionRef: {{ .Values.version }}
@@ -16,7 +16,11 @@ spec:
       serviceName: bootstrap
       componentSelector: broker
       spec:
+        {{- if .Values.nodePortEnabled }}
+        type: NodePort
+        {{- else }}
         type: ClusterIP
+        {{- end }}
         ports:
           - name: kafka-client
             targetPort: 9092
diff --git a/addons/pulsar-cluster/Chart.yaml b/addons/pulsar-cluster/Chart.yaml
index 93bb6695d..9a15c9ad7 100644
--- a/addons/pulsar-cluster/Chart.yaml
+++ b/addons/pulsar-cluster/Chart.yaml
@@ -4,7 +4,7 @@ description: A Pulsar Cluster Helm chart for KubeBlocks.
 
 type: application
 
-version: 0.8.0
+version: 0.8.2
 
 # appVersion specifies the version of the Pulsar database to be created,
 # and this value should be consistent with an existing clusterVersion.
diff --git a/addons/pulsar-cluster/templates/_helpers.tpl b/addons/pulsar-cluster/templates/_helpers.tpl
index 1652581b5..d41633d79 100644
--- a/addons/pulsar-cluster/templates/_helpers.tpl
+++ b/addons/pulsar-cluster/templates/_helpers.tpl
@@ -66,14 +66,18 @@ Create the name of the service account to use
 Pulsar broker FQDN
 */}}
 {{- define "pulsar-cluster.brokerFQDN" -}}
-{{ include "kblib.clusterName" . }}-broker.{{ .Release.Namespace }}.svc{{ .Values.clusterDomain }}
+{{- if eq .Values.version "pulsar-3.0.2" }}
+{{- include "kblib.clusterName" . }}-broker-bootstrap.{{ .Release.Namespace }}.svc{{ .Values.clusterDomain }}
+{{- else }}
+{{- include "kblib.clusterName" . }}-broker.{{ .Release.Namespace }}.svc{{ .Values.clusterDomain }}
+{{- end }}
 {{- end }}
 
 {{/*
 Pulsar ZooKeeper service ref
 */}}
 {{- define "pulsar-zookeeper-ref"}}
-{{- if .Values.serviceReference.enable }}
+{{- if .Values.serviceReference.enabled }}
 serviceRefs:
   - name: pulsarZookeeper
     namespace: {{ .Values.serviceReference.zookeeper.namespace | default .Release.Namespace }}
@@ -88,4 +92,15 @@ serviceRefs:
 {{- end }}
 {{- end}}
 {{- end}}
-}}
\ No newline at end of file
+}}
+
+{{/*
+Define Pulsar cluster annotation keys for nodeport feature gate.
+*/}}
+{{- define "pulsar-cluster.brokerAddrFeatureGate" -}}
+kubeblocks.io/enabled-pod-ordinal-svc: broker
+{{- if .Values.nodePortEnabled }}
+kubeblocks.io/enabled-node-port-svc: broker
+kubeblocks.io/disabled-cluster-ip-svc: broker
+{{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/addons/pulsar-cluster/templates/cluster-zookeeper-separate.yaml b/addons/pulsar-cluster/templates/cluster-zookeeper-separate.yaml
index 5711fcec0..200db3ee7 100644
--- a/addons/pulsar-cluster/templates/cluster-zookeeper-separate.yaml
+++ b/addons/pulsar-cluster/templates/cluster-zookeeper-separate.yaml
@@ -1,4 +1,4 @@
-{{- if .Values.serviceReference.enable }}
+{{- if .Values.serviceReference.enabled }}
 {{- if and (not .Values.serviceReference.zookeeper.serviceDescriptor) (not .Values.serviceReference.zookeeper.cluster) }}
 apiVersion: apps.kubeblocks.io/v1alpha1
 kind: Cluster
diff --git a/addons/pulsar-cluster/templates/cluster.yaml b/addons/pulsar-cluster/templates/cluster.yaml
index f844657c4..77eaaf3af 100644
--- a/addons/pulsar-cluster/templates/cluster.yaml
+++ b/addons/pulsar-cluster/templates/cluster.yaml
@@ -5,7 +5,13 @@ metadata:
   namespace: {{ .Release.Namespace }}
   labels: {{ include "kblib.clusterLabels" . | nindent 4 }}
   {{- if .Values.commonAnnotations }}
-  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  annotations:
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- if eq .Values.version "pulsar-3.0.2" }}
+    ## Todo: use cluster api to control the rendering logic of service in component definition
+    {{- include "pulsar-cluster.brokerAddrFeatureGate" . | nindent 4 }}
+    "kubeblocks.io/extra-env": '{"KB_PULSAR_BROKER_NODEPORT": "{{ $.Values.nodePortEnabled }}"}'
+    {{- end }}
   {{- end }}
 spec:
   clusterDefinitionRef: pulsar
@@ -18,10 +24,65 @@ spec:
   {{- with $.Values.tolerations }}
   tolerations: {{ . | toYaml | nindent 4 }}
   {{- end }}
+  services:
+    {{- if .Values.proxy.enabled }}
+    - name: proxy
+      serviceName: proxy
+      componentSelector: proxy
+      spec:
+        {{- if .Values.nodePortEnabled }}
+        type: NodePort
+        {{- else }}
+        type: ClusterIP
+        {{- end }}
+        ports:
+          - name: pulsar
+            port: 6650
+            targetPort: 6650
+          - name: http
+            port: 80
+            targetPort: 8080
+    {{- end }}
+    {{- if eq .Values.version "pulsar-3.0.2" }}
+    - name: broker-bootstrap
+      serviceName: broker-bootstrap
+      componentSelector: broker
+      spec:
+        {{- if .Values.nodePortEnabled }}
+        type: NodePort
+        {{- else }}
+        type: ClusterIP
+        {{- end }}
+        ports:
+          - name: pulsar
+            port: 6650
+            targetPort: 6650
+          - name: http
+            port: 80
+            targetPort: 8080
+          - name: kafka-client
+            port: 9092
+            targetPort: 9092
+    {{- if not .Values.serviceReference.enabled }}
+    - name: zookeeper
+      serviceName: zookeeper
+      componentSelector: zookeeper
+      spec:
+        type: ClusterIP
+        ports:
+          - name: client
+            port: 2181
+            targetPort: 2181
+    {{- end }}
+    {{- end }}
   componentSpecs:
     {{- if .Values.proxy.enabled }}
     - name: proxy
       componentDefRef: pulsar-proxy
+      {{- if eq .Values.version "pulsar-3.0.2" }}
+      componentDef: pulsar-proxy
+      {{- end }}
+      {{ include "pulsar-zookeeper-ref" . | nindent 6 }}
       monitor: {{ .Values.monitor.enabled | default false }}
       replicas: {{ .Values.proxy.replicaCount | default 3 }}
      {{- with .Values.proxy.resources }}
@@ -40,6 +101,9 @@ spec:
     {{- end }}
     - name: broker
       componentDefRef: pulsar-broker
+      {{- if eq .Values.version "pulsar-3.0.2" }}
+      componentDef: pulsar-broker
+      {{- end }}
      {{ include "pulsar-zookeeper-ref" . | nindent 6 }}
       monitor: {{ .Values.monitor.enabled | default false }}
       replicas: {{ .Values.broker.replicaCount | default 3 }}
@@ -59,6 +123,9 @@ spec:
     {{- if .Values.bookiesRecovery.enabled }}
     - name: bookies-recovery
       componentDefRef: bookies-recovery
+      {{- if eq .Values.version "pulsar-3.0.2" }}
+      componentDef: pulsar-bkrecovery
+      {{- end }}
      {{ include "pulsar-zookeeper-ref" . | nindent 6 }}
       monitor: {{ .Values.monitor.enabled | default false }}
       replicas: {{ .Values.bookiesRecovery.replicaCount | default 3 }}
@@ -78,6 +145,9 @@ spec:
     {{- end }}
     - name: bookies
       componentDefRef: bookies
+      {{- if eq .Values.version "pulsar-3.0.2" }}
+      componentDef: pulsar-bookkeeper
+      {{- end }}
      {{ include "pulsar-zookeeper-ref" . | nindent 6 }}
       monitor: {{ .Values.monitor.enabled | default false }}
       {{- $bookiesReplicas := .Values.bookies.replicaCount }}
@@ -124,6 +194,9 @@ spec:
     {{- if not .Values.serviceReference.enabled }}
     - name: zookeeper
       componentDefRef: zookeeper
+      {{- if eq .Values.version "pulsar-3.0.2" }}
+      componentDef: pulsar-zookeeper
+      {{- end }}
       monitor: {{ .Values.monitor.enabled | default false }}
       replicas: {{ .Values.zookeeper.replicaCount | default 3 }}
      {{- with .Values.zookeeper.resources }}
diff --git a/addons/pulsar-cluster/templates/tests/benchmark.yaml b/addons/pulsar-cluster/templates/tests/benchmark.yaml
index 751f53c37..6936dacdd 100644
--- a/addons/pulsar-cluster/templates/tests/benchmark.yaml
+++ b/addons/pulsar-cluster/templates/tests/benchmark.yaml
@@ -93,7 +93,7 @@ spec:
             - workloads/1-topic-16-partitions-1kb.yaml
           env:
             - name: BOOTSTRAP_SERVERS
-              value: "{{ include "pulsar-cluster.brokerFQDN" . }}:9092" 
+              value: "{{ include "pulsar-cluster.brokerFQDN" . }}:9092"
           volumeMounts:
             - name: out
               mountPath: /out
diff --git a/addons/pulsar-cluster/templates/validate.yaml b/addons/pulsar-cluster/templates/validate.yaml
index e8d81acfd..808e99f15 100644
--- a/addons/pulsar-cluster/templates/validate.yaml
+++ b/addons/pulsar-cluster/templates/validate.yaml
@@ -8,11 +8,11 @@
     {{ fail "Pulsar bookies replicas cannot be less than 3." }}
   {{- end }}
 {{- end }}
-{{- if .Values.bookiesRecovery.replicaCount }}
-  {{- if lt (int .Values.bookiesRecovery.replicaCount) 3 }}
-    {{ fail "Pulsar bookiesRecovery replicas cannot be less than 3." }}
-  {{- end }}
-{{- end }}
+{{/*{{- if .Values.bookiesRecovery.replicaCount }}*/}}
+{{/*  {{- if lt (int .Values.bookiesRecovery.replicaCount) 3 }}*/}}
+{{/*    {{ fail "Pulsar bookiesRecovery replicas cannot be less than 3." }}*/}}
+{{/*  {{- end }}*/}}
+{{/*{{- end }}*/}}
 {{- if .Values.bookies.mode }}
   {{- if and (ne .Values.bookies.mode "generic") (ne .Values.bookies.mode "selfVerifying") }}
     {{ fail "pulsar bookies mode only supported [generic,selfVerifying]" }}
diff --git a/addons/pulsar-cluster/values.yaml b/addons/pulsar-cluster/values.yaml
index 802a7cb9a..989804cf0 100644
--- a/addons/pulsar-cluster/values.yaml
+++ b/addons/pulsar-cluster/values.yaml
@@ -76,7 +76,7 @@ bookiesRecovery:
   ## @param bookies.replicaCount Number of Bookies replicas
   ## at least 3 nodes if autoRecoveryDaemonEnabled is false in bookies.conf
   enabled: false
-  replicaCount: 3
+  replicaCount: 1
 
   resources:
     # We usually recommend not to specify default resources and to leave this as a conscious
@@ -187,3 +187,6 @@ serviceReference:
   ## if zookeeper service is provided by external source, please create the ServiceDescriptor Object of zookeeper in target namespace firstly, then specify the serviceDescriptor name here.
   ## Please do not specify both cluster and serviceDescriptor at the same time.
   serviceDescriptor: ""
+
+## @param nodePortEnabled
+nodePortEnabled: false
\ No newline at end of file
diff --git a/addons/pulsar/Chart.yaml b/addons/pulsar/Chart.yaml
index f9385e112..911fd94e4 100644
--- a/addons/pulsar/Chart.yaml
+++ b/addons/pulsar/Chart.yaml
@@ -4,7 +4,7 @@ description: Apache Pulsar is an open-source, distributed messaging and streamin
 
 type: application
 
-version: 0.8.0
+version: 0.8.2
 
 # appVersion specifies the version of the Pulsar database to be created,
 # and this value should be consistent with an existing clusterVersion.
diff --git a/addons/pulsar/config/bookies-env.tpl b/addons/pulsar/config/bookies-env.tpl
index 5f7c663de..a5c3911b1 100644
--- a/addons/pulsar/config/bookies-env.tpl
+++ b/addons/pulsar/config/bookies-env.tpl
@@ -33,9 +33,9 @@ PULSAR_MEM: -XX:MinRAMPercentage=25 -XX:MaxRAMPercentage=50 {{ $MaxDirectMemoryS
   {{- if and (index $pulsar_zk_from_service_ref.spec "endpoint") (index $pulsar_zk_from_service_ref.spec "port") }}
     {{- $zk_server = printf "%s:%s" $pulsar_zk_from_service_ref.spec.endpoint.value $pulsar_zk_from_service_ref.spec.port.value }}
   {{- else }}
-    {{- $zk_server = printf "%s-%s.%s.svc:2181" $clusterName $pulsar_zk_from_component.name $namespace }}
+    {{- $zk_server = printf "%s-zookeeper.%s.svc:2181" $clusterName $namespace }}
   {{- end }}
 {{- else }}
-  {{- $zk_server = printf "%s-%s.%s.svc:2181" $clusterName $pulsar_zk_from_component.name $namespace }}
+  {{- $zk_server = printf "%s-zookeeper.%s.svc:2181" $clusterName $namespace }}
 {{- end }}
 zkServers: {{ $zk_server }}
diff --git a/addons/pulsar/config/bookies-recovery-env.tpl b/addons/pulsar/config/bookies-recovery-env.tpl
index 1052971bf..089a8a414 100644
--- a/addons/pulsar/config/bookies-recovery-env.tpl
+++ b/addons/pulsar/config/bookies-recovery-env.tpl
@@ -38,9 +38,9 @@ PULSAR_MEM: -XX:MinRAMPercentage=25 -XX:MaxRAMPercentage=50 {{ $MaxDirectMemoryS
   {{- if and (index $pulsar_zk_from_service_ref.spec "endpoint") (index $pulsar_zk_from_service_ref.spec "port") }}
     {{- $zk_server = printf "%s:%s" $pulsar_zk_from_service_ref.spec.endpoint.value $pulsar_zk_from_service_ref.spec.port.value }}
   {{- else }}
-    {{- $zk_server = printf "%s-%s.%s.svc:2181" $clusterName $pulsar_zk_from_component.name $namespace }}
+    {{- $zk_server = printf "%s-zookeeper.%s.svc:2181" $clusterName $namespace }}
   {{- end }}
 {{- else }}
-  {{- $zk_server = printf "%s-%s.%s.svc:2181" $clusterName $pulsar_zk_from_component.name $namespace }}
+  {{- $zk_server = printf "%s-zookeeper.%s.svc:2181" $clusterName $namespace }}
 {{- end }}
 zkServers: {{ $zk_server }}
\ No newline at end of file
diff --git a/addons/pulsar/config/broker-env.tpl b/addons/pulsar/config/broker-env.tpl
index 648cf04c4..d71021670 100644
--- a/addons/pulsar/config/broker-env.tpl
+++ b/addons/pulsar/config/broker-env.tpl
@@ -32,10 +32,10 @@ PULSAR_MEM: -XX:MinRAMPercentage=30 -XX:MaxRAMPercentage=30 {{ $MaxDirectMemoryS
   {{- if and (index $pulsar_zk_from_service_ref.spec "endpoint") (index $pulsar_zk_from_service_ref.spec "port") }}
     {{- $zk_server = printf "%s:%s" $pulsar_zk_from_service_ref.spec.endpoint.value $pulsar_zk_from_service_ref.spec.port.value }}
   {{- else }}
-    {{- $zk_server = printf "%s-%s.%s.svc:2181" $clusterName $pulsar_zk_from_component.name $namespace }}
+    {{- $zk_server = printf "%s-zookeeper.%s.svc:2181" $clusterName $namespace }}
   {{- end }}
 {{- else }}
-  {{- $zk_server = printf "%s-%s.%s.svc:2181" $clusterName $pulsar_zk_from_component.name $namespace }}
+  {{- $zk_server = printf "%s-zookeeper.%s.svc:2181" $clusterName $namespace }}
 {{- end }}
 zookeeperServers: {{ $zk_server }}
 configurationStoreServers: {{ $zk_server }}
\ No newline at end of file
diff --git a/addons/pulsar/config/proxy-env.tpl b/addons/pulsar/config/proxy-env.tpl
index 21c858403..8b6a482bc 100644
--- a/addons/pulsar/config/proxy-env.tpl
+++ b/addons/pulsar/config/proxy-env.tpl
@@ -5,4 +5,36 @@ PULSAR_GC: -XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled -XX:
 {{- if gt $phy_memory 0 }}
   {{- $MaxDirectMemorySize = printf "-XX:MaxDirectMemorySize=%dm" (mul (div $phy_memory ( mul 1024 1024 10)) 6) }}
 {{- end }}
-PULSAR_MEM: -XX:MinRAMPercentage=30 -XX:MaxRAMPercentage=30 {{ $MaxDirectMemorySize }}
\ No newline at end of file
+PULSAR_MEM: -XX:MinRAMPercentage=30 -XX:MaxRAMPercentage=30 {{ $MaxDirectMemorySize }}
+
+{{- $clusterName := $.cluster.metadata.name }}
+{{- $namespace := $.cluster.metadata.namespace }}
+{{- $pulsar_zk_from_service_ref := fromJson "{}" }}
+{{- $pulsar_zk_from_component := fromJson "{}" }}
+
+{{- if index $.component "serviceReferences" }}
+  {{- range $i, $e := $.component.serviceReferences }}
+    {{- if eq $i "pulsarZookeeper" }}
+      {{- $pulsar_zk_from_service_ref = $e }}
+      {{- break }}
+    {{- end }}
+  {{- end }}
+{{- end }}
+{{- range $i, $e := $.cluster.spec.componentSpecs }}
+  {{- if eq $e.componentDefRef "zookeeper" }}
+    {{- $pulsar_zk_from_component = $e }}
+  {{- end }}
+{{- end }}
+
+# Try to get zookeeper from service reference first, if zookeeper service reference is empty, get default zookeeper componentDef in ClusterDefinition
+{{- $zk_server := "" }}
+{{- if $pulsar_zk_from_service_ref }}
+  {{- if and (index $pulsar_zk_from_service_ref.spec "endpoint") (index $pulsar_zk_from_service_ref.spec "port") }}
+    {{- $zk_server = printf "%s:%s" $pulsar_zk_from_service_ref.spec.endpoint.value $pulsar_zk_from_service_ref.spec.port.value }}
+  {{- else }}
+    {{- $zk_server = printf "%s-zookeeper.%s.svc:2181" $clusterName $namespace }}
+  {{- end }}
+{{- else }}
+  {{- $zk_server = printf "%s-zookeeper.%s.svc:2181" $clusterName $namespace }}
+{{- end }}
+metadataStoreUrl: {{ $zk_server }}
\ No newline at end of file
diff --git a/addons/pulsar/scripts/init-proxy.sh b/addons/pulsar/scripts/init-proxy.sh
index 3e285ecf0..e3235881a 100755
--- a/addons/pulsar/scripts/init-proxy.sh
+++ b/addons/pulsar/scripts/init-proxy.sh
@@ -1,4 +1,16 @@
 #!/bin/sh
-while [ "$(curl -s -o /dev/null -w '%{http_code}' http://${brokerSVC}:80/status.html)" -ne "200" ]; do
-  echo "pulsar cluster isn't initialized yet..."; sleep 1;
-done
\ No newline at end of file
+if [ -n "${metadataStoreUrl}" ]; then
+  echo "waiting for zookeeper to be ready..."
+  zkDomain="${metadataStoreUrl%%:*}"
+  until echo ruok | nc -q 1 ${zkDomain} 2181 | grep imok; do
+    sleep 1;
+  done;
+  echo "zk is ready..."
+fi
+
+if [ -n "${brokerSVC}" ]; then
+  echo "waiting for broker to be ready..."
+  while [ "$(curl -s -o /dev/null -w '%{http_code}' http://${brokerSVC}:80/status.html)" -ne "200" ]; do
+    echo "pulsar cluster isn't initialized yet..."; sleep 1;
+  done
+fi
\ No newline at end of file
diff --git a/addons/pulsar/scripts/start-broker.sh b/addons/pulsar/scripts/start-broker.sh
new file mode 100644
index 000000000..94d8930fe
--- /dev/null
+++ b/addons/pulsar/scripts/start-broker.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+set -x
+
+extract_ordinal_from_pod_name() {
+  local pod_name="$1"
+  local ordinal="${pod_name##*-}"
+  echo "$ordinal"
+}
+
+make_nodeport_domain() {
+  local nodeport_env_name="$1"
+  eval port="\$${nodeport_env_name}"
+  echo "${KB_HOST_IP}:${port}"
+}
+
+if [[ "true" == "$KB_PULSAR_BROKER_NODEPORT" ]]; then
+  echo "init NodePort config:"
+  pod_ordinal=$(extract_ordinal_from_pod_name "$KB_POD_NAME")
+  nodeport_pulsar_domain=$(make_nodeport_domain "NODE_PORT_PULSAR_${pod_ordinal}")
+  nodeport_kafka_domain=$(make_nodeport_domain "NODE_PORT_KAFKA_${pod_ordinal}")
+  export PULSAR_PREFIX_advertisedListeners="cluster:pulsar://${nodeport_pulsar_domain}"
+  echo "[cfg]set PULSAR_PREFIX_advertisedListeners=${PULSAR_PREFIX_advertisedListeners}"
+  export PULSAR_PREFIX_kafkaAdvertisedListeners="CLIENT://${nodeport_kafka_domain}"
+  echo "[cfg]set PULSAR_PREFIX_kafkaAdvertisedListeners=${PULSAR_PREFIX_kafkaAdvertisedListeners}"
+fi
+
+/kb-scripts/merge_pulsar_config.py conf/client.conf /opt/pulsar/conf/client.conf && \
+/kb-scripts/merge_pulsar_config.py conf/broker.conf /opt/pulsar/conf/broker.conf && \
+bin/apply-config-from-env.py conf/broker.conf && \
+bin/apply-config-from-env.py conf/client.conf && \
+
+echo 'OK' > status;exec bin/pulsar broker
\ No newline at end of file
diff --git a/addons/pulsar/templates/_helpers.tpl b/addons/pulsar/templates/_helpers.tpl
index f72678539..b17c984b9 100644
--- a/addons/pulsar/templates/_helpers.tpl
+++ b/addons/pulsar/templates/_helpers.tpl
@@ -72,4 +72,4 @@ Generate scripts configmap
 {{ $path | base }}: |-
 {{- $.Files.Get $path | nindent 2 }}
 {{- end }}
-{{- end }}
\ No newline at end of file
+{{- end }}
diff --git a/addons/pulsar/templates/clusterdefinition.yaml b/addons/pulsar/templates/clusterdefinition.yaml
index 6ab5d0cbd..147fec768 100644
--- a/addons/pulsar/templates/clusterdefinition.yaml
+++ b/addons/pulsar/templates/clusterdefinition.yaml
@@ -270,14 +270,14 @@ spec:
           namespace: {{ .Release.Namespace }}
           volumeName: scripts
           defaultMode: 0555
-      service:
-        ports:
-          - name: pulsar
-            port: 6650
-            targetPort: pulsar
-          - name: http
-            port: 80
-            targetPort: http
+{{/*      service:*/}}
+{{/*        ports:*/}}
+{{/*          - name: pulsar*/}}
+{{/*            port: 6650*/}}
+{{/*            targetPort: pulsar*/}}
+{{/*          - name: http*/}}
+{{/*            port: 80*/}}
+{{/*            targetPort: http*/}}
       podSpec:
         securityContext:
           runAsNonRoot: true
diff --git a/addons/pulsar/templates/componentdef-bkrecovery.yaml b/addons/pulsar/templates/componentdef-bkrecovery.yaml
new file mode 100644
index 000000000..51b456fea
--- /dev/null
+++ b/addons/pulsar/templates/componentdef-bkrecovery.yaml
@@ -0,0 +1,143 @@
+apiVersion: apps.kubeblocks.io/v1alpha1
+kind: ComponentDefinition
+metadata:
+  name: pulsar-bkrecovery
+  labels:
+    {{- include "pulsar.labels" . | nindent 4 }}
+spec:
+  provider: kubeblocks
+  description: Pulsar bookies auto-recovery.
+  serviceKind: pulsar
+  serviceVersion: {{ default .Chart.AppVersion .Values.clusterVersionOverride }}
+  serviceRefDeclarations:
+    - name: pulsarZookeeper
+      serviceRefDeclarationSpecs:
+        - serviceKind: zookeeper
+          serviceVersion: ^3.8.\d{1,2}$
+  updateStrategy: BestEffortParallel
+  monitor:
+    builtIn: false
+    exporterConfig:
+      scrapePath: /metrics
+      scrapePort: {{ .Values.metrics.service.port }}
+  configs:
+    - name: agamotto-configuration
+      templateRef: {{ include "pulsar.name" . }}-agamotto-conf-tpl
+      namespace: {{ .Release.Namespace }}
+      volumeName: agamotto-configuration
+    - name: bookies-recovery-env
+      templateRef: {{ include "pulsar.name" . }}-recovery-env-tpl
+      namespace: {{ .Release.Namespace }}
+      constraintRef: pulsar-env-constraints
+      keys:
+        - conf
+      asEnvFrom:
+        - bookies-recovery
+        - check-bookies
+      volumeName: recovery-config-env
+  scripts:
+    - name: pulsar-scripts
+      templateRef: {{ include "pulsar.name" . }}-scripts
+      namespace: {{ .Release.Namespace }}
+      volumeName: scripts
+      defaultMode: 0555
+  runtime:
+    securityContext:
+      runAsNonRoot: true
+      runAsUser: 10000
+      fsGroup: 0
+      runAsGroup: 0
+    initContainers:
+      - name: check-bookies
+        image: "{{ template "pulsar.imageFullName" (dict "image" .Values.images.v3_0_2.bookie "root" .) }}"
+        imagePullPolicy: "{{ template "pulsar.imagePullPolicy" (dict "image" .Values.images.v3_0_2.bookie "root" .) }}"
+        command:
+          - /kb-scripts/check-bookies.sh
+        env:
+          {{- if .Values.debugEnabled }}
+          - name: PULSAR_LOG_ROOT_LEVEL
+            value: DEBUG
+          - name: PULSAR_LOG_LEVEL
+            value: DEBUG
+          {{- end }}
+        volumeMounts:
+          - name: scripts
+            mountPath: /kb-scripts
+        securityContext:
+          privileged: true
+          runAsNonRoot: false
+          runAsUser: 0
+    containers:
+      - name: bookies-recovery
+        securityContext:
+          allowPrivilegeEscalation: false
+          runAsNonRoot: true
+          runAsUser: 10000
+          capabilities:
+            drop:
+              - ALL
+          privileged: false
+          runAsGroup: 0
+        image: "{{ template "pulsar.imageFullName" (dict "image" .Values.images.v3_0_2.bookie "root" .) }}"
+        imagePullPolicy: "{{ template "pulsar.imagePullPolicy" (dict "image" .Values.images.v3_0_2.bookie "root" .) }}"
+        command:
+          - /kb-scripts/start-bookies-recovery.sh
+        env:
+          {{- if .Values.debugEnabled }}
+          - name: PULSAR_LOG_ROOT_LEVEL
+            value: DEBUG
+          - name: PULSAR_LOG_LEVEL
+            value: DEBUG
+          {{- end }}
+          - name: SERVICE_PORT
+            value: "8000"
+          - name: httpServerEnabled
+            value: "true"
+          - name: httpServerPort
+            value: "8000"
+          - name: prometheusStatsHttpPort
+            value: "8000"
+          - name: useHostNameAsBookieID
+            value: "true"
+        ports:
+          - name: http
+            containerPort: 8000
+          - name: bookie
+            containerPort: 3181
+        volumeMounts:
+          - name: scripts
+            mountPath: /kb-scripts
+          - name: recovery-config-env
+            mountPath: /opt/pulsar/none
+      - name: metrics
+        image: {{ .Values.images.metrics.registry | default .Values.image.registry }}/{{ .Values.images.metrics.repository }}:{{ .Values.images.metrics.tag }}
+        imagePullPolicy: {{ default "IfNotPresent" .Values.images.metrics.pullPolicy }}
+        securityContext:
+          runAsNonRoot: false
+          runAsUser: 0
+        env:
+          - name: JOB_NAME
+            value: recovery
+          - name: SERVICE_PORT
+            value: "8000"
+          - name: POD_NAME
+            value: $(KB_POD_NAME)
+          - name: POD_NAMESPACE
+            value: $(KB_NAMESPACE)
+          {{- if .Values.debugEnabled }}
+          - name: LOG_LEVEL
+            value: debug
+          {{- else }}
+          - name: LOG_LEVEL
+            value: info
+          {{- end }}
+        command:
+          - "/bin/agamotto"
+          - "--config=/opt/agamotto/agamotto-config.yaml"
+          - "--feature-gates=-pkg.translator.prometheus.NormalizeName"
+        ports:
+          - name: http-metrics
+            containerPort: {{ .Values.metrics.service.port }}
+        volumeMounts:
+          - name: agamotto-configuration
+            mountPath: /opt/agamotto
diff --git a/addons/pulsar/templates/componentdef-bookkeeper.yaml b/addons/pulsar/templates/componentdef-bookkeeper.yaml
new file mode 100644
index 000000000..104dde064
--- /dev/null
+++ b/addons/pulsar/templates/componentdef-bookkeeper.yaml
@@ -0,0 +1,174 @@
+apiVersion: apps.kubeblocks.io/v1alpha1
+kind: ComponentDefinition
+metadata:
+  name: pulsar-bookkeeper
+  labels:
+    {{- include "pulsar.labels" . | nindent 4 }}
+spec:
+  provider: kubeblocks
+  description: Pulsar bookkeeper.
+  serviceKind: pulsar
+  serviceVersion: {{ default .Chart.AppVersion .Values.clusterVersionOverride }}
+  serviceRefDeclarations:
+    - name: pulsarZookeeper
+      serviceRefDeclarationSpecs:
+        - serviceKind: zookeeper
+          serviceVersion: ^3.8.\d{1,2}$
+  updateStrategy: BestEffortParallel
+  monitor:
+    builtIn: false
+    exporterConfig:
+      scrapePath: /metrics
+      scrapePort: {{ .Values.metrics.service.port }}
+  configs:
+    - name: agamotto-configuration
+      templateRef: {{ include "pulsar.name" . }}-agamotto-conf-tpl
+      namespace: {{ .Release.Namespace }}
+      volumeName: agamotto-configuration
+    - name: bookies-env
+      templateRef: {{ include "pulsar.name" . }}-bookies-env-tpl
+      namespace: {{ .Release.Namespace }}
+      constraintRef: pulsar-env-constraints
+      volumeName: bookies-env
+      keys:
+        - conf
+      asEnvFrom:
+        - init-bookies
+        - bookies
+    - name: bookies-config
+      templateRef: {{ include "pulsar.name" . }}3-bookies-config-tpl
+      namespace: {{ .Release.Namespace }}
+      volumeName: pulsar-bookies-config
+      constraintRef: pulsar3-bookies-cc
+  scripts:
+    - name: pulsar-scripts
+      templateRef: {{ include "pulsar.name" . }}-scripts
+      namespace: {{ .Release.Namespace }}
+      volumeName: scripts
+      defaultMode: 0555
+  runtime:
+    initContainers:
+      - name: init-bookies
+        image: "{{ template "pulsar.imageFullName" (dict "image" .Values.images.v3_0_2.bookie "root" .) }}"
+        imagePullPolicy: "{{ template "pulsar.imagePullPolicy" (dict "image" .Values.images.v3_0_2.bookie "root" .)
}}" + command: + - /kb-scripts/init-bookies.sh + env: + {{- if .Values.debugEnabled }} + - name: PULSAR_LOG_ROOT_LEVEL + value: DEBUG + - name: PULSAR_LOG_LEVEL + value: DEBUG + {{- end }} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - name: scripts + mountPath: /kb-scripts + containers: + - name: bookies + image: "{{ template "pulsar.imageFullName" (dict "image" .Values.images.v3_0_2.bookie "root" .) }}" + imagePullPolicy: "{{ template "pulsar.imagePullPolicy" (dict "image" .Values.images.v3_0_2.bookie "root" .) }}" + command: + - /kb-scripts/start-bookies.sh + env: + {{- if .Values.debugEnabled }} + - name: PULSAR_LOG_ROOT_LEVEL + value: DEBUG + - name: PULSAR_LOG_LEVEL + value: DEBUG + {{- end }} + - name: SERVICE_PORT + value: "8000" + - name: cluster_domain + value: {{ .Values.clusterDomain }} + lifecycle: + preStop: + exec: + command: [ "/kb-scripts/prestop-bookies.sh" ] + livenessProbe: + failureThreshold: 60 + httpGet: + path: /api/v1/bookie/state + port: http + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + ports: + - name: http + containerPort: 8000 + - name: bookie + containerPort: 3181 + readinessProbe: + failureThreshold: 60 + httpGet: + path: /api/v1/bookie/is_ready + port: http + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + resources: + requests: + cpu: 200m + memory: 512Mi + securityContext: + runAsUser: 0 + runAsGroup: 10000 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /pulsar/data/bookkeeper/journal + name: journal + - mountPath: /pulsar/data/bookkeeper/ledgers + name: ledgers + - name: scripts + mountPath: /kb-scripts + - name: annotations + mountPath: /etc/annotations + - name: bookies-env + mountPath: /opt/pulsar/none + - name: pulsar-bookies-config + mountPath: /opt/pulsar/conf + - name: metrics + image: {{ .Values.images.metrics.registry | default .Values.image.registry }}/{{ .Values.images.metrics.repository }}:{{ .Values.images.metrics.tag }} + imagePullPolicy: {{ default "IfNotPresent" .Values.images.metrics.pullPolicy }} + securityContext: + runAsNonRoot: false + runAsUser: 0 + env: + - name: JOB_NAME + value: bookie + - name: SERVICE_PORT + value: "8000" + - name: POD_NAME + value: $(KB_POD_NAME) + - name: POD_NAMESPACE + value: $(KB_NAMESPACE) + {{- if .Values.debugEnabled }} + - name: LOG_LEVEL + value: debug + {{- else }} + - name: LOG_LEVEL + value: info + {{- end }} + command: + - "/bin/agamotto" + - "--config=/opt/agamotto/agamotto-config.yaml" + - "--feature-gates=-pkg.translator.prometheus.NormalizeName" + ports: + - name: http-metrics + containerPort: {{ .Values.metrics.service.port }} + volumeMounts: + - name: agamotto-configuration + mountPath: /opt/agamotto + volumes: + - name: annotations + downwardAPI: + items: + - path: "component-replicas" + fieldRef: + fieldPath: metadata.annotations['apps.kubeblocks.io/component-replicas'] \ No newline at end of file diff --git a/addons/pulsar/templates/componentdef-broker.yaml b/addons/pulsar/templates/componentdef-broker.yaml new file mode 100644 index 000000000..81ceeca22 --- /dev/null +++ b/addons/pulsar/templates/componentdef-broker.yaml @@ -0,0 +1,261 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: ComponentDefinition +metadata: + name: pulsar-broker + labels: + {{- include "pulsar.labels" . | nindent 4 }} +spec: + provider: kubeblocks + description: Pulsar broker. 
+ serviceKind: pulsar + serviceVersion: {{ default .Chart.AppVersion .Values.clusterVersionOverride }} + monitor: + builtIn: false + exporterConfig: + scrapePath: /metrics + scrapePort: {{ .Values.metrics.service.port }} + vars: + - name: NODE_PORT_PULSAR + valueFrom: + serviceVarRef: + compDef: pulsar-broker + name: nodeport + generatePodOrdinalServiceVar: true + optional: true + nodePort: + name: pulsar + option: Optional + - name: NODE_PORT_KAFKA + valueFrom: + serviceVarRef: + compDef: pulsar-broker + name: nodeport + generatePodOrdinalServiceVar: true + optional: true + nodePort: + name: kafka-client + option: Optional + services: + - name: broker-clusterip + serviceName: clusterip + generatePodOrdinalService: true + spec: + type: ClusterIP + ports: + - name: pulsar + port: 6650 + targetPort: pulsar + - name: kafka-client + port: 9092 + targetPort: kafka-client + - name: broker-nodeport + serviceName: nodeport + generatePodOrdinalService: true + spec: + type: NodePort + ports: + - name: pulsar + port: 6650 + targetPort: pulsar + - name: kafka-client + port: 9092 + targetPort: kafka-client + serviceRefDeclarations: + - name: pulsarZookeeper + serviceRefDeclarationSpecs: + - serviceKind: zookeeper + serviceVersion: ^3.8.\d{1,2}$ + updateStrategy: BestEffortParallel + configs: + - name: agamotto-configuration + templateRef: {{ include "pulsar.name" . }}-agamotto-conf-tpl + namespace: {{ .Release.Namespace }} + volumeName: agamotto-configuration + - name: broker-env + templateRef: {{ include "pulsar.name" . }}-broker-env-tpl + namespace: {{ .Release.Namespace }} + constraintRef: pulsar-env-constraints + keys: + - conf + asEnvFrom: + - init-broker-cluster + - broker + - init-pulsar-client-config + volumeName: broker-env + - name: broker-config + templateRef: {{ include "pulsar.name" . }}3-broker-config-tpl + namespace: {{ .Release.Namespace }} + constraintRef: pulsar3-brokers-cc + volumeName: pulsar-config + scripts: + - name: pulsar-scripts + templateRef: {{ include "pulsar.name" . }}-scripts + namespace: {{ .Release.Namespace }} + volumeName: scripts + defaultMode: 0555 + runtime: + securityContext: + runAsNonRoot: true + runAsUser: 10000 + fsGroup: 0 + runAsGroup: 0 + initContainers: + - name: init-broker-cluster + image: "{{ template "pulsar.imageFullName" (dict "image" .Values.images.v3_0_2.broker "root" .) }}" + imagePullPolicy: "{{ template "pulsar.imagePullPolicy" (dict "image" .Values.images.v3_0_2.broker "root" .) }}" + command: + - /kb-scripts/init-broker.sh + env: + {{- if .Values.debugEnabled }} + - name: PULSAR_LOG_ROOT_LEVEL + value: DEBUG + - name: PULSAR_LOG_LEVEL + value: DEBUG + {{- end }} + - name: brokerServiceUrl + value: pulsar://$(KB_CLUSTER_NAME)-broker-bootstrap.$(KB_NAMESPACE).svc{{ .Values.clusterDomain }}:6650 + - name: clusterName + value: $(KB_NAMESPACE)-$(KB_CLUSTER_COMP_NAME) + - name: webServiceUrl + value: http://$(KB_CLUSTER_NAME)-broker-bootstrap.$(KB_NAMESPACE).svc{{ .Values.clusterDomain }}:80 + volumeMounts: + - name: scripts + mountPath: /kb-scripts + - name: init-sysctl + image: "{{ template "pulsar.imageFullName" (dict "image" .Values.images.v3_0_2.broker "root" .) }}" + imagePullPolicy: "{{ template "pulsar.imagePullPolicy" (dict "image" .Values.images.v3_0_2.broker "root" .) 
}}" + command: + - /kb-scripts/init-broker-sysctl.sh + securityContext: + privileged: true + runAsNonRoot: false + runAsUser: 0 + volumeMounts: + - name: scripts + mountPath: /kb-scripts + containers: + - name: broker + securityContext: + allowPrivilegeEscalation: false + runAsNonRoot: true + runAsUser: 10000 + capabilities: + drop: + - ALL + privileged: false + runAsGroup: 0 + image: "{{ template "pulsar.imageFullName" (dict "image" .Values.images.v3_0_2.broker "root" .) }}" + imagePullPolicy: "{{ template "pulsar.imagePullPolicy" (dict "image" .Values.images.v3_0_2.broker "root" .) }}" + command: + - /kb-scripts/start-broker.sh + resources: + requests: + cpu: 200m + memory: 512Mi + env: + {{- if .Values.debugEnabled }} + - name: PULSAR_LOG_ROOT_LEVEL + value: DEBUG + - name: PULSAR_LOG_LEVEL + value: DEBUG + {{- end }} + - name: clusterName + value: $(KB_NAMESPACE)-$(KB_CLUSTER_COMP_NAME) + - name: SERVICE_PORT + value: "8080" + - name: brokerServiceUrl + value: pulsar://$(KB_CLUSTER_NAME)-broker-bootstrap.$(KB_NAMESPACE).svc{{ .Values.clusterDomain }}:6650 + - name: webServiceUrl + value: http://$(KB_CLUSTER_NAME)-broker-bootstrap.$(KB_NAMESPACE).svc{{ .Values.clusterDomain }}:80 + - name: POD_NAME + value: $(KB_POD_NAME) + - name: PULSAR_PREFIX_internalListenerName + value: cluster + - name: PULSAR_PREFIX_advertisedListeners + value: cluster:pulsar://$(POD_NAME).$(KB_CLUSTER_COMP_NAME)-headless.$(KB_NAMESPACE).svc{{ .Values.clusterDomain }}:6650 + # KoP configs: + # https://github.com/streamnative/kop/blob/branch-3.0.0.4/docs/configuration.md#configuration + - name: PULSAR_PREFIX_allowAutoTopicCreationType + value: partitioned + - name: PULSAR_PREFIX_kafkaProtocolMap + value: "CLIENT:PLAINTEXT" + - name: PULSAR_PREFIX_kafkaListeners + value: CLIENT://0.0.0.0:9092 + - name: PULSAR_PREFIX_kafkaAdvertisedListeners + value: CLIENT://$(POD_NAME).$(KB_CLUSTER_COMP_NAME)-headless.$(KB_NAMESPACE).svc{{ .Values.clusterDomain }}:9092 + ports: + - name: pulsar + containerPort: 6650 + - name: http + containerPort: 8080 + - name: kafka-client + containerPort: 9092 + livenessProbe: + failureThreshold: 30 + httpGet: + path: /status.html + port: http + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 100 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /status.html + port: http + scheme: HTTP + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 100 + startupProbe: + failureThreshold: 30 + httpGet: + path: /status.html + port: http + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 100 + volumeMounts: + - name: scripts + mountPath: /kb-scripts + - name: pulsar-config + mountPath: /opt/pulsar/conf + - name: broker-env + mountPath: /opt/pulsar/none + - name: metrics + image: {{ .Values.images.metrics.registry | default .Values.image.registry }}/{{ .Values.images.metrics.repository }}:{{ .Values.images.metrics.tag }} + imagePullPolicy: {{ default "IfNotPresent" .Values.images.metrics.pullPolicy }} + securityContext: + runAsNonRoot: false + runAsUser: 0 + env: + - name: JOB_NAME + value: broker + - name: SERVICE_PORT + value: "8080" + - name: POD_NAME + value: $(KB_POD_NAME) + - name: POD_NAMESPACE + value: $(KB_NAMESPACE) + {{- if .Values.debugEnabled }} + - name: LOG_LEVEL + value: debug + {{- else }} + - name: LOG_LEVEL + value: info + {{- end }} + command: + - "/bin/agamotto" + - "--config=/opt/agamotto/agamotto-config.yaml" + - 
"--feature-gates=-pkg.translator.prometheus.NormalizeName" + ports: + - name: http-metrics + containerPort: {{ .Values.metrics.service.port }} + volumeMounts: + - name: agamotto-configuration + mountPath: /opt/agamotto \ No newline at end of file diff --git a/addons/pulsar/templates/componentdef-proxy.yaml b/addons/pulsar/templates/componentdef-proxy.yaml new file mode 100644 index 000000000..e682951ef --- /dev/null +++ b/addons/pulsar/templates/componentdef-proxy.yaml @@ -0,0 +1,182 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: ComponentDefinition +metadata: + name: pulsar-proxy + labels: + {{- include "pulsar.labels" . | nindent 4 }} +spec: + provider: kubeblocks + description: Pulsar proxy. + serviceKind: pulsar + serviceVersion: {{ default .Chart.AppVersion .Values.clusterVersionOverride }} + monitor: + builtIn: false + exporterConfig: + scrapePath: /metrics + scrapePort: {{ .Values.metrics.service.port }} + serviceRefDeclarations: + - name: pulsarZookeeper + serviceRefDeclarationSpecs: + - serviceKind: zookeeper + serviceVersion: ^3.8.\d{1,2}$ + updateStrategy: BestEffortParallel + configs: + - name: agamotto-configuration + templateRef: {{ include "pulsar.name" . }}-agamotto-conf-tpl + namespace: {{ .Release.Namespace }} + volumeName: agamotto-configuration + - name: proxy-env + templateRef: {{ include "pulsar.name" . }}-proxy-env-tpl + namespace: {{ .Release.Namespace }} + volumeName: proxy-env + constraintRef: pulsar-env-constraints + keys: + - conf + asEnvFrom: + - proxy + - check-broker + - name: proxy-config + templateRef: {{ include "pulsar.name" . }}3-proxy-config-tpl + namespace: {{ .Release.Namespace }} + volumeName: pulsar-proxy-config + constraintRef: pulsar3-proxy-cc + scripts: + - name: pulsar-scripts + templateRef: {{ include "pulsar.name" . }}-scripts + namespace: {{ .Release.Namespace }} + volumeName: scripts + defaultMode: 0555 + runtime: + securityContext: + runAsNonRoot: true + runAsUser: 10000 + fsGroup: 0 + runAsGroup: 0 + initContainers: + - name: check-broker + image: "{{ template "pulsar.imageFullName" (dict "image" .Values.images.v3_0_2.proxy "root" .) }}" + imagePullPolicy: "{{ template "pulsar.imagePullPolicy" (dict "image" .Values.images.v3_0_2.proxy "root" .) }}" + command: + - /kb-scripts/init-proxy.sh + volumeMounts: + - name: scripts + mountPath: /kb-scripts + - name: proxy-env + mountPath: /opt/pulsar/none + env: + {{- if .Values.debugEnabled }} + - name: PULSAR_LOG_ROOT_LEVEL + value: DEBUG + - name: PULSAR_LOG_LEVEL + value: DEBUG + {{- end }} + containers: + - name: proxy + image: "{{ template "pulsar.imageFullName" (dict "image" .Values.images.v3_0_2.proxy "root" .) }}" + imagePullPolicy: "{{ template "pulsar.imagePullPolicy" (dict "image" .Values.images.v3_0_2.proxy "root" .) }}" + command: + - /kb-scripts/start-proxy.sh + securityContext: + allowPrivilegeEscalation: false + runAsNonRoot: true + runAsUser: 10000 + capabilities: + drop: + - ALL + privileged: false + runAsGroup: 0 + env: + {{- if .Values.debugEnabled }} + - name: PULSAR_LOG_ROOT_LEVEL + value: DEBUG + - name: PULSAR_LOG_LEVEL + value: DEBUG + {{- end }} + - name: SERVICE_PORT + value: "8080" + - name: clusterName + value: $(KB_NAMESPACE)-$(KB_CLUSTER_COMP_NAME) + - name: webServicePort + value: "8080" + ## in order to avoid the kb complex service association logic involved when using brokers, + ## zk-based service discovery is used here to build the proxy. 
+ ## but this is not secure to use service discovery(https://pulsar.apache.org/docs/3.0.x/administration-proxy/#use-service-discovery). + ## Todo: it needs to be iterated into the broker address in subsequent versions. +{{/* - name: brokerWebServiceURL*/}} +{{/* value: http://$(brokerSVC):80*/}} +{{/* - name: brokerServiceURL*/}} +{{/* value: pulsar://$(brokerSVC):6650*/}} + ports: + - name: pulsar + containerPort: 6650 + - name: http + containerPort: 8080 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /status.html + port: http + scheme: HTTP + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 100 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /status.html + port: http + scheme: HTTP + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 100 + startupProbe: + failureThreshold: 20 + httpGet: + path: /status.html + port: http + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 100 + volumeMounts: + - name: scripts + mountPath: /kb-scripts + - name: proxy-env + mountPath: /opt/pulsar/none + - name: pulsar-proxy-config + mountPath: /opt/pulsar/conf + - name: metrics + image: {{ .Values.images.metrics.registry | default .Values.image.registry }}/{{ .Values.images.metrics.repository }}:{{ .Values.images.metrics.tag }} + imagePullPolicy: {{ default "IfNotPresent" .Values.images.metrics.pullPolicy }} + securityContext: + runAsNonRoot: false + runAsUser: 0 + env: + - name: JOB_NAME + value: proxy + - name: SERVICE_PORT + value: "8080" + - name: POD_NAME + value: $(KB_POD_NAME) + - name: POD_NAMESPACE + value: $(KB_NAMESPACE) + {{- if .Values.debugEnabled }} + - name: LOG_LEVEL + value: debug + {{- else }} + - name: LOG_LEVEL + value: info + {{- end }} + command: + - "/bin/agamotto" + - "--config=/opt/agamotto/agamotto-config.yaml" + - "--feature-gates=-pkg.translator.prometheus.NormalizeName" + ports: + - name: http-metrics + containerPort: {{ .Values.metrics.service.port }} + volumeMounts: + - name: agamotto-configuration + mountPath: /opt/agamotto \ No newline at end of file diff --git a/addons/pulsar/templates/componentdef-zookeeper.yaml b/addons/pulsar/templates/componentdef-zookeeper.yaml new file mode 100644 index 000000000..0828b8c4a --- /dev/null +++ b/addons/pulsar/templates/componentdef-zookeeper.yaml @@ -0,0 +1,138 @@ +apiVersion: apps.kubeblocks.io/v1alpha1 +kind: ComponentDefinition +metadata: + name: pulsar-zookeeper + labels: + {{- include "pulsar.labels" . | nindent 4 }} +spec: + provider: kubeblocks + description: Pulsar zookeeper. + serviceKind: pulsar + serviceVersion: {{ default .Chart.AppVersion .Values.clusterVersionOverride }} + updateStrategy: BestEffortParallel + monitor: + builtIn: false + exporterConfig: + scrapePath: /metrics + scrapePort: {{ .Values.metrics.service.port }} + configs: + - name: agamotto-configuration + templateRef: {{ include "pulsar.name" . }}-agamotto-conf-tpl + namespace: {{ .Release.Namespace }} + volumeName: agamotto-configuration + - name: zookeeper-env + templateRef: {{ include "pulsar.name" . }}-zookeeper-env-tpl + namespace: {{ .Release.Namespace }} + volumeName: zookeeper-config-env + constraintRef: pulsar-env-constraints + keys: + - conf + asEnvFrom: + - zookeeper + scripts: + - name: pulsar-scripts + templateRef: {{ include "pulsar.name" . 
}}-scripts + namespace: {{ .Release.Namespace }} + volumeName: scripts + defaultMode: 0555 + runtime: + securityContext: + fsGroup: 0 + runAsGroup: 0 + runAsNonRoot: true + runAsUser: 10000 + containers: + - name: zookeeper + image: "{{ template "pulsar.imageFullName" (dict "image" .Values.images.v3_0_2.zookeeper "root" .) }}" + imagePullPolicy: "{{ template "pulsar.imagePullPolicy" (dict "image" .Values.images.v3_0_2.zookeeper "root" .) }}" + command: + - /kb-scripts/start-zookeeper.sh + env: + {{- if .Values.debugEnabled }} + - name: PULSAR_LOG_ROOT_LEVEL + value: DEBUG + - name: PULSAR_LOG_LEVEL + value: DEBUG + {{- end }} + - name: SERVICE_PORT + value: "8000" + - name: EXTERNAL_PROVIDED_SERVERS + value: "false" + - name: OPTS + value: "-Dlog4j2.formatMsgNoLookups=true" + ports: + - name: client + containerPort: 2181 + - name: tcp-quorum + containerPort: 2888 + - name: tcp-election + containerPort: 3888 + - name: http + containerPort: 8000 + livenessProbe: + exec: + command: + - bash + - -c + - echo ruok | nc -q 1 localhost 2181 | grep imok + failureThreshold: 10 + initialDelaySeconds: 5 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 30 + resources: + requests: + cpu: 50m + memory: 256Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + runAsGroup: 0 + runAsNonRoot: true + runAsUser: 10000 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /pulsar/data + name: data + - mountPath: /pulsar/data-log + name: data-log + - name: scripts + mountPath: /kb-scripts + - name: zookeeper-config-env + mountPath: /opt/pulsar/none + - name: metrics + image: {{ .Values.images.metrics.registry | default .Values.image.registry }}/{{ .Values.images.metrics.repository }}:{{ .Values.images.metrics.tag }} + imagePullPolicy: {{ default "IfNotPresent" .Values.images.metrics.pullPolicy }} + securityContext: + runAsNonRoot: false + runAsUser: 0 + env: + - name: JOB_NAME + value: zookeeper + - name: SERVICE_PORT + value: "8000" + - name: POD_NAME + value: $(KB_POD_NAME) + - name: POD_NAMESPACE + value: $(KB_NAMESPACE) + {{- if .Values.debugEnabled }} + - name: LOG_LEVEL + value: debug + {{- else }} + - name: LOG_LEVEL + value: info + {{- end }} + command: + - "/bin/agamotto" + - "--config=/opt/agamotto/agamotto-config.yaml" + - "--feature-gates=-pkg.translator.prometheus.NormalizeName" + ports: + - name: http-metrics + containerPort: {{ .Values.metrics.service.port }} + volumeMounts: + - name: agamotto-configuration + mountPath: /opt/agamotto \ No newline at end of file
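
For reference, the zookeeper resolution added to addons/pulsar/config/proxy-env.tpl above prefers a bound pulsarZookeeper service reference and only falls back to the conventional in-cluster service name. A sketch of the expected rendering, assuming a hypothetical cluster named "mypulsar" in namespace "demo" (names are illustrative, not taken from the patch):

    # No pulsarZookeeper service reference bound -> fall back to the in-cluster zookeeper:
    metadataStoreUrl: mypulsar-zookeeper.demo.svc:2181
    # Service reference bound with endpoint "external-zk.infra.svc" and port "2181":
    metadataStoreUrl: external-zk.infra.svc:2181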
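
Both the new init-proxy.sh wait loop and the zookeeper liveness probe gate on ZooKeeper's four-letter-word health check. The same check works as a manual spot check, assuming ruok is whitelisted on the server (the chart's own probe already relies on it) and that the example pod and namespace names below are adjusted to the actual deployment:

    # A healthy ZooKeeper answers "imok" on the client port.
    kubectl exec -n demo mypulsar-zookeeper-0 -- sh -c 'echo ruok | nc -q 1 localhost 2181'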
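
The NodePort branch in start-broker.sh derives the advertised listeners from the pod ordinal and the per-ordinal NodePort variables declared in the pulsar-broker vars section. A self-contained sketch of that derivation, with stub values standing in for the KubeBlocks-injected environment (all values below are assumptions for local testing):

    #!/bin/bash
    # Stub the environment that KubeBlocks would inject (illustrative values only).
    KB_POD_NAME="mypulsar-broker-2"
    KB_HOST_IP="10.0.0.5"
    NODE_PORT_PULSAR_2="31650"
    NODE_PORT_KAFKA_2="31092"

    ordinal="${KB_POD_NAME##*-}"              # strip through the last "-" -> "2"
    pulsar_env="NODE_PORT_PULSAR_${ordinal}"  # -> "NODE_PORT_PULSAR_2"
    kafka_env="NODE_PORT_KAFKA_${ordinal}"    # -> "NODE_PORT_KAFKA_2"
    eval pulsar_port="\$${pulsar_env}"        # indirect expansion -> "31650"
    eval kafka_port="\$${kafka_env}"          # indirect expansion -> "31092"

    echo "cluster:pulsar://${KB_HOST_IP}:${pulsar_port}"  # cluster:pulsar://10.0.0.5:31650
    echo "CLIENT://${KB_HOST_IP}:${kafka_port}"           # CLIENT://10.0.0.5:31092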
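
Each new ComponentDefinition declares a pulsarZookeeper service reference matching serviceKind zookeeper. A rough sketch of how a Cluster might bind that reference under the v1alpha1 API; the field shapes here are an assumption and should be checked against the KubeBlocks release in use:

    apiVersion: apps.kubeblocks.io/v1alpha1
    kind: Cluster
    metadata:
      name: mypulsar
      namespace: demo
    spec:
      componentSpecs:
        - name: broker
          componentDef: pulsar-broker
          replicas: 3
          serviceRefs:
            - name: pulsarZookeeper    # matches the serviceRefDeclarations above
              namespace: demo
              cluster: my-zookeeper    # an existing zookeeper cluster; a serviceDescriptor can be used instead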