From 97a6f79ef036cc4af92b644ea9260788d28ea609 Mon Sep 17 00:00:00 2001 From: Eleonora Zinchenko Date: Mon, 11 Nov 2024 11:56:43 +0200 Subject: [PATCH 01/17] Merge pull request #1714 from percona/release-1.18.0-push-test Fix tests for 1.18.0 release --- e2e-tests/data-at-rest-encryption/run | 2 +- e2e-tests/demand-backup-physical-sharded/run | 4 +- e2e-tests/demand-backup-physical/run | 4 +- e2e-tests/pvc-resize/run | 5 + .../compare/statefulset_mydb-rs0-oc.yml | 217 ++++++++++++++++++ e2e-tests/serviceless-external-nodes/run | 4 +- 6 files changed, 229 insertions(+), 7 deletions(-) create mode 100644 e2e-tests/serviceless-external-nodes/compare/statefulset_mydb-rs0-oc.yml diff --git a/e2e-tests/data-at-rest-encryption/run b/e2e-tests/data-at-rest-encryption/run index ff2c08b65d..f2417301e9 100755 --- a/e2e-tests/data-at-rest-encryption/run +++ b/e2e-tests/data-at-rest-encryption/run @@ -57,7 +57,7 @@ sleep 5 desc "check backup and restore -- minio" backup_dest_minio=$(get_backup_dest "$backup_name_minio") -kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ +retry 3 5 kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://${backup_dest_minio}/rs0/ \ | grep myApp.test.gz diff --git a/e2e-tests/demand-backup-physical-sharded/run b/e2e-tests/demand-backup-physical-sharded/run index c7c8dcabe5..5d218728b3 100755 --- a/e2e-tests/demand-backup-physical-sharded/run +++ b/e2e-tests/demand-backup-physical-sharded/run @@ -29,13 +29,13 @@ run_recovery_check() { local backup_name=$1 local compare_suffix=${2:-"_restore"} - wait_restore "${backup_name}" "${cluster}" "requested" "0" "1200" + wait_restore "${backup_name}" "${cluster}" "requested" "0" "3000" echo compare_kubectl "statefulset/${cluster}-rs0" ${compare_suffix} # we don't wait for cluster readiness here because the annotation gets removed then - wait_restore "${backup_name}" "${cluster}" "ready" "0" "1800" + wait_restore "${backup_name}" "${cluster}" "ready" "0" "3000" kubectl_bin get psmdb ${cluster} -o yaml if [ $(kubectl_bin get psmdb ${cluster} -o yaml | yq '.metadata.annotations."percona.com/resync-pbm"') == null ]; then echo "psmdb/${cluster} should be annotated with percona.com/resync-pbm after a physical restore" diff --git a/e2e-tests/demand-backup-physical/run b/e2e-tests/demand-backup-physical/run index 853468d29c..fb7a450def 100755 --- a/e2e-tests/demand-backup-physical/run +++ b/e2e-tests/demand-backup-physical/run @@ -29,13 +29,13 @@ run_recovery_check() { local backup_name=$1 local compare_suffix=${2:-"_restore"} - wait_restore "${backup_name}" "${cluster}" "requested" "0" "1200" + wait_restore "${backup_name}" "${cluster}" "requested" "0" "3000" echo compare_kubectl "statefulset/${cluster}-rs0" ${compare_suffix} # we don't wait for cluster readiness here because the annotation gets removed then - wait_restore "${backup_name}" "${cluster}" "ready" "0" "1800" + wait_restore "${backup_name}" "${cluster}" "ready" "0" "3000" kubectl_bin get psmdb ${cluster} -o yaml if [ $(kubectl_bin get psmdb ${cluster} -o yaml | yq '.metadata.annotations."percona.com/resync-pbm"') == null ]; then diff --git a/e2e-tests/pvc-resize/run b/e2e-tests/pvc-resize/run index 47e3f088aa..7689e35f48 100755 --- a/e2e-tests/pvc-resize/run +++ b/e2e-tests/pvc-resize/run @@ -157,6 +157,11 @@ if [[ $EKS == 1 || -n ${OPENSHIFT} ]]; 
then else spinup_psmdb "${cluster}-rs0" "$test_dir/conf/$cluster.yml" fi + echo "Enabling PVC resize after recreating PSMDB cluster ${cluster} " + kubectl_bin patch psmdb "${cluster}" --type=json -p='[{"op": "add", "path": "/spec/enableVolumeExpansion", "value":true }]' + sleep 10 + + wait_cluster_consistency "$cluster" fi desc 'create resourcequota' diff --git a/e2e-tests/serviceless-external-nodes/compare/statefulset_mydb-rs0-oc.yml b/e2e-tests/serviceless-external-nodes/compare/statefulset_mydb-rs0-oc.yml new file mode 100644 index 0000000000..8ec308dcd1 --- /dev/null +++ b/e2e-tests/serviceless-external-nodes/compare/statefulset_mydb-rs0-oc.yml @@ -0,0 +1,217 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: {} + generation: 1 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: mydb + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: mydb-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: mydb +spec: + podManagementPolicy: OrderedReady + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: mydb + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: mydb-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: mydb + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=keyFile + - --keyFile=/etc/mongodb-secrets/mongodb-key + - --tlsMode=allowTLS + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + - --quiet + command: + - /opt/percona/ps-entry.sh + env: + - name: SERVICE_NAME + value: mydb + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + envFrom: + - secretRef: + name: internal-mydb-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongod + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 2 + resources: + limits: + cpu: 300m + memory: 500M + requests: + cpu: 300m + memory: 500M + securityContext: + runAsNonRoot: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - 
mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: mydb-custom-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: mydb-custom-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 300m + memory: 500M + requests: + cpu: 300m + memory: 500M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 60 + volumes: + - name: mydb-custom-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: mydb-custom-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: mydb-rs0-mongod + optional: true + name: config + - name: mydb-custom-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: mydb-custom-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: mydb-custom-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: mydb-custom-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-mydb-users + updateStrategy: + rollingUpdate: + partition: 0 + type: RollingUpdate + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi + status: + phase: Pending diff --git a/e2e-tests/serviceless-external-nodes/run b/e2e-tests/serviceless-external-nodes/run index 2d87c765bf..b7a1272737 100755 --- a/e2e-tests/serviceless-external-nodes/run +++ b/e2e-tests/serviceless-external-nodes/run @@ -22,7 +22,7 @@ apply_cluster "$test_dir/conf/main.yml" wait_for_running "$cluster-rs0" 1 compare_kubectl statefulset/mydb-rs0 -secrets_count=$(kubectl_bin get secret -o yaml | yq '.items | length') +secrets_count=$(kubectl_bin get secret -o json | jq --arg pattern "$cluster" '[.items[] | select(.metadata.name | test($pattern))] | length') if [[ $secrets_count != 6 ]]; then echo "It's expected to have 6 secrets. Currently have $secrets_count" exit 1 @@ -41,7 +41,7 @@ apply_cluster "$test_dir/conf/external.yml" wait_pod ${cluster}-rs0-0 wait_pod ${cluster}-rs0-1 -secrets_count=$(kubectl_bin get secret -o yaml | yq '.items | length') +secrets_count=$(kubectl_bin get secret -o json | jq --arg pattern "$cluster" '[.items[] | select(.metadata.name | test($pattern))] | length') if [[ $secrets_count != 6 ]]; then echo "It's expected to have 6 secrets. 
Currently have $secrets_count" exit 1 From dc3633e10ba0f948c9b6f60751b7b2a794e0e447 Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Mon, 11 Nov 2024 12:33:58 +0200 Subject: [PATCH 02/17] support for running e2e tests against arm64 GKE nodes --- e2e-tests/arbiter/run | 5 +-- e2e-tests/conf/client.yml | 2 +- e2e-tests/conf/client_with_tls.yml | 2 +- e2e-tests/default-cr/run | 2 +- e2e-tests/finalizer/run | 6 +-- e2e-tests/functions | 63 ++++++++++++++++++++++++------ e2e-tests/init-deploy/run | 5 +-- 7 files changed, 62 insertions(+), 23 deletions(-) diff --git a/e2e-tests/arbiter/run b/e2e-tests/arbiter/run index 2721feae74..83af287fd8 100755 --- a/e2e-tests/arbiter/run +++ b/e2e-tests/arbiter/run @@ -74,9 +74,8 @@ main() { deploy_cert_manager desc 'create secrets and start client' - kubectl_bin apply \ - -f $conf_dir/client.yml \ - -f $conf_dir/secrets.yml + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml desc 'check arbiter without service-per-pod' check_cr_config "arbiter-rs0" diff --git a/e2e-tests/conf/client.yml b/e2e-tests/conf/client.yml index cc2a6b1e5f..1449675378 100644 --- a/e2e-tests/conf/client.yml +++ b/e2e-tests/conf/client.yml @@ -15,7 +15,7 @@ spec: terminationGracePeriodSeconds: 10 containers: - name: psmdb-client - image: percona/percona-server-mongodb:4.4 + image: percona/percona-server-mongodb:4.4-multi imagePullPolicy: Always command: - sleep diff --git a/e2e-tests/conf/client_with_tls.yml b/e2e-tests/conf/client_with_tls.yml index 4b6f5e829e..bd259c26ea 100644 --- a/e2e-tests/conf/client_with_tls.yml +++ b/e2e-tests/conf/client_with_tls.yml @@ -15,7 +15,7 @@ spec: terminationGracePeriodSeconds: 10 containers: - name: psmdb-client - image: percona/percona-server-mongodb:4.4 + image: percona/percona-server-mongodb:4.4-multi imagePullPolicy: Always command: ["/bin/bash","-c","cat /etc/mongodb-ssl/tls.key /etc/mongodb-ssl/tls.crt > /tmp/tls.pem && sleep 100500"] volumeMounts: diff --git a/e2e-tests/default-cr/run b/e2e-tests/default-cr/run index a0c80c3b78..53aa97c4aa 100755 --- a/e2e-tests/default-cr/run +++ b/e2e-tests/default-cr/run @@ -48,7 +48,7 @@ function main() { desc 'create secrets and start client' kubectl_bin apply -f $deploy_dir/secrets.yaml - kubectl_bin apply -f $conf_dir/client.yml + apply_client $conf_dir/client.yml desc "create first PSMDB cluster $cluster" kubectl_bin apply ${OPERATOR_NS:+-n $OPERATOR_NS} --server-side --force-conflicts -f $deploy_dir/crd.yaml diff --git a/e2e-tests/finalizer/run b/e2e-tests/finalizer/run index 2bd0acf3d9..fb43999a55 100755 --- a/e2e-tests/finalizer/run +++ b/e2e-tests/finalizer/run @@ -10,9 +10,9 @@ create_infra "$namespace" cluster="some-name" desc 'create secrets and start client' -kubectl_bin apply \ - -f $conf_dir/secrets_with_tls.yml \ - -f $conf_dir/client.yml +kubectl_bin apply -f $conf_dir/secrets_with_tls.yml +apply_client $conf_dir/client.yml + apply_cluster "$test_dir/conf/$cluster.yml" desc 'check if all 3 Pods started' diff --git a/e2e-tests/functions b/e2e-tests/functions index db1e9d4c6d..e9bc892fc9 100755 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -28,6 +28,17 @@ conf_dir=$(realpath $test_dir/../conf || :) src_dir=$(realpath $test_dir/../..) 
logs_dir=$(realpath $test_dir/../logs || :) +archs=$(kubectl get nodes -o jsonpath='{range .items[*]}{.status.nodeInfo.architecture}{" "}{end}') + +if [[ "$ARCH" == "arm64" ]]; then + for arch in $archs; do + if [[ "$arch" != "arm64" ]]; then + echo "All nodes in the cluster must be arm64, because ARCH=$ARCH !" + exit 1 + fi + done +fi + if [[ ${ENABLE_LOGGING} == "true" ]]; then if [ ! -d "${logs_dir}" ]; then mkdir "${logs_dir}" @@ -362,6 +373,8 @@ deploy_operator() { desc 'start PSMDB operator' local cr_file + local temp_operator_yaml="$(mktemp)" + if [ -f "${test_dir}/conf/crd.yaml" ]; then cr_file="${test_dir}/conf/crd.yaml" else @@ -369,21 +382,27 @@ deploy_operator() { fi kubectl_bin apply --server-side --force-conflicts -f "${cr_file}" - if [ -n "$OPERATOR_NS" ]; then + + if [[ "$OPERATOR_NS" ]]; then apply_rbac cw-rbac yq eval ' (.spec.template.spec.containers[].image = "'${IMAGE}'") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | - ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' ${src_dir}/deploy/cw-operator.yaml \ - | kubectl_bin apply -f - + ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' ${src_dir}/deploy/cw-operator.yaml > $temp_operator_yaml else apply_rbac rbac yq eval ' (.spec.template.spec.containers[].image = "'${IMAGE}'") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | - ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' ${src_dir}/deploy/operator.yaml \ - | kubectl_bin apply -f - + ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' ${src_dir}/deploy/operator.yaml > $temp_operator_yaml fi + + if [[ "$ARCH" == "arm64" ]]; then + yq eval '(.spec.template.spec.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}])' $temp_operator_yaml | kubectl_bin apply -f - + else + kubectl_bin apply -f $temp_operator_yaml + fi + sleep 2 wait_pod $(get_operator_pod) } @@ -685,6 +704,10 @@ compare_kubectl() { (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - >${new_result} + if [[ "$ARCH" == "arm64" ]]; then + yq -i eval 'del(.spec.template.spec.tolerations)' ${new_result} + fi + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' ${new_result} if version_gt "1.22"; then @@ -1080,12 +1103,30 @@ EOF } cat_config() { - cat "$1" \ - | yq eval '(.spec | select(.image == null)).image = "'"$IMAGE_MONGOD"'"' \ - | yq eval '(.spec | select(has("pmm"))).pmm.image = "'"$IMAGE_PMM_CLIENT"'"' \ - | yq eval '(.spec | select(has("initImage"))).initImage = "'"$IMAGE"'"' \ - | yq eval '(.spec | select(has("backup"))).backup.image = "'"$IMAGE_BACKUP"'"' \ - | yq eval '.spec.upgradeOptions.apply="Never"' + local temp_cr="$(mktemp)" + + yq eval ' + (.spec | select(.image == null)).image = "'"$IMAGE_MONGOD"'" | + (.spec | select(has("pmm"))).pmm.image = "'"$IMAGE_PMM_CLIENT"'" | + (.spec | select(has("initImage"))).initImage = "'"$IMAGE"'" | + (.spec | select(has("backup"))).backup.image = "'"$IMAGE_BACKUP"'" | + .spec.upgradeOptions.apply="Never"' "$1" > $temp_cr + + if [[ "$ARCH" == "arm64" ]]; then + yq eval '.spec.replsets[].tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | + (.spec | select(has("sharding"))).sharding.configsvrReplSet.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | + (.spec | select(has("sharding"))).sharding.mongos.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}]' $temp_cr + else + cat $temp_cr + fi +} + +apply_client() { + if [[ "$ARCH" == "arm64" ]]; then + yq eval '.spec.template.spec.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}]' "$1" | kubectl_bin apply -f - + else + kubectl_bin apply -f "$1" + fi } apply_cluster() { diff --git a/e2e-tests/init-deploy/run b/e2e-tests/init-deploy/run index 7520d936c3..30e1a27821 100755 --- a/e2e-tests/init-deploy/run +++ b/e2e-tests/init-deploy/run @@ -11,9 +11,8 @@ max_conn=13 create_infra $namespace desc 'create secrets and start client' -kubectl_bin apply \ - -f $conf_dir/secrets_with_tls.yml \ - -f $conf_dir/client.yml +kubectl_bin apply -f $conf_dir/secrets_with_tls.yml +apply_client $conf_dir/client.yml desc 'create custom RuntimeClass' if version_gt "1.19" && [ $EKS -ne 1 ]; then From 0a03ce95eaeeafdc6fc739fed5283953eff01b44 Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Tue, 12 Nov 2024 16:16:38 +0200 Subject: [PATCH 03/17] making cert-manager able to be installed on arm64 nodes (using helm) --- e2e-tests/balancer/run | 6 ++--- e2e-tests/functions | 50 +++++++++++++++++++++++++++++++++++++++--- 2 files changed, 50 insertions(+), 6 deletions(-) diff --git a/e2e-tests/balancer/run b/e2e-tests/balancer/run index 94e03cfcc5..2c1cfe585f 100755 --- a/e2e-tests/balancer/run +++ b/e2e-tests/balancer/run @@ -55,9 +55,9 @@ main() { desc 'create first PSMDB cluster' cluster="some-name" - kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" + + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml if version_gt "1.19" && [ $EKS -ne 1 ]; then $sed 's/docker/runc/g' "$conf_dir/container-rc.yaml" | kubectl_bin apply -f - diff --git a/e2e-tests/functions b/e2e-tests/functions index e9bc892fc9..c9ec2366c1 100755 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -920,9 +920,52 @@ deploy_cert_manager() { kubectl_bin create 
namespace cert-manager || : kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true || : - kubectl_bin apply -f "https://github.com/cert-manager/cert-manager/releases/download/v${CERT_MANAGER_VER}/cert-manager.yaml" --validate=false || : 2>/dev/null + + helm uninstall cert-manager --namespace cert-manager || : + helm repo remove jetstack || : + helm repo add jetstack https://charts.jetstack.io + + # Check and delete existing conflicting resources + kubectl_bin delete role cert-manager-cainjector:leaderelection -n kube-system --ignore-not-found=true + kubectl_bin delete role cert-manager:leaderelection -n kube-system --ignore-not-found=true + kubectl_bin delete rolebinding cert-manager-cainjector:leaderelection -n kube-system --ignore-not-found=true + kubectl_bin delete rolebinding cert-manager:leaderelection -n kube-system --ignore-not-found=true + + if [[ "$ARCH" == "arm64" ]]; then + helm install cert-manager \ + --namespace cert-manager \ + --version v${CERT_MANAGER_VER} \ + --set nodeSelector."kubernetes\.io/arch"=arm64 \ + --set tolerations[0].key="kubernetes.io/arch" \ + --set tolerations[0].operator="Equal" \ + --set tolerations[0].value="arm64" \ + --set tolerations[0].effect="NoSchedule" \ + --set startupapicheck.nodeSelector."kubernetes\.io/arch"=arm64 \ + --set startupapicheck.tolerations[0].key="kubernetes.io/arch" \ + --set startupapicheck.tolerations[0].operator="Equal" \ + --set startupapicheck.tolerations[0].value="arm64" \ + --set startupapicheck.tolerations[0].effect="NoSchedule" \ + --set cainjector.nodeSelector."kubernetes\.io/arch"=arm64 \ + --set cainjector.tolerations[0].key="kubernetes.io/arch" \ + --set cainjector.tolerations[0].operator="Equal" \ + --set cainjector.tolerations[0].value="arm64" \ + --set cainjector.tolerations[0].effect="NoSchedule" \ + --set webhook.nodeSelector."kubernetes\.io/arch"=arm64 \ + --set webhook.tolerations[0].key="kubernetes.io/arch" \ + --set webhook.tolerations[0].operator="Equal" \ + --set webhook.tolerations[0].value="arm64" \ + --set webhook.tolerations[0].effect="NoSchedule" \ + --no-hooks \ + jetstack/cert-manager + else + helm install cert-manager \ + --namespace cert-manager \ + --version v${CERT_MANAGER_VER} \ + --no-hooks \ + jetstack/cert-manager + fi + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready - sleep 120 } delete_crd() { From 1111fdc4b576a97c98b996c146e65629d8b02f78 Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Tue, 19 Nov 2024 12:53:22 +0200 Subject: [PATCH 04/17] always use apply_client so
that client pod has proper tolerations applied --- e2e-tests/expose-sharded/run | 6 +++--- e2e-tests/ldap-tls/run | 5 ++--- e2e-tests/ldap/run | 5 ++--- e2e-tests/multi-cluster-service/run | 5 ++--- e2e-tests/non-voting/run | 6 +++--- e2e-tests/one-pod/run | 6 +++--- e2e-tests/operator-self-healing-chaos/run | 5 +++-- e2e-tests/pitr-physical/run | 7 +++---- e2e-tests/pitr-sharded/run | 7 +++---- e2e-tests/pitr/run | 7 +++---- 10 files changed, 27 insertions(+), 32 deletions(-) diff --git a/e2e-tests/expose-sharded/run b/e2e-tests/expose-sharded/run index 7a20d7cedd..90c1bc903f 100755 --- a/e2e-tests/expose-sharded/run +++ b/e2e-tests/expose-sharded/run @@ -94,9 +94,9 @@ function main() { desc 'create first PSMDB cluster' cluster="some-name" - kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml + apply_s3_storage_secrets if version_gt "1.19" && [ $EKS -ne 1 ]; then diff --git a/e2e-tests/ldap-tls/run b/e2e-tests/ldap-tls/run index 0742cf71f6..77540937d7 100755 --- a/e2e-tests/ldap-tls/run +++ b/e2e-tests/ldap-tls/run @@ -152,9 +152,8 @@ main() { desc 'create secrets and start client' cluster="some-name" - kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml test_mongod_openldap test_sharded_openldap diff --git a/e2e-tests/ldap/run b/e2e-tests/ldap/run index 529622e90e..9f14737740 100755 --- a/e2e-tests/ldap/run +++ b/e2e-tests/ldap/run @@ -148,9 +148,8 @@ main() { desc 'create secrets and start client' cluster="some-name" - kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml test_mongod_openldap test_sharded_openldap diff --git a/e2e-tests/multi-cluster-service/run b/e2e-tests/multi-cluster-service/run index 4dc5b7560d..086d3fed02 100755 --- a/e2e-tests/multi-cluster-service/run +++ b/e2e-tests/multi-cluster-service/run @@ -80,9 +80,8 @@ create_infra "$namespace" desc 'create first PSMDB cluster' cluster="some-name" -kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" +kubectl_bin apply -f $conf_dir/secrets.yml +apply_client $conf_dir/client.yml apply_s3_storage_secrets if version_gt "1.19" && [ $EKS -ne 1 ]; then diff --git a/e2e-tests/non-voting/run b/e2e-tests/non-voting/run index 088df33f7c..6b6bf33e5f 100755 --- a/e2e-tests/non-voting/run +++ b/e2e-tests/non-voting/run @@ -39,9 +39,9 @@ main() { deploy_cert_manager desc 'create secrets and start client' - kubectl_bin apply \ - -f $conf_dir/client.yml \ - -f $conf_dir/secrets.yml + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml + desc "check non-voting members" spinup_psmdb "$cluster" "$test_dir/conf/$cluster.yml" diff --git a/e2e-tests/one-pod/run b/e2e-tests/one-pod/run index 9d8266fe37..51f62d21fd 100755 --- a/e2e-tests/one-pod/run +++ b/e2e-tests/one-pod/run @@ -28,9 +28,9 @@ main() { create_infra $namespace desc 'create secrets and start client' - kubectl_bin apply -f "${conf_dir}/client.yml" \ - -f "${conf_dir}/secrets.yml" \ - -f "${conf_dir}/minio-secret.yml" + kubectl_bin apply -f $conf_dir/secrets.yml + kubectl_bin apply -f $conf_dir/minio-secret.yml + apply_client $conf_dir/client.yml deploy_minio diff --git a/e2e-tests/operator-self-healing-chaos/run b/e2e-tests/operator-self-healing-chaos/run index 8cd344ad92..ddeb9d8c76 100755 --- 
a/e2e-tests/operator-self-healing-chaos/run +++ b/e2e-tests/operator-self-healing-chaos/run @@ -28,8 +28,9 @@ fail_pod() { yq eval ' .metadata.name = "chaos-operator-pod-failure" | del(.spec.selector.pods.test-namespace) | - .spec.selector.pods.'$test_namespace'[0] = "'$init_pod'"' $conf_dir/chaos-pod-failure.yml \ - | kubectl apply --namespace $test_namespace -f - + .spec.selector.pods.'$test_namespace'[0] = "'$init_pod'" | + .spec.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}]' \ + $conf_dir/chaos-pod-failure.yml | kubectl apply --namespace $test_namespace -f - sleep 10 desc 'check if operator works fine: scale down from 5 to 3' diff --git a/e2e-tests/pitr-physical/run b/e2e-tests/pitr-physical/run index 49ae125d4a..249c60d2e2 100755 --- a/e2e-tests/pitr-physical/run +++ b/e2e-tests/pitr-physical/run @@ -121,10 +121,9 @@ main() { deploy_minio desc 'create secrets and start client' - kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" \ - -f $conf_dir/minio-secret.yml + kubectl_bin apply -f $conf_dir/minio-secret.yml + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml cluster="some-name" desc "create first PSMDB cluster $cluster" diff --git a/e2e-tests/pitr-sharded/run b/e2e-tests/pitr-sharded/run index 94638aed9a..858ffda293 100755 --- a/e2e-tests/pitr-sharded/run +++ b/e2e-tests/pitr-sharded/run @@ -79,10 +79,9 @@ main() { deploy_minio desc 'create secrets and start client' - kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" \ - -f $conf_dir/minio-secret.yml + kubectl_bin apply -f $conf_dir/minio-secret.yml + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml desc 'create custom RuntimeClass' if version_gt "1.19" && [ $EKS -ne 1 ]; then diff --git a/e2e-tests/pitr/run b/e2e-tests/pitr/run index 6e839e27ae..a035958b1b 100755 --- a/e2e-tests/pitr/run +++ b/e2e-tests/pitr/run @@ -112,10 +112,9 @@ main() { deploy_minio desc 'create secrets and start client' - kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" \ - -f $conf_dir/minio-secret.yml + kubectl_bin apply -f $conf_dir/minio-secret.yml + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml cluster="some-name-rs0" desc "create first PSMDB cluster $cluster" From be2c94265e826ac524a5f44c16346f315335ec92 Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Tue, 19 Nov 2024 13:00:36 +0200 Subject: [PATCH 05/17] update e2e test to use alpine/curl with tolerations for arm64 architecture --- e2e-tests/multi-cluster-service/run | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/e2e-tests/multi-cluster-service/run b/e2e-tests/multi-cluster-service/run index 086d3fed02..f8af4f98f0 100755 --- a/e2e-tests/multi-cluster-service/run +++ b/e2e-tests/multi-cluster-service/run @@ -69,8 +69,9 @@ wait_service_export() { } desc "Register Kubernetes cluster" -k8s_cluster_name=$(kubectl -n default run --quiet curl --rm --restart=Never -it --image=appropriate/curl -- -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-name) -k8s_cluster_region=$(kubectl -n default run --quiet curl --rm --restart=Never -it --image=appropriate/curl -- -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-location) +kubectl -n default delete pod curl || : +k8s_cluster_name=$(kubectl -n 
default run --quiet curl --rm --restart=Never -it --image=alpine/curl --overrides='{"spec": {"tolerations":[{"key": "kubernetes.io/arch","operator": "Equal","value": "arm64","effect": "NoSchedule"}]}}' -- -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-name) +k8s_cluster_region=$(kubectl -n default run --quiet curl --rm --restart=Never -it --image=alpine/curl --overrides='{"spec": {"tolerations":[{"key": "kubernetes.io/arch","operator": "Equal","value": "arm64","effect": "NoSchedule"}]}}' -- -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-location) gcloud container hub memberships register ${k8s_cluster_name} --gke-cluster ${k8s_cluster_region}/${k8s_cluster_name} --enable-workload-identity From 970942a67808429a05091d7331de69147389b8a1 Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Tue, 19 Nov 2024 16:03:08 +0200 Subject: [PATCH 06/17] always use apply_client so that client pod has proper tolerations applied - 02 --- e2e-tests/custom-replset-name/run | 6 +++++- e2e-tests/liveness/run | 4 +++- e2e-tests/replset-overrides/run | 2 +- e2e-tests/security-context/run | 4 +++- e2e-tests/users/run | 6 +++--- 5 files changed, 15 insertions(+), 7 deletions(-) diff --git a/e2e-tests/custom-replset-name/run b/e2e-tests/custom-replset-name/run index c50d924433..9808298ab2 100755 --- a/e2e-tests/custom-replset-name/run +++ b/e2e-tests/custom-replset-name/run @@ -10,7 +10,11 @@ create_infra $namespace apply_s3_storage_secrets deploy_minio -kubectl_bin apply -f $conf_dir/secrets.yml -f $conf_dir/client.yml -f $conf_dir/minio-secret.yml +desc 'create secrets and start client' +kubectl_bin apply -f $conf_dir/secrets.yml +kubectl_bin apply -f $conf_dir/minio-secret.yml +apply_client $conf_dir/client.yml + cluster="some-name" desc 'create first PSMDB cluster' diff --git a/e2e-tests/liveness/run b/e2e-tests/liveness/run index c888b037a0..cba0acba8a 100755 --- a/e2e-tests/liveness/run +++ b/e2e-tests/liveness/run @@ -9,7 +9,9 @@ set_debug create_infra $namespace desc 'create secrets and start client' -kubectl_bin apply -f $conf_dir/secrets.yml -f $conf_dir/client.yml -f $conf_dir/minio-secret.yml +kubectl_bin apply -f $conf_dir/secrets.yml +kubectl_bin apply -f $conf_dir/minio-secret.yml +apply_client $conf_dir/client.yml cluster="liveness" desc "create first PSMDB cluster $cluster" diff --git a/e2e-tests/replset-overrides/run b/e2e-tests/replset-overrides/run index 2f7042f2a6..29dc6dbce0 100755 --- a/e2e-tests/replset-overrides/run +++ b/e2e-tests/replset-overrides/run @@ -97,7 +97,7 @@ test_deploy_with_overrides() { main() { create_infra ${namespace} - kubectl_bin apply -f ${conf_dir}/client.yml + apply_client $conf_dir/client.yml deploy_minio diff --git a/e2e-tests/security-context/run b/e2e-tests/security-context/run index fd1f06ba9d..2e166af207 100755 --- a/e2e-tests/security-context/run +++ b/e2e-tests/security-context/run @@ -9,7 +9,9 @@ set_debug create_infra $namespace desc 'create secrets and start client' -kubectl_bin apply -f $conf_dir/secrets.yml -f $conf_dir/client.yml -f $conf_dir/minio-secret.yml +kubectl_bin apply -f $conf_dir/secrets.yml +kubectl_bin apply -f $conf_dir/minio-secret.yml +apply_client $conf_dir/client.yml desc 'create additional service account' kubectl_bin apply -f "$test_dir/conf/service-account.yml" diff --git a/e2e-tests/users/run b/e2e-tests/users/run index bb61bd132c..3be2f8f980 100755 --- a/e2e-tests/users/run
+++ b/e2e-tests/users/run @@ -14,9 +14,9 @@ create_infra $namespace deploy_minio desc 'create secrets and start client' -kubectl_bin apply -f "${conf_dir}/client.yml" \ - -f "${conf_dir}/secrets.yml" \ - -f "${conf_dir}/minio-secret.yml" +kubectl_bin apply -f $conf_dir/secrets.yml +kubectl_bin apply -f ${conf_dir}/minio-secret.yml +apply_client $conf_dir/client.yml cluster="some-name-rs0" desc "create first PSMDB cluster $cluster" From da6966d32456075d18164803762284eec3ef8748 Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Tue, 19 Nov 2024 16:12:03 +0200 Subject: [PATCH 07/17] always use apply_client so that client pod has proper tolerations applied - 03 --- e2e-tests/custom-tls/run | 4 +- e2e-tests/data-sharded/run | 4 +- e2e-tests/functions | 51 ++++++++++++++++--- e2e-tests/tls-issue-cert-manager/run | 4 +- e2e-tests/upgrade-consistency-sharded-tls/run | 5 +- 5 files changed, 54 insertions(+), 14 deletions(-) diff --git a/e2e-tests/custom-tls/run b/e2e-tests/custom-tls/run index 9e39c37357..d4d8e91a7f 100755 --- a/e2e-tests/custom-tls/run +++ b/e2e-tests/custom-tls/run @@ -32,8 +32,8 @@ main() { destroy_cert_manager || true # We need to be sure that we are getting certificates created by the operator, not by cert-manager desc 'create secrets and start client' - kubectl_bin apply -f "$conf_dir/secrets.yml" - kubectl_bin apply -f "$conf_dir/client_with_tls.yml" + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client_with_tls.yml cluster="some-name" desc "create first PSMDB cluster $cluster" diff --git a/e2e-tests/data-sharded/run b/e2e-tests/data-sharded/run index 5c77ed5a78..07e40d9ac6 100755 --- a/e2e-tests/data-sharded/run +++ b/e2e-tests/data-sharded/run @@ -39,8 +39,8 @@ main() { deploy_cert_manager desc 'create secrets and start client' - kubectl_bin apply -f "$conf_dir/secrets.yml" - kubectl_bin apply -f "$conf_dir/client_with_tls.yml" + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client_with_tls.yml cluster="some-name" desc "create first PSMDB cluster $cluster" diff --git a/e2e-tests/functions b/e2e-tests/functions index c9ec2366c1..00069e0072 100755 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -30,13 +30,29 @@ logs_dir=$(realpath $test_dir/../logs || :) archs=$(kubectl get nodes -o jsonpath='{range .items[*]}{.status.nodeInfo.architecture}{" "}{end}') -if [[ "$ARCH" == "arm64" ]]; then +# if [[ "$ARCH" == "arm64" ]]; then +# for arch in $archs; do +# if [[ "$arch" != "arm64" ]]; then +# echo "All nodes in the cluster must be arm64, because ARCH=$ARCH !" +# exit 1 +# fi +# done +# fi + +first_arch=$(echo $archs | awk '{print $1}') + +if [[ "$first_arch" == "amd64" || "$first_arch" == "arm64" ]]; then for arch in $archs; do - if [[ "$arch" != "arm64" ]]; then - echo "All nodes in the cluster must be arm64, because ARCH=$ARCH !" + if [[ "$arch" != "$first_arch" ]]; then + echo "All nodes in the cluster must have the same architecture as the first node: $first_arch!"
exit 1 fi done + ARCH="$first_arch" + echo "================================== Using ARCH=$ARCH ==================================" +else + echo "Unsupported architecture: $first_arch" + exit 1 fi if [[ ${ENABLE_LOGGING} == "true" ]]; then @@ -428,6 +444,18 @@ deploy_operator_gh() { wait_pod "$(get_operator_pod)" } +aws_cli() { + local cmd=$1 + + kubectl_bin run -i --rm aws-cli --image=amazon/aws-cli \ + --restart=Never \ + --env=AWS_ACCESS_KEY_ID=some-access-key \ + --env=AWS_SECRET_ACCESS_KEY=some-secret-key \ + --env=AWS_DEFAULT_REGION=us-east-1 \ + --overrides='{"apiVersion": "v1","spec": {"tolerations": [{"key": "kubernetes.io/arch","operator": "Equal","value": "arm64","effect": "NoSchedule"}]}}' \ + -- --endpoint-url http://minio-service:9000 $cmd +} + deploy_minio() { desc 'install Minio' helm uninstall minio-service || : @@ -448,7 +476,20 @@ deploy_minio() { --set configPathmc=/tmp/.minio/ \ --set persistence.size=2G \ --set securityContext.enabled=false \ + --set 'tolerations[0].key=kubernetes.io/arch' \ + --set 'tolerations[0].operator=Equal' \ + --set 'tolerations[0].value=arm64' \ + --set 'tolerations[0].effect=NoSchedule' \ + --set 'postJob.tolerations[0].key=kubernetes.io/arch' \ + --set 'postJob.tolerations[0].operator=Equal' \ + --set 'postJob.tolerations[0].value=arm64' \ + --set 'postJob.tolerations[0].effect=NoSchedule' \ minio/minio + + # kubectl kustomize github.com/minio/operator?ref=v6.0.4 | yq eval ".metadata.namespace = \"${namespace}\"" - | kubectl -n ${namespace} apply -f - + # kubectl scale deployment minio-operator --replicas=1 + # kubectl patch deployment minio-operator -n ${namespace} --type='json' -p='[{"op": "add", "path": "/spec/template/spec/tolerations", "value": [{"key":"kubernetes.io/arch", "operator":"Equal", "value":"arm64", "effect":"NoSchedule"}]}]' + # MINIO_POD=$(kubectl_bin get pods --selector=name=minio-operator -o 'jsonpath={.items[].metadata.name}') MINIO_POD=$(kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}') wait_pod $MINIO_POD @@ -457,9 +498,7 @@ deploy_minio() { fi # create bucket - kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ - bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ - /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + aws_cli "s3 mb s3://operator-testing" } deploy_vault() { diff --git a/e2e-tests/tls-issue-cert-manager/run b/e2e-tests/tls-issue-cert-manager/run index 0b10d74b75..bfa6071766 100755 --- a/e2e-tests/tls-issue-cert-manager/run +++ b/e2e-tests/tls-issue-cert-manager/run @@ -29,8 +29,8 @@ main() { deploy_cert_manager desc 'create secrets and start client' - kubectl_bin apply -f "$conf_dir/secrets.yml" - kubectl_bin apply -f "$conf_dir/client_with_tls.yml" + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client_with_tls.yml desc 'create custom cert-manager issuers and certificates' kubectl_bin apply -f "$test_dir/conf/some-name-psmdb-ca-issuer.yml" diff --git a/e2e-tests/upgrade-consistency-sharded-tls/run b/e2e-tests/upgrade-consistency-sharded-tls/run index 3eed33edd1..c8a98787b4 100755 --- a/e2e-tests/upgrade-consistency-sharded-tls/run +++ b/e2e-tests/upgrade-consistency-sharded-tls/run @@ -20,8 +20,9 @@ main() { deploy_cert_manager desc 'create secrets and start client' - kubectl_bin apply -f "$conf_dir/secrets.yml" - kubectl_bin apply -f "$conf_dir/client_with_tls.yml" + kubectl_bin apply -f $conf_dir/secrets.yml + 
apply_client $conf_dir/client_with_tls.yml + deploy_cmctl desc "create first PSMDB cluster 1.16.2 $CLUSTER" From 09d51517d031cbf114b2f659a00650ad157c2094 Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Tue, 19 Nov 2024 19:42:36 +0200 Subject: [PATCH 08/17] always use apply_client so that client pod has proper tolerations applied - 04 --- e2e-tests/rs-shard-migration/run | 4 +++- e2e-tests/scaling/run | 5 ++--- e2e-tests/self-healing-chaos/run | 5 ++--- e2e-tests/service-per-pod/run | 5 ++--- e2e-tests/storage/run | 7 +++---- e2e-tests/version-service/run | 5 +++-- 6 files changed, 15 insertions(+), 16 deletions(-) diff --git a/e2e-tests/rs-shard-migration/run b/e2e-tests/rs-shard-migration/run index 7020b091a6..15c840d389 100755 --- a/e2e-tests/rs-shard-migration/run +++ b/e2e-tests/rs-shard-migration/run @@ -18,7 +18,9 @@ function main() { create_infra $namespace desc 'create secrets and start client' - kubectl_bin apply -f $conf_dir/secrets.yml -f $conf_dir/client.yml + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml + cluster="some-name" CLUSTER_SIZE=3 diff --git a/e2e-tests/scaling/run b/e2e-tests/scaling/run index 4246858b75..56ce1d5de8 100755 --- a/e2e-tests/scaling/run +++ b/e2e-tests/scaling/run @@ -9,9 +9,8 @@ set_debug create_infra $namespace desc 'create secrets and start client' -kubectl_bin apply \ - -f $conf_dir/secrets.yml \ - -f $conf_dir/client.yml +kubectl_bin apply -f $conf_dir/secrets.yml +apply_client $conf_dir/client.yml cluster='some-name-rs0' desc "create first PSMDB cluster $cluster" diff --git a/e2e-tests/self-healing-chaos/run b/e2e-tests/self-healing-chaos/run index 1380150331..35f30f4308 100755 --- a/e2e-tests/self-healing-chaos/run +++ b/e2e-tests/self-healing-chaos/run @@ -23,9 +23,8 @@ check_pod_restarted() { setup_cluster() { desc 'create secrets and start client' - kubectl_bin apply \ - -f $conf_dir/secrets.yml \ - -f $conf_dir/client.yml + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml desc "create first PSMDB cluster $cluster" apply_cluster $conf_dir/$cluster.yml diff --git a/e2e-tests/service-per-pod/run b/e2e-tests/service-per-pod/run index 9a785a393f..3fa44ab55a 100755 --- a/e2e-tests/service-per-pod/run +++ b/e2e-tests/service-per-pod/run @@ -79,9 +79,8 @@ main() { deploy_cert_manager desc 'create secrets and start client' - kubectl_bin apply \ - -f $conf_dir/client.yml \ - -f $conf_dir/secrets.yml + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml desc 'check ClusterIP' check_cr_config "cluster-ip-rs0" diff --git a/e2e-tests/storage/run b/e2e-tests/storage/run index 4dff4779f6..6ba8ac2fa6 100755 --- a/e2e-tests/storage/run +++ b/e2e-tests/storage/run @@ -47,10 +47,9 @@ main() { deploy_cert_manager desc 'create secrets and start client' - kubectl_bin apply \ - -f $conf_dir/client.yml \ - -f $conf_dir/secrets.yml \ - -f $test_dir/conf/hostpath-helper.yml + kubectl_bin apply -f $conf_dir/secrets.yml + kubectl_bin apply -f $test_dir/conf/hostpath-helper.yml + apply_client $conf_dir/client.yml desc 'check emptydir' check_cr_config "emptydir-rs0" diff --git a/e2e-tests/version-service/run b/e2e-tests/version-service/run index fbef8220bd..08eeabc3d7 100755 --- a/e2e-tests/version-service/run +++ b/e2e-tests/version-service/run @@ -14,7 +14,7 @@ function check_telemetry_transfer() { cluster="minimal-cluster" desc 'create secrets and start client' - kubectl_bin apply -f $conf_dir/client.yml + apply_client
$conf_dir/client.yml yq eval '.metadata.name = "'${cluster}'"' $conf_dir/secrets.yml | kubectl_bin apply -f - desc "create PSMDB minimal cluster $cluster" @@ -151,7 +151,8 @@ for i in "${!cases[@]}"; do cluster="${cases[$i]}" expected_image="${expected_images[$i]}" - kubectl_bin apply -f $conf_dir/secrets.yml -f $conf_dir/client.yml + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml desc 'create PSMDB cluster' tmp_file=$(mktemp) From d49669e2af2161e8fe0d91295a0562bc3fc760ea Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Tue, 19 Nov 2024 19:42:42 +0200 Subject: [PATCH 09/17] update deploy_cmctl function to handle arm64 architecture tolerations --- e2e-tests/functions | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/e2e-tests/functions b/e2e-tests/functions index 00069e0072..86a5af3e3b 100755 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -1568,11 +1568,18 @@ renew_certificate() { } deploy_cmctl() { - local service_account="cmctl" + local temp_cr="$(mktemp)" + + $sed -e "s/percona-server-mongodb-operator/cmctl/g" "${src_dir}/deploy/rbac.yaml" \ + | yq '(select(.rules).rules[] | select(contains({"apiGroups": ["cert-manager.io"]}))).resources += "certificates/status"' > $temp_cr + + if [[ "$ARCH" == "arm64" ]]; then + yq eval '(.spec.template.spec.tolerations // []) += [{"key": "node.kubernetes.io/arch","operator": "Equal","value": "arm64","effect": "NoSchedule"}]' $temp_cr | + kubectl_bin apply -f - + else + kubectl_bin apply -f $temp_cr + fi - $sed -e "s/percona-server-mongodb-operator/$service_account/g" "${src_dir}/deploy/rbac.yaml" \ - | yq '(select(.rules).rules[] | select(contains({"apiGroups": ["cert-manager.io"]}))).resources += "certificates/status"' \ - | kubectl_bin apply -f - kubectl_bin apply -f "$conf_dir/cmctl.yml" } From f56e773245ae9deef6ea7e38e7d1e3a97b21433d Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Wed, 20 Nov 2024 12:56:45 +0200 Subject: [PATCH 10/17] always use apply_client so that client pod has proper tolerations applied - 05 --- e2e-tests/cross-site-sharded/run | 5 +-- e2e-tests/custom-users-roles-sharded/run | 9 ++--- e2e-tests/custom-users-roles/run | 8 ++-- e2e-tests/data-at-rest-encryption/run | 3 +- e2e-tests/demand-backup-eks-credentials/run | 5 +-- e2e-tests/demand-backup-physical-sharded/run | 8 ++-- e2e-tests/demand-backup-physical/run | 8 ++-- e2e-tests/demand-backup-sharded/run | 10 ++--- e2e-tests/demand-backup/run | 8 ++-- e2e-tests/expose-sharded/run | 6 +-- e2e-tests/finalizer/run | 3 +- e2e-tests/ignore-labels-annotations/run | 40 ++++++++++---------- e2e-tests/ldap-tls/run | 1 - e2e-tests/ldap/run | 1 - e2e-tests/mongod-major-upgrade-sharded/run | 5 ++- e2e-tests/mongod-major-upgrade/run | 5 ++- e2e-tests/multi-cluster-service/run | 7 ++-- e2e-tests/non-voting/run | 1 - e2e-tests/operator-self-healing-chaos/run | 5 +-- e2e-tests/pvc-resize/run | 7 ++-- e2e-tests/recover-no-primary/run | 9 +++-- e2e-tests/replset-overrides/run | 14 +++---- e2e-tests/scheduled-backup/run | 5 +-- e2e-tests/serviceless-external-nodes/run | 17 +++++---- e2e-tests/smart-update/run | 3 +- e2e-tests/split-horizon/run | 10 ++--- e2e-tests/upgrade-consistency/run | 4 +- e2e-tests/upgrade-sharded/run | 6 +-- e2e-tests/upgrade/run | 6 +-- e2e-tests/users/run | 2 +- e2e-tests/version-service/run | 5 ++- 31 files changed, 113 insertions(+), 113 deletions(-) diff --git a/e2e-tests/cross-site-sharded/run
b/e2e-tests/cross-site-sharded/run index f19d19941c..f91288325e 100755 --- a/e2e-tests/cross-site-sharded/run +++ b/e2e-tests/cross-site-sharded/run @@ -37,9 +37,8 @@ desc "create main cluster" create_infra "$namespace" desc 'create secrets and start client' -kubectl_bin apply \ - -f "$conf_dir/client.yml" \ - -f "$test_dir/conf/secrets.yml" +kubectl_bin apply -f $test_dir/conf/secrets.yml +apply_client $conf_dir/client.yml desc "create main PSMDB cluster $main_cluster." apply_cluster "$test_dir/conf/$main_cluster.yml" diff --git a/e2e-tests/custom-users-roles-sharded/run b/e2e-tests/custom-users-roles-sharded/run index 78f8114bd0..4e767ebf7a 100755 --- a/e2e-tests/custom-users-roles-sharded/run +++ b/e2e-tests/custom-users-roles-sharded/run @@ -38,10 +38,9 @@ create_infra "$namespace" mongosUri="userAdmin:userAdmin123456@$cluster-mongos.$namespace" desc 'create secrets and start client' -kubectl_bin apply -f "${conf_dir}/client.yml" \ - -f "${conf_dir}/secrets.yml" \ - -f "${test_dir}/conf/app-user-secrets.yml" - +kubectl_bin apply -f $conf_dir/secrets.yml +kubectl_bin apply -f $test_dir/conf/app-user-secrets.yml +apply_client $conf_dir/client.yml apply_s3_storage_secrets if version_gt "1.19" && [ $EKS -ne 1 ]; then @@ -85,7 +84,7 @@ kubectl_bin patch psmdb ${cluster} --type=merge --patch '{ "key": "userTwoPassKey" }, "roles": [ - {"db":"admin","name":"userAdminAnyDatabase"}, + {"db":"admin","name":"userAdminAnyDatabase"}, {"db":"admin","name":"clusterAdmin"} ] } diff --git a/e2e-tests/custom-users-roles/run b/e2e-tests/custom-users-roles/run index 92dc628aaf..330e2d5c0e 100755 --- a/e2e-tests/custom-users-roles/run +++ b/e2e-tests/custom-users-roles/run @@ -27,9 +27,9 @@ cluster="some-name-rs0" create_infra $namespace desc 'create secrets and start client' -kubectl_bin apply -f "${conf_dir}/client.yml" \ - -f "${conf_dir}/secrets.yml" \ - -f "${test_dir}/conf/app-user-secrets.yml" +kubectl_bin apply -f $conf_dir/secrets.yml +kubectl_bin apply -f $test_dir/conf/app-user-secrets.yml +apply_client $conf_dir/client.yml mongoUri="userAdmin:userAdmin123456@$cluster.$namespace" @@ -57,7 +57,7 @@ kubectl_bin patch psmdb ${psmdb} --type=merge --patch '{ "key": "userTwoPassKey" }, "roles": [ - {"db":"admin","name":"userAdminAnyDatabase"}, + {"db":"admin","name":"userAdminAnyDatabase"}, {"db":"admin","name":"clusterAdmin"} ] } diff --git a/e2e-tests/data-at-rest-encryption/run b/e2e-tests/data-at-rest-encryption/run index f2417301e9..ce659d5e8e 100755 --- a/e2e-tests/data-at-rest-encryption/run +++ b/e2e-tests/data-at-rest-encryption/run @@ -13,7 +13,8 @@ deploy_minio apply_s3_storage_secrets desc 'create secrets and start client' -kubectl_bin apply -f "$conf_dir/secrets.yml" -f "$conf_dir/client.yml" +kubectl_bin apply -f $conf_dir/secrets.yml +apply_client $conf_dir/client.yml cluster='some-name' desc "create PSMDB cluster $cluster" diff --git a/e2e-tests/demand-backup-eks-credentials/run b/e2e-tests/demand-backup-eks-credentials/run index 96ffed793e..4a59214f9c 100755 --- a/e2e-tests/demand-backup-eks-credentials/run +++ b/e2e-tests/demand-backup-eks-credentials/run @@ -14,9 +14,8 @@ fi create_infra $namespace desc 'create secrets and start client' -kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" +kubectl_bin apply -f $conf_dir/secrets.yml +apply_client $conf_dir/client.yml cluster="some-name-rs0" desc "create first PSMDB cluster $cluster" diff --git a/e2e-tests/demand-backup-physical-sharded/run b/e2e-tests/demand-backup-physical-sharded/run index 
5d218728b3..f621feb6be 100755 --- a/e2e-tests/demand-backup-physical-sharded/run +++ b/e2e-tests/demand-backup-physical-sharded/run @@ -65,11 +65,13 @@ apply_s3_storage_secrets ### Case 1: Backup and restore on sharded cluster desc 'Testing on sharded cluster' +desc 'create secrets and start client' +kubectl_bin apply -f $conf_dir/secrets.yml +apply_client $conf_dir/client_with_tls.yml + echo "Creating PSMDB cluster" cluster="some-name" -kubectl_bin apply -f "${conf_dir}/secrets.yml" -apply_cluster "${test_dir}/conf/${cluster}-sharded.yml" -kubectl_bin apply -f "${conf_dir}/client_with_tls.yml" +apply_cluster $test_dir/conf/$cluster-sharded.yml echo "check if all pods started" wait_for_running ${cluster}-rs0 3 diff --git a/e2e-tests/demand-backup-physical/run b/e2e-tests/demand-backup-physical/run index fb7a450def..5fefe78f6e 100755 --- a/e2e-tests/demand-backup-physical/run +++ b/e2e-tests/demand-backup-physical/run @@ -61,11 +61,13 @@ apply_s3_storage_secrets desc 'Testing on not sharded cluster' +desc 'create secrets and start client' +kubectl_bin apply -f $conf_dir/secrets.yml +apply_client $conf_dir/client_with_tls.yml + echo "Creating PSMDB cluster" cluster="some-name" -kubectl_bin apply -f "${conf_dir}/secrets.yml" -apply_cluster "${test_dir}/conf/${cluster}.yml" -kubectl_bin apply -f "${conf_dir}/client_with_tls.yml" +apply_cluster $test_dir/conf/$cluster.yml echo "check if all pods started" wait_for_running ${cluster}-rs0 3 diff --git a/e2e-tests/demand-backup-sharded/run b/e2e-tests/demand-backup-sharded/run index 94456ba08a..fc298ed598 100755 --- a/e2e-tests/demand-backup-sharded/run +++ b/e2e-tests/demand-backup-sharded/run @@ -19,11 +19,9 @@ create_infra "$namespace" deploy_minio -desc 'create first PSMDB cluster' -cluster="some-name" -kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" +desc 'create secrets and start client' +kubectl_bin apply -f $conf_dir/secrets.yml +apply_client $conf_dir/client.yml apply_s3_storage_secrets if version_gt "1.19" && [ $EKS -ne 1 ]; then @@ -34,6 +32,8 @@ else kubectl_bin apply -f "$conf_dir/container-rc.yaml" fi +desc 'create first PSMDB cluster' +cluster="some-name" apply_cluster "$test_dir/conf/$cluster-rs0.yml" desc 'check if all 3 Pods started' wait_for_running $cluster-rs0 3 diff --git a/e2e-tests/demand-backup/run b/e2e-tests/demand-backup/run index 4c8e810e89..c616616b7e 100755 --- a/e2e-tests/demand-backup/run +++ b/e2e-tests/demand-backup/run @@ -116,11 +116,11 @@ create_infra $namespace deploy_minio -desc 'create secrets and start client' cluster="some-name-rs0" -kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" + +desc 'create secrets and start client' +kubectl_bin apply -f $conf_dir/secrets.yml +apply_client $conf_dir/client.yml apply_s3_storage_secrets diff --git a/e2e-tests/expose-sharded/run b/e2e-tests/expose-sharded/run index 90c1bc903f..81d258ba83 100755 --- a/e2e-tests/expose-sharded/run +++ b/e2e-tests/expose-sharded/run @@ -91,13 +91,11 @@ function expose_cluster() { function main() { create_infra "$namespace" - desc 'create first PSMDB cluster' - cluster="some-name" + desc 'create secrets and start client' kubectl_bin apply -f $conf_dir/secrets.yml apply_client $conf_dir/client.yml - apply_s3_storage_secrets if version_gt "1.19" && [ $EKS -ne 1 ]; then cat "$conf_dir/container-rc.yaml" | $sed 's/docker/runc/g' | kubectl_bin apply -f - @@ -107,6 +105,8 @@ function main() { kubectl_bin apply -f "$conf_dir/container-rc.yaml" fi + desc 'create first PSMDB cluster' + 
cluster="some-name" apply_cluster "$test_dir/conf/$cluster-rs0.yml" desc 'check if all 3 Pods started' wait_for_running $cluster-rs0 3 diff --git a/e2e-tests/finalizer/run b/e2e-tests/finalizer/run index fb43999a55..ad8bfe5b6e 100755 --- a/e2e-tests/finalizer/run +++ b/e2e-tests/finalizer/run @@ -7,13 +7,12 @@ test_dir=$(realpath "$(dirname "$0")") . "${test_dir}/../functions" create_infra "$namespace" -cluster="some-name" desc 'create secrets and start client' kubectl_bin apply -f $conf_dir/secrets_with_tls.yml apply_client $conf_dir/client.yml - +cluster="some-name" apply_cluster "$test_dir/conf/$cluster.yml" desc 'check if all 3 Pods started' wait_for_running "$cluster-rs0" 3 diff --git a/e2e-tests/ignore-labels-annotations/run b/e2e-tests/ignore-labels-annotations/run index 9c6666107a..a3380e9b56 100755 --- a/e2e-tests/ignore-labels-annotations/run +++ b/e2e-tests/ignore-labels-annotations/run @@ -45,16 +45,16 @@ check_service() { # `notIgnoredLabel` and `notIgnoredAnnotation` should be deleted kubectl_bin patch "service/$svc_name" --type=json --patch '[ { - "op": "add", - "path": "/metadata/labels", + "op": "add", + "path": "/metadata/labels", "value": { "notIgnoredLabel": "true", "ignoredLabel": "true" } }, { - "op": "add", - "path": "/metadata/annotations", + "op": "add", + "path": "/metadata/annotations", "value": { "notIgnoredAnnotation": "true", "ignoredAnnotation": "true" @@ -85,15 +85,15 @@ check_service() { desc "adding labels and annotations to $expose_path" kubectl_bin patch psmdb ${cluster} --type=json --patch '[ { - "op": "replace", - "path": "'$expose_path'/labels", + "op": "replace", + "path": "'$expose_path'/labels", "value": { "crLabel": "true", } }, { - "op": "replace", - "path": "'$expose_path'/annotations", + "op": "replace", + "path": "'$expose_path'/annotations", "value": { "crAnnotation": "true", } @@ -105,11 +105,11 @@ check_service() { desc "removing labels and annotations from $expose_path" kubectl_bin patch psmdb ${cluster} --type=json --patch '[ { - "op": "remove", + "op": "remove", "path": "'$expose_path'/labels" }, { - "op": "remove", + "op": "remove", "path": "'$expose_path'/annotations" }]' sleep 5 @@ -119,16 +119,16 @@ check_service() { desc "adding other labels and annotations to $expose_path" kubectl_bin patch psmdb ${cluster} --type=json --patch '[ { - "op": "replace", - "path": "'$expose_path'/labels", + "op": "replace", + "path": "'$expose_path'/labels", "value": { "otherCrLabel": "true", "secondCrLabel": "true", } }, { - "op": "replace", - "path": "'$expose_path'/annotations", + "op": "replace", + "path": "'$expose_path'/annotations", "value": { "otherCrAnnotation": "true", "secondCrAnnotation": "true", @@ -141,15 +141,15 @@ check_service() { desc "adding removing one labels from $expose_path" kubectl_bin patch psmdb ${cluster} --type=json --patch '[ { - "op": "replace", - "path": "'$expose_path'/labels", + "op": "replace", + "path": "'$expose_path'/labels", "value": { "otherCrLabel": "true", } }, { - "op": "replace", - "path": "'$expose_path'/annotations", + "op": "replace", + "path": "'$expose_path'/annotations", "value": { "otherCrAnnotation": "true", } @@ -162,11 +162,11 @@ check_service() { # When `labels` and `annotations` are not set, old metadata should stay kubectl_bin patch psmdb ${cluster} --type=json --patch '[ { - "op": "remove", + "op": "remove", "path": "/spec/ignoreAnnotations", }, { - "op": "remove", + "op": "remove", "path": "/spec/ignoreLabels", }]' diff --git a/e2e-tests/ldap-tls/run b/e2e-tests/ldap-tls/run index 
77540937d7..3b5df7d723 100755 --- a/e2e-tests/ldap-tls/run +++ b/e2e-tests/ldap-tls/run @@ -151,7 +151,6 @@ main() { deploy_openldap desc 'create secrets and start client' - cluster="some-name" kubectl_bin apply -f $conf_dir/secrets.yml apply_client $conf_dir/client.yml diff --git a/e2e-tests/ldap/run b/e2e-tests/ldap/run index 9f14737740..c6732ba3c0 100755 --- a/e2e-tests/ldap/run +++ b/e2e-tests/ldap/run @@ -147,7 +147,6 @@ main() { deploy_openldap desc 'create secrets and start client' - cluster="some-name" kubectl_bin apply -f $conf_dir/secrets.yml apply_client $conf_dir/client.yml diff --git a/e2e-tests/mongod-major-upgrade-sharded/run b/e2e-tests/mongod-major-upgrade-sharded/run index e4378d70c6..5a9773d21f 100755 --- a/e2e-tests/mongod-major-upgrade-sharded/run +++ b/e2e-tests/mongod-major-upgrade-sharded/run @@ -17,8 +17,9 @@ function main() { apply_s3_storage_secrets - kubectl_bin apply -f "${conf_dir}/client.yml" \ - -f "${conf_dir}/secrets.yml" + desc 'create secrets and start client' + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml desc 'install version service' diff --git a/e2e-tests/mongod-major-upgrade/run b/e2e-tests/mongod-major-upgrade/run index 8cb58e23fc..e688b82791 100755 --- a/e2e-tests/mongod-major-upgrade/run +++ b/e2e-tests/mongod-major-upgrade/run @@ -16,8 +16,9 @@ function main() { create_infra "${namespace}" - kubectl_bin apply -f "${conf_dir}/client.yml" \ - -f "${conf_dir}/secrets.yml" + desc 'create secrets and start client' + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml desc 'install version service' diff --git a/e2e-tests/multi-cluster-service/run b/e2e-tests/multi-cluster-service/run index f8af4f98f0..1a0c059927 100755 --- a/e2e-tests/multi-cluster-service/run +++ b/e2e-tests/multi-cluster-service/run @@ -78,9 +78,8 @@ gcloud container hub memberships register ${k8s_cluster_name} --gke-cluster ${k8 wait_mcs_api create_infra "$namespace" -desc 'create first PSMDB cluster' -cluster="some-name" +desc 'create secrets and start client' kubectl_bin apply -f $conf_dir/secrets.yml apply_client $conf_dir/client.yml @@ -93,7 +92,9 @@ else kubectl_bin apply -f "$conf_dir/container-rc.yaml" fi -apply_cluster "$test_dir/conf/$cluster.yml" +desc 'create first PSMDB cluster' +cluster="some-name" +apply_cluster $test_dir/conf/$cluster.yml desc 'check if all 3 Pods started' wait_for_running $cluster-rs0 3 wait_for_running $cluster-cfg 3 "false" diff --git a/e2e-tests/non-voting/run b/e2e-tests/non-voting/run index 6b6bf33e5f..9ee7f41b8c 100755 --- a/e2e-tests/non-voting/run +++ b/e2e-tests/non-voting/run @@ -42,7 +42,6 @@ main() { kubectl_bin apply -f $conf_dir/secrets.yml apply_client $conf_dir/client.yml - desc "check non-voting members" spinup_psmdb "$cluster" "$test_dir/conf/$cluster.yml" diff --git a/e2e-tests/operator-self-healing-chaos/run b/e2e-tests/operator-self-healing-chaos/run index ddeb9d8c76..6d22fe2b87 100755 --- a/e2e-tests/operator-self-healing-chaos/run +++ b/e2e-tests/operator-self-healing-chaos/run @@ -9,9 +9,8 @@ set_debug cluster="some-name-rs0" setup_cluster() { - desc 'create secrets and start client' - kubectl_bin apply \ - -f $conf_dir/secrets.yml + desc 'create secrets' + kubectl_bin apply -f $conf_dir/secrets.yml desc "create first PSMDB cluster $cluster" apply_cluster $conf_dir/$cluster.yml diff --git a/e2e-tests/pvc-resize/run b/e2e-tests/pvc-resize/run index 7689e35f48..a454be8268 100755 --- a/e2e-tests/pvc-resize/run +++ b/e2e-tests/pvc-resize/run @@ -115,10 +115,9 @@ fi 
create_infra "${namespace}" -desc 'create secrets and psmdb client' -kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" +desc 'create secrets and start client' +kubectl_bin apply -f $conf_dir/secrets.yml +apply_client $conf_dir/client.yml desc 'create PSMDB cluster' cluster="some-name" diff --git a/e2e-tests/recover-no-primary/run b/e2e-tests/recover-no-primary/run index 2ca5bff53b..9ba0a3f2a7 100755 --- a/e2e-tests/recover-no-primary/run +++ b/e2e-tests/recover-no-primary/run @@ -6,12 +6,13 @@ set -o xtrace test_dir=$(realpath $(dirname $0)) . ${test_dir}/../functions -create_infra ${namespace} +create_infra $namespace + +desc 'create secrets and start client' +kubectl_bin apply -f $conf_dir/secrets_with_tls.yml +apply_client $conf_dir/client.yml cluster="some-name" -kubectl_bin apply \ - -f ${conf_dir}/secrets_with_tls.yml \ - -f ${conf_dir}/client.yml function test_single_replset() { apply_cluster ${test_dir}/conf/${cluster}.yml diff --git a/e2e-tests/replset-overrides/run b/e2e-tests/replset-overrides/run index 29dc6dbce0..b226e70b81 100755 --- a/e2e-tests/replset-overrides/run +++ b/e2e-tests/replset-overrides/run @@ -26,9 +26,9 @@ run_recovery_check() { } test_override_after_deploy() { - kubectl_bin apply \ - -f ${conf_dir}/secrets_with_tls.yml \ - -f ${conf_dir}/minio-secret.yml + # kubectl_bin apply \ + # -f ${conf_dir}/secrets_with_tls.yml \ + # -f ${conf_dir}/minio-secret.yml echo "creating PSMDB cluster: ${cluster}" apply_cluster ${test_dir}/conf/${cluster}.yml @@ -66,10 +66,6 @@ test_override_after_deploy() { } test_deploy_with_overrides() { - kubectl_bin apply \ - -f ${conf_dir}/secrets_with_tls.yml \ - -f ${conf_dir}/minio-secret.yml - echo "creating external services" kubectl_bin apply -f ${test_dir}/conf/external-services.yml @@ -96,7 +92,9 @@ test_deploy_with_overrides() { } main() { - create_infra ${namespace} + create_infra $namespace + kubectl_bin apply -f $conf_dir/secrets_with_tls.yml + kubectl_bin apply -f $conf_dir/minio-secret.yml apply_client $conf_dir/client.yml deploy_minio diff --git a/e2e-tests/scheduled-backup/run b/e2e-tests/scheduled-backup/run index 8cb5a463c8..a986427c85 100755 --- a/e2e-tests/scheduled-backup/run +++ b/e2e-tests/scheduled-backup/run @@ -47,9 +47,8 @@ cat - <<-EOF | kubectl apply -f - EOF desc 'create secrets and start client' -kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" +kubectl_bin apply -f $conf_dir/secrets.yml +apply_client $conf_dir/client.yml apply_s3_storage_secrets diff --git a/e2e-tests/serviceless-external-nodes/run b/e2e-tests/serviceless-external-nodes/run index b7a1272737..5345933bc8 100755 --- a/e2e-tests/serviceless-external-nodes/run +++ b/e2e-tests/serviceless-external-nodes/run @@ -14,9 +14,9 @@ unset OPERATOR_NS desc "Create main cluster" create_infra "$namespace" -kubectl_bin apply \ - -f "$conf_dir/client.yml" \ - -f "$test_dir/conf/secrets.yml" + +kubectl_bin apply -f $test_dir/conf/secrets.yml +apply_client $conf_dir/client.yml apply_cluster "$test_dir/conf/main.yml" wait_for_running "$cluster-rs0" 1 @@ -33,13 +33,14 @@ kubectl_bin config set-context $(kubectl_bin config current-context) --namespace create_namespace $replica_namespace 0 deploy_operator -kubectl_bin apply \ - -f "$conf_dir/client.yml" \ - -f "$test_dir/conf/secrets.yml" apply_cluster "$test_dir/conf/external.yml" -wait_pod ${cluster}-rs0-0 -wait_pod ${cluster}-rs0-1 +desc 'create secrets and start client' +kubectl_bin apply -f $test_dir/conf/secrets.yml +apply_client $conf_dir/client.yml 
+ +wait_pod $cluster-rs0-0 +wait_pod $cluster-rs0-1 secrets_count=$(kubectl_bin get secret -o json | jq --arg pattern "$cluster" '[.items[] | select(.metadata.name | test($pattern))] | length') if [[ $secrets_count != 6 ]]; then diff --git a/e2e-tests/smart-update/run b/e2e-tests/smart-update/run index 015a4d0b19..e933d296f5 100755 --- a/e2e-tests/smart-update/run +++ b/e2e-tests/smart-update/run @@ -35,7 +35,8 @@ cluster="smart-update" create_infra ${namespace} desc 'create secrets and start client' -kubectl_bin apply -f ${conf_dir}/secrets.yml -f ${conf_dir}/client.yml +kubectl_bin apply -f $conf_dir/secrets.yml +apply_client $conf_dir/client.yml IMAGE_MONGOD_TO_UPDATE=${IMAGE_MONGOD} if [[ ${IMAGE_MONGOD} == *"percona-server-mongodb-operator"* ]]; then diff --git a/e2e-tests/split-horizon/run b/e2e-tests/split-horizon/run index 20a903c1a2..1abf0eb3b8 100755 --- a/e2e-tests/split-horizon/run +++ b/e2e-tests/split-horizon/run @@ -25,13 +25,13 @@ configure_client_hostAliases() { wait_pod $(kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}') } -create_infra ${namespace} +create_infra $namespace -cluster="some-name" -kubectl_bin apply \ - -f ${conf_dir}/secrets_with_tls.yml \ - -f ${conf_dir}/client_with_tls.yml +desc 'create secrets and start client' +kubectl_bin apply -f $conf_dir/secrets_with_tls.yml +apply_client $conf_dir/client_with_tls.yml +cluster="some-name" apply_cluster ${test_dir}/conf/${cluster}-3horizons.yml wait_for_running "${cluster}-rs0" 3 wait_cluster_consistency ${cluster} diff --git a/e2e-tests/upgrade-consistency/run b/e2e-tests/upgrade-consistency/run index fcd87a6fb6..ea23680626 100755 --- a/e2e-tests/upgrade-consistency/run +++ b/e2e-tests/upgrade-consistency/run @@ -12,8 +12,8 @@ main() { create_infra $namespace desc 'create secrets and start client' - kubectl_bin apply -f "${conf_dir}/client.yml" \ - -f "${conf_dir}/secrets.yml" + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml desc "create first PSMDB cluster 1.16.2 $CLUSTER" apply_cluster "$test_dir/conf/${CLUSTER}-rs0.yml" diff --git a/e2e-tests/upgrade-sharded/run b/e2e-tests/upgrade-sharded/run index 08db6b2323..30d5371855 100755 --- a/e2e-tests/upgrade-sharded/run +++ b/e2e-tests/upgrade-sharded/run @@ -167,9 +167,9 @@ function main() { deploy_minio desc 'create secrets and start client' - curl -s "https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${GIT_TAG}/deploy/secrets.yaml" >"${tmp_dir}/secrets.yaml" - kubectl_bin apply -f "${conf_dir}/client.yml" \ - -f "${tmp_dir}/secrets.yaml" + curl -s "https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${GIT_TAG}/deploy/secrets.yaml" > $tmp_dir/secrets.yaml + kubectl_bin apply -f $tmp_dir/secrets.yaml + apply_client $conf_dir/client.yml desc "create first PSMDB cluster $cluster" local cr_yaml="${tmp_dir}/cr_${GIT_TAG}.yaml" diff --git a/e2e-tests/upgrade/run b/e2e-tests/upgrade/run index 78df239e26..ba4ec4df28 100755 --- a/e2e-tests/upgrade/run +++ b/e2e-tests/upgrade/run @@ -146,9 +146,9 @@ function main() { deploy_minio desc 'create secrets and start client' - curl -s "https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${GIT_TAG}/deploy/secrets.yaml" >"${tmp_dir}/secrets.yaml" - kubectl_bin apply -f "${conf_dir}/client.yml" \ - -f "${tmp_dir}/secrets.yaml" + curl -s "https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${GIT_TAG}/deploy/secrets.yaml" > $tmp_dir/secrets.yaml + kubectl_bin apply -f 
$tmp_dir/secrets.yaml + apply_client $conf_dir/client.yml local cr_yaml="${tmp_dir}/cr_${GIT_TAG}.yaml" prepare_cr_yaml "${cr_yaml}" diff --git a/e2e-tests/users/run b/e2e-tests/users/run index 3be2f8f980..92d0124903 100755 --- a/e2e-tests/users/run +++ b/e2e-tests/users/run @@ -15,7 +15,7 @@ deploy_minio desc 'create secrets and start client' kubectl_bin apply -f $conf_dir/secrets.yml -kubectl_bin apply -f ${conf_dir}/minio-secret.yml +kubectl_bin apply -f $conf_dir/minio-secret.yml apply_client $conf_dir/client.yml cluster="some-name-rs0" diff --git a/e2e-tests/version-service/run b/e2e-tests/version-service/run index 08eeabc3d7..7e879888cf 100755 --- a/e2e-tests/version-service/run +++ b/e2e-tests/version-service/run @@ -12,11 +12,11 @@ function check_telemetry_transfer() { local cr_vs_channel=${2:-"disabled"} local telemetry_state=${3:-"enabled"} - cluster="minimal-cluster" desc 'create secrets and start client' apply_client $conf_dir/client.yml - yq eval '.metadata.name = "'${cluster}'"' $conf_dir/secrets.yml | kubectl_bin apply -f - + yq eval '.metadata.name = "'$cluster'"' $conf_dir/secrets.yml | kubectl_bin apply -f - + cluster="minimal-cluster" desc "create PSMDB minimal cluster $cluster" yq eval ' .spec.upgradeOptions.versionServiceEndpoint = "'${cr_vs_uri}'" | @@ -151,6 +151,7 @@ for i in "${!cases[@]}"; do cluster="${cases[$i]}" expected_image="${expected_images[$i]}" + desc 'create secrets and start client' kubectl_bin apply -f $conf_dir/secrets.yml apply_client $conf_dir/client.yml From b1a00f2b4eba732465791204773854f412824852 Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Wed, 20 Nov 2024 12:56:52 +0200 Subject: [PATCH 11/17] add tolerations for arm64 architecture in run_simple_cli_inside_image function --- e2e-tests/functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/e2e-tests/functions b/e2e-tests/functions index 86a5af3e3b..d49e22fd8d 100755 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -1422,7 +1422,7 @@ function run_simple_cli_inside_image() { local cli=${2} local pod_name=${RANDOM} - kubectl_bin -n default run ${pod_name} --image=${image} --restart=Never --command -- sleep infinity >/dev/null + kubectl_bin -n default run ${pod_name} --image=${image} --restart=Never --overrides='{"apiVersion": "v1","spec": {"tolerations": [{"key": "kubernetes.io/arch","operator": "Equal","value": "arm64","effect": "NoSchedule"}]}}' --command -- sleep infinity >/dev/null kubectl_bin -n default wait --for=condition=Ready pod/${pod_name} >/dev/null local output=$(kubectl_bin -n default exec ${pod_name} -- ${cli}) kubectl_bin -n default delete pod/${pod_name} --grace-period=0 --force >/dev/null From 908eb0f5dd59e36c65f20ee7d504bc163a40133e Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Wed, 20 Nov 2024 20:18:13 +0200 Subject: [PATCH 12/17] add tolerations for arm64 architecture in deploy_operator_gh and clean up apply_cluster logic --- e2e-tests/functions | 20 ++++---------------- e2e-tests/upgrade-sharded/run | 2 +- 2 files changed, 5 insertions(+), 17 deletions(-) diff --git a/e2e-tests/functions b/e2e-tests/functions index d49e22fd8d..959cad328f 100755 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -438,6 +438,7 @@ deploy_operator_gh() { curl -s "https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/${operator_yaml}.yaml" >"${tmp_dir}/${operator_yaml}_${git_tag}.yaml" $sed -i -e "s^image: .*^image: ${IMAGE}^" 
"${tmp_dir}/${operator_yaml}_${git_tag}.yaml" + yq eval -i '(.spec.template.spec.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}])' "${tmp_dir}/${operator_yaml}_${git_tag}.yaml" kubectl_bin apply -f "${tmp_dir}/${operator_yaml}_${git_tag}.yaml" sleep 2 @@ -1213,16 +1214,10 @@ apply_client() { } apply_cluster() { - if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then - cat_config "$1" \ - | kubectl_bin apply -f - + if [[ "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]]; then + cat_config $1 | yq eval 'del(.spec.backup.tasks.[1]) | del(.spec.backup.tasks.[1]) | del(.spec.backup.tasks.[1])' - | kubectl_bin apply -f - else - cat_config "$1" \ - | yq eval ' - del(.spec.backup.tasks.[1]) | - del(.spec.backup.tasks.[1]) | - del(.spec.backup.tasks.[1])' - \ - | kubectl_bin apply -f - + cat_config $1 | kubectl_bin apply -f - fi } @@ -1573,13 +1568,6 @@ deploy_cmctl() { $sed -e "s/percona-server-mongodb-operator/cmctl/g" "${src_dir}/deploy/rbac.yaml" \ | yq '(select(.rules).rules[] | select(contains({"apiGroups": ["cert-manager.io"]}))).resources += "certificates/status"' > $temp_cr - if [[ "$ARCH" == "arm64" ]]; then - yq eval '(.spec.template.spec.tolerations // []) += [{"key": "node.kubernetes.io/arch","operator": "Equal","value": "arm64","effect": "NoSchedule"}]' $temp_cr | - kubectl_bin apply -f - - else - kubectl_bin apply -f $temp_cr - fi - kubectl_bin apply -f "$conf_dir/cmctl.yml" } diff --git a/e2e-tests/upgrade-sharded/run b/e2e-tests/upgrade-sharded/run index 30d5371855..c3620d4bb1 100755 --- a/e2e-tests/upgrade-sharded/run +++ b/e2e-tests/upgrade-sharded/run @@ -161,7 +161,7 @@ function main() { if [ -n "$OPERATOR_NS" ]; then rbac="cw-rbac" fi - create_infra_gh "${namespace}" "${GIT_TAG}" + create_infra_gh $namespace $GIT_TAG deploy_cert_manager apply_s3_storage_secrets deploy_minio From 4c0c72d713e12cf4b9c1c184c1e2a53f32e92f9f Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Wed, 20 Nov 2024 20:19:57 +0200 Subject: [PATCH 13/17] add initial run-release-arm64 configuration with various options --- e2e-tests/run-release-arm64.csv | 44 +++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 e2e-tests/run-release-arm64.csv diff --git a/e2e-tests/run-release-arm64.csv b/e2e-tests/run-release-arm64.csv new file mode 100644 index 0000000000..8ac018e76c --- /dev/null +++ b/e2e-tests/run-release-arm64.csv @@ -0,0 +1,44 @@ +arbiter +balancer +custom-replset-name +custom-tls +custom-users-roles +custom-users-roles-sharded +cross-site-sharded +data-at-rest-encryption +data-sharded +default-cr +demand-backup +demand-backup-eks-credentials +demand-backup-physical +demand-backup-physical-sharded +demand-backup-sharded +expose-sharded +ignore-labels-annotations +init-deploy +finalizer +ldap +ldap-tls +limits +liveness +mongod-major-upgrade +mongod-major-upgrade-sharded +multi-cluster-service +non-voting +one-pod +operator-self-healing-chaos +pitr +pitr-sharded +pitr-physical +pvc-resize +recover-no-primary +replset-overrides +scaling +security-context +service-per-pod +serviceless-external-nodes +split-horizon +tls-issue-cert-manager +upgrade +upgrade-consistency +users From 43c416ba6bab0631d22919d8ac548495751153f7 Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Thu, 21 Nov 2024 21:22:45 +0200 Subject: [PATCH 14/17] amazon/aws-cli instead of perconalab/awscli --- e2e-tests/data-at-rest-encryption/run | 5 +-- 
e2e-tests/demand-backup-sharded/run | 10 ++---- e2e-tests/demand-backup/run | 15 ++------- e2e-tests/functions | 29 ++++++++++------- e2e-tests/run-release-arm64.csv | 2 ++ e2e-tests/scheduled-backup/run | 45 +++++++++++++-------------- e2e-tests/storage/run | 8 ++++- e2e-tests/upgrade-sharded/run | 6 +--- e2e-tests/upgrade/run | 6 +--- 9 files changed, 56 insertions(+), 70 deletions(-) diff --git a/e2e-tests/data-at-rest-encryption/run b/e2e-tests/data-at-rest-encryption/run index e1f7ab1609..54901f7c06 100755 --- a/e2e-tests/data-at-rest-encryption/run +++ b/e2e-tests/data-at-rest-encryption/run @@ -58,10 +58,7 @@ sleep 5 desc "check backup and restore -- minio" backup_dest_minio=$(get_backup_dest "$backup_name_minio") -retry 3 8 kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ - /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ - /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://${backup_dest_minio}/rs0/ \ - | grep myApp.test.gz +retry 3 8 aws_cli "s3 ls s3://${backup_dest_minio}/rs0/" | grep "myApp.test.gz" run_mongos 'use myApp\n db.test.insert({ x: 100501 })' "myApp:myPass@$cluster-mongos.$namespace" compare_mongos_cmd "find" "myApp:myPass@$cluster-mongos.$namespace" "-2nd" run_restore "$backup_name_minio" diff --git a/e2e-tests/demand-backup-sharded/run b/e2e-tests/demand-backup-sharded/run index fc298ed598..f01802e830 100755 --- a/e2e-tests/demand-backup-sharded/run +++ b/e2e-tests/demand-backup-sharded/run @@ -146,10 +146,7 @@ fi desc 'check backup and restore -- minio' backup_dest_minio=$(get_backup_dest "$backup_name_minio") -kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ - /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ - /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls "s3://${backup_dest_minio}/rs0/" \ - | grep "myApp.test.gz" +aws_cli "s3 ls s3://${backup_dest_minio}/rs0/" | grep "myApp.test.gz" insert_data_mongos "100501" "myApp" insert_data_mongos "100501" "myApp1" insert_data_mongos "100501" "myApp2" @@ -161,10 +158,7 @@ check_data desc 'delete backup and check if it is removed from bucket -- minio' kubectl_bin delete psmdb-backup --all -backup_exists=$(kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ - /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ - /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/ \ - | grep -c ${backup_dest_minio}_ | cat) +backup_exists=$(aws_cli "s3 ls s3://operator-testing/" | grep -c ${backup_dest_minio}_ | cat) if [[ $backup_exists -eq 1 ]]; then echo "Backup was not removed from bucket -- minio" exit 1 diff --git a/e2e-tests/demand-backup/run b/e2e-tests/demand-backup/run index c616616b7e..0b6e13201b 100755 --- a/e2e-tests/demand-backup/run +++ b/e2e-tests/demand-backup/run @@ -215,10 +215,7 @@ fi desc 'check backup and restore -- minio' backup_dest_minio=$(get_backup_dest "$backup_name_minio") -kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ - /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ - /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://${backup_dest_minio}/rs0/ \ - | grep myApp.test.gz +aws_cli "s3 ls s3://${backup_dest_minio}/rs0/" | grep "myApp.test.gz" run_recovery_check 
"$backup_name_minio" "$cluster" run_mongo \ @@ -250,10 +247,7 @@ run_recovery_check_bkp_source "$backup_name_minio" "$backup_dest_minio" "$cluste desc 'delete backup and check if it is removed from bucket -- minio' kubectl_bin delete psmdb-backup --all -backup_exists=$(kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ - /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ - /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/ \ - | grep -c ${backup_dest_minio} | cat) +backup_exists=$(aws_cli "s3 ls s3://operator-testing/" | grep -c ${backup_dest_minio} | cat) if [[ $backup_exists -eq 1 ]]; then echo "Backup was not removed from bucket -- minio" exit 1 @@ -285,10 +279,7 @@ sleep 60 desc 'delete backup and check if it is removed from bucket -- minio' kubectl_bin delete psmdb-backup --all -backup_exists=$(kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ - /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ - /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/ \ - | grep -c ${backup_dest_minio} | cat) +backup_exists=$(aws_cli "s3 ls s3://operator-testing/" | grep -c ${backup_dest_minio} | cat) if [[ $backup_exists -eq 1 ]]; then echo "Backup was not removed from bucket -- minio" exit 1 diff --git a/e2e-tests/functions b/e2e-tests/functions index f58630e555..71eaa69603 100755 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -30,15 +30,6 @@ logs_dir=$(realpath $test_dir/../logs || :) archs=$(kubectl get nodes -o jsonpath='{range .items[*]}{.status.nodeInfo.architecture}{" "}{end}') -# if [[ "$ARCH" == "arm64" ]]; then -# for arch in $archs; do -# if [[ "$arch" != "arm64" ]]; then -# echo "All nodes in the cluster must be arm64, because ARCH=$ARCH !" 
-# exit 1 -# fi -# done -# fi - first_arch=$(echo $archs | awk '{print $1}') if [[ "$first_arch" == "amd64" || "$first_arch" == "arm64" ]]; then @@ -542,9 +533,23 @@ deploy_vault() { --set injector.agentImage.repository="docker.io/hashicorp/vault" \ --set server.image.repository="docker.io/hashicorp/vault" else - retry 10 60 helm install $name hashicorp/vault \ - --disable-openapi-validation \ - --set dataStorage.enabled=false + if [[ "$ARCH" == "arm64" ]]; then + helm install vault-service hashicorp/vault \ + --disable-openapi-validation \ + --set dataStorage.enabled=false \ + --set server.tolerations[0].key=kubernetes.io/arch \ + --set server.tolerations[0].operator=Equal \ + --set server.tolerations[0].value=arm64 \ + --set server.tolerations[0].effect=NoSchedule \ + --set injector.tolerations[0].key=kubernetes.io/arch \ + --set injector.tolerations[0].operator=Equal \ + --set injector.tolerations[0].value=arm64 \ + --set injector.tolerations[0].effect=NoSchedule + else + retry 10 60 helm install $name hashicorp/vault \ + --disable-openapi-validation \ + --set dataStorage.enabled=false + fi fi until kubectl_bin get pod/vault-service-0 -o jsonpath='{.status.phase}' 2>/dev/null | grep 'Running'; do diff --git a/e2e-tests/run-release-arm64.csv b/e2e-tests/run-release-arm64.csv index 8ac018e76c..cf92dc1748 100644 --- a/e2e-tests/run-release-arm64.csv +++ b/e2e-tests/run-release-arm64.csv @@ -34,10 +34,12 @@ pvc-resize recover-no-primary replset-overrides scaling +scheduled-backup security-context service-per-pod serviceless-external-nodes split-horizon +storage tls-issue-cert-manager upgrade upgrade-consistency diff --git a/e2e-tests/scheduled-backup/run b/e2e-tests/scheduled-backup/run index a986427c85..aa275afaba 100755 --- a/e2e-tests/scheduled-backup/run +++ b/e2e-tests/scheduled-backup/run @@ -87,16 +87,7 @@ sleep 55 desc 'disable backups schedule' apply_cluster "$test_dir/conf/$cluster.yml" -if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then - backup_name_aws=$(kubectl_bin get psmdb-backup | grep aws-s3 | awk '{print$1}' | head -1) - backup_name_gcp=$(kubectl_bin get psmdb-backup | grep gcp-cs | awk '{print$1}' | head -1) - backup_name_azure=$(kubectl_bin get psmdb-backup | grep azure-blob | awk '{print$1}' | head -1) - wait_backup "$backup_name_aws" - wait_backup "$backup_name_gcp" - wait_backup "$backup_name_azure" -fi - -backup_name_minio=$(kubectl_bin get psmdb-backup | grep minio | awk '{print$1}' | head -1) +backup_name_minio=$(kubectl_bin get psmdb-backup | grep minio | awk '{print $1}' | head -1) wait_backup "$backup_name_minio" sleep 5 @@ -105,24 +96,32 @@ echo -n "checking backup count for every-min-minio..." check_backup_count every-min-minio 1 echo "OK" -echo -n "checking backup count for every-min-aws-s3..." -check_backup_count every-min-aws-s3 1 -echo "OK" +if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then + backup_name_aws=$(kubectl_bin get psmdb-backup | grep aws-s3 | awk '{print$1}' | head -1) + backup_name_gcp=$(kubectl_bin get psmdb-backup | grep gcp-cs | awk '{print$1}' | head -1) + backup_name_azure=$(kubectl_bin get psmdb-backup | grep azure-blob | awk '{print$1}' | head -1) + wait_backup "$backup_name_aws" + wait_backup "$backup_name_gcp" + wait_backup "$backup_name_azure" -echo -n "checking backup count for every-min-gcp-cs..." -check_backup_count every-min-gcp-cs 1 -echo "OK" + echo -n "checking backup count for every-min-aws-s3..." + check_backup_count every-min-aws-s3 1 + echo "OK" -echo -n "checking backup count for every-min-azure-blob..." 
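Several call sites in this series lean on the retry helper (retry 3 8 aws_cli ... in data-at-rest-encryption above, retry 10 60 helm install ... in deploy_vault). The helper is defined in e2e-tests/functions; a sketch consistent with how it is invoked, where the first two arguments are the attempt limit and the sleep between attempts:

retry() {
	local max=$1
	local delay=$2
	shift 2

	local attempt=1
	until "$@"; do
		if [ $attempt -ge $max ]; then
			echo "retry: '$*' still failing after $max attempts" >&2
			return 1
		fi
		attempt=$((attempt + 1))
		sleep $delay
	done
}

This is the same polling pattern the later backup-check hunks spell out inline as an until loop with a retry counter.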
-check_backup_count every-min-azure-blob 1 -echo "OK" + echo -n "checking backup count for every-min-gcp-cs..." + check_backup_count every-min-gcp-cs 1 + echo "OK" + + echo -n "checking backup count for every-min-azure-blob..." + check_backup_count every-min-azure-blob 1 + echo "OK" +fi desc 'check backup and restore -- minio' backup_dest_minio=$(get_backup_dest "$backup_name_minio") -kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ - /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ - /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://${backup_dest_minio}/rs0/ \ - | grep "myApp.test.gz" + +aws_cli "s3 ls s3://${backup_dest_minio}/rs0/" | grep "myApp.test.gz" + run_mongo 'use myApp\n db.test.insert({ x: 100501 })' "myApp:myPass@$cluster.$namespace" compare_mongo_cmd "find" "myApp:myPass@$cluster-0.$cluster.$namespace" "-2nd" compare_mongo_cmd "find" "myApp:myPass@$cluster-1.$cluster.$namespace" "-2nd" diff --git a/e2e-tests/storage/run b/e2e-tests/storage/run index 6ba8ac2fa6..17f52620d3 100755 --- a/e2e-tests/storage/run +++ b/e2e-tests/storage/run @@ -48,12 +48,18 @@ main() { desc 'create secrets and start client' kubectl_bin apply -f $conf_dir/secrets.yml - kubectl_bin apply -f $test_dir/conf/hostpath-helper.yml apply_client $conf_dir/client.yml desc 'check emptydir' check_cr_config "emptydir-rs0" + if [[ "$ARCH" == "arm64" ]]; then + yq eval '.spec.template.spec.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}]' \ + $test_dir/conf/hostpath-helper.yml | kubectl_bin apply -f - + else + kubectl_bin apply -f $test_dir/conf/hostpath-helper.yml + fi + desc 'check hostpath' check_cr_config "hostpath-rs0" diff --git a/e2e-tests/upgrade-sharded/run b/e2e-tests/upgrade-sharded/run index c3620d4bb1..a8b4cc5b24 100755 --- a/e2e-tests/upgrade-sharded/run +++ b/e2e-tests/upgrade-sharded/run @@ -263,11 +263,7 @@ function main() { run_mongos 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-mongos.${namespace}" backup_dest_minio=$(get_backup_dest "$backup_name_minio") - retry 3 5 kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ - /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ - /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls "s3://${backup_dest_minio}/rs0/" \ - | grep myApp.test.gz - + retry 3 5 aws_cli "s3 ls s3://${backup_dest_minio}/rs0/" | grep "myApp.test.gz" run_mongos 'use myApp\n db.test.insert({ x: 100501 })' "myApp:myPass@${cluster}-mongos.${namespace}" compare_mongos_cmd "find" "myApp:myPass@$cluster-mongos.$namespace" "-2nd" ".svc.cluster.local" "myApp" "test" run_restore "$backup_name_minio" diff --git a/e2e-tests/upgrade/run b/e2e-tests/upgrade/run index ba4ec4df28..a4589aa121 100755 --- a/e2e-tests/upgrade/run +++ b/e2e-tests/upgrade/run @@ -219,11 +219,7 @@ function main() { run_mongo 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-rs0.${namespace}" backup_dest_minio=$(get_backup_dest "$backup_name_minio") - kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ - /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ - /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://${backup_dest_minio}/rs0/ \ - | grep myApp.test.gz - + aws_cli "s3 ls s3://${backup_dest_minio}/rs0/" | grep "myApp.test.gz" run_mongo 
'use myApp\n db.test.insert({ x: 100501 })' "myApp:myPass@${cluster}-rs0.${namespace}" compare_mongo_cmd "find" "myApp:myPass@$cluster-rs0.$namespace" "-2nd" ".svc.cluster.local" "myApp" "test" run_restore "$backup_name_minio" From f9282dd6e81185eb1a939257c79ad9e2cb7958b0 Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Mon, 25 Nov 2024 10:56:05 +0200 Subject: [PATCH 15/17] add support for arm64 architecture in various e2e tests and enhance backup checks --- cw-rbac.yaml | 171 +++++++++++++++++++ cw-rbac.yaml.1 | 171 +++++++++++++++++++ cw-rbac.yaml.2 | 171 +++++++++++++++++++ e2e-tests/cross-site-sharded/run | 3 +- e2e-tests/data-sharded/run | 10 +- e2e-tests/default-cr/run | 43 ++++- e2e-tests/demand-backup-physical-sharded/run | 2 +- e2e-tests/demand-backup-sharded/run | 13 +- e2e-tests/functions | 36 ++-- e2e-tests/ldap/run | 6 +- e2e-tests/rs-shard-migration/run | 41 ++++- e2e-tests/run-release-arm64.csv | 2 + e2e-tests/scheduled-backup/run | 11 +- e2e-tests/self-healing-chaos/run | 7 +- 14 files changed, 653 insertions(+), 34 deletions(-) create mode 100644 cw-rbac.yaml create mode 100644 cw-rbac.yaml.1 create mode 100644 cw-rbac.yaml.2 diff --git a/cw-rbac.yaml b/cw-rbac.yaml new file mode 100644 index 0000000000..b874b3a7a3 --- /dev/null +++ b/cw-rbac.yaml @@ -0,0 +1,171 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: percona-server-mongodb-operator +rules: +- apiGroups: + - psmdb.percona.com + resources: + - perconaservermongodbs + - perconaservermongodbs/status + - perconaservermongodbs/finalizers + - perconaservermongodbbackups + - perconaservermongodbbackups/status + - perconaservermongodbbackups/finalizers + - perconaservermongodbrestores + - perconaservermongodbrestores/status + - perconaservermongodbrestores/finalizers + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - pods + - pods/exec + - services + - persistentvolumeclaims + - secrets + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - apps + resources: + - deployments + - replicasets + - statefulsets + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - events.k8s.io + - "" + resources: + - events + verbs: + - get + - list + - watch + - create + - patch +- apiGroups: + - certmanager.k8s.io + - cert-manager.io + resources: + - issuers + - certificates + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - deletecollection +- apiGroups: + - net.gke.io + - multicluster.x-k8s.io + resources: + - serviceexports + - serviceimports + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - deletecollection +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: 
percona-server-mongodb-operator +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: service-account-percona-server-mongodb-operator +subjects: +- kind: ServiceAccount + name: percona-server-mongodb-operator + namespace: "psmdb-operator" +roleRef: + kind: ClusterRole + name: percona-server-mongodb-operator + apiGroup: rbac.authorization.k8s.io diff --git a/cw-rbac.yaml.1 b/cw-rbac.yaml.1 new file mode 100644 index 0000000000..b874b3a7a3 --- /dev/null +++ b/cw-rbac.yaml.1 @@ -0,0 +1,171 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: percona-server-mongodb-operator +rules: +- apiGroups: + - psmdb.percona.com + resources: + - perconaservermongodbs + - perconaservermongodbs/status + - perconaservermongodbs/finalizers + - perconaservermongodbbackups + - perconaservermongodbbackups/status + - perconaservermongodbbackups/finalizers + - perconaservermongodbrestores + - perconaservermongodbrestores/status + - perconaservermongodbrestores/finalizers + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - pods + - pods/exec + - services + - persistentvolumeclaims + - secrets + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - apps + resources: + - deployments + - replicasets + - statefulsets + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - events.k8s.io + - "" + resources: + - events + verbs: + - get + - list + - watch + - create + - patch +- apiGroups: + - certmanager.k8s.io + - cert-manager.io + resources: + - issuers + - certificates + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - deletecollection +- apiGroups: + - net.gke.io + - multicluster.x-k8s.io + resources: + - serviceexports + - serviceimports + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - deletecollection +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: percona-server-mongodb-operator +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: service-account-percona-server-mongodb-operator +subjects: +- kind: ServiceAccount + name: percona-server-mongodb-operator + namespace: "psmdb-operator" +roleRef: + kind: ClusterRole + name: percona-server-mongodb-operator + apiGroup: rbac.authorization.k8s.io diff --git a/cw-rbac.yaml.2 b/cw-rbac.yaml.2 new file mode 100644 index 0000000000..b874b3a7a3 --- /dev/null +++ b/cw-rbac.yaml.2 @@ -0,0 +1,171 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: percona-server-mongodb-operator +rules: +- apiGroups: + - psmdb.percona.com + resources: + - perconaservermongodbs + - perconaservermongodbs/status + - perconaservermongodbs/finalizers + - 
perconaservermongodbbackups + - perconaservermongodbbackups/status + - perconaservermongodbbackups/finalizers + - perconaservermongodbrestores + - perconaservermongodbrestores/status + - perconaservermongodbrestores/finalizers + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - pods + - pods/exec + - services + - persistentvolumeclaims + - secrets + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - apps + resources: + - deployments + - replicasets + - statefulsets + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - events.k8s.io + - "" + resources: + - events + verbs: + - get + - list + - watch + - create + - patch +- apiGroups: + - certmanager.k8s.io + - cert-manager.io + resources: + - issuers + - certificates + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - deletecollection +- apiGroups: + - net.gke.io + - multicluster.x-k8s.io + resources: + - serviceexports + - serviceimports + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - deletecollection +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: percona-server-mongodb-operator +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: service-account-percona-server-mongodb-operator +subjects: +- kind: ServiceAccount + name: percona-server-mongodb-operator + namespace: "psmdb-operator" +roleRef: + kind: ClusterRole + name: percona-server-mongodb-operator + apiGroup: rbac.authorization.k8s.io diff --git a/e2e-tests/cross-site-sharded/run b/e2e-tests/cross-site-sharded/run index f91288325e..0d278749a2 100755 --- a/e2e-tests/cross-site-sharded/run +++ b/e2e-tests/cross-site-sharded/run @@ -109,8 +109,7 @@ create_namespace $replica_namespace 0 deploy_operator desc 'start client' -kubectl_bin apply \ - -f "$conf_dir/client.yml" +apply_client $conf_dir/client.yml desc "copy secrets from main to replica namespace and create all of them" kubectl get secret ${main_cluster}-secrets -o yaml -n ${namespace} \ diff --git a/e2e-tests/data-sharded/run b/e2e-tests/data-sharded/run index 07e40d9ac6..1815c38aea 100755 --- a/e2e-tests/data-sharded/run +++ b/e2e-tests/data-sharded/run @@ -29,11 +29,11 @@ check_rs_proper_component_deletion() { } main() { - if [[ ${IMAGE_MONGOD} == *"percona-server-mongodb-operator"* ]]; then - MONGO_VER=$(echo -n "${IMAGE_MONGOD}" | $sed -r 's/.*([0-9].[0-9])$/\1/') - else - MONGO_VER=$(echo -n "${IMAGE_MONGOD}" | $sed -r 's/.*:([0-9]+\.[0-9]+).*$/\1/') - fi + # if [[ ${IMAGE_MONGOD} == *"percona-server-mongodb-operator"* ]]; then + # MONGO_VER=$(echo -n "${IMAGE_MONGOD}" | $sed -r 's/.*([0-9].[0-9])$/\1/') + # else + # MONGO_VER=$(echo -n "${IMAGE_MONGOD}" | $sed -r 
's/.*:([0-9]+\.[0-9]+).*$/\1/') + # fi create_infra "$namespace" deploy_cert_manager diff --git a/e2e-tests/default-cr/run b/e2e-tests/default-cr/run index 53aa97c4aa..e5f35d721b 100755 --- a/e2e-tests/default-cr/run +++ b/e2e-tests/default-cr/run @@ -52,22 +52,42 @@ function main() { desc "create first PSMDB cluster $cluster" kubectl_bin apply ${OPERATOR_NS:+-n $OPERATOR_NS} --server-side --force-conflicts -f $deploy_dir/crd.yaml + + + local temp_operator_yaml="$(mktemp)" + if [ -n "$OPERATOR_NS" ]; then apply_rbac cw-rbac kubectl_bin apply -n ${OPERATOR_NS} -f $deploy_dir/cw-operator.yaml else apply_rbac rbac - yq eval '((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true")' "$deploy_dir/operator.yaml" \ - | kubectl_bin apply -f - + yq eval '((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true")' "$deploy_dir/operator.yaml" > $temp_operator_yaml + + if [[ "$ARCH" == "arm64" ]]; then + yq eval -i '(.spec.template.spec.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}])' $temp_operator_yaml + fi + + kubectl_bin apply -f $temp_operator_yaml fi + local temp_cr="$(mktemp)" yq eval '.spec.upgradeOptions.versionServiceEndpoint = "https://check-dev.percona.com" | .spec.replsets[].affinity.antiAffinityTopologyKey = "none" | .spec.replsets[].nonvoting.affinity.antiAffinityTopologyKey = "none" | .spec.replsets[].arbiter.affinity.antiAffinityTopologyKey = "none" | .spec.sharding.configsvrReplSet.affinity.antiAffinityTopologyKey = "none" | - .spec.sharding.mongos.affinity.antiAffinityTopologyKey = "none"' $deploy_dir/cr.yaml \ - | kubectl_bin apply -f - + .spec.sharding.mongos.affinity.antiAffinityTopologyKey = "none"' $deploy_dir/cr.yaml > $temp_cr + + if [[ "$ARCH" == "arm64" ]]; then + yq eval '.spec.replsets[].tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | + (.spec | select(has("sharding"))).sharding.configsvrReplSet.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | + (.spec | select(has("sharding"))).sharding.mongos.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | + (.spec.replsets[] | select(has("arbiter"))).arbiter.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | + (.spec.replsets[] | select(has("nonvoting"))).nonvoting.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}]' $temp_cr | + kubectl_bin apply -f - + else + kubectl_bin apply -f $temp_cr + fi desc 'check if all 3 Pods started' wait_cluster_consistency $cluster 70 @@ -137,7 +157,20 @@ function main() { cluster="minimal-cluster" yq eval '.metadata.name = "'${cluster}'"' $deploy_dir/secrets.yaml | kubectl_bin apply -f - - yq eval '.spec.upgradeOptions.versionServiceEndpoint = "https://check-dev.percona.com"' $deploy_dir/cr-minimal.yaml | kubectl_bin apply -f - + local temp_cr_minimal="$(mktemp)" + yq eval '.spec.upgradeOptions.versionServiceEndpoint = "https://check-dev.percona.com"' $deploy_dir/cr-minimal.yaml > $temp_cr_minimal + + if [[ "$ARCH" == "arm64" ]]; then + yq eval '.spec.replsets[].tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | + (.spec | select(has("sharding"))).sharding.configsvrReplSet.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": 
"NoSchedule"}] | + (.spec | select(has("sharding"))).sharding.mongos.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | + (.spec.replsets[] | select(has("arbiter"))).arbiter.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | + (.spec.replsets[] | select(has("nonvoting"))).nonvoting.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}]' $temp_cr_minimal | + kubectl_bin apply -f - + else + kubectl_bin apply -f $temp_cr_minimal + fi + desc 'check if all Pods started' wait_cluster_consistency "${cluster}" diff --git a/e2e-tests/demand-backup-physical-sharded/run b/e2e-tests/demand-backup-physical-sharded/run index cf8c4a4c66..afb79a8716 100755 --- a/e2e-tests/demand-backup-physical-sharded/run +++ b/e2e-tests/demand-backup-physical-sharded/run @@ -70,8 +70,8 @@ desc 'create secrets and start client' kubectl_bin apply -f $conf_dir/secrets.yml apply_client $conf_dir/client_with_tls.yml -echo "Creating PSMDB cluster" cluster="some-name" +desc "create first PSMDB cluster $cluster" apply_cluster $test_dir/conf/$cluster-sharded.yml echo "check if all pods started" diff --git a/e2e-tests/demand-backup-sharded/run b/e2e-tests/demand-backup-sharded/run index f01802e830..756ad88e8f 100755 --- a/e2e-tests/demand-backup-sharded/run +++ b/e2e-tests/demand-backup-sharded/run @@ -146,7 +146,18 @@ fi desc 'check backup and restore -- minio' backup_dest_minio=$(get_backup_dest "$backup_name_minio") -aws_cli "s3 ls s3://${backup_dest_minio}/rs0/" | grep "myApp.test.gz" + +retry=0 +until aws_cli "s3 ls s3://$backup_dest_minio/rs0/" | grep "myApp.test.gz"; do + if [[ $retry -ge 10 ]]; then + echo "Max retry count $retry reached. File myApp.test.gz wasn't found on s3://$backup_dest_minio/rs0/" + exit 1 + fi + ((retry += 1)) + echo -n . 
+ sleep 5 +done + insert_data_mongos "100501" "myApp" insert_data_mongos "100501" "myApp1" insert_data_mongos "100501" "myApp2" diff --git a/e2e-tests/functions b/e2e-tests/functions index 71eaa69603..f7faedf915 100755 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -424,11 +424,11 @@ deploy_operator() { fi if [[ "$ARCH" == "arm64" ]]; then - yq eval '(.spec.template.spec.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}])' $temp_operator_yaml | kubectl_bin apply -f - - else - kubectl_bin apply -f $temp_operator_yaml + yq eval -i '(.spec.template.spec.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}])' $temp_operator_yaml fi + kubectl_bin apply -f $temp_operator_yaml + sleep 2 wait_pod $(get_operator_pod) } @@ -448,7 +448,11 @@ deploy_operator_gh() { curl -s "https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/${operator_yaml}.yaml" >"${tmp_dir}/${operator_yaml}_${git_tag}.yaml" $sed -i -e "s^image: .*^image: ${IMAGE}^" "${tmp_dir}/${operator_yaml}_${git_tag}.yaml" - yq eval -i '(.spec.template.spec.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}])' "${tmp_dir}/${operator_yaml}_${git_tag}.yaml" + + if [[ "$ARCH" == "arm64" ]]; then + yq eval -i '(.spec.template.spec.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}])' "${tmp_dir}/${operator_yaml}_${git_tag}.yaml" + fi + kubectl_bin apply -f "${tmp_dir}/${operator_yaml}_${git_tag}.yaml" sleep 2 @@ -497,10 +501,6 @@ deploy_minio() { --set 'postJob.tolerations[0].effect=NoSchedule' \ minio/minio - # kubectl kustomize github.com/minio/operator?ref=v6.0.4 | yq eval ".metadata.namespace = \"${namespace}\"" - | kubectl -n ${namespace} apply -f - - # kubectl scale deployment minio-operator --replicas=1 - # kubectl patch deployment minio-operator -n ${namespace} --type='json' -p='[{"op": "add", "path": "/spec/template/spec/tolerations", "value": [{"key":"kubernetes.io/arch", "operator":"Equal", "value":"arm64", "effect":"NoSchedule"}]}]' - # MINIO_POD=$(kubectl_bin get pods --selector=name=minio-operator -o 'jsonpath={.items[].metadata.name}') MINIO_POD=$(kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}') wait_pod $MINIO_POD @@ -588,7 +588,18 @@ deploy_chaos_mesh() { desc 'install chaos-mesh' helm repo add chaos-mesh https://charts.chaos-mesh.org - helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=${chaos_mesh_ns} --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version 2.5.1 + # helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=${chaos_mesh_ns} --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version 2.5.1 + helm install chaos-mesh chaos-mesh/chaos-mesh \ + --namespace=${chaos_mesh_ns} \ + --set chaosDaemon.runtime=containerd \ + --set chaosDaemon.socketPath=/run/containerd/containerd.sock \ + --set dashboard.create=false \ + --set controllerManager.tolerations[0].key="kubernetes.io/arch" \ + --set controllerManager.tolerations[0].operator="Equal" \ + --set controllerManager.tolerations[0].value="arm64" \ + --set controllerManager.tolerations[0].effect="NoSchedule" \ + --version 2.5.1 + sleep 10 } @@ -1221,9 +1232,10 @@ cat_config() { if [[ "$ARCH" == 
"arm64" ]]; then yq eval '.spec.replsets[].tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | - (.spec | select(has("sharding"))).sharding.configsvrReplSet.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | - (.spec | select(has("sharding"))).sharding.mongos.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | - (.spec.replsets[] | select(has("arbiter"))).arbiter.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}]' $temp_cr + (.spec | select(has("sharding"))).sharding.configsvrReplSet.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | + (.spec | select(has("sharding"))).sharding.mongos.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | + (.spec.replsets[] | select(has("arbiter"))).arbiter.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | + (.spec.replsets[] | select(has("nonvoting"))).nonvoting.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}]' $temp_cr else cat $temp_cr fi diff --git a/e2e-tests/ldap/run b/e2e-tests/ldap/run index c6732ba3c0..f97d38c3ca 100755 --- a/e2e-tests/ldap/run +++ b/e2e-tests/ldap/run @@ -12,7 +12,11 @@ deploy_openldap() { select(.kind=="Deployment").spec.template.spec.containers[0].securityContext.capabilities.add[0]="NET_BIND_SERVICE"' "$test_dir/conf/openldap.yaml" \ | kubectl_bin apply -f - else - kubectl_bin apply -f "$test_dir/conf/openldap.yaml" + if [[ "$ARCH" == "arm64" ]]; then + yq eval '(select(.kind == "Deployment") | .spec.template.spec.tolerations) += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}]' $test_dir/conf/openldap.yaml | kubectl_bin apply -f - + else + kubectl_bin apply -f "$test_dir/conf/openldap.yaml" + fi fi kubectl rollout status deployment/openldap --timeout=120s diff --git a/e2e-tests/rs-shard-migration/run b/e2e-tests/rs-shard-migration/run index 15c840d389..6507824459 100755 --- a/e2e-tests/rs-shard-migration/run +++ b/e2e-tests/rs-shard-migration/run @@ -33,8 +33,47 @@ function main() { simple_data_check "${cluster}-rs0" ${CLUSTER_SIZE} desc 'initiate migration from replicaset to sharded cluster' - kubectl_bin patch psmdb/${cluster} --type json -p='[{"op":"add","path":"/spec/sharding","value":{"configsvrReplSet":{"size":'${CLUSTER_SIZE}',"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"size":1}}}]' + + kubectl patch psmdb/some-name --type json '-p=[{ + "op": "add", + "path": "/spec/sharding", + "value": { + "configsvrReplSet": { + "size": 3, + "volumeSpec": { + "persistentVolumeClaim": { + "resources": { + "requests": { + "storage": "3Gi" + } + } + } + }, + "tolerations": [ + { + "key": "kubernetes.io/arch", + "operator": "Equal", + "value": "arm64", + "effect": "NoSchedule" + } + ] + }, + "enabled": true, + "mongos": { + "size": 1, + "tolerations": [ + { + "key": "kubernetes.io/arch", + "operator": "Equal", + "value": "arm64", + "effect": "NoSchedule" + } + ] + } + } + }]' sleep 10 + wait_for_running "${cluster}-rs0" "${CLUSTER_SIZE}" "false" wait_for_running "${cluster}-cfg" "${CLUSTER_SIZE}" "false" wait_cluster_consistency "${cluster}" diff --git 
a/e2e-tests/run-release-arm64.csv b/e2e-tests/run-release-arm64.csv index cf92dc1748..0492d48716 100644 --- a/e2e-tests/run-release-arm64.csv +++ b/e2e-tests/run-release-arm64.csv @@ -33,9 +33,11 @@ pitr-physical pvc-resize recover-no-primary replset-overrides +rs-shard-migration scaling scheduled-backup security-context +self-healing-chaos service-per-pod serviceless-external-nodes split-horizon diff --git a/e2e-tests/scheduled-backup/run b/e2e-tests/scheduled-backup/run index aa275afaba..dde0a5bb43 100755 --- a/e2e-tests/scheduled-backup/run +++ b/e2e-tests/scheduled-backup/run @@ -120,7 +120,16 @@ fi desc 'check backup and restore -- minio' backup_dest_minio=$(get_backup_dest "$backup_name_minio") -aws_cli "s3 ls s3://${backup_dest_minio}/rs0/" | grep "myApp.test.gz" +retry=0 +until aws_cli "s3 ls s3://$backup_dest_minio/rs0/" | grep "myApp.test.gz"; do + if [[ $retry -ge 10 ]]; then + echo "Max retry count $retry reached. File myApp.test.gz wasn't found on s3://$backup_dest_minio/rs0/" + exit 1 + fi + ((retry += 1)) + echo -n . + sleep 5 +done run_mongo 'use myApp\n db.test.insert({ x: 100501 })' "myApp:myPass@$cluster.$namespace" compare_mongo_cmd "find" "myApp:myPass@$cluster-0.$cluster.$namespace" "-2nd" diff --git a/e2e-tests/self-healing-chaos/run b/e2e-tests/self-healing-chaos/run index 35f30f4308..fb73c708c4 100755 --- a/e2e-tests/self-healing-chaos/run +++ b/e2e-tests/self-healing-chaos/run @@ -74,11 +74,8 @@ kill_pod() { local pod=$1 local old_resourceVersion=$(kubectl get pod $pod -ojson | jq '.metadata.resourceVersion' | tr -d '"') - yq eval ' - .metadata.name = "chaos-cluster-pod-kill" | - del(.spec.selector.pods.test-namespace) | - .spec.selector.pods.'$namespace'[0] = "'$pod'"' $conf_dir/chaos-pod-kill.yml \ - | kubectl apply -f - + yq eval '.metadata.name = "chaos-cluster-pod-kill" | del(.spec.selector.pods.test-namespace) | .spec.selector.pods.'$namespace'[0] = "'$pod'"' $conf_dir/chaos-pod-kill.yml | kubectl apply -f - + sleep 5 # check if all 3 Pods started From ee95241dd756a6264df421adca775e9af0b652d0 Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Thu, 21 Nov 2024 12:48:19 +0200 Subject: [PATCH 16/17] for upgrade-consistency and upgrade-consistency-sharded-tls tests, we also need to rename compare files in addition to update of run file content --- ...162.yml => service_some-name-cfg-1190.yml} | 0 ...162.yml => service_some-name-rs0-1190.yml} | 0 .../statefulset_some-name-cfg-1170.yml | 2 +- .../statefulset_some-name-cfg-1180.yml | 2 +- ... => statefulset_some-name-cfg-1190-oc.yml} | 2 +- ...yml => statefulset_some-name-cfg-1190.yml} | 2 +- .../statefulset_some-name-rs0-1170.yml | 2 +- .../statefulset_some-name-rs0-1180.yml | 2 +- ... => statefulset_some-name-rs0-1190-oc.yml} | 2 +- ...yml => statefulset_some-name-rs0-1190.yml} | 2 +- e2e-tests/upgrade-consistency-sharded-tls/run | 34 +++++++++---------- ...162.yml => service_some-name-rs0-1190.yml} | 0 ... 
=> statefulset_some-name-rs0-1190-oc.yml} | 0 ...yml => statefulset_some-name-rs0-1190.yml} | 0 .../conf/some-name-rs0.yml | 2 +- 15 files changed, 26 insertions(+), 26 deletions(-) rename e2e-tests/upgrade-consistency-sharded-tls/compare/{service_some-name-cfg-1162.yml => service_some-name-cfg-1190.yml} (100%) rename e2e-tests/upgrade-consistency-sharded-tls/compare/{service_some-name-rs0-1162.yml => service_some-name-rs0-1190.yml} (100%) rename e2e-tests/upgrade-consistency-sharded-tls/compare/{statefulset_some-name-cfg-1162-oc.yml => statefulset_some-name-cfg-1190-oc.yml} (99%) rename e2e-tests/upgrade-consistency-sharded-tls/compare/{statefulset_some-name-cfg-1162.yml => statefulset_some-name-cfg-1190.yml} (99%) rename e2e-tests/upgrade-consistency-sharded-tls/compare/{statefulset_some-name-rs0-1162-oc.yml => statefulset_some-name-rs0-1190-oc.yml} (99%) rename e2e-tests/upgrade-consistency-sharded-tls/compare/{statefulset_some-name-rs0-1162.yml => statefulset_some-name-rs0-1190.yml} (99%) rename e2e-tests/upgrade-consistency/compare/{service_some-name-rs0-1162.yml => service_some-name-rs0-1190.yml} (100%) rename e2e-tests/upgrade-consistency/compare/{statefulset_some-name-rs0-1162-oc.yml => statefulset_some-name-rs0-1190-oc.yml} (100%) rename e2e-tests/upgrade-consistency/compare/{statefulset_some-name-rs0-1162.yml => statefulset_some-name-rs0-1190.yml} (100%) diff --git a/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-cfg-1162.yml b/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-cfg-1190.yml similarity index 100% rename from e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-cfg-1162.yml rename to e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-cfg-1190.yml diff --git a/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-rs0-1162.yml b/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-rs0-1190.yml similarity index 100% rename from e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-rs0-1162.yml rename to e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-rs0-1190.yml diff --git a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1170.yml b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1170.yml index 6c10a1135c..a4aceed30f 100644 --- a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1170.yml +++ b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1170.yml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: annotations: {} - generation: 5 + generation: 3 labels: app.kubernetes.io/component: cfg app.kubernetes.io/instance: some-name diff --git a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1180.yml b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1180.yml index bfd388030a..6c10a1135c 100644 --- a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1180.yml +++ b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1180.yml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: annotations: {} - generation: 7 + generation: 5 labels: app.kubernetes.io/component: cfg app.kubernetes.io/instance: some-name diff --git a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1162-oc.yml b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1190-oc.yml similarity index 99% 
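PATCH 16's subject notes that the compare files have to be renamed along with the run file updates, and the rename list above is the result. A hypothetical one-liner for the mechanical part, assuming the renames are a pure 1162-to-1190 suffix shift as the similarity indexes suggest:

for f in e2e-tests/upgrade-consistency*/compare/*1162*.yml; do
	# ${f//1162/1190} rewrites the crVersion suffix inside the file name
	git mv "$f" "${f//1162/1190}"
done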
rename from e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1162-oc.yml
rename to e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1190-oc.yml
index e1b011548f..1640b7b614 100644
--- a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1162-oc.yml
+++ b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1190-oc.yml
@@ -2,7 +2,7 @@ apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   annotations: {}
-  generation: 3
+  generation: 7
   labels:
     app.kubernetes.io/component: cfg
     app.kubernetes.io/instance: some-name
diff --git a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1162.yml b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1190.yml
similarity index 99%
rename from e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1162.yml
rename to e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1190.yml
index a4aceed30f..bfd388030a 100644
--- a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1162.yml
+++ b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1190.yml
@@ -2,7 +2,7 @@ apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   annotations: {}
-  generation: 3
+  generation: 7
   labels:
     app.kubernetes.io/component: cfg
     app.kubernetes.io/instance: some-name
diff --git a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1170.yml b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1170.yml
index a0ce6a988b..abc93591b5 100644
--- a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1170.yml
+++ b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1170.yml
@@ -2,7 +2,7 @@ apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   annotations: {}
-  generation: 5
+  generation: 3
   labels:
     app.kubernetes.io/component: mongod
     app.kubernetes.io/instance: some-name
diff --git a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1180.yml b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1180.yml
index 148a09afe6..a0ce6a988b 100644
--- a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1180.yml
+++ b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1180.yml
@@ -2,7 +2,7 @@ apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   annotations: {}
-  generation: 7
+  generation: 5
   labels:
     app.kubernetes.io/component: mongod
     app.kubernetes.io/instance: some-name
diff --git a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1162-oc.yml b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1190-oc.yml
similarity index 99%
rename from e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1162-oc.yml
rename to e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1190-oc.yml
index 87ad33b990..794034ad9b 100644
--- a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1162-oc.yml
+++ b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1190-oc.yml
@@ -2,7 +2,7 @@ apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   annotations: {}
-  generation: 3
+  generation: 7
   labels:
     app.kubernetes.io/component: mongod
     app.kubernetes.io/instance: some-name
diff --git a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1162.yml b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1190.yml
similarity index 99%
rename from e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1162.yml
rename to e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1190.yml
index abc93591b5..148a09afe6 100644
--- a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1162.yml
+++ b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1190.yml
@@ -2,7 +2,7 @@ apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   annotations: {}
-  generation: 3
+  generation: 7
   labels:
     app.kubernetes.io/component: mongod
     app.kubernetes.io/instance: some-name
diff --git a/e2e-tests/upgrade-consistency-sharded-tls/run b/e2e-tests/upgrade-consistency-sharded-tls/run
index c8a98787b4..f0dc59e78b 100755
--- a/e2e-tests/upgrade-consistency-sharded-tls/run
+++ b/e2e-tests/upgrade-consistency-sharded-tls/run
@@ -25,7 +25,7 @@ main() {
 
 	deploy_cmctl
 
-	desc "create first PSMDB cluster 1.16.2 $CLUSTER"
+	desc "create first PSMDB cluster 1.17.0 $CLUSTER"
 	apply_cluster "$test_dir/conf/${CLUSTER}.yml"
 
 	desc 'check if Pod started'
@@ -52,14 +52,14 @@ main() {
 	compare_generation "3" "statefulset" "${CLUSTER}-cfg"
 
 	desc 'check if service and statefulset created with expected config'
-	compare_kubectl service/${CLUSTER}-rs0 "-1162"
-	compare_kubectl service/${CLUSTER}-cfg "-1162"
-	compare_kubectl statefulset/${CLUSTER}-rs0 "-1162"
-	compare_kubectl statefulset/${CLUSTER}-cfg "-1162"
+	compare_kubectl service/${CLUSTER}-rs0 "-1170"
+	compare_kubectl service/${CLUSTER}-cfg "-1170"
+	compare_kubectl statefulset/${CLUSTER}-rs0 "-1170"
+	compare_kubectl statefulset/${CLUSTER}-cfg "-1170"
 
-	desc 'test 1.17.0'
+	desc 'test 1.18.0'
 	kubectl_bin patch psmdb "${CLUSTER}" --type=merge --patch '{
-		"spec": {"crVersion":"1.17.0"}
+		"spec": {"crVersion":"1.18.0"}
 	}'
 	# Wait for at least one reconciliation
 	sleep 20
@@ -81,14 +81,14 @@ main() {
 	compare_generation "5" "statefulset" "${CLUSTER}-cfg"
 
 	desc 'check if service and statefulset created with expected config'
-	compare_kubectl service/${CLUSTER}-rs0 "-1170"
-	compare_kubectl service/${CLUSTER}-cfg "-1170"
-	compare_kubectl statefulset/${CLUSTER}-rs0 "-1170"
-	compare_kubectl statefulset/${CLUSTER}-cfg "-1170"
+	compare_kubectl service/${CLUSTER}-rs0 "-1180"
+	compare_kubectl service/${CLUSTER}-cfg "-1180"
+	compare_kubectl statefulset/${CLUSTER}-rs0 "-1180"
+	compare_kubectl statefulset/${CLUSTER}-cfg "-1180"
 
-	desc 'test 1.18.0'
+	desc 'test 1.19.0'
 	kubectl_bin patch psmdb "${CLUSTER}" --type=merge --patch '{
-		"spec": {"crVersion":"1.18.0"}
+		"spec": {"crVersion":"1.19.0"}
 	}'
 	# Wait for at least one reconciliation
 	sleep 20
@@ -110,10 +110,10 @@ main() {
 	compare_generation "7" "statefulset" "${CLUSTER}-cfg"
 
 	desc 'check if service and statefulset created with expected config'
-	compare_kubectl service/${CLUSTER}-rs0 "-1180"
-	compare_kubectl service/${CLUSTER}-cfg "-1180"
-	compare_kubectl statefulset/${CLUSTER}-rs0 "-1180"
-	compare_kubectl statefulset/${CLUSTER}-cfg "-1180"
+	compare_kubectl service/${CLUSTER}-rs0 "-1190"
+	compare_kubectl service/${CLUSTER}-cfg "-1190"
+	compare_kubectl statefulset/${CLUSTER}-rs0 "-1190"
+	compare_kubectl statefulset/${CLUSTER}-cfg "-1190"
 
 	destroy "$namespace"
diff --git a/e2e-tests/upgrade-consistency/compare/service_some-name-rs0-1162.yml b/e2e-tests/upgrade-consistency/compare/service_some-name-rs0-1190.yml
similarity index 100%
rename from e2e-tests/upgrade-consistency/compare/service_some-name-rs0-1162.yml
rename to e2e-tests/upgrade-consistency/compare/service_some-name-rs0-1190.yml
diff --git a/e2e-tests/upgrade-consistency/compare/statefulset_some-name-rs0-1162-oc.yml b/e2e-tests/upgrade-consistency/compare/statefulset_some-name-rs0-1190-oc.yml
similarity index 100%
rename from e2e-tests/upgrade-consistency/compare/statefulset_some-name-rs0-1162-oc.yml
rename to e2e-tests/upgrade-consistency/compare/statefulset_some-name-rs0-1190-oc.yml
diff --git a/e2e-tests/upgrade-consistency/compare/statefulset_some-name-rs0-1162.yml b/e2e-tests/upgrade-consistency/compare/statefulset_some-name-rs0-1190.yml
similarity index 100%
rename from e2e-tests/upgrade-consistency/compare/statefulset_some-name-rs0-1162.yml
rename to e2e-tests/upgrade-consistency/compare/statefulset_some-name-rs0-1190.yml
diff --git a/e2e-tests/upgrade-consistency/conf/some-name-rs0.yml b/e2e-tests/upgrade-consistency/conf/some-name-rs0.yml
index 67dec005fa..00f375a790 100644
--- a/e2e-tests/upgrade-consistency/conf/some-name-rs0.yml
+++ b/e2e-tests/upgrade-consistency/conf/some-name-rs0.yml
@@ -3,7 +3,7 @@ kind: PerconaServerMongoDB
 metadata:
   name: some-name
 spec:
-  crVersion: 1.16.2
+  crVersion: 1.17.0
   #platform: openshift
   image:
   imagePullPolicy: Always

From 07aac83f0222752fb823e68c5a412cb4a6785e8d Mon Sep 17 00:00:00 2001
From: Pavel Tankov <4014969+ptankov@users.noreply.github.com>
Date: Mon, 25 Nov 2024 18:08:59 +0200
Subject: [PATCH 17/17] remove hardcoded image references in configuration
 files for e2e tests

---
 e2e-tests/custom-replset-name/conf/some-name.yml          | 4 ++--
 e2e-tests/replset-overrides/conf/some-name-overridden.yml | 4 ++--
 e2e-tests/replset-overrides/conf/some-name.yml            | 4 ++--
 e2e-tests/replset-overrides/run                           | 6 +++---
 e2e-tests/serviceless-external-nodes/conf/external.yml    | 2 +-
 e2e-tests/serviceless-external-nodes/conf/main.yml        | 2 +-
 6 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/e2e-tests/custom-replset-name/conf/some-name.yml b/e2e-tests/custom-replset-name/conf/some-name.yml
index 9dd2b04398..d4d1c2e7b1 100644
--- a/e2e-tests/custom-replset-name/conf/some-name.yml
+++ b/e2e-tests/custom-replset-name/conf/some-name.yml
@@ -6,7 +6,7 @@ spec:
   crVersion: 1.16.0
   backup:
     enabled: true
-    image: percona/percona-backup-mongodb:2.0.4
+    image:
     pitr:
       enabled: false
     serviceAccountName: percona-server-mongodb-operator
@@ -33,7 +33,7 @@ spec:
       bucket: operator-testing
       prefix: psmdb
       endpointUrl: https://storage.googleapis.com
-  image: percona/percona-server-mongodb:4.4.10-11
+  image:
   imagePullPolicy: Always
   pmm:
     enabled: false
diff --git a/e2e-tests/replset-overrides/conf/some-name-overridden.yml b/e2e-tests/replset-overrides/conf/some-name-overridden.yml
index f38b41ee3a..bbcd26ea42 100644
--- a/e2e-tests/replset-overrides/conf/some-name-overridden.yml
+++ b/e2e-tests/replset-overrides/conf/some-name-overridden.yml
@@ -6,11 +6,11 @@ metadata:
   name: some-name
 spec:
   crVersion: 1.17.0
-  image: perconalab/percona-server-mongodb-operator:main-mongod7.0
+  image:
   imagePullPolicy: Always
   backup:
     enabled: true
-    image: perconalab/percona-server-mongodb-operator:main-backup
+    image:
     storages:
       minio:
         type: s3
diff --git a/e2e-tests/replset-overrides/conf/some-name.yml b/e2e-tests/replset-overrides/conf/some-name.yml
index 287f40f49b..5fc31e9978 100644
--- a/e2e-tests/replset-overrides/conf/some-name.yml
+++ b/e2e-tests/replset-overrides/conf/some-name.yml
@@ -6,11 +6,11 @@ metadata:
   name: some-name
 spec:
   crVersion: 1.17.0
-  image: perconalab/percona-server-mongodb-operator:main-mongod7.0
+  image:
   imagePullPolicy: Always
   backup:
     enabled: true
-    image: perconalab/percona-server-mongodb-operator:main-backup
+    image:
     storages:
       minio:
         type: s3
diff --git a/e2e-tests/replset-overrides/run b/e2e-tests/replset-overrides/run
index b226e70b81..95e76ea6dc 100755
--- a/e2e-tests/replset-overrides/run
+++ b/e2e-tests/replset-overrides/run
@@ -26,9 +26,9 @@ run_recovery_check() {
 }
 
 test_override_after_deploy() {
-	# kubectl_bin apply \
-	# 	-f ${conf_dir}/secrets_with_tls.yml \
-	# 	-f ${conf_dir}/minio-secret.yml
+	kubectl_bin apply \
+		-f ${conf_dir}/secrets_with_tls.yml \
+		-f ${conf_dir}/minio-secret.yml
 	echo "creating PSMDB cluster: ${cluster}"
 
 	apply_cluster ${test_dir}/conf/${cluster}.yml
diff --git a/e2e-tests/serviceless-external-nodes/conf/external.yml b/e2e-tests/serviceless-external-nodes/conf/external.yml
index 96f7840e55..f48b3388cf 100644
--- a/e2e-tests/serviceless-external-nodes/conf/external.yml
+++ b/e2e-tests/serviceless-external-nodes/conf/external.yml
@@ -10,7 +10,7 @@ spec:
     replsetSize: true
     mongosSize: true
   clusterServiceDNSMode: "Internal"
-  image: percona/percona-server-mongodb:6.0.4-3
+  image:
   imagePullPolicy: Always
   secrets:
     users: mydb-custom-users
diff --git a/e2e-tests/serviceless-external-nodes/conf/main.yml b/e2e-tests/serviceless-external-nodes/conf/main.yml
index 4a9b7e3942..af55e1a35a 100644
--- a/e2e-tests/serviceless-external-nodes/conf/main.yml
+++ b/e2e-tests/serviceless-external-nodes/conf/main.yml
@@ -9,7 +9,7 @@ spec:
   clusterServiceDNSMode: "Internal"
   tls:
     mode: allowTLS
-  image: percona/percona-server-mongodb:6.0.4-3
+  image:
   imagePullPolicy: Always
   secrets:
     users: mydb-custom-users