diff --git a/cw-rbac.yaml b/cw-rbac.yaml new file mode 100644 index 0000000000..b874b3a7a3 --- /dev/null +++ b/cw-rbac.yaml @@ -0,0 +1,171 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: percona-server-mongodb-operator +rules: +- apiGroups: + - psmdb.percona.com + resources: + - perconaservermongodbs + - perconaservermongodbs/status + - perconaservermongodbs/finalizers + - perconaservermongodbbackups + - perconaservermongodbbackups/status + - perconaservermongodbbackups/finalizers + - perconaservermongodbrestores + - perconaservermongodbrestores/status + - perconaservermongodbrestores/finalizers + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - pods + - pods/exec + - services + - persistentvolumeclaims + - secrets + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - apps + resources: + - deployments + - replicasets + - statefulsets + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - batch + resources: + - cronjobs + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - events.k8s.io + - "" + resources: + - events + verbs: + - get + - list + - watch + - create + - patch +- apiGroups: + - certmanager.k8s.io + - cert-manager.io + resources: + - issuers + - certificates + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - deletecollection +- apiGroups: + - net.gke.io + - multicluster.x-k8s.io + resources: + - serviceexports + - serviceimports + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - deletecollection +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: percona-server-mongodb-operator +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: service-account-percona-server-mongodb-operator +subjects: +- kind: ServiceAccount + name: percona-server-mongodb-operator + namespace: "psmdb-operator" +roleRef: + kind: ClusterRole + name: percona-server-mongodb-operator + apiGroup: rbac.authorization.k8s.io
diff --git a/e2e-tests/arbiter/run b/e2e-tests/arbiter/run index 2721feae74..83af287fd8 100755 --- a/e2e-tests/arbiter/run +++ b/e2e-tests/arbiter/run @@ -74,9 +74,8 @@ main() { deploy_cert_manager desc 'create secrets and start client' - kubectl_bin apply \ - -f $conf_dir/client.yml \ - -f $conf_dir/secrets.yml + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml desc 'check arbiter without service-per-pod' check_cr_config "arbiter-rs0"
diff --git a/e2e-tests/balancer/run b/e2e-tests/balancer/run index 94e03cfcc5..2c1cfe585f 100755 --- a/e2e-tests/balancer/run +++ b/e2e-tests/balancer/run @@ -55,9 +55,9 @@ main() { desc 'create first PSMDB cluster' cluster="some-name" - kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" + + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml if version_gt "1.19" && [ $EKS -ne 1 ]; then $sed 's/docker/runc/g' "$conf_dir/container-rc.yaml" | kubectl_bin apply -f -
diff --git a/e2e-tests/conf/client.yml b/e2e-tests/conf/client.yml index cc2a6b1e5f..1449675378 100644 --- a/e2e-tests/conf/client.yml +++ b/e2e-tests/conf/client.yml @@ -15,7 +15,7 @@ spec: terminationGracePeriodSeconds: 10 containers: - name: psmdb-client - image: percona/percona-server-mongodb:4.4 + image: percona/percona-server-mongodb:4.4-multi imagePullPolicy: Always command: - sleep
diff --git a/e2e-tests/conf/client_with_tls.yml b/e2e-tests/conf/client_with_tls.yml index 4b6f5e829e..bd259c26ea 100644 --- a/e2e-tests/conf/client_with_tls.yml +++ b/e2e-tests/conf/client_with_tls.yml @@ -15,7 +15,7 @@ spec: terminationGracePeriodSeconds: 10 containers: - name: psmdb-client - image: percona/percona-server-mongodb:4.4 + image: percona/percona-server-mongodb:4.4-multi imagePullPolicy: Always command: ["/bin/bash","-c","cat /etc/mongodb-ssl/tls.key /etc/mongodb-ssl/tls.crt > /tmp/tls.pem && sleep 100500"] volumeMounts:
diff --git a/e2e-tests/cross-site-sharded/run b/e2e-tests/cross-site-sharded/run index 14a3c99765..f8d36a143b 100755 --- a/e2e-tests/cross-site-sharded/run +++ b/e2e-tests/cross-site-sharded/run @@ -39,9 +39,8 @@ desc "create main cluster" create_infra "$namespace" desc 'create secrets and start client' -kubectl_bin apply \ - -f "$conf_dir/client.yml" \ - -f "$test_dir/conf/secrets.yml" +kubectl_bin apply -f $test_dir/conf/secrets.yml +apply_client $conf_dir/client.yml desc "create main PSMDB cluster $main_cluster."
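+# apply_cluster renders the CR through cat_config (see e2e-tests/functions), which fills in the image fields and, when $ARCH is arm64, appends kubernetes.io/arch tolerations to the replset, sharding, arbiter and nonvoting specs before applying.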
apply_cluster "$test_dir/conf/$main_cluster.yml" @@ -112,8 +111,7 @@ create_namespace $replica_namespace 0 deploy_operator desc 'start client' -kubectl_bin apply \ - -f "$conf_dir/client.yml" +apply_client $conf_dir/client.yml desc "copy secrets from main to replica namespace and create all of them" kubectl get secret ${main_cluster}-secrets -o yaml -n ${namespace} \ diff --git a/e2e-tests/custom-replset-name/conf/some-name.yml b/e2e-tests/custom-replset-name/conf/some-name.yml index 9dd2b04398..d4d1c2e7b1 100644 --- a/e2e-tests/custom-replset-name/conf/some-name.yml +++ b/e2e-tests/custom-replset-name/conf/some-name.yml @@ -6,7 +6,7 @@ spec: crVersion: 1.16.0 backup: enabled: true - image: percona/percona-backup-mongodb:2.0.4 + image: pitr: enabled: false serviceAccountName: percona-server-mongodb-operator @@ -33,7 +33,7 @@ spec: bucket: operator-testing prefix: psmdb endpointUrl: https://storage.googleapis.com - image: percona/percona-server-mongodb:4.4.10-11 + image: imagePullPolicy: Always pmm: enabled: false diff --git a/e2e-tests/custom-replset-name/run b/e2e-tests/custom-replset-name/run index c50d924433..9808298ab2 100755 --- a/e2e-tests/custom-replset-name/run +++ b/e2e-tests/custom-replset-name/run @@ -10,7 +10,11 @@ create_infra $namespace apply_s3_storage_secrets deploy_minio -kubectl_bin apply -f $conf_dir/secrets.yml -f $conf_dir/client.yml -f $conf_dir/minio-secret.yml +desc 'create secrets and start client' +kubectl_bin apply -f $conf_dir/secrets.yml +kubectl_bin apply -f $conf_dir/minio-secret.yml +apply_client $conf_dir/client.yml + cluster="some-name" desc 'create first PSMDB cluster' diff --git a/e2e-tests/custom-tls/run b/e2e-tests/custom-tls/run index 9e39c37357..d4d8e91a7f 100755 --- a/e2e-tests/custom-tls/run +++ b/e2e-tests/custom-tls/run @@ -32,8 +32,8 @@ main() { destroy_cert_manager || true # We need to be sure that we are getting certificates created by the operator, not by cert-manager desc 'create secrets and start client' - kubectl_bin apply -f "$conf_dir/secrets.yml" - kubectl_bin apply -f "$conf_dir/client_with_tls.yml" + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client_with_tls.yml cluster="some-name" desc "create first PSMDB cluster $cluster" diff --git a/e2e-tests/custom-users-roles-sharded/run b/e2e-tests/custom-users-roles-sharded/run index 22025b0854..34ae025d96 100755 --- a/e2e-tests/custom-users-roles-sharded/run +++ b/e2e-tests/custom-users-roles-sharded/run @@ -42,10 +42,9 @@ create_infra "$namespace" mongosUri="userAdmin:userAdmin123456@$cluster-mongos.$namespace" desc 'create secrets and start client' -kubectl_bin apply -f "${conf_dir}/client.yml" \ - -f "${conf_dir}/secrets.yml" \ - -f "${test_dir}/conf/app-user-secrets.yml" - +kubectl_bin apply -f $conf_dir/secrets.yml +kubectl_bin apply -f $test_dir/conf/app-user-secrets.yml +apply_client $conf_dir/client.yml apply_s3_storage_secrets if version_gt "1.19" && [ $EKS -ne 1 ]; then @@ -89,7 +88,7 @@ kubectl_bin patch psmdb ${cluster} --type=merge --patch '{ "key": "userTwoPassKey" }, "roles": [ - {"db":"admin","name":"userAdminAnyDatabase"}, + {"db":"admin","name":"userAdminAnyDatabase"}, {"db":"admin","name":"clusterAdmin"} ] } diff --git a/e2e-tests/custom-users-roles/run b/e2e-tests/custom-users-roles/run index 965db48b65..ac72f5b85d 100755 --- a/e2e-tests/custom-users-roles/run +++ b/e2e-tests/custom-users-roles/run @@ -31,9 +31,9 @@ cluster="some-name-rs0" create_infra $namespace desc 'create secrets and start client' -kubectl_bin apply -f "${conf_dir}/client.yml" 
\ - -f "${conf_dir}/secrets.yml" \ - -f "${test_dir}/conf/app-user-secrets.yml" +kubectl_bin apply -f $conf_dir/secrets.yml +kubectl_bin apply -f $test_dir/conf/app-user-secrets.yml +apply_client $conf_dir/client.yml mongoUri="userAdmin:userAdmin123456@$cluster.$namespace" @@ -61,7 +61,7 @@ kubectl_bin patch psmdb ${psmdb} --type=merge --patch '{ "key": "userTwoPassKey" }, "roles": [ - {"db":"admin","name":"userAdminAnyDatabase"}, + {"db":"admin","name":"userAdminAnyDatabase"}, {"db":"admin","name":"clusterAdmin"} ] } diff --git a/e2e-tests/data-at-rest-encryption/run b/e2e-tests/data-at-rest-encryption/run index 44982a0247..54901f7c06 100755 --- a/e2e-tests/data-at-rest-encryption/run +++ b/e2e-tests/data-at-rest-encryption/run @@ -13,7 +13,8 @@ deploy_minio apply_s3_storage_secrets desc 'create secrets and start client' -kubectl_bin apply -f "$conf_dir/secrets.yml" -f "$conf_dir/client.yml" +kubectl_bin apply -f $conf_dir/secrets.yml +apply_client $conf_dir/client.yml cluster='some-name' desc "create PSMDB cluster $cluster" @@ -57,10 +58,7 @@ sleep 5 desc "check backup and restore -- minio" backup_dest_minio=$(get_backup_dest "$backup_name_minio") -retry 3 8 kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ - /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ - /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://${backup_dest_minio}/rs0/ \ - | grep myApp.test.gz +retry 3 8 aws_cli "s3 ls s3://${backup_dest_minio}/rs0/" | grep "myApp.test.gz" run_mongos 'use myApp\n db.test.insert({ x: 100501 })' "myApp:myPass@$cluster-mongos.$namespace" compare_mongos_cmd "find" "myApp:myPass@$cluster-mongos.$namespace" "-2nd" run_restore "$backup_name_minio" diff --git a/e2e-tests/data-sharded/run b/e2e-tests/data-sharded/run index 5c77ed5a78..1815c38aea 100755 --- a/e2e-tests/data-sharded/run +++ b/e2e-tests/data-sharded/run @@ -29,18 +29,18 @@ check_rs_proper_component_deletion() { } main() { - if [[ ${IMAGE_MONGOD} == *"percona-server-mongodb-operator"* ]]; then - MONGO_VER=$(echo -n "${IMAGE_MONGOD}" | $sed -r 's/.*([0-9].[0-9])$/\1/') - else - MONGO_VER=$(echo -n "${IMAGE_MONGOD}" | $sed -r 's/.*:([0-9]+\.[0-9]+).*$/\1/') - fi + # if [[ ${IMAGE_MONGOD} == *"percona-server-mongodb-operator"* ]]; then + # MONGO_VER=$(echo -n "${IMAGE_MONGOD}" | $sed -r 's/.*([0-9].[0-9])$/\1/') + # else + # MONGO_VER=$(echo -n "${IMAGE_MONGOD}" | $sed -r 's/.*:([0-9]+\.[0-9]+).*$/\1/') + # fi create_infra "$namespace" deploy_cert_manager desc 'create secrets and start client' - kubectl_bin apply -f "$conf_dir/secrets.yml" - kubectl_bin apply -f "$conf_dir/client_with_tls.yml" + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client_with_tls.yml cluster="some-name" desc "create first PSMDB cluster $cluster" diff --git a/e2e-tests/default-cr/run b/e2e-tests/default-cr/run index a0c80c3b78..e5f35d721b 100755 --- a/e2e-tests/default-cr/run +++ b/e2e-tests/default-cr/run @@ -48,26 +48,46 @@ function main() { desc 'create secrets and start client' kubectl_bin apply -f $deploy_dir/secrets.yaml - kubectl_bin apply -f $conf_dir/client.yml + apply_client $conf_dir/client.yml desc "create first PSMDB cluster $cluster" kubectl_bin apply ${OPERATOR_NS:+-n $OPERATOR_NS} --server-side --force-conflicts -f $deploy_dir/crd.yaml + + + local temp_operator_yaml="$(mktemp)" + if [ -n "$OPERATOR_NS" ]; then apply_rbac cw-rbac kubectl_bin apply -n ${OPERATOR_NS} -f $deploy_dir/cw-operator.yaml else apply_rbac rbac 
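+		# Render operator.yaml with telemetry disabled; on arm64 the rendered manifest additionally gets a kubernetes.io/arch toleration before being applied.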
- yq eval '((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true")' "$deploy_dir/operator.yaml" \ - | kubectl_bin apply -f - + yq eval '((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true")' "$deploy_dir/operator.yaml" > $temp_operator_yaml + + if [[ "$ARCH" == "arm64" ]]; then + yq eval -i '(.spec.template.spec.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}])' $temp_operator_yaml + fi + + kubectl_bin apply -f $temp_operator_yaml fi + local temp_cr="$(mktemp)" yq eval '.spec.upgradeOptions.versionServiceEndpoint = "https://check-dev.percona.com" | .spec.replsets[].affinity.antiAffinityTopologyKey = "none" | .spec.replsets[].nonvoting.affinity.antiAffinityTopologyKey = "none" | .spec.replsets[].arbiter.affinity.antiAffinityTopologyKey = "none" | .spec.sharding.configsvrReplSet.affinity.antiAffinityTopologyKey = "none" | - .spec.sharding.mongos.affinity.antiAffinityTopologyKey = "none"' $deploy_dir/cr.yaml \ - | kubectl_bin apply -f - + .spec.sharding.mongos.affinity.antiAffinityTopologyKey = "none"' $deploy_dir/cr.yaml > $temp_cr + + if [[ "$ARCH" == "arm64" ]]; then + yq eval '.spec.replsets[].tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | + (.spec | select(has("sharding"))).sharding.configsvrReplSet.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | + (.spec | select(has("sharding"))).sharding.mongos.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | + (.spec.replsets[] | select(has("arbiter"))).arbiter.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | + (.spec.replsets[] | select(has("nonvoting"))).nonvoting.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}]' $temp_cr | + kubectl_bin apply -f - + else + kubectl_bin apply -f $temp_cr + fi desc 'check if all 3 Pods started' wait_cluster_consistency $cluster 70 @@ -137,7 +157,20 @@ function main() { cluster="minimal-cluster" yq eval '.metadata.name = "'${cluster}'"' $deploy_dir/secrets.yaml | kubectl_bin apply -f - - yq eval '.spec.upgradeOptions.versionServiceEndpoint = "https://check-dev.percona.com"' $deploy_dir/cr-minimal.yaml | kubectl_bin apply -f - + local temp_cr_minimal="$(mktemp)" + yq eval '.spec.upgradeOptions.versionServiceEndpoint = "https://check-dev.percona.com"' $deploy_dir/cr-minimal.yaml > $temp_cr_minimal + + if [[ "$ARCH" == "arm64" ]]; then + yq eval '.spec.replsets[].tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | + (.spec | select(has("sharding"))).sharding.configsvrReplSet.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | + (.spec | select(has("sharding"))).sharding.mongos.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | + (.spec.replsets[] | select(has("arbiter"))).arbiter.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | + (.spec.replsets[] | select(has("nonvoting"))).nonvoting.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}]' $temp_cr_minimal | + kubectl_bin apply -f - + else + kubectl_bin apply -f $temp_cr_minimal + fi + desc 
'check if all Pods started' wait_cluster_consistency "${cluster}" diff --git a/e2e-tests/demand-backup-eks-credentials/run b/e2e-tests/demand-backup-eks-credentials/run index 96ffed793e..4a59214f9c 100755 --- a/e2e-tests/demand-backup-eks-credentials/run +++ b/e2e-tests/demand-backup-eks-credentials/run @@ -14,9 +14,8 @@ fi create_infra $namespace desc 'create secrets and start client' -kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" +kubectl_bin apply -f $conf_dir/secrets.yml +apply_client $conf_dir/client.yml cluster="some-name-rs0" desc "create first PSMDB cluster $cluster" diff --git a/e2e-tests/demand-backup-physical-sharded/run b/e2e-tests/demand-backup-physical-sharded/run index 4cfb61bc15..afb79a8716 100755 --- a/e2e-tests/demand-backup-physical-sharded/run +++ b/e2e-tests/demand-backup-physical-sharded/run @@ -66,11 +66,13 @@ apply_s3_storage_secrets ### Case 1: Backup and restore on sharded cluster desc 'Testing on sharded cluster' -echo "Creating PSMDB cluster" +desc 'create secrets and start client' +kubectl_bin apply -f $conf_dir/secrets.yml +apply_client $conf_dir/client_with_tls.yml + cluster="some-name" -kubectl_bin apply -f "${conf_dir}/secrets.yml" -apply_cluster "${test_dir}/conf/${cluster}-sharded.yml" -kubectl_bin apply -f "${conf_dir}/client_with_tls.yml" +desc "create first PSMDB cluster $cluster" +apply_cluster $test_dir/conf/$cluster-sharded.yml echo "check if all pods started" wait_for_running ${cluster}-rs0 3 diff --git a/e2e-tests/demand-backup-physical/run b/e2e-tests/demand-backup-physical/run index 4ce2ea040d..cd53be1ebc 100755 --- a/e2e-tests/demand-backup-physical/run +++ b/e2e-tests/demand-backup-physical/run @@ -58,11 +58,13 @@ apply_s3_storage_secrets desc 'Testing on not sharded cluster' +desc 'create secrets and start client' +kubectl_bin apply -f $conf_dir/secrets.yml +apply_client $conf_dir/client_with_tls.yml + echo "Creating PSMDB cluster" cluster="some-name" -kubectl_bin apply -f "${conf_dir}/secrets.yml" -apply_cluster "${test_dir}/conf/${cluster}.yml" -kubectl_bin apply -f "${conf_dir}/client_with_tls.yml" +apply_cluster $test_dir/conf/$cluster.yml echo "check if all pods started" wait_for_running ${cluster}-rs0 3 diff --git a/e2e-tests/demand-backup-sharded/run b/e2e-tests/demand-backup-sharded/run index 94456ba08a..756ad88e8f 100755 --- a/e2e-tests/demand-backup-sharded/run +++ b/e2e-tests/demand-backup-sharded/run @@ -19,11 +19,9 @@ create_infra "$namespace" deploy_minio -desc 'create first PSMDB cluster' -cluster="some-name" -kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" +desc 'create secrets and start client' +kubectl_bin apply -f $conf_dir/secrets.yml +apply_client $conf_dir/client.yml apply_s3_storage_secrets if version_gt "1.19" && [ $EKS -ne 1 ]; then @@ -34,6 +32,8 @@ else kubectl_bin apply -f "$conf_dir/container-rc.yaml" fi +desc 'create first PSMDB cluster' +cluster="some-name" apply_cluster "$test_dir/conf/$cluster-rs0.yml" desc 'check if all 3 Pods started' wait_for_running $cluster-rs0 3 @@ -146,10 +146,18 @@ fi desc 'check backup and restore -- minio' backup_dest_minio=$(get_backup_dest "$backup_name_minio") -kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ - /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ - /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls "s3://${backup_dest_minio}/rs0/" \ - | grep "myApp.test.gz" + +retry=0 +until aws_cli "s3 ls 
s3://$backup_dest_minio/rs0/" | grep "myApp.test.gz"; do + if [[ $retry -ge 10 ]]; then + echo "Max retry count $retry reached. File myApp.test.gz wasn't found on s3://$backup_dest_minio/rs0/" + exit 1 + fi + ((retry += 1)) + echo -n . + sleep 5 +done + insert_data_mongos "100501" "myApp" insert_data_mongos "100501" "myApp1" insert_data_mongos "100501" "myApp2" @@ -161,10 +169,7 @@ check_data desc 'delete backup and check if it is removed from bucket -- minio' kubectl_bin delete psmdb-backup --all -backup_exists=$(kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ - /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ - /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/ \ - | grep -c ${backup_dest_minio}_ | cat) +backup_exists=$(aws_cli "s3 ls s3://operator-testing/" | grep -c ${backup_dest_minio}_ | cat) if [[ $backup_exists -eq 1 ]]; then echo "Backup was not removed from bucket -- minio" exit 1 diff --git a/e2e-tests/demand-backup/run b/e2e-tests/demand-backup/run index 4c8e810e89..0b6e13201b 100755 --- a/e2e-tests/demand-backup/run +++ b/e2e-tests/demand-backup/run @@ -116,11 +116,11 @@ create_infra $namespace deploy_minio -desc 'create secrets and start client' cluster="some-name-rs0" -kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" + +desc 'create secrets and start client' +kubectl_bin apply -f $conf_dir/secrets.yml +apply_client $conf_dir/client.yml apply_s3_storage_secrets @@ -215,10 +215,7 @@ fi desc 'check backup and restore -- minio' backup_dest_minio=$(get_backup_dest "$backup_name_minio") -kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ - /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ - /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://${backup_dest_minio}/rs0/ \ - | grep myApp.test.gz +aws_cli "s3 ls s3://${backup_dest_minio}/rs0/" | grep "myApp.test.gz" run_recovery_check "$backup_name_minio" "$cluster" run_mongo \ @@ -250,10 +247,7 @@ run_recovery_check_bkp_source "$backup_name_minio" "$backup_dest_minio" "$cluste desc 'delete backup and check if it is removed from bucket -- minio' kubectl_bin delete psmdb-backup --all -backup_exists=$(kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ - /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ - /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/ \ - | grep -c ${backup_dest_minio} | cat) +backup_exists=$(aws_cli "s3 ls s3://operator-testing/" | grep -c ${backup_dest_minio} | cat) if [[ $backup_exists -eq 1 ]]; then echo "Backup was not removed from bucket -- minio" exit 1 @@ -285,10 +279,7 @@ sleep 60 desc 'delete backup and check if it is removed from bucket -- minio' kubectl_bin delete psmdb-backup --all -backup_exists=$(kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ - /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ - /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/ \ - | grep -c ${backup_dest_minio} | cat) +backup_exists=$(aws_cli "s3 ls s3://operator-testing/" | grep -c ${backup_dest_minio} | cat) if [[ $backup_exists -eq 1 ]]; then echo "Backup was not removed from bucket -- minio" exit 1 diff --git 
a/e2e-tests/expose-sharded/run b/e2e-tests/expose-sharded/run index 7a20d7cedd..81d258ba83 100755 --- a/e2e-tests/expose-sharded/run +++ b/e2e-tests/expose-sharded/run @@ -91,12 +91,10 @@ function expose_cluster() { function main() { create_infra "$namespace" - desc 'create first PSMDB cluster' - cluster="some-name" - kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" + desc 'create secrets and start client' + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml apply_s3_storage_secrets if version_gt "1.19" && [ $EKS -ne 1 ]; then @@ -107,6 +105,8 @@ function main() { kubectl_bin apply -f "$conf_dir/container-rc.yaml" fi + desc 'create first PSMDB cluster' + cluster="some-name" apply_cluster "$test_dir/conf/$cluster-rs0.yml" desc 'check if all 3 Pods started' wait_for_running $cluster-rs0 3 diff --git a/e2e-tests/finalizer/run b/e2e-tests/finalizer/run index 2bd0acf3d9..ad8bfe5b6e 100755 --- a/e2e-tests/finalizer/run +++ b/e2e-tests/finalizer/run @@ -7,13 +7,12 @@ test_dir=$(realpath "$(dirname "$0")") . "${test_dir}/../functions" create_infra "$namespace" -cluster="some-name" desc 'create secrets and start client' -kubectl_bin apply \ - -f $conf_dir/secrets_with_tls.yml \ - -f $conf_dir/client.yml +kubectl_bin apply -f $conf_dir/secrets_with_tls.yml +apply_client $conf_dir/client.yml +cluster="some-name" apply_cluster "$test_dir/conf/$cluster.yml" desc 'check if all 3 Pods started' wait_for_running "$cluster-rs0" 3 diff --git a/e2e-tests/functions b/e2e-tests/functions index ad78834815..b1724a85c6 100755 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -28,6 +28,25 @@ conf_dir=$(realpath $test_dir/../conf || :) src_dir=$(realpath $test_dir/../..) logs_dir=$(realpath $test_dir/../logs || :) +# Detect the cluster architecture once at load time; the e2e helpers read $ARCH to decide whether arm64 nodeSelectors/tolerations are needed, so a mixed-architecture cluster is rejected. +archs=$(kubectl get nodes -o jsonpath='{range .items[*]}{.status.nodeInfo.architecture}{" "}{end}') + +first_arch=$(echo $archs | awk '{print $1}') + +if [[ "$first_arch" == "amd64" || "$first_arch" == "arm64" ]]; then + for arch in $archs; do + if [[ "$arch" != "$first_arch" ]]; then + echo "All nodes in the cluster must have the same architecture as the first node: $first_arch" + exit 1 + fi + done + ARCH="$first_arch" + echo "================================== Using ARCH=$ARCH ==================================" +else + echo "Unsupported architecture: $first_arch" + exit 1 +fi + if [[ ${ENABLE_LOGGING} == "true" ]]; then if [ ! -d "${logs_dir}" ]; then mkdir "${logs_dir}" fi @@ -386,6 +404,8 @@ deploy_operator() { desc 'start PSMDB operator' local cr_file + local temp_operator_yaml="$(mktemp)" + if [ -f "${test_dir}/conf/crd.yaml" ]; then cr_file="${test_dir}/conf/crd.yaml" else cr_file="${src_dir}/deploy/crd.yaml" fi kubectl_bin apply --server-side --force-conflicts -f "${cr_file}" - if [ -n "$OPERATOR_NS" ]; then + + if [[ "$OPERATOR_NS" ]]; then apply_rbac cw-rbac yq eval ' (.spec.template.spec.containers[].image = "'${IMAGE}'") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | - ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' ${src_dir}/deploy/cw-operator.yaml \ - | kubectl_bin apply -f - + ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' ${src_dir}/deploy/cw-operator.yaml > $temp_operator_yaml else apply_rbac rbac yq eval ' (.spec.template.spec.containers[].image = "'${IMAGE}'") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | - ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' ${src_dir}/deploy/operator.yaml > $temp_operator_yaml + fi + + if [[ "$ARCH" == "arm64" ]]; then + yq eval -i '(.spec.template.spec.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}])' $temp_operator_yaml fi + + kubectl_bin apply -f $temp_operator_yaml + sleep 2 wait_pod $(get_operator_pod) } @@ -427,12 +453,29 @@ deploy_operator_gh() { curl -s "https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/${operator_yaml}.yaml" >"${tmp_dir}/${operator_yaml}_${git_tag}.yaml" $sed -i -e "s^image: .*^image: ${IMAGE}^" "${tmp_dir}/${operator_yaml}_${git_tag}.yaml" + + if [[ "$ARCH" == "arm64" ]]; then + yq eval -i '(.spec.template.spec.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}])' "${tmp_dir}/${operator_yaml}_${git_tag}.yaml" + fi + kubectl_bin apply -f "${tmp_dir}/${operator_yaml}_${git_tag}.yaml" sleep 2 wait_pod "$(get_operator_pod)" } +aws_cli() { + local cmd=$1 + + kubectl_bin run -i --rm aws-cli --image=amazon/aws-cli \ + --restart=Never \ + --env=AWS_ACCESS_KEY_ID=some-access-key \ + --env=AWS_SECRET_ACCESS_KEY=some-secret-key \ + --env=AWS_DEFAULT_REGION=us-east-1 \ + --overrides='{"apiVersion": "v1","spec": {"tolerations": [{"key": "kubernetes.io/arch","operator": "Equal","value": "arm64","effect": "NoSchedule"}]}}' \ + -- --endpoint-url http://minio-service:9000 $cmd +} + deploy_minio() { desc 'install Minio' helm uninstall minio-service || : @@ -453,7 +496,16 @@ deploy_minio() { --set configPathmc=/tmp/.minio/ \ --set persistence.size=2G \ --set securityContext.enabled=false \ + --set 'tolerations[0].key=kubernetes.io/arch' \ + --set 'tolerations[0].operator=Equal' \ + --set 'tolerations[0].value=arm64' \ + --set 'tolerations[0].effect=NoSchedule' \ + --set 'postJob.tolerations[0].key=kubernetes.io/arch' \ + --set 'postJob.tolerations[0].operator=Equal' \ + --set 'postJob.tolerations[0].value=arm64' \ + --set 'postJob.tolerations[0].effect=NoSchedule' \ minio/minio + MINIO_POD=$(kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}') wait_pod $MINIO_POD @@ -462,9 +514,7 @@ deploy_minio() { fi # create bucket - kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ - bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ - /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + aws_cli "s3 mb s3://operator-testing" } deploy_vault() { @@ -488,9 +538,23 @@ deploy_vault() { --set injector.agentImage.repository="docker.io/hashicorp/vault" \ --set server.image.repository="docker.io/hashicorp/vault" else - retry 10 60 helm install $name hashicorp/vault \ - --disable-openapi-validation \ - --set dataStorage.enabled=false + if [[ "$ARCH" == "arm64" ]]; then + helm install vault-service hashicorp/vault \ + --disable-openapi-validation \ + --set dataStorage.enabled=false \ + --set server.tolerations[0].key=kubernetes.io/arch \ + --set server.tolerations[0].operator=Equal \ + --set server.tolerations[0].value=arm64 \ + --set server.tolerations[0].effect=NoSchedule \ + --set injector.tolerations[0].key=kubernetes.io/arch \ + --set injector.tolerations[0].operator=Equal \ + --set injector.tolerations[0].value=arm64 \ + --set injector.tolerations[0].effect=NoSchedule + else + retry 10 60 helm install $name hashicorp/vault \ + 
--disable-openapi-validation \ + --set dataStorage.enabled=false + fi fi until kubectl_bin get pod/vault-service-0 -o jsonpath='{.status.phase}' 2>/dev/null | grep 'Running'; do @@ -529,7 +593,18 @@ deploy_chaos_mesh() { desc 'install chaos-mesh' helm repo add chaos-mesh https://charts.chaos-mesh.org - helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=${chaos_mesh_ns} --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version 2.5.1 + # helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=${chaos_mesh_ns} --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version 2.5.1 + helm install chaos-mesh chaos-mesh/chaos-mesh \ + --namespace=${chaos_mesh_ns} \ + --set chaosDaemon.runtime=containerd \ + --set chaosDaemon.socketPath=/run/containerd/containerd.sock \ + --set dashboard.create=false \ + --set controllerManager.tolerations[0].key="kubernetes.io/arch" \ + --set controllerManager.tolerations[0].operator="Equal" \ + --set controllerManager.tolerations[0].value="arm64" \ + --set controllerManager.tolerations[0].effect="NoSchedule" \ + --version 2.5.1 + sleep 10 } @@ -709,6 +784,10 @@ compare_kubectl() { (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - >${new_result} + if [[ "$ARCH" == "arm64" ]]; then + yq -i eval 'del(.spec.template.spec.tolerations)' ${new_result} + fi + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' ${new_result} if version_gt "1.22"; then @@ -921,9 +1000,52 @@ deploy_cert_manager() { kubectl_bin create namespace cert-manager || : kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true || : - kubectl_bin apply -f "https://github.com/cert-manager/cert-manager/releases/download/v${CERT_MANAGER_VER}/cert-manager.yaml" --validate=false || : 2>/dev/null + + helm uninstall cert-manager --namespace cert-manager || : + helm repo remove jetstack || : + helm repo add jetstack https://charts.jetstack.io + + # Check and delete existing conflicting resources + kubectl_bin delete role cert-manager-cainjector:leaderelection -n kube-system --ignore-not-found=true + kubectl_bin delete role cert-manager:leaderelection -n kube-system --ignore-not-found=true + kubectl_bin delete rolebinding cert-manager-cainjector:leaderelection -n kube-system --ignore-not-found=true + kubectl_bin delete rolebinding cert-manager:leaderelection -n kube-system --ignore-not-found=true + + if [[ "$ARCH" == "arm64" ]]; then + helm install cert-manager \ + --namespace cert-manager \ + --version v${CERT_MANAGER_VER} \ + --set nodeSelector."kubernetes\.io/arch"=arm64 \ + --set tolerations[0].key="kubernetes.io/arch" \ + --set tolerations[0].operator="Equal" \ + --set tolerations[0].value="arm64" \ + --set tolerations[0].effect="NoSchedule" \ + --set startupapicheck.nodeSelector."kubernetes\.io/arch"=arm64 \ + --set startupapicheck.tolerations[0].key="kubernetes.io/arch" \ + --set startupapicheck.tolerations[0].operator="Equal" \ + --set startupapicheck.tolerations[0].value="arm64" \ + --set startupapicheck.tolerations[0].effect="NoSchedule" \ + --set cainjector.nodeSelector."kubernetes\.io/arch"=arm64 \ + --set cainjector.tolerations[0].key="kubernetes.io/arch" \ + --set cainjector.tolerations[0].operator="Equal" \ + --set cainjector.tolerations[0].value="arm64" \ + --set cainjector.tolerations[0].effect="NoSchedule" \ + --set 
webhook.nodeSelector."kubernetes\.io/arch"=arm64 \ + --set webhook.tolerations[0].key="kubernetes.io/arch" \ + --set webhook.tolerations[0].operator="Equal" \ + --set webhook.tolerations[0].value="arm64" \ + --set webhook.tolerations[0].effect="NoSchedule" \ + --no-hooks \ + jetstack/cert-manager + else + helm install cert-manager \ + --namespace cert-manager \ + --version v${CERT_MANAGER_VER} \ + --no-hooks \ + jetstack/cert-manager + fi + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready - sleep 120 } delete_crd() { @@ -1104,25 +1226,39 @@ EOF } cat_config() { - cat "$1" \ - | yq eval '(.spec | select(.image == null)).image = "'"$IMAGE_MONGOD"'"' \ - | yq eval '(.spec | select(has("pmm"))).pmm.image = "'"$IMAGE_PMM_CLIENT"'"' \ - | yq eval '(.spec | select(has("initImage"))).initImage = "'"$IMAGE"'"' \ - | yq eval '(.spec | select(has("backup"))).backup.image = "'"$IMAGE_BACKUP"'"' \ - | yq eval '.spec.upgradeOptions.apply="Never"' + local temp_cr="$(mktemp)" + + yq eval ' + (.spec | select(.image == null)).image = "'"$IMAGE_MONGOD"'" | + (.spec | select(has("pmm"))).pmm.image = "'"$IMAGE_PMM_CLIENT"'" | + (.spec | select(has("initImage"))).initImage = "'"$IMAGE"'" | + (.spec | select(has("backup"))).backup.image = "'"$IMAGE_BACKUP"'" | + .spec.upgradeOptions.apply="Never"' "$1" > $temp_cr + + if [[ "$ARCH" == "arm64" ]]; then + yq eval '.spec.replsets[].tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | + (.spec | select(has("sharding"))).sharding.configsvrReplSet.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | + (.spec | select(has("sharding"))).sharding.mongos.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | + (.spec.replsets[] | select(has("arbiter"))).arbiter.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}] | + (.spec.replsets[] | select(has("nonvoting"))).nonvoting.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}]' $temp_cr + else + cat $temp_cr + fi +} + +apply_client() { + if [[ "$ARCH" == "arm64" ]]; then + yq eval '.spec.template.spec.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}]' "$1" | kubectl_bin apply -f - + else + kubectl_bin apply -f "$1" + fi } apply_cluster() { - if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then - cat_config "$1" \ - | kubectl_bin apply -f - + if [[ "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]]; then + cat_config $1 | yq eval 'del(.spec.backup.tasks.[1]) | del(.spec.backup.tasks.[1]) | del(.spec.backup.tasks.[1])' - | kubectl_bin apply -f - else - cat_config "$1" \ - | yq eval ' - del(.spec.backup.tasks.[1]) | - del(.spec.backup.tasks.[1]) | - del(.spec.backup.tasks.[1])' - \ - | kubectl_bin apply -f - + cat_config $1 | kubectl_bin apply -f - fi } @@ -1322,7 +1458,7 @@ function run_simple_cli_inside_image() { local cli=${2} local pod_name=${RANDOM} - kubectl_bin -n default run ${pod_name} --image=${image} --restart=Never --command -- sleep infinity >/dev/null + kubectl_bin -n default run ${pod_name} --image=${image} --restart=Never --overrides='{"apiVersion": "v1","spec": {"tolerations": [{"key": "kubernetes.io/arch","operator": "Equal","value": "arm64","effect": "NoSchedule"}]}}' --command -- sleep infinity >/dev/null kubectl_bin -n default wait 
--for=condition=Ready pod/${pod_name} >/dev/null local output=$(kubectl_bin -n default exec ${pod_name} -- ${cli}) kubectl_bin -n default delete pod/${pod_name} --grace-period=0 --force >/dev/null @@ -1468,11 +1604,12 @@ renew_certificate() { } deploy_cmctl() { - local service_account="cmctl" + local temp_cr="$(mktemp)" + + $sed -e "s/percona-server-mongodb-operator/cmctl/g" "${src_dir}/deploy/rbac.yaml" \ + | yq '(select(.rules).rules[] | select(contains({"apiGroups": ["cert-manager.io"]}))).resources += "certificates/status"' > $temp_cr + kubectl_bin apply -f $temp_cr - $sed -e "s/percona-server-mongodb-operator/$service_account/g" "${src_dir}/deploy/rbac.yaml" \ - | yq '(select(.rules).rules[] | select(contains({"apiGroups": ["cert-manager.io"]}))).resources += "certificates/status"' \ - | kubectl_bin apply -f - kubectl_bin apply -f "$conf_dir/cmctl.yml" } diff --git a/e2e-tests/ignore-labels-annotations/run b/e2e-tests/ignore-labels-annotations/run index ffda3d1151..cc90333f52 100755 --- a/e2e-tests/ignore-labels-annotations/run +++ b/e2e-tests/ignore-labels-annotations/run @@ -45,16 +45,16 @@ check_service() { # `notIgnoredLabel` and `notIgnoredAnnotation` should be deleted kubectl_bin patch "service/$svc_name" --type=json --patch '[ { - "op": "add", - "path": "/metadata/labels", + "op": "add", + "path": "/metadata/labels", "value": { "notIgnoredLabel": "true", "ignoredLabel": "true" } }, { - "op": "add", - "path": "/metadata/annotations", + "op": "add", + "path": "/metadata/annotations", "value": { "notIgnoredAnnotation": "true", "ignoredAnnotation": "true" } }]' @@ -85,15 +85,15 @@ check_service() { desc "adding labels and annotations to $expose_path" kubectl_bin patch psmdb ${cluster} --type=json --patch '[ { - "op": "replace", - "path": "'$expose_path'/labels", + "op": "replace", + "path": "'$expose_path'/labels", "value": { "crLabel": "true", } }, { - "op": "replace", - "path": "'$expose_path'/annotations", + "op": "replace", + "path": "'$expose_path'/annotations", "value": { "crAnnotation": "true", } }]' @@ -105,11 +105,11 @@ check_service() { desc "removing labels and annotations from $expose_path" kubectl_bin patch psmdb ${cluster} --type=json --patch '[ { - "op": "remove", + "op": "remove", "path": "'$expose_path'/labels" }, { - "op": "remove", + "op": "remove", "path": "'$expose_path'/annotations" }]' sleep 5 @@ -119,16 +119,16 @@ check_service() { desc "adding other labels and annotations to $expose_path" kubectl_bin patch psmdb ${cluster} --type=json --patch '[ { - "op": "replace", - "path": "'$expose_path'/labels", + "op": "replace", + "path": "'$expose_path'/labels", "value": { "otherCrLabel": "true", "secondCrLabel": "true", } }, { - "op": "replace", - "path": "'$expose_path'/annotations", + "op": "replace", + "path": "'$expose_path'/annotations", "value": { "otherCrAnnotation": "true", "secondCrAnnotation": "true", } }]' @@ -141,15 +141,15 @@ check_service() { desc "adding removing one labels from $expose_path" kubectl_bin patch psmdb ${cluster} --type=json --patch '[ { - "op": "replace", - "path": "'$expose_path'/labels", + "op": "replace", + "path": "'$expose_path'/labels", "value": { "otherCrLabel": "true", } }, { - "op": "replace", - "path": "'$expose_path'/annotations", + "op": "replace", + "path": "'$expose_path'/annotations", "value": { "otherCrAnnotation": "true", } }]' @@ -162,11 +162,11 @@ check_service() { # When `labels` and `annotations` are not set, old metadata should stay kubectl_bin patch psmdb ${cluster} --type=json --patch '[ { - "op": "remove", + "op": "remove", "path": "/spec/ignoreAnnotations", },
{ - "op": "remove", + "op": "remove", "path": "/spec/ignoreLabels", }]' diff --git a/e2e-tests/init-deploy/run b/e2e-tests/init-deploy/run index 7520d936c3..30e1a27821 100755 --- a/e2e-tests/init-deploy/run +++ b/e2e-tests/init-deploy/run @@ -11,9 +11,8 @@ max_conn=13 create_infra $namespace desc 'create secrets and start client' -kubectl_bin apply \ - -f $conf_dir/secrets_with_tls.yml \ - -f $conf_dir/client.yml +kubectl_bin apply -f $conf_dir/secrets_with_tls.yml +apply_client $conf_dir/client.yml desc 'create custom RuntimeClass' if version_gt "1.19" && [ $EKS -ne 1 ]; then diff --git a/e2e-tests/ldap-tls/run b/e2e-tests/ldap-tls/run index 0742cf71f6..3b5df7d723 100755 --- a/e2e-tests/ldap-tls/run +++ b/e2e-tests/ldap-tls/run @@ -151,10 +151,8 @@ main() { deploy_openldap desc 'create secrets and start client' - cluster="some-name" - kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml test_mongod_openldap test_sharded_openldap diff --git a/e2e-tests/ldap/run b/e2e-tests/ldap/run index 529622e90e..f97d38c3ca 100755 --- a/e2e-tests/ldap/run +++ b/e2e-tests/ldap/run @@ -12,7 +12,11 @@ deploy_openldap() { select(.kind=="Deployment").spec.template.spec.containers[0].securityContext.capabilities.add[0]="NET_BIND_SERVICE"' "$test_dir/conf/openldap.yaml" \ | kubectl_bin apply -f - else - kubectl_bin apply -f "$test_dir/conf/openldap.yaml" + if [[ "$ARCH" == "arm64" ]]; then + yq eval '(select(.kind == "Deployment") | .spec.template.spec.tolerations) += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}]' $test_dir/conf/openldap.yaml | kubectl_bin apply -f - + else + kubectl_bin apply -f "$test_dir/conf/openldap.yaml" + fi fi kubectl rollout status deployment/openldap --timeout=120s @@ -147,10 +151,8 @@ main() { deploy_openldap desc 'create secrets and start client' - cluster="some-name" - kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml test_mongod_openldap test_sharded_openldap diff --git a/e2e-tests/liveness/run b/e2e-tests/liveness/run index c888b037a0..cba0acba8a 100755 --- a/e2e-tests/liveness/run +++ b/e2e-tests/liveness/run @@ -9,7 +9,9 @@ set_debug create_infra $namespace desc 'create secrets and start client' -kubectl_bin apply -f $conf_dir/secrets.yml -f $conf_dir/client.yml -f $conf_dir/minio-secret.yml +kubectl_bin apply -f $conf_dir/secrets.yml +kubectl_bin apply -f $conf_dir/minio-secret.yml +apply_client $conf_dir/client.yml cluster="liveness" desc "create first PSMDB cluster $cluster" diff --git a/e2e-tests/mongod-major-upgrade-sharded/run b/e2e-tests/mongod-major-upgrade-sharded/run index e4378d70c6..5a9773d21f 100755 --- a/e2e-tests/mongod-major-upgrade-sharded/run +++ b/e2e-tests/mongod-major-upgrade-sharded/run @@ -17,8 +17,9 @@ function main() { apply_s3_storage_secrets - kubectl_bin apply -f "${conf_dir}/client.yml" \ - -f "${conf_dir}/secrets.yml" + desc 'create secrets and start client' + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml desc 'install version service' diff --git a/e2e-tests/mongod-major-upgrade/run b/e2e-tests/mongod-major-upgrade/run index 8cb58e23fc..e688b82791 100755 --- a/e2e-tests/mongod-major-upgrade/run +++ b/e2e-tests/mongod-major-upgrade/run @@ -16,8 +16,9 @@ function main() { create_infra "${namespace}" - kubectl_bin apply -f "${conf_dir}/client.yml" \ - -f 
"${conf_dir}/secrets.yml" + desc 'create secrets and start client' + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml desc 'install version service' diff --git a/e2e-tests/multi-cluster-service/run b/e2e-tests/multi-cluster-service/run index 4dc5b7560d..1a0c059927 100755 --- a/e2e-tests/multi-cluster-service/run +++ b/e2e-tests/multi-cluster-service/run @@ -69,20 +69,19 @@ wait_service_export() { } desc "Register Kubernetes cluster" -k8s_cluster_name=$(kubectl -n default run --quiet curl --rm --restart=Never -it --image=appropriate/curl -- -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-name) -k8s_cluster_region=$(kubectl -n default run --quiet curl --rm --restart=Never -it --image=appropriate/curl -- -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-location) +kubectl -n default delete pod curl || : +k8s_cluster_name=$(kubectl -n default run --quiet curl --rm --restart=Never -it --image=alpine/curl --overrides='{"spec": {"tolerations":[{"key": "kubernetes.io/arch","operator": "Equal","value": "arm64","effect": "NoSchedule"}]}}' -- -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-name) +k8s_cluster_region=$(kubectl -n default run --quiet curl --rm --restart=Never -it --image=alpine/curl --overrides='{"spec": {"tolerations":[{"key": "kubernetes.io/arch","operator": "Equal","value": "arm64","effect": "NoSchedule"}]}}' -- -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-location) gcloud container hub memberships register ${k8s_cluster_name} --gke-cluster ${k8s_cluster_region}/${k8s_cluster_name} --enable-workload-identity wait_mcs_api create_infra "$namespace" -desc 'create first PSMDB cluster' -cluster="some-name" -kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" +desc 'create secrets and start client' +kubectl_bin apply -f $conf_dir/secrets.yml +apply_client $conf_dir/client.yml apply_s3_storage_secrets if version_gt "1.19" && [ $EKS -ne 1 ]; then @@ -93,7 +92,9 @@ else kubectl_bin apply -f "$conf_dir/container-rc.yaml" fi -apply_cluster "$test_dir/conf/$cluster.yml" +desc 'create first PSMDB cluster' +cluster="some-name" +apply_cluster $test_dir/conf/$cluster.yml desc 'check if all 3 Pods started' wait_for_running $cluster-rs0 3 wait_for_running $cluster-cfg 3 "false" diff --git a/e2e-tests/non-voting/run b/e2e-tests/non-voting/run index 088df33f7c..9ee7f41b8c 100755 --- a/e2e-tests/non-voting/run +++ b/e2e-tests/non-voting/run @@ -39,9 +39,8 @@ main() { deploy_cert_manager desc 'create secrets and start client' - kubectl_bin apply \ - -f $conf_dir/client.yml \ - -f $conf_dir/secrets.yml + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml desc "check non-voting members" spinup_psmdb "$cluster" "$test_dir/conf/$cluster.yml" diff --git a/e2e-tests/one-pod/run b/e2e-tests/one-pod/run index 9d8266fe37..51f62d21fd 100755 --- a/e2e-tests/one-pod/run +++ b/e2e-tests/one-pod/run @@ -28,9 +28,9 @@ main() { create_infra $namespace desc 'create secrets and start client' - kubectl_bin apply -f "${conf_dir}/client.yml" \ - -f "${conf_dir}/secrets.yml" \ - -f "${conf_dir}/minio-secret.yml" + kubectl_bin apply -f $conf_dir/secrets.yml + kubectl_bin apply -f $conf_dir/minio-secret.yml + apply_client $conf_dir/client.yml deploy_minio diff --git a/e2e-tests/operator-self-healing-chaos/run 
b/e2e-tests/operator-self-healing-chaos/run index 8cd344ad92..6d22fe2b87 100755 --- a/e2e-tests/operator-self-healing-chaos/run +++ b/e2e-tests/operator-self-healing-chaos/run @@ -9,9 +9,8 @@ set_debug cluster="some-name-rs0" setup_cluster() { - desc 'create secrets and start client' - kubectl_bin apply \ - -f $conf_dir/secrets.yml + desc 'create secrets' + kubectl_bin apply -f $conf_dir/secrets.yml desc "create first PSMDB cluster $cluster" apply_cluster $conf_dir/$cluster.yml @@ -28,8 +27,9 @@ fail_pod() { yq eval ' .metadata.name = "chaos-operator-pod-failure" | del(.spec.selector.pods.test-namespace) | - .spec.selector.pods.'$test_namespace'[0] = "'$init_pod'"' $conf_dir/chaos-pod-failure.yml \ - | kubectl apply --namespace $test_namespace -f - + .spec.selector.pods.'$test_namespace'[0] = "'$init_pod'" | + .spec.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}]' \ + $conf_dir/chaos-pod-failure.yml | kubectl apply --namespace $test_namespace -f - sleep 10 desc 'check if operator works fine: scale down from 5 to 3' diff --git a/e2e-tests/pitr-physical/run b/e2e-tests/pitr-physical/run index 49ae125d4a..249c60d2e2 100755 --- a/e2e-tests/pitr-physical/run +++ b/e2e-tests/pitr-physical/run @@ -121,10 +121,9 @@ main() { deploy_minio desc 'create secrets and start client' - kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" \ - -f $conf_dir/minio-secret.yml + kubectl_bin apply -f $conf_dir/minio-secret.yml + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml cluster="some-name" desc "create first PSMDB cluster $cluster" diff --git a/e2e-tests/pitr-sharded/run b/e2e-tests/pitr-sharded/run index 94638aed9a..858ffda293 100755 --- a/e2e-tests/pitr-sharded/run +++ b/e2e-tests/pitr-sharded/run @@ -79,10 +79,9 @@ main() { deploy_minio desc 'create secrets and start client' - kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" \ - -f $conf_dir/minio-secret.yml + kubectl_bin apply -f $conf_dir/minio-secret.yml + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml desc 'create custom RuntimeClass' if version_gt "1.19" && [ $EKS -ne 1 ]; then diff --git a/e2e-tests/pitr/run b/e2e-tests/pitr/run index 6e839e27ae..a035958b1b 100755 --- a/e2e-tests/pitr/run +++ b/e2e-tests/pitr/run @@ -112,10 +112,9 @@ main() { deploy_minio desc 'create secrets and start client' - kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" \ - -f $conf_dir/minio-secret.yml + kubectl_bin apply -f $conf_dir/minio-secret.yml + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml cluster="some-name-rs0" desc "create first PSMDB cluster $cluster" diff --git a/e2e-tests/pvc-resize/run b/e2e-tests/pvc-resize/run index 7689e35f48..a454be8268 100755 --- a/e2e-tests/pvc-resize/run +++ b/e2e-tests/pvc-resize/run @@ -115,10 +115,9 @@ fi create_infra "${namespace}" -desc 'create secrets and psmdb client' -kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" +desc 'create secrets and start client' +kubectl_bin apply -f $conf_dir/secrets.yml +apply_client $conf_dir/client.yml desc 'create PSMDB cluster' cluster="some-name" diff --git a/e2e-tests/recover-no-primary/run b/e2e-tests/recover-no-primary/run index 2ca5bff53b..9ba0a3f2a7 100755 --- a/e2e-tests/recover-no-primary/run +++ b/e2e-tests/recover-no-primary/run @@ -6,12 +6,13 @@ set -o xtrace test_dir=$(realpath $(dirname $0)) . 
${test_dir}/../functions -create_infra ${namespace} +create_infra $namespace + +desc 'create secrets and start client' +kubectl_bin apply -f $conf_dir/secrets_with_tls.yml +apply_client $conf_dir/client.yml cluster="some-name" -kubectl_bin apply \ - -f ${conf_dir}/secrets_with_tls.yml \ - -f ${conf_dir}/client.yml function test_single_replset() { apply_cluster ${test_dir}/conf/${cluster}.yml diff --git a/e2e-tests/replset-overrides/conf/some-name-overridden.yml b/e2e-tests/replset-overrides/conf/some-name-overridden.yml index f38b41ee3a..bbcd26ea42 100644 --- a/e2e-tests/replset-overrides/conf/some-name-overridden.yml +++ b/e2e-tests/replset-overrides/conf/some-name-overridden.yml @@ -6,11 +6,11 @@ metadata: name: some-name spec: crVersion: 1.17.0 - image: perconalab/percona-server-mongodb-operator:main-mongod7.0 + image: imagePullPolicy: Always backup: enabled: true - image: perconalab/percona-server-mongodb-operator:main-backup + image: storages: minio: type: s3 diff --git a/e2e-tests/replset-overrides/conf/some-name.yml b/e2e-tests/replset-overrides/conf/some-name.yml index 287f40f49b..5fc31e9978 100644 --- a/e2e-tests/replset-overrides/conf/some-name.yml +++ b/e2e-tests/replset-overrides/conf/some-name.yml @@ -6,11 +6,11 @@ metadata: name: some-name spec: crVersion: 1.17.0 - image: perconalab/percona-server-mongodb-operator:main-mongod7.0 + image: imagePullPolicy: Always backup: enabled: true - image: perconalab/percona-server-mongodb-operator:main-backup + image: storages: minio: type: s3 diff --git a/e2e-tests/replset-overrides/run b/e2e-tests/replset-overrides/run index 79419b9621..95e76ea6dc 100755 --- a/e2e-tests/replset-overrides/run +++ b/e2e-tests/replset-overrides/run @@ -25,17 +25,7 @@ run_recovery_check() { wait_cluster_consistency "${cluster}" } -delete_cluster() { - local cluster=$1 - - echo "deleting cluster: ${cluster}" - kubectl_bin delete psmdb ${cluster} - wait_for_delete psmdb/${cluster} - wait_for_delete pod/${cluster}-rs0-0 - kubectl delete secrets --all -} - -test_override_host_after_deploy() { +test_override_after_deploy() { kubectl_bin apply \ -f ${conf_dir}/secrets_with_tls.yml \ -f ${conf_dir}/minio-secret.yml @@ -68,14 +58,14 @@ test_override_host_after_deploy() { run_recovery_check "${cluster}" "backup-minio-logical" "external-rs0-0.${namespace}" - delete_cluster ${cluster} + echo "deleting cluster" + kubectl_bin delete psmdb ${cluster} + wait_for_delete psmdb/${cluster} + wait_for_delete pod/${cluster}-rs0-0 + kubectl delete secrets --all } -test_deploy_with_host_overrides() { - kubectl_bin apply \ - -f ${conf_dir}/secrets_with_tls.yml \ - -f ${conf_dir}/minio-secret.yml - +test_deploy_with_overrides() { echo "creating external services" kubectl_bin apply -f ${test_dir}/conf/external-services.yml @@ -94,54 +84,29 @@ test_deploy_with_host_overrides() { wait_backup "backup-minio-physical" run_recovery_check "${cluster}" "backup-minio-physical" "external-rs0-0.${namespace}" - delete_cluster ${cluster} -} - -test_override_priority() { - kubectl_bin apply \ - -f ${conf_dir}/secrets_with_tls.yml \ - -f ${conf_dir}/minio-secret.yml - - echo "creating PSMDB cluster: ${cluster}" - apply_cluster ${test_dir}/conf/${cluster}-override-priority.yml - wait_for_running ${cluster}-rs0 3 - - echo "writing some data" - run_mongo \ - 'use myApp\n db.test.insert({ x: 100500 })' \ - "databaseAdmin:databaseAdmin123456@${cluster}-rs0.${namespace}" - compare_mongo_cmd "find" "databaseAdmin:databaseAdmin123456@${cluster}-rs0.${namespace}" - - echo "checking member priorities" - 
-	run_mongo \
-		"rs.conf().members.map(m => m.priority)" \
-		"databaseAdmin:databaseAdmin123456@${cluster}-rs0.${namespace}" \
-		| egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' \
-		> ${tmp_dir}/priorities.json
-	diff -u ${test_dir}/compare/priorities.json ${tmp_dir}/priorities.json
-	echo "member priorities are OK"
-
-	delete_cluster ${cluster}
+	echo "deleting cluster"
+	kubectl_bin delete psmdb ${cluster}
+	wait_for_delete psmdb/${cluster}
+	wait_for_delete pod/${cluster}-rs0-0
+	kubectl_bin delete secret --all
 }

 main() {
-	create_infra ${namespace}
-	kubectl_bin apply -f ${conf_dir}/client.yml
+	create_infra $namespace
+	kubectl_bin apply -f $conf_dir/secrets_with_tls.yml
+	kubectl_bin apply -f $conf_dir/minio-secret.yml
+	apply_client $conf_dir/client.yml
 	deploy_minio

-	desc "Case 1: Deploying a new cluster with hostname overrides"
-	test_deploy_with_host_overrides
+	desc "Case 1: Deploying a new cluster with replsetOverrides"
+	test_deploy_with_overrides
 	desc "Case 1: PASSED"

-	desc "Case 2: Patching a running cluster to override hostnames"
-	test_override_host_after_deploy
+	desc "Case 2: Patching a running cluster with replsetOverrides"
+	test_override_after_deploy
 	desc "Case 2: PASSED"

-	desc "Case 3: Overriding member priorities"
-	test_override_priority
-	desc "Case 3: PASSED"
-
 	desc "All cases PASSED"
 }
diff --git a/e2e-tests/rs-shard-migration/run b/e2e-tests/rs-shard-migration/run
index 7020b091a6..6507824459 100755
--- a/e2e-tests/rs-shard-migration/run
+++ b/e2e-tests/rs-shard-migration/run
@@ -18,7 +18,9 @@ function main() {
 	create_infra $namespace

 	desc 'create secrets and start client'
-	kubectl_bin apply -f $conf_dir/secrets.yml -f $conf_dir/client.yml
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client.yml
+
 	cluster="some-name"
 	CLUSTER_SIZE=3

@@ -31,8 +33,47 @@
 	simple_data_check "${cluster}-rs0" ${CLUSTER_SIZE}

 	desc 'initiate migration from replicaset to sharded cluster'
-	kubectl_bin patch psmdb/${cluster} --type json -p='[{"op":"add","path":"/spec/sharding","value":{"configsvrReplSet":{"size":'${CLUSTER_SIZE}',"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"size":1}}}]'
+
+	kubectl patch psmdb/some-name --type json '-p=[{
+		"op": "add",
+		"path": "/spec/sharding",
+		"value": {
+			"configsvrReplSet": {
+				"size": 3,
+				"volumeSpec": {
+					"persistentVolumeClaim": {
+						"resources": {
+							"requests": {
+								"storage": "3Gi"
+							}
+						}
+					}
+				},
+				"tolerations": [
+					{
+						"key": "kubernetes.io/arch",
+						"operator": "Equal",
+						"value": "arm64",
+						"effect": "NoSchedule"
+					}
+				]
+			},
+			"enabled": true,
+			"mongos": {
+				"size": 1,
+				"tolerations": [
+					{
+						"key": "kubernetes.io/arch",
+						"operator": "Equal",
+						"value": "arm64",
+						"effect": "NoSchedule"
+					}
+				]
+			}
+		}
+	}]'
 	sleep 10
+
 	wait_for_running "${cluster}-rs0" "${CLUSTER_SIZE}" "false"
 	wait_for_running "${cluster}-cfg" "${CLUSTER_SIZE}" "false"
 	wait_cluster_consistency "${cluster}"
diff --git a/e2e-tests/run-release-arm64.csv b/e2e-tests/run-release-arm64.csv
new file mode 100644
index 0000000000..0492d48716
--- /dev/null
+++ b/e2e-tests/run-release-arm64.csv
@@ -0,0 +1,48 @@
+arbiter
+balancer
+custom-replset-name
+custom-tls
+custom-users-roles
+custom-users-roles-sharded
+cross-site-sharded
+data-at-rest-encryption
+data-sharded
+default-cr
+demand-backup
+demand-backup-eks-credentials
+demand-backup-physical
+demand-backup-physical-sharded
+demand-backup-sharded
+expose-sharded
+ignore-labels-annotations
+init-deploy
+finalizer
+ldap
+ldap-tls
+limits
+liveness
+mongod-major-upgrade
+mongod-major-upgrade-sharded
+multi-cluster-service
+non-voting
+one-pod
+operator-self-healing-chaos
+pitr
+pitr-sharded
+pitr-physical
+pvc-resize
+recover-no-primary
+replset-overrides
+rs-shard-migration
+scaling
+scheduled-backup
+security-context
+self-healing-chaos
+service-per-pod
+serviceless-external-nodes
+split-horizon
+storage
+tls-issue-cert-manager
+upgrade
+upgrade-consistency
+users
diff --git a/e2e-tests/scaling/run b/e2e-tests/scaling/run
index 4246858b75..56ce1d5de8 100755
--- a/e2e-tests/scaling/run
+++ b/e2e-tests/scaling/run
@@ -9,9 +9,8 @@ set_debug
 create_infra $namespace

 desc 'create secrets and start client'
-kubectl_bin apply \
-	-f $conf_dir/secrets.yml \
-	-f $conf_dir/client.yml
+kubectl_bin apply -f $conf_dir/secrets.yml
+apply_client $conf_dir/client.yml

 cluster='some-name-rs0'
 desc "create first PSMDB cluster $cluster"
diff --git a/e2e-tests/scheduled-backup/run b/e2e-tests/scheduled-backup/run
index 8cb5a463c8..dde0a5bb43 100755
--- a/e2e-tests/scheduled-backup/run
+++ b/e2e-tests/scheduled-backup/run
@@ -47,9 +47,8 @@ cat - <<-EOF | kubectl apply -f -
 EOF

 desc 'create secrets and start client'
-kubectl_bin apply \
-	-f "$conf_dir/secrets.yml" \
-	-f "$conf_dir/client.yml"
+kubectl_bin apply -f $conf_dir/secrets.yml
+apply_client $conf_dir/client.yml

 apply_s3_storage_secrets

@@ -88,16 +87,7 @@ sleep 55
 desc 'disable backups schedule'
 apply_cluster "$test_dir/conf/$cluster.yml"

-if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then
-	backup_name_aws=$(kubectl_bin get psmdb-backup | grep aws-s3 | awk '{print$1}' | head -1)
-	backup_name_gcp=$(kubectl_bin get psmdb-backup | grep gcp-cs | awk '{print$1}' | head -1)
-	backup_name_azure=$(kubectl_bin get psmdb-backup | grep azure-blob | awk '{print$1}' | head -1)
-	wait_backup "$backup_name_aws"
-	wait_backup "$backup_name_gcp"
-	wait_backup "$backup_name_azure"
-fi
-
-backup_name_minio=$(kubectl_bin get psmdb-backup | grep minio | awk '{print$1}' | head -1)
+backup_name_minio=$(kubectl_bin get psmdb-backup | grep minio | awk '{print $1}' | head -1)
 wait_backup "$backup_name_minio"

 sleep 5
@@ -106,24 +96,41 @@
 echo -n "checking backup count for every-min-minio..."
 check_backup_count every-min-minio 1
 echo "OK"

-echo -n "checking backup count for every-min-aws-s3..."
-check_backup_count every-min-aws-s3 1
-echo "OK"
+if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then
+	backup_name_aws=$(kubectl_bin get psmdb-backup | grep aws-s3 | awk '{print$1}' | head -1)
+	backup_name_gcp=$(kubectl_bin get psmdb-backup | grep gcp-cs | awk '{print$1}' | head -1)
+	backup_name_azure=$(kubectl_bin get psmdb-backup | grep azure-blob | awk '{print$1}' | head -1)
+	wait_backup "$backup_name_aws"
+	wait_backup "$backup_name_gcp"
+	wait_backup "$backup_name_azure"

-echo -n "checking backup count for every-min-gcp-cs..."
-check_backup_count every-min-gcp-cs 1
-echo "OK"
+	echo -n "checking backup count for every-min-aws-s3..."
+	check_backup_count every-min-aws-s3 1
+	echo "OK"

-echo -n "checking backup count for every-min-azure-blob..."
-check_backup_count every-min-azure-blob 1
-echo "OK"
+	echo -n "checking backup count for every-min-gcp-cs..."
+	check_backup_count every-min-gcp-cs 1
+	echo "OK"
+
+	echo -n "checking backup count for every-min-azure-blob..."
+	check_backup_count every-min-azure-blob 1
+	echo "OK"
+fi

 desc 'check backup and restore -- minio'
 backup_dest_minio=$(get_backup_dest "$backup_name_minio")
-kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
-	/usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \
-	/usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://${backup_dest_minio}/rs0/ \
-	| grep "myApp.test.gz"
+
+retry=0
+until aws_cli "s3 ls s3://$backup_dest_minio/rs0/" | grep "myApp.test.gz"; do
+	if [[ $retry -ge 10 ]]; then
+		echo "Max retry count $retry reached. File myApp.test.gz wasn't found on s3://$backup_dest_minio/rs0/"
+		exit 1
+	fi
+	((retry += 1))
+	echo -n .
+	sleep 5
+done
+
 run_mongo 'use myApp\n db.test.insert({ x: 100501 })' "myApp:myPass@$cluster.$namespace"
 compare_mongo_cmd "find" "myApp:myPass@$cluster-0.$cluster.$namespace" "-2nd"
 compare_mongo_cmd "find" "myApp:myPass@$cluster-1.$cluster.$namespace" "-2nd"
diff --git a/e2e-tests/security-context/run b/e2e-tests/security-context/run
index fd1f06ba9d..2e166af207 100755
--- a/e2e-tests/security-context/run
+++ b/e2e-tests/security-context/run
@@ -9,7 +9,9 @@ set_debug
 create_infra $namespace

 desc 'create secrets and start client'
-kubectl_bin apply -f $conf_dir/secrets.yml -f $conf_dir/client.yml -f $conf_dir/minio-secret.yml
+kubectl_bin apply -f $conf_dir/secrets.yml
+kubectl_bin apply -f $conf_dir/minio-secret.yml
+apply_client $conf_dir/client.yml

 desc 'create additional service account'
 kubectl_bin apply -f "$test_dir/conf/service-account.yml"
diff --git a/e2e-tests/self-healing-chaos/run b/e2e-tests/self-healing-chaos/run
index 1380150331..fb73c708c4 100755
--- a/e2e-tests/self-healing-chaos/run
+++ b/e2e-tests/self-healing-chaos/run
@@ -23,9 +23,8 @@ check_pod_restarted() {

 setup_cluster() {
 	desc 'create secrets and start client'
-	kubectl_bin apply \
-		-f $conf_dir/secrets.yml \
-		-f $conf_dir/client.yml
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client.yml

 	desc "create first PSMDB cluster $cluster"
 	apply_cluster $conf_dir/$cluster.yml
@@ -75,11 +74,8 @@ kill_pod() {
 	local pod=$1
 	local old_resourceVersion=$(kubectl get pod $pod -ojson | jq '.metadata.resourceVersion' | tr -d '"')

-	yq eval '
-		.metadata.name = "chaos-cluster-pod-kill" |
-		del(.spec.selector.pods.test-namespace) |
-		.spec.selector.pods.'$namespace'[0] = "'$pod'"' $conf_dir/chaos-pod-kill.yml \
-		| kubectl apply -f -
+	yq eval '.metadata.name = "chaos-cluster-pod-kill" | del(.spec.selector.pods.test-namespace) | .spec.selector.pods.'$namespace'[0] = "'$pod'"' $conf_dir/chaos-pod-kill.yml | kubectl apply -f -
+
 	sleep 5

 	# check if all 3 Pods started
diff --git a/e2e-tests/service-per-pod/run b/e2e-tests/service-per-pod/run
index 9a785a393f..3fa44ab55a 100755
--- a/e2e-tests/service-per-pod/run
+++ b/e2e-tests/service-per-pod/run
@@ -79,9 +79,8 @@ main() {
 	deploy_cert_manager

 	desc 'create secrets and start client'
-	kubectl_bin apply \
-		-f $conf_dir/client.yml \
-		-f $conf_dir/secrets.yml
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client.yml

 	desc 'check ClusterIP'
 	check_cr_config "cluster-ip-rs0"
diff --git a/e2e-tests/serviceless-external-nodes/conf/external.yml b/e2e-tests/serviceless-external-nodes/conf/external.yml
index 96f7840e55..f48b3388cf 100644
--- a/e2e-tests/serviceless-external-nodes/conf/external.yml
+++ b/e2e-tests/serviceless-external-nodes/conf/external.yml
@@ -10,7 +10,7 @@ spec:
     replsetSize: true
     mongosSize: true
   clusterServiceDNSMode: "Internal"
-  image: percona/percona-server-mongodb:6.0.4-3
+  image:
   imagePullPolicy: Always
   secrets:
     users: mydb-custom-users
diff --git a/e2e-tests/serviceless-external-nodes/conf/main.yml b/e2e-tests/serviceless-external-nodes/conf/main.yml
index 4a9b7e3942..af55e1a35a 100644
--- a/e2e-tests/serviceless-external-nodes/conf/main.yml
+++ b/e2e-tests/serviceless-external-nodes/conf/main.yml
@@ -9,7 +9,7 @@ spec:
   clusterServiceDNSMode: "Internal"
   tls:
     mode: allowTLS
-  image: percona/percona-server-mongodb:6.0.4-3
+  image:
   imagePullPolicy: Always
   secrets:
     users: mydb-custom-users
diff --git a/e2e-tests/serviceless-external-nodes/run b/e2e-tests/serviceless-external-nodes/run
index b7a1272737..5345933bc8 100755
--- a/e2e-tests/serviceless-external-nodes/run
+++ b/e2e-tests/serviceless-external-nodes/run
@@ -14,9 +14,9 @@ unset OPERATOR_NS
 desc "Create main cluster"
 create_infra "$namespace"
-kubectl_bin apply \
-	-f "$conf_dir/client.yml" \
-	-f "$test_dir/conf/secrets.yml"
+
+kubectl_bin apply -f $test_dir/conf/secrets.yml
+apply_client $conf_dir/client.yml

 apply_cluster "$test_dir/conf/main.yml"
 wait_for_running "$cluster-rs0" 1
@@ -33,13 +33,14 @@ kubectl_bin config set-context $(kubectl_bin config current-context) --namespace
 create_namespace $replica_namespace 0
 deploy_operator
-kubectl_bin apply \
-	-f "$conf_dir/client.yml" \
-	-f "$test_dir/conf/secrets.yml"

 apply_cluster "$test_dir/conf/external.yml"
-wait_pod ${cluster}-rs0-0
-wait_pod ${cluster}-rs0-1
+
+desc 'create secrets and start client'
+kubectl_bin apply -f $test_dir/conf/secrets.yml
+apply_client $conf_dir/client.yml
+
+wait_pod $cluster-rs0-0
+wait_pod $cluster-rs0-1

 secrets_count=$(kubectl_bin get secret -o json | jq --arg pattern "$cluster" '[.items[] | select(.metadata.name | test($pattern))] | length')
 if [[ $secrets_count != 6 ]]; then
diff --git a/e2e-tests/smart-update/run b/e2e-tests/smart-update/run
index 015a4d0b19..e933d296f5 100755
--- a/e2e-tests/smart-update/run
+++ b/e2e-tests/smart-update/run
@@ -35,7 +35,8 @@ cluster="smart-update"
 create_infra ${namespace}

 desc 'create secrets and start client'
-kubectl_bin apply -f ${conf_dir}/secrets.yml -f ${conf_dir}/client.yml
+kubectl_bin apply -f $conf_dir/secrets.yml
+apply_client $conf_dir/client.yml

 IMAGE_MONGOD_TO_UPDATE=${IMAGE_MONGOD}
 if [[ ${IMAGE_MONGOD} == *"percona-server-mongodb-operator"* ]]; then
diff --git a/e2e-tests/split-horizon/run b/e2e-tests/split-horizon/run
index 20a903c1a2..1abf0eb3b8 100755
--- a/e2e-tests/split-horizon/run
+++ b/e2e-tests/split-horizon/run
@@ -25,13 +25,13 @@ configure_client_hostAliases() {
 	wait_pod $(kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')
 }

-create_infra ${namespace}
+create_infra $namespace

-cluster="some-name"
-kubectl_bin apply \
-	-f ${conf_dir}/secrets_with_tls.yml \
-	-f ${conf_dir}/client_with_tls.yml
+desc 'create secrets and start client'
+kubectl_bin apply -f $conf_dir/secrets_with_tls.yml
+apply_client $conf_dir/client_with_tls.yml

+cluster="some-name"
 apply_cluster ${test_dir}/conf/${cluster}-3horizons.yml
 wait_for_running "${cluster}-rs0" 3
 wait_cluster_consistency ${cluster}
diff --git a/e2e-tests/storage/run b/e2e-tests/storage/run
index 4dff4779f6..17f52620d3 100755
--- a/e2e-tests/storage/run
+++ b/e2e-tests/storage/run
@@ -47,14 +47,19 @@ main() {
 	deploy_cert_manager

 	desc 'create secrets and start client'
-	kubectl_bin apply \
-		-f $conf_dir/client.yml \
-		-f $conf_dir/secrets.yml \
-		-f $test_dir/conf/hostpath-helper.yml
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client.yml

 	desc 'check emptydir'
 	check_cr_config "emptydir-rs0"

+	if [[ "$ARCH" == "arm64" ]]; then
+		yq eval '.spec.template.spec.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}]' \
+			$test_dir/conf/hostpath-helper.yml | kubectl_bin apply -f -
+	else
+		kubectl_bin apply -f $test_dir/conf/hostpath-helper.yml
+	fi
+
 	desc 'check hostpath'
 	check_cr_config "hostpath-rs0"
diff --git a/e2e-tests/tls-issue-cert-manager/run b/e2e-tests/tls-issue-cert-manager/run
index 0b10d74b75..bfa6071766 100755
--- a/e2e-tests/tls-issue-cert-manager/run
+++ b/e2e-tests/tls-issue-cert-manager/run
@@ -29,8 +29,8 @@ main() {
 	deploy_cert_manager

 	desc 'create secrets and start client'
-	kubectl_bin apply -f "$conf_dir/secrets.yml"
-	kubectl_bin apply -f "$conf_dir/client_with_tls.yml"
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client_with_tls.yml

 	desc 'create custom cert-manager issuers and certificates'
 	kubectl_bin apply -f "$test_dir/conf/some-name-psmdb-ca-issuer.yml"
diff --git a/e2e-tests/upgrade-consistency-sharded-tls/run b/e2e-tests/upgrade-consistency-sharded-tls/run
index 4f7a0a88cf..f0dc59e78b 100755
--- a/e2e-tests/upgrade-consistency-sharded-tls/run
+++ b/e2e-tests/upgrade-consistency-sharded-tls/run
@@ -20,8 +20,9 @@ main() {
 	deploy_cert_manager

 	desc 'create secrets and start client'
-	kubectl_bin apply -f "$conf_dir/secrets.yml"
-	kubectl_bin apply -f "$conf_dir/client_with_tls.yml"
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client_with_tls.yml
+
 	deploy_cmctl

 	desc "create first PSMDB cluster 1.17.0 $CLUSTER"
diff --git a/e2e-tests/upgrade-consistency/run b/e2e-tests/upgrade-consistency/run
index b2021a2184..e3323be828 100755
--- a/e2e-tests/upgrade-consistency/run
+++ b/e2e-tests/upgrade-consistency/run
@@ -12,7 +12,8 @@ main() {
 	create_infra $namespace

 	desc 'create secrets and start client'
-	kubectl_bin apply -f "${conf_dir}/client.yml" -f "${conf_dir}/secrets.yml"
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client.yml

 	desc "create first PSMDB cluster 1.17.0 $CLUSTER"
 	apply_cluster "$test_dir/conf/${CLUSTER}-rs0.yml"
diff --git a/e2e-tests/upgrade-sharded/run b/e2e-tests/upgrade-sharded/run
index 08db6b2323..a8b4cc5b24 100755
--- a/e2e-tests/upgrade-sharded/run
+++ b/e2e-tests/upgrade-sharded/run
@@ -161,15 +161,15 @@ function main() {
 	if [ -n "$OPERATOR_NS" ]; then
 		rbac="cw-rbac"
 	fi
-	create_infra_gh "${namespace}" "${GIT_TAG}"
+	create_infra_gh $namespace $GIT_TAG
 	deploy_cert_manager
 	apply_s3_storage_secrets
 	deploy_minio

 	desc 'create secrets and start client'
-	curl -s "https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${GIT_TAG}/deploy/secrets.yaml" >"${tmp_dir}/secrets.yaml"
-	kubectl_bin apply -f "${conf_dir}/client.yml" \
-		-f "${tmp_dir}/secrets.yaml"
+	curl -s "https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${GIT_TAG}/deploy/secrets.yaml" > $tmp_dir/secrets.yaml
+	kubectl_bin apply -f $tmp_dir/secrets.yaml
+	apply_client $conf_dir/client.yml

 	desc "create first PSMDB cluster $cluster"
 	local cr_yaml="${tmp_dir}/cr_${GIT_TAG}.yaml"
@@ -263,11 +263,7 @@
 	run_mongos 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-mongos.${namespace}"
 	backup_dest_minio=$(get_backup_dest "$backup_name_minio")
-	retry 3 5 kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
-		/usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \
-		/usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls "s3://${backup_dest_minio}/rs0/" \
-		| grep myApp.test.gz
-
+	retry 3 5 aws_cli "s3 ls s3://${backup_dest_minio}/rs0/" | grep "myApp.test.gz"
 	run_mongos 'use myApp\n db.test.insert({ x: 100501 })' "myApp:myPass@${cluster}-mongos.${namespace}"
 	compare_mongos_cmd "find" "myApp:myPass@$cluster-mongos.$namespace" "-2nd" ".svc.cluster.local" "myApp" "test"
 	run_restore "$backup_name_minio"
diff --git a/e2e-tests/upgrade/run b/e2e-tests/upgrade/run
index 78df239e26..a4589aa121 100755
--- a/e2e-tests/upgrade/run
+++ b/e2e-tests/upgrade/run
@@ -146,9 +146,9 @@ function main() {
 	deploy_minio

 	desc 'create secrets and start client'
-	curl -s "https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${GIT_TAG}/deploy/secrets.yaml" >"${tmp_dir}/secrets.yaml"
-	kubectl_bin apply -f "${conf_dir}/client.yml" \
-		-f "${tmp_dir}/secrets.yaml"
+	curl -s "https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${GIT_TAG}/deploy/secrets.yaml" > $tmp_dir/secrets.yaml
+	kubectl_bin apply -f $tmp_dir/secrets.yaml
+	apply_client $conf_dir/client.yml

 	local cr_yaml="${tmp_dir}/cr_${GIT_TAG}.yaml"
 	prepare_cr_yaml "${cr_yaml}"
@@ -219,11 +219,7 @@
 	run_mongo 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-rs0.${namespace}"
 	backup_dest_minio=$(get_backup_dest "$backup_name_minio")
-	kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
-		/usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \
-		/usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://${backup_dest_minio}/rs0/ \
-		| grep myApp.test.gz
-
+	aws_cli "s3 ls s3://${backup_dest_minio}/rs0/" | grep "myApp.test.gz"
 	run_mongo 'use myApp\n db.test.insert({ x: 100501 })' "myApp:myPass@${cluster}-rs0.${namespace}"
 	compare_mongo_cmd "find" "myApp:myPass@$cluster-rs0.$namespace" "-2nd" ".svc.cluster.local" "myApp" "test"
 	run_restore "$backup_name_minio"
diff --git a/e2e-tests/users/run b/e2e-tests/users/run
index bb61bd132c..92d0124903 100755
--- a/e2e-tests/users/run
+++ b/e2e-tests/users/run
@@ -14,9 +14,9 @@ create_infra $namespace
 deploy_minio

 desc 'create secrets and start client'
-kubectl_bin apply -f "${conf_dir}/client.yml" \
-	-f "${conf_dir}/secrets.yml" \
-	-f "${conf_dir}/minio-secret.yml"
+kubectl_bin apply -f $conf_dir/secrets.yml
+kubectl_bin apply -f $conf_dir/minio-secret.yml
+apply_client $conf_dir/client.yml

 cluster="some-name-rs0"
 desc "create first PSMDB cluster $cluster"
diff --git a/e2e-tests/version-service/run b/e2e-tests/version-service/run
index fbef8220bd..7e879888cf 100755
--- a/e2e-tests/version-service/run
+++ b/e2e-tests/version-service/run
@@ -12,11 +12,11 @@ function check_telemetry_transfer() {
 	local cr_vs_channel=${2:-"disabled"}
 	local telemetry_state=${3:-"enabled"}

-	cluster="minimal-cluster"
 	desc 'create secrets and start client'
-	kubectl_bin apply -f $conf_dir/client.yml
-	yq eval '.metadata.name = "'${cluster}'"' $conf_dir/secrets.yml | kubectl_bin apply -f -
+	cluster="minimal-cluster"
+	apply_client $conf_dir/client.yml
+	yq eval '.metadata.name = "'$cluster'"' $conf_dir/secrets.yml | kubectl_bin apply -f -

 	desc "create PSMDB minimal cluster $cluster"
 	yq eval '
 		.spec.upgradeOptions.versionServiceEndpoint = "'${cr_vs_uri}'" |
@@ -151,7 +151,9 @@ for i in "${!cases[@]}"; do
 	cluster="${cases[$i]}"
 	expected_image="${expected_images[$i]}"

-	kubectl_bin apply -f $conf_dir/secrets.yml -f $conf_dir/client.yml
+	desc 'create secrets and start client'
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client.yml

 	desc 'create PSMDB cluster'
 	tmp_file=$(mktemp)
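
[Editor's note] Throughout this patch, the inline `kubectl_bin apply -f $conf_dir/client.yml` calls and the one-off awscli pods are replaced by two helpers, apply_client and aws_cli, whose definitions are not part of the hunks above (they presumably live in e2e-tests/functions alongside kubectl_bin, desc, and retry). The sketch below is a hedged reconstruction, inferred from the inline commands this patch deletes and from the arm64-toleration yq pattern it applies to hostpath-helper.yml and the chaos manifests; the function names come from the call sites, but the bodies and the $ARCH guard are assumptions, not the PR's actual code:

apply_client() {
	# Apply a client manifest, adding an arm64 toleration when the run
	# targets arm64 nodes (assumed guard; mirrors the yq pattern used for
	# hostpath-helper.yml and chaos-pod-failure.yml in this patch).
	local manifest=$1

	if [[ ${ARCH:-} == "arm64" ]]; then
		yq eval '.spec.template.spec.tolerations += [{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}]' \
			"$manifest" | kubectl_bin apply -f -
	else
		kubectl_bin apply -f "$manifest"
	fi
}

aws_cli() {
	# Run one aws-cli command in a throwaway pod against the in-cluster
	# MinIO endpoint; credentials and endpoint are copied verbatim from the
	# inline invocations this patch removes.
	local cmd=$1

	# $cmd stays unquoted so that "s3 ls s3://bucket/rs0/" word-splits into
	# separate arguments for /usr/bin/aws.
	kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
		/usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \
		/usr/bin/aws --endpoint-url http://minio-service:9000 $cmd
}

With definitions like these, `retry 3 5 aws_cli "s3 ls s3://${backup_dest_minio}/rs0/" | grep "myApp.test.gz"` in upgrade-sharded/run is equivalent to the deleted kubectl_bin run block, and every apply_client call site can pick up the arm64 toleration without repeating the yq expression.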