diff --git a/.gitignore b/.gitignore
index 3d830ccc6d..5bc9ed6695 100644
--- a/.gitignore
+++ b/.gitignore
@@ -116,6 +116,7 @@ percona-server-mongodb-operator
 mongodb-healthcheck
 
 !cmd/percona-server-mongodb-operator
+!cmd/mongodb-healthcheck
 
 # End of https://www.gitignore.io/api/go,vim,emacs,visualstudiocode
diff --git a/cmd/mongodb-healthcheck/main.go b/cmd/mongodb-healthcheck/main.go
index 0e701ef9f1..274f05a7c9 100644
--- a/cmd/mongodb-healthcheck/main.go
+++ b/cmd/mongodb-healthcheck/main.go
@@ -17,8 +17,10 @@ package main
 import (
 	"context"
 	"os"
+	"os/signal"
 	"strconv"
 	"strings"
+	"syscall"
 
 	uzap "go.uber.org/zap"
 	"go.uber.org/zap/zapcore"
@@ -36,6 +38,9 @@ var (
 )
 
 func main() {
+	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGTERM, os.Interrupt)
+	defer stop()
+
 	app := tool.New("Performs health and readiness checks for MongoDB", GitCommit, GitBranch)
 
 	k8sCmd := app.Command("k8s", "Performs liveness check for MongoDB on Kubernetes")
@@ -77,14 +82,14 @@ func main() {
 		os.Exit(1)
 	}
 
-	client, err := db.Dial(cnf)
+	client, err := db.Dial(ctx, cnf)
 	if err != nil {
 		log.Error(err, "connection error")
 		os.Exit(1)
 	}
 
 	defer func() {
-		if err := client.Disconnect(context.TODO()); err != nil {
+		if err := client.Disconnect(ctx); err != nil {
 			log.Error(err, "failed to disconnect")
 			os.Exit(1)
 		}
@@ -99,7 +104,7 @@ func main() {
 		case "mongod":
 			memberState, err := healthcheck.HealthCheckMongodLiveness(client, int64(*startupDelaySeconds))
 			if err != nil {
-				client.Disconnect(context.TODO()) // nolint:golint,errcheck
+				client.Disconnect(ctx) // nolint:golint,errcheck
 				log.Error(err, "Member failed Kubernetes liveness check")
 				os.Exit(1)
 			}
@@ -108,7 +113,7 @@ func main() {
 		case "mongos":
 			err := healthcheck.HealthCheckMongosLiveness(client)
 			if err != nil {
-				client.Disconnect(context.TODO()) // nolint:golint,errcheck
+				client.Disconnect(ctx) // nolint:golint,errcheck
 				log.Error(err, "Member failed Kubernetes liveness check")
 				os.Exit(1)
 			}
@@ -120,14 +125,14 @@ func main() {
 
 		switch *component {
 		case "mongod":
-			client.Disconnect(context.TODO()) // nolint:golint,errcheck
+			client.Disconnect(ctx) // nolint:golint,errcheck
 			log.Error(err, "readiness check for mongod is not implemented")
 			os.Exit(1)
 		case "mongos":
-			err := healthcheck.MongosReadinessCheck(client)
+			err := healthcheck.MongosReadinessCheck(ctx, client)
 			if err != nil {
-				client.Disconnect(context.TODO()) // nolint:golint,errcheck
+				client.Disconnect(ctx) // nolint:golint,errcheck
 				log.Error(err, "Member failed Kubernetes readiness check")
 				os.Exit(1)
 			}
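Context for the `signal.NotifyContext` change above: the returned `ctx` is canceled on the first SIGTERM or interrupt, and `stop()` unregisters the handler. Every driver call that now receives this context (`db.Dial`, `client.Disconnect`, `MongosReadinessCheck`) therefore aborts promptly when the kubelet terminates the probe, instead of blocking until its own timeout. A minimal, standalone sketch of the pattern (illustrative only, not operator code):

```go
package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	// ctx is canceled on the first SIGTERM or Ctrl-C; stop() releases the
	// signal registration so a second signal falls back to default handling.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGTERM, os.Interrupt)
	defer stop()

	select {
	case <-ctx.Done():
		// A signal arrived: context-aware calls return promptly with ctx.Err()
		// instead of running into their own timeouts.
		fmt.Println("shutting down:", ctx.Err())
	case <-time.After(2 * time.Second):
		fmt.Println("finished without interruption")
	}
}
```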
restore" + exit 1 + fi +} + create_infra "${namespace}" deploy_minio @@ -68,6 +77,13 @@ wait_for_running ${cluster}-cfg 3 wait_for_running ${cluster}-mongos 3 wait_cluster_consistency ${cluster} +lbEndpoint=$(kubectl_bin get svc $cluster-mongos -o=jsonpath='{.status}' \ + | jq -r 'select(.loadBalancer != null and .loadBalancer.ingress != null and .loadBalancer.ingress != []) | .loadBalancer.ingress[0].ip') +if [ -z $lbEndpoint ]; then + echo "mongos service not exported correctly" + exit 1 +fi + run_mongos \ 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' \ "userAdmin:userAdmin123456@${cluster}-mongos.${namespace}" @@ -102,18 +118,21 @@ if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then echo 'check backup and restore -- aws-s3' run_restore ${backup_name_aws} "_restore_sharded" run_recovery_check ${backup_name_aws} "_restore_sharded" + check_exported_mongos_service_endpoint "$lbEndpoint" echo "drop collection" run_mongos 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-mongos.${namespace}" echo 'check backup and restore -- gcp-cs' run_restore ${backup_name_gcp} "_restore_sharded" run_recovery_check ${backup_name_gcp} "_restore_sharded" + check_exported_mongos_service_endpoint "$lbEndpoint" echo "drop collection" run_mongos 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-mongos.${namespace}" echo 'check backup and restore -- azure-blob' run_restore ${backup_name_azure} "_restore_sharded" run_recovery_check ${backup_name_azure} "_restore_sharded" + check_exported_mongos_service_endpoint "$lbEndpoint" fi echo "drop collection" diff --git a/e2e-tests/demand-backup-sharded/run b/e2e-tests/demand-backup-sharded/run index 058df83c1a..45f4fe2ecb 100755 --- a/e2e-tests/demand-backup-sharded/run +++ b/e2e-tests/demand-backup-sharded/run @@ -131,7 +131,7 @@ if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then insert_data "100501" check_data "-2nd" run_restore "$backup_name_gcp" - wait_restore "$backup_name_aws" "$cluster" + wait_restore "$backup_name_gcp" "$cluster" check_data desc 'check backup and restore -- azure-blob' @@ -142,7 +142,7 @@ if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then insert_data "100501" check_data "-2nd" run_restore "$backup_name_azure" - wait_restore "$backup_name_aws" "$cluster" + wait_restore "$backup_name_azure" "$cluster" check_data fi @@ -155,7 +155,7 @@ kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ insert_data "100501" check_data "-2nd" run_restore "$backup_name_minio" -wait_restore "$backup_name_aws" "$cluster" +wait_restore "$backup_name_minio" "$cluster" check_data desc 'delete backup and check if it is removed from bucket -- minio' diff --git a/e2e-tests/finalizer/conf/some-name.yml b/e2e-tests/finalizer/conf/some-name.yml new file mode 100644 index 0000000000..21c52f7051 --- /dev/null +++ b/e2e-tests/finalizer/conf/some-name.yml @@ -0,0 +1,85 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDB +metadata: + name: some-name + finalizers: + - delete-psmdb-pods-in-order + - delete-psmdb-pvc +spec: +# platform: openshift + image: + imagePullPolicy: Always + allowUnsafeConfigurations: false + updateStrategy: SmartUpdate + secrets: + users: some-users + replsets: + - name: rs0 + size: 3 + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + podDisruptionBudget: + maxUnavailable: 1 + expose: + enabled: true + exposeType: ClusterIP + resources: + limits: + cpu: "500m" + memory: "0.5G" + requests: + cpu: "100m" + memory: "0.1G" + volumeSpec: + persistentVolumeClaim: + resources: + 
diff --git a/e2e-tests/demand-backup-sharded/run b/e2e-tests/demand-backup-sharded/run
index 058df83c1a..45f4fe2ecb 100755
--- a/e2e-tests/demand-backup-sharded/run
+++ b/e2e-tests/demand-backup-sharded/run
@@ -131,7 +131,7 @@ if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then
 	insert_data "100501"
 	check_data "-2nd"
 	run_restore "$backup_name_gcp"
-	wait_restore "$backup_name_aws" "$cluster"
+	wait_restore "$backup_name_gcp" "$cluster"
 	check_data
 
 	desc 'check backup and restore -- azure-blob'
@@ -142,7 +142,7 @@ if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then
 	insert_data "100501"
 	check_data "-2nd"
 	run_restore "$backup_name_azure"
-	wait_restore "$backup_name_aws" "$cluster"
+	wait_restore "$backup_name_azure" "$cluster"
 	check_data
 fi
 
@@ -155,7 +155,7 @@ kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
 insert_data "100501"
 check_data "-2nd"
 run_restore "$backup_name_minio"
-wait_restore "$backup_name_aws" "$cluster"
+wait_restore "$backup_name_minio" "$cluster"
 check_data
 
 desc 'delete backup and check if it is removed from bucket -- minio'
diff --git a/e2e-tests/finalizer/conf/some-name.yml b/e2e-tests/finalizer/conf/some-name.yml
new file mode 100644
index 0000000000..21c52f7051
--- /dev/null
+++ b/e2e-tests/finalizer/conf/some-name.yml
@@ -0,0 +1,85 @@
+apiVersion: psmdb.percona.com/v1
+kind: PerconaServerMongoDB
+metadata:
+  name: some-name
+  finalizers:
+    - delete-psmdb-pods-in-order
+    - delete-psmdb-pvc
+spec:
+#  platform: openshift
+  image:
+  imagePullPolicy: Always
+  allowUnsafeConfigurations: false
+  updateStrategy: SmartUpdate
+  secrets:
+    users: some-users
+  replsets:
+  - name: rs0
+    size: 3
+    affinity:
+      antiAffinityTopologyKey: "kubernetes.io/hostname"
+    podDisruptionBudget:
+      maxUnavailable: 1
+    expose:
+      enabled: true
+      exposeType: ClusterIP
+    resources:
+      limits:
+        cpu: "500m"
+        memory: "0.5G"
+      requests:
+        cpu: "100m"
+        memory: "0.1G"
+    volumeSpec:
+      persistentVolumeClaim:
+        resources:
+          requests:
+            storage: 1Gi
+  sharding:
+    enabled: true
+
+    configsvrReplSet:
+      size: 3
+      affinity:
+        antiAffinityTopologyKey: "kubernetes.io/hostname"
+      podDisruptionBudget:
+        maxUnavailable: 1
+      expose:
+        enabled: true
+        exposeType: ClusterIP
+      resources:
+        limits:
+          cpu: "300m"
+          memory: "0.5G"
+        requests:
+          cpu: "300m"
+          memory: "0.5G"
+      volumeSpec:
+        persistentVolumeClaim:
+          resources:
+            requests:
+              storage: 3Gi
+
+    mongos:
+      size: 3
+      affinity:
+        antiAffinityTopologyKey: "kubernetes.io/hostname"
+      podDisruptionBudget:
+        maxUnavailable: 1
+      resources:
+        limits:
+          cpu: "300m"
+          memory: "0.5G"
+        requests:
+          cpu: "300m"
+          memory: "0.5G"
+      expose:
+        exposeType: ClusterIP
+        servicePerPod: true
+
+  backup:
+    enabled: false
+    image: perconalab/percona-server-mongodb-operator:main-backup
+    serviceAccountName: percona-server-mongodb-operator
+    pitr:
+      enabled: false
diff --git a/e2e-tests/finalizer/run b/e2e-tests/finalizer/run
new file mode 100755
index 0000000000..6745fd0f1d
--- /dev/null
+++ b/e2e-tests/finalizer/run
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+set -o errexit
+set -o xtrace
+
+test_dir=$(realpath "$(dirname "$0")")
+. "${test_dir}/../functions"
+
+create_infra "$namespace"
+cluster="some-name"
+
+apply_cluster "$test_dir/conf/$cluster.yml"
+desc 'check if all 3 Pods started'
+wait_for_running "$cluster-rs0" 3
+
+kubectl_bin delete psmdb $cluster
+
+desc "Wait for delete cluster $cluster"
+wait_for_delete psmdb/$cluster
+
+desc "Wait for delete PVCs"
+wait_for_delete pvc/mongod-data-$cluster-cfg-0
+wait_for_delete pvc/mongod-data-$cluster-cfg-1
+wait_for_delete pvc/mongod-data-$cluster-cfg-2
+wait_for_delete pvc/mongod-data-$cluster-rs0-0
+wait_for_delete pvc/mongod-data-$cluster-rs0-1
+wait_for_delete pvc/mongod-data-$cluster-rs0-2
+
+desc "Test passed"
+destroy "$namespace"
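The new test above exercises the two finalizers declared in `conf/some-name.yml`: `delete-psmdb-pods-in-order` shuts the member Pods down in a controlled order, and `delete-psmdb-pvc` removes the per-member PVCs once the custom resource is gone. The same cleanup assertion can be expressed with plain `kubectl wait` (a sketch mirroring the test's names, not part of the patch):

```bash
#!/bin/bash
# Sketch of the finalizer test's cleanup assertion. With delete-psmdb-pvc set,
# deleting the custom resource should also delete each member's PVC, so
# waiting for the PVCs to disappear verifies the finalizer ran.
cluster="some-name"
kubectl delete psmdb "$cluster"
kubectl wait --for=delete "psmdb/$cluster" --timeout=300s
for rs in cfg rs0; do
	for i in 0 1 2; do
		kubectl wait --for=delete "pvc/mongod-data-$cluster-$rs-$i" --timeout=300s || exit 1
	done
done
echo "all PVCs removed by the delete-psmdb-pvc finalizer"
```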
diff --git a/e2e-tests/functions b/e2e-tests/functions
index 3384eb2573..512da38525 100755
--- a/e2e-tests/functions
+++ b/e2e-tests/functions
@@ -621,6 +621,7 @@ compare_kubectl() {
 	local new_result="${tmp_dir}/${resource//\//_}.yml"
 
 	if [ -n "$OPENSHIFT" -a -f ${expected_result//.yml/-oc.yml} ]; then
+		desc "OPENSHIFT"
 		expected_result=${expected_result//.yml/-oc.yml}
 		if [ "$OPENSHIFT" = 4 -a -f ${expected_result//-oc.yml/-4-oc.yml} ]; then
 			expected_result=${expected_result//-oc.yml/-4-oc.yml}
@@ -674,7 +675,6 @@ compare_kubectl() {
 			yq -i eval 'del(.metadata.generation)' ${new_result}
 		fi
 	fi
-
 	diff -u "$expected_result" "$new_result"
 }
 
diff --git a/e2e-tests/mongod-major-upgrade-sharded/run b/e2e-tests/mongod-major-upgrade-sharded/run
index 7d04203616..47dd90ed7a 100755
--- a/e2e-tests/mongod-major-upgrade-sharded/run
+++ b/e2e-tests/mongod-major-upgrade-sharded/run
@@ -31,7 +31,6 @@ function main() {
 	create_infra "${namespace}"
 
 	apply_s3_storage_secrets
-	deploy_minio
 
 	kubectl_bin apply -f "${conf_dir}/client.yml" \
 		-f "${conf_dir}/secrets.yml"
@@ -93,11 +92,6 @@ function main() {
 
 	target_generation=2
 	for version in ${versions_to_verify[@]}; do
-
-		backup_name_minio="backup-minio-${target_generation}"
-		run_backup minio ${backup_name_minio}
-		wait_backup ${backup_name_minio}
-
 		kubectl_bin patch psmdb/${cluster%%-rs0} \
 			--type=json \
 			-p='[
@@ -119,18 +113,6 @@ function main() {
 			'use myApp\n db.test.insert({ x: 10050'${target_generation}' })' \
 			"myApp:myPass@${cluster}-mongos.${namespace}"
 		compare_mongos_cmd "find" "myApp:myPass@${cluster}-mongos.${namespace}" "-${target_generation}"
-
-		backup_dest_minio=$(get_backup_dest "${backup_name_minio}")
-		kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
-			/usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \
-			/usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/${backup_dest_minio}/rs0/ \
-			| grep myApp.test.gz
-
-		run_mongos 'use myApp\n db.test.insert({ x: 100600 })' "myApp:myPass@${cluster}-mongos.${namespace}"
-		run_restore "${backup_name_minio}"
-		wait_restore "${backup_name_minio}" "${cluster}"
-		compare_mongos_cmd "find" "myApp:myPass@${cluster}-mongos.${namespace}"
-
 		target_generation=$((target_generation + 1))
 	done
 
diff --git a/e2e-tests/run b/e2e-tests/run
index 43308bef70..d36d445f53 100755
--- a/e2e-tests/run
+++ b/e2e-tests/run
@@ -24,6 +24,7 @@ fail() {
"$dir/expose-sharded/run" || fail "expose-sharded"
 "$dir/ignore-labels-annotations/run" || fail "ignore-labels-annotations"
 "$dir/init-deploy/run" || fail "init-deploy"
+"$dir/finalizer/run" || fail "finalizer"
 "$dir/limits/run" || fail "limits"
 "$dir/liveness/run" || fail "liveness"
 "$dir/multi-cluster-service/run" || fail "multi-cluster-service"
@@ -46,6 +47,7 @@ fail() {
 "$dir/smart-update/run" || fail "smart-update"
 "$dir/storage/run" || fail "storage"
 "$dir/upgrade-consistency/run" || fail "upgrade-consistency"
+"$dir/upgrade-consistency-sharded/run" || fail "upgrade-consistency-sharded"
 "$dir/upgrade-sharded/run" || fail "upgrade-sharded"
 "$dir/upgrade/run" || fail "upgrade"
 "$dir/users/run" || fail "users"
diff --git a/e2e-tests/run-minikube.csv b/e2e-tests/run-minikube.csv
index 33c58f302e..d72ccfc4a1 100644
--- a/e2e-tests/run-minikube.csv
+++ b/e2e-tests/run-minikube.csv
@@ -2,6 +2,7 @@ arbiter
 default-cr
 demand-backup
 demand-backup-physical
+finalizer
 limits
 liveness
 mongod-major-upgrade
@@ -14,5 +15,6 @@ security-context
 self-healing-chaos
 smart-update
 upgrade-consistency
+upgrade-consistency-sharded
 users
 version-service
diff --git a/e2e-tests/run-pr.csv b/e2e-tests/run-pr.csv
index ea43ddfc3e..b67ce94ab1 100644
--- a/e2e-tests/run-pr.csv
+++ b/e2e-tests/run-pr.csv
@@ -10,6 +10,7 @@ demand-backup-sharded
 expose-sharded
 ignore-labels-annotations
 init-deploy
+finalizer
 limits
 liveness
 mongod-major-upgrade
@@ -33,6 +34,7 @@ smart-update
 storage
 upgrade
 upgrade-consistency
+upgrade-consistency-sharded
 upgrade-sharded
 users
 version-service
diff --git a/e2e-tests/run-release.csv b/e2e-tests/run-release.csv
index 15c1dde6bb..9f3eddf634 100644
--- a/e2e-tests/run-release.csv
+++ b/e2e-tests/run-release.csv
@@ -11,6 +11,7 @@ demand-backup-sharded
 expose-sharded
 ignore-labels-annotations
 init-deploy
+finalizer
 limits
 liveness
 mongod-major-upgrade
@@ -34,6 +35,7 @@ smart-update
 storage
 upgrade
 upgrade-consistency
+upgrade-consistency-sharded
 upgrade-sharded
 users
 version-service
diff --git a/e2e-tests/upgrade-consistency-sharded/compare/service_some-name-cfg-1130.yml b/e2e-tests/upgrade-consistency-sharded/compare/service_some-name-cfg-1130.yml
new file mode 100644
index 0000000000..dc03f04a90
--- /dev/null
+++ b/e2e-tests/upgrade-consistency-sharded/compare/service_some-name-cfg-1130.yml
@@ -0,0 +1,29 @@
+apiVersion: v1
+kind: Service
+metadata:
+  annotations: {}
+  labels:
+    app.kubernetes.io/instance: some-name
+    app.kubernetes.io/managed-by: percona-server-mongodb-operator
+    app.kubernetes.io/name: percona-server-mongodb
+    app.kubernetes.io/part-of: percona-server-mongodb
+    app.kubernetes.io/replset: cfg
+  name: some-name-cfg
+  ownerReferences:
+    - controller: true
+      kind: PerconaServerMongoDB
+      name: some-name
+spec:
+  ports:
+    - name: mongodb
+      port: 27017
+      protocol: TCP
targetPort: 27017 + selector: + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + sessionAffinity: None + type: ClusterIP diff --git a/e2e-tests/upgrade-consistency-sharded/compare/service_some-name-cfg-1140.yml b/e2e-tests/upgrade-consistency-sharded/compare/service_some-name-cfg-1140.yml new file mode 100644 index 0000000000..dc03f04a90 --- /dev/null +++ b/e2e-tests/upgrade-consistency-sharded/compare/service_some-name-cfg-1140.yml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: {} + labels: + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + name: some-name-cfg + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + ports: + - name: mongodb + port: 27017 + protocol: TCP + targetPort: 27017 + selector: + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + sessionAffinity: None + type: ClusterIP diff --git a/e2e-tests/upgrade-consistency-sharded/compare/service_some-name-cfg-1150.yml b/e2e-tests/upgrade-consistency-sharded/compare/service_some-name-cfg-1150.yml new file mode 100644 index 0000000000..dc03f04a90 --- /dev/null +++ b/e2e-tests/upgrade-consistency-sharded/compare/service_some-name-cfg-1150.yml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: {} + labels: + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + name: some-name-cfg + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + ports: + - name: mongodb + port: 27017 + protocol: TCP + targetPort: 27017 + selector: + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + sessionAffinity: None + type: ClusterIP diff --git a/e2e-tests/upgrade-consistency-sharded/compare/service_some-name-rs0-1130.yml b/e2e-tests/upgrade-consistency-sharded/compare/service_some-name-rs0-1130.yml new file mode 100644 index 0000000000..affc5cfd31 --- /dev/null +++ b/e2e-tests/upgrade-consistency-sharded/compare/service_some-name-rs0-1130.yml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: {} + labels: + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + ports: + - name: mongodb + port: 27017 + protocol: TCP + targetPort: 27017 + selector: + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: 
percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + sessionAffinity: None + type: ClusterIP diff --git a/e2e-tests/upgrade-consistency-sharded/compare/service_some-name-rs0-1140.yml b/e2e-tests/upgrade-consistency-sharded/compare/service_some-name-rs0-1140.yml new file mode 100644 index 0000000000..affc5cfd31 --- /dev/null +++ b/e2e-tests/upgrade-consistency-sharded/compare/service_some-name-rs0-1140.yml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: {} + labels: + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + ports: + - name: mongodb + port: 27017 + protocol: TCP + targetPort: 27017 + selector: + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + sessionAffinity: None + type: ClusterIP diff --git a/e2e-tests/upgrade-consistency-sharded/compare/service_some-name-rs0-1150.yml b/e2e-tests/upgrade-consistency-sharded/compare/service_some-name-rs0-1150.yml new file mode 100644 index 0000000000..affc5cfd31 --- /dev/null +++ b/e2e-tests/upgrade-consistency-sharded/compare/service_some-name-rs0-1150.yml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: {} + labels: + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + ports: + - name: mongodb + port: 27017 + protocol: TCP + targetPort: 27017 + selector: + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + sessionAffinity: None + type: ClusterIP diff --git a/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-cfg-1130-oc.yml b/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-cfg-1130-oc.yml new file mode 100644 index 0000000000..564cfdbc41 --- /dev/null +++ b/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-cfg-1130-oc.yml @@ -0,0 +1,207 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: {} + generation: 1 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + 
app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + command: + - /data/db/ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /data/db/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + tcpSocket: + port: 27017 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 500M + requests: + cpu: 100m + memory: 100M + securityContext: + runAsNonRoot: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 500M + requests: + cpu: 100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 
288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + updateStrategy: + rollingUpdate: + partition: 0 + type: RollingUpdate + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + status: + phase: Pending diff --git a/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-cfg-1130.yml b/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-cfg-1130.yml new file mode 100644 index 0000000000..5aa44473b4 --- /dev/null +++ b/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-cfg-1130.yml @@ -0,0 +1,212 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: {} + generation: 1 + labels: + app.kubernetes.io/component: cfg + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + name: some-name-cfg + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: cfg + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + serviceName: some-name-cfg + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: cfg + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: cfg + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + topologyKey: kubernetes.io/hostname + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=cfg + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --configsvr + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + command: + - /data/db/ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: cfg + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /data/db/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + 
periodSeconds: 3 + successThreshold: 1 + tcpSocket: + port: 27017 + timeoutSeconds: 2 + resources: + limits: + cpu: 300m + memory: 500M + requests: + cpu: 300m + memory: 500M + securityContext: + runAsNonRoot: true + runAsUser: 1001 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 300m + memory: 500M + requests: + cpu: 300m + memory: 500M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: + fsGroup: 1001 + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + updateStrategy: + type: OnDelete + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi + status: + phase: Pending diff --git a/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-cfg-1140-oc.yml b/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-cfg-1140-oc.yml new file mode 100644 index 0000000000..ee3265533f --- /dev/null +++ b/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-cfg-1140-oc.yml @@ -0,0 +1,209 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: {} + generation: 2 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + 
app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + command: + - /opt/percona/ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + tcpSocket: + port: 27017 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 500M + requests: + cpu: 100m + memory: 100M + securityContext: + runAsNonRoot: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 500M + requests: + cpu: 100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + updateStrategy: + rollingUpdate: + 
partition: 0 + type: RollingUpdate + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + status: + phase: Pending diff --git a/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-cfg-1140.yml b/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-cfg-1140.yml new file mode 100644 index 0000000000..d69ddfad7a --- /dev/null +++ b/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-cfg-1140.yml @@ -0,0 +1,214 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: {} + generation: 2 + labels: + app.kubernetes.io/component: cfg + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + name: some-name-cfg + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: cfg + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + serviceName: some-name-cfg + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: cfg + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: cfg + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + topologyKey: kubernetes.io/hostname + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=cfg + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --configsvr + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + command: + - /opt/percona/ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: cfg + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + tcpSocket: + port: 27017 + timeoutSeconds: 2 + resources: + limits: + cpu: 300m + memory: 500M + requests: + cpu: 300m + memory: 500M + 
securityContext: + runAsNonRoot: true + runAsUser: 1001 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 300m + memory: 500M + requests: + cpu: 300m + memory: 500M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: + fsGroup: 1001 + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + updateStrategy: + type: OnDelete + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi + status: + phase: Pending diff --git a/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-cfg-1150-oc.yml b/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-cfg-1150-oc.yml new file mode 100644 index 0000000000..a8134afdbe --- /dev/null +++ b/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-cfg-1150-oc.yml @@ -0,0 +1,209 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: {} + generation: 3 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: 
percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + command: + - /opt/percona/ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + tcpSocket: + port: 27017 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 500M + requests: + cpu: 100m + memory: 100M + securityContext: + runAsNonRoot: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 500M + requests: + cpu: 100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + updateStrategy: + rollingUpdate: + partition: 0 + type: RollingUpdate + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + 
resources: + requests: + storage: 1Gi + status: + phase: Pending diff --git a/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-cfg-1150.yml b/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-cfg-1150.yml new file mode 100644 index 0000000000..76c7f42d66 --- /dev/null +++ b/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-cfg-1150.yml @@ -0,0 +1,214 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: {} + generation: 3 + labels: + app.kubernetes.io/component: cfg + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + name: some-name-cfg + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: cfg + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + serviceName: some-name-cfg + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: cfg + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: cfg + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + topologyKey: kubernetes.io/hostname + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=cfg + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --configsvr + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + command: + - /opt/percona/ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: cfg + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + tcpSocket: + port: 27017 + timeoutSeconds: 2 + resources: + limits: + cpu: 300m + memory: 500M + requests: + cpu: 300m + memory: 500M + securityContext: + runAsNonRoot: true + runAsUser: 1001 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + 
volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 300m + memory: 500M + requests: + cpu: 300m + memory: 500M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: + fsGroup: 1001 + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + updateStrategy: + type: OnDelete + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi + status: + phase: Pending diff --git a/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-rs0-1130-oc.yml b/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-rs0-1130-oc.yml new file mode 100644 index 0000000000..564cfdbc41 --- /dev/null +++ b/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-rs0-1130-oc.yml @@ -0,0 +1,207 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: {} + generation: 1 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - 
--port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + command: + - /data/db/ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /data/db/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + tcpSocket: + port: 27017 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 500M + requests: + cpu: 100m + memory: 100M + securityContext: + runAsNonRoot: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 500M + requests: + cpu: 100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + updateStrategy: + rollingUpdate: + partition: 0 + type: RollingUpdate + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + status: + phase: Pending diff --git a/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-rs0-1130.yml 
b/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-rs0-1130.yml new file mode 100644 index 0000000000..fae81d0043 --- /dev/null +++ b/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-rs0-1130.yml @@ -0,0 +1,220 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: {} + generation: 1 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + topologyKey: kubernetes.io/hostname + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --shardsvr + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + command: + - /data/db/ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /data/db/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + tcpSocket: + port: 27017 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 500M + requests: + cpu: 100m + memory: 100M + securityContext: + runAsNonRoot: true + runAsUser: 1001 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: 
some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 500M + requests: + cpu: 100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: + fsGroup: 1001 + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + updateStrategy: + type: OnDelete + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + status: + phase: Pending diff --git a/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-rs0-1140-oc.yml b/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-rs0-1140-oc.yml new file mode 100644 index 0000000000..ee3265533f --- /dev/null +++ b/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-rs0-1140-oc.yml @@ -0,0 +1,209 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: {} + generation: 2 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - 
--port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + command: + - /opt/percona/ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + tcpSocket: + port: 27017 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 500M + requests: + cpu: 100m + memory: 100M + securityContext: + runAsNonRoot: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 500M + requests: + cpu: 100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + updateStrategy: + rollingUpdate: + partition: 0 + type: RollingUpdate + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + status: + phase: Pending diff --git 
a/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-rs0-1140.yml b/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-rs0-1140.yml new file mode 100644 index 0000000000..8d964a4584 --- /dev/null +++ b/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-rs0-1140.yml @@ -0,0 +1,222 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: {} + generation: 2 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + topologyKey: kubernetes.io/hostname + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --shardsvr + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + command: + - /opt/percona/ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + tcpSocket: + port: 27017 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 500M + requests: + cpu: 100m + memory: 100M + securityContext: + runAsNonRoot: true + runAsUser: 1001 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - 
mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 500M + requests: + cpu: 100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: + fsGroup: 1001 + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + updateStrategy: + type: OnDelete + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + status: + phase: Pending diff --git a/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-rs0-1150-oc.yml b/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-rs0-1150-oc.yml new file mode 100644 index 0000000000..a8134afdbe --- /dev/null +++ b/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-rs0-1150-oc.yml @@ -0,0 +1,209 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: {} + generation: 3 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + 
app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + command: + - /opt/percona/ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + tcpSocket: + port: 27017 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 500M + requests: + cpu: 100m + memory: 100M + securityContext: + runAsNonRoot: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 500M + requests: + cpu: 100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + updateStrategy: + rollingUpdate: + partition: 0 + type: RollingUpdate + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 
1Gi + status: + phase: Pending diff --git a/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-rs0-1150.yml b/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-rs0-1150.yml new file mode 100644 index 0000000000..d762d06671 --- /dev/null +++ b/e2e-tests/upgrade-consistency-sharded/compare/statefulset_some-name-rs0-1150.yml @@ -0,0 +1,222 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: {} + generation: 3 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + topologyKey: kubernetes.io/hostname + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --shardsvr + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + command: + - /opt/percona/ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + tcpSocket: + port: 27017 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 500M + requests: + cpu: 100m + memory: 100M + securityContext: + runAsNonRoot: true + runAsUser: 1001 + terminationMessagePath: /dev/termination-log + 
terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 500M + requests: + cpu: 100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: + fsGroup: 1001 + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + updateStrategy: + type: OnDelete + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + status: + phase: Pending diff --git a/e2e-tests/upgrade-consistency-sharded/conf/some-name.yml b/e2e-tests/upgrade-consistency-sharded/conf/some-name.yml new file mode 100644 index 0000000000..1a1771a354 --- /dev/null +++ b/e2e-tests/upgrade-consistency-sharded/conf/some-name.yml @@ -0,0 +1,106 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDB +metadata: + name: some-name + finalizers: + - delete-psmdb-pods-in-order + - delete-psmdb-pvc +spec: + crVersion: 1.13.0 + #platform: openshift + image: + imagePullPolicy: Always + allowUnsafeConfigurations: false + updateStrategy: SmartUpdate + secrets: + users: some-users + replsets: + - name: rs0 + size: 3 + configuration: | + operationProfiling: + mode: slowOp + slowOpThresholdMs: 100 + security: + redactClientLogData: false + setParameter: + ttlMonitorSleepSecs: 60 + wiredTigerConcurrentReadTransactions: 128 + wiredTigerConcurrentWriteTransactions: 128 + storage: + engine: wiredTiger + wiredTiger: + collectionConfig: + blockCompressor: snappy + engineConfig: + directoryForIndexes: false + journalCompressor: snappy + indexConfig: + prefixCompression: true + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + podDisruptionBudget: + maxUnavailable: 1 + expose: + enabled: true + exposeType: ClusterIP + resources: + limits: + cpu: "500m" + memory: "0.5G" + requests: + cpu: "100m" + memory: "0.1G" + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 1Gi + sharding: + enabled: true + + configsvrReplSet: + size: 3 
+ affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + podDisruptionBudget: + maxUnavailable: 1 + expose: + enabled: true + exposeType: ClusterIP + resources: + limits: + cpu: "300m" + memory: "0.5G" + requests: + cpu: "300m" + memory: "0.5G" + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 3Gi + + mongos: + size: 3 + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + podDisruptionBudget: + maxUnavailable: 1 + resources: + limits: + cpu: "300m" + memory: "0.5G" + requests: + cpu: "300m" + memory: "0.5G" + expose: + exposeType: ClusterIP + servicePerPod: true + + backup: + enabled: false + image: perconalab/percona-server-mongodb-operator:main-backup + serviceAccountName: percona-server-mongodb-operator + pitr: + enabled: false diff --git a/e2e-tests/upgrade-consistency-sharded/run b/e2e-tests/upgrade-consistency-sharded/run new file mode 100755 index 0000000000..77b0003c60 --- /dev/null +++ b/e2e-tests/upgrade-consistency-sharded/run @@ -0,0 +1,72 @@ +#!/bin/bash + +set -o errexit + +test_dir=$(realpath $(dirname $0)) +. ${test_dir}/../functions +set_debug + +CLUSTER='some-name' + +main() { + create_infra $namespace + + desc 'create secrets and start client' + kubectl_bin apply -f "${conf_dir}/client.yml" \ + -f "${conf_dir}/secrets.yml" + + desc "create first PSMDB cluster 1.13.0 $CLUSTER" + apply_cluster "$test_dir/conf/${CLUSTER}.yml" + + desc 'check if Pod started' + wait_for_running ${CLUSTER}-rs0 3 + wait_for_running ${CLUSTER}-cfg 3 + wait_for_running ${CLUSTER}-mongos 3 + wait_cluster_consistency ${CLUSTER} + + desc 'check if service and statefulset created with expected config' + compare_kubectl service/${CLUSTER}-rs0 "-1130" + compare_kubectl service/${CLUSTER}-cfg "-1130" + compare_kubectl statefulset/${CLUSTER}-rs0 "-1130" + compare_kubectl statefulset/${CLUSTER}-cfg "-1130" + + desc 'test 1.14.0' + kubectl_bin patch psmdb "${CLUSTER}" --type=merge --patch '{ + "spec": {"crVersion":"1.14.0"} + }' + # Wait for at least one reconciliation + sleep 10 + desc 'check if Pod started' + wait_for_running "${CLUSTER}-cfg" 3 + wait_for_running "${CLUSTER}-rs0" 3 + wait_for_running "${CLUSTER}-mongos" 3 + + desc 'check if service and statefulset created with expected config' + compare_kubectl service/${CLUSTER}-rs0 "-1140" + compare_kubectl service/${CLUSTER}-cfg "-1140" + compare_kubectl statefulset/${CLUSTER}-rs0 "-1140" + compare_kubectl statefulset/${CLUSTER}-cfg "-1140" + + desc 'test 1.15.0' + kubectl_bin patch psmdb "${CLUSTER}" --type=merge --patch '{ + "spec": {"crVersion":"1.15.0"} + }' + # Wait for at least one reconciliation + sleep 10 + desc 'check if Pod started' + wait_for_running "${CLUSTER}-cfg" 3 + wait_for_running "${CLUSTER}-rs0" 3 + wait_for_running "${CLUSTER}-mongos" 3 + + desc 'check if service and statefulset created with expected config' + compare_kubectl service/${CLUSTER}-rs0 "-1150" + compare_kubectl service/${CLUSTER}-cfg "-1150" + compare_kubectl statefulset/${CLUSTER}-rs0 "-1150" + compare_kubectl statefulset/${CLUSTER}-cfg "-1150" + + destroy $namespace + + desc 'test passed' +} + +main diff --git a/go.mod b/go.mod index 8802845139..7474e95d49 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/go-openapi/strfmt v0.21.7 github.com/go-openapi/swag v0.22.4 github.com/go-openapi/validate v0.22.1 - github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.2 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.17.1 github.com/hashicorp/go-version v1.6.0 github.com/jetstack/cert-manager v1.6.1 
github.com/percona/percona-backup-mongodb v1.8.1-0.20230725073611-5d2c6eeb81be @@ -99,19 +99,19 @@ require ( go.opentelemetry.io/otel v1.14.0 // indirect go.opentelemetry.io/otel/trace v1.14.0 // indirect go.uber.org/multierr v1.10.0 // indirect - golang.org/x/crypto v0.11.0 // indirect + golang.org/x/crypto v0.12.0 // indirect golang.org/x/mod v0.9.0 // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/oauth2 v0.10.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/net v0.14.0 // indirect + golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sys v0.11.0 // indirect + golang.org/x/term v0.11.0 // indirect + golang.org/x/text v0.12.0 // indirect golang.org/x/time v0.3.0 // indirect gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230726155614-23370e0ffb3e // indirect + google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 6ac6373b99..e8e38237ef 100644 --- a/go.sum +++ b/go.sum @@ -303,8 +303,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.2 h1:dygLcbEBA+t/P7ck6a8AkXv6juQ4cK0RHBoh32jxhHM= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.2/go.mod h1:Ap9RLCIJVtgQg1/BBgVEfypOAySvvlcpcVQkSzJCH4Y= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.17.1 h1:LSsiG61v9IzzxMkqEr6nrix4miJI62xlRjwT7BYD2SM= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.17.1/go.mod h1:Hbb13e3/WtqQ8U5hLGkek9gJvBLasHuPFI0UEGfnQ10= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -592,8 +592,8 @@ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -634,13 +634,13 @@ golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= -golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= +golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -692,13 +692,13 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -710,8 +710,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -757,12 +757,12 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 h1:Au6te5hbKUV8pIYWHqOUZ1pva5qK/rwbIhoXEUB9Lu8= -google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y= -google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw= -google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230726155614-23370e0ffb3e h1:S83+ibolgyZ0bqz7KEsUOPErxcv4VzlszxY+31OfB/E= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= +google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= diff --git a/healthcheck/health.go b/healthcheck/health.go index 20111f633d..29b606b6c2 100644 --- a/healthcheck/health.go +++ b/healthcheck/health.go @@ -22,7 +22,6 @@ import ( "github.com/pkg/errors" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/primitive" - mgo 
"go.mongodb.org/mongo-driver/mongo" ) // OkMemberStates is a slice of acceptable replication member states @@ -57,8 +56,8 @@ func isStateOk(memberState *mongo.MemberState, okMemberStates []mongo.MemberStat } // HealthCheck checks the replication member state of the local MongoDB member -func HealthCheck(client *mgo.Client, okMemberStates []mongo.MemberState) (State, *mongo.MemberState, error) { - rsStatus, err := mongo.RSStatus(context.TODO(), client) +func HealthCheck(client mongo.Client, okMemberStates []mongo.MemberState) (State, *mongo.MemberState, error) { + rsStatus, err := client.RSStatus(context.TODO()) if err != nil { return StateFailed, nil, errors.Wrap(err, "get replica set status") } @@ -74,8 +73,8 @@ func HealthCheck(client *mgo.Client, okMemberStates []mongo.MemberState) (State, return StateFailed, state, errors.Errorf("member has unhealthy replication state: %d", state) } -func HealthCheckMongosLiveness(client *mgo.Client) error { - isMasterResp, err := mongo.IsMaster(context.TODO(), client) +func HealthCheckMongosLiveness(client mongo.Client) error { + isMasterResp, err := client.IsMaster(context.TODO()) if err != nil { return errors.Wrap(err, "get isMaster response") } @@ -87,13 +86,13 @@ func HealthCheckMongosLiveness(client *mgo.Client) error { return nil } -func HealthCheckMongodLiveness(client *mgo.Client, startupDelaySeconds int64) (*mongo.MemberState, error) { - isMasterResp, err := mongo.IsMaster(context.TODO(), client) +func HealthCheckMongodLiveness(client mongo.Client, startupDelaySeconds int64) (*mongo.MemberState, error) { + isMasterResp, err := client.IsMaster(context.TODO()) if err != nil { return nil, errors.Wrap(err, "get isMaster response") } - buildInfo, err := mongo.RSBuildInfo(context.TODO(), client) + buildInfo, err := client.RSBuildInfo(context.TODO()) if err != nil { return nil, errors.Wrap(err, "get buildInfo response") } diff --git a/healthcheck/readiness.go b/healthcheck/readiness.go index 58749fbe36..d5c923abd3 100644 --- a/healthcheck/readiness.go +++ b/healthcheck/readiness.go @@ -19,22 +19,23 @@ import ( "github.com/pkg/errors" "go.mongodb.org/mongo-driver/bson" - "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/readpref" + + "github.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo" ) // ReadinessCheck runs a ping on a pmgo.SessionManager to check server readiness -func ReadinessCheck(client *mongo.Client) (State, error) { - if err := client.Ping(context.TODO(), readpref.Primary()); err != nil { +func ReadinessCheck(ctx context.Context, client mongo.Client) (State, error) { + if err := client.Ping(ctx, readpref.Primary()); err != nil { return StateFailed, errors.Wrap(err, "ping") } return StateOk, nil } -func MongosReadinessCheck(client *mongo.Client) error { +func MongosReadinessCheck(ctx context.Context, client mongo.Client) error { ss := ServerStatus{} - cur := client.Database("admin").RunCommand(context.TODO(), bson.D{ + cur := client.Database("admin").RunCommand(ctx, bson.D{ {Key: "listDatabases", Value: 1}, {Key: "filter", Value: bson.D{{Key: "name", Value: "admin"}}}, {Key: "nameOnly", Value: true}}) diff --git a/healthcheck/tools/db/db.go b/healthcheck/tools/db/db.go index 0553ac9230..2a76447732 100644 --- a/healthcheck/tools/db/db.go +++ b/healthcheck/tools/db/db.go @@ -22,14 +22,16 @@ import ( log "github.com/sirupsen/logrus" mgo "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" + + "github.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo" ) var ( - 
ErrMsgAuthFailedStr string = "server returned error on SASL authentication step: Authentication failed." - ErrNoReachableServersStr string = "no reachable servers" + ErrMsgAuthFailedStr = "server returned error on SASL authentication step: Authentication failed." + ErrNoReachableServersStr = "no reachable servers" ) -func Dial(conf *Config) (*mgo.Client, error) { +func Dial(ctx context.Context, conf *Config) (mongo.Client, error) { log.WithFields(log.Fields{ "hosts": conf.Hosts, "ssl": conf.SSL.Enabled, @@ -52,13 +54,13 @@ func Dial(conf *Config) (*mgo.Client, error) { log.WithFields(log.Fields{"user": conf.Username}).Debug("Enabling authentication for session") } - client, err := mgo.Connect(context.TODO(), opts) + client, err := mgo.Connect(ctx, opts) if err != nil { return nil, errors.Wrap(err, "connect to mongo replica set") } - if err := client.Ping(context.TODO(), nil); err != nil { - if err := client.Disconnect(context.TODO()); err != nil { + if err := client.Ping(ctx, nil); err != nil { + if err := client.Disconnect(ctx); err != nil { return nil, errors.Wrap(err, "disconnect client") } @@ -69,15 +71,15 @@ func Dial(conf *Config) (*mgo.Client, error) { SetServerSelectionTimeout(10 * time.Second). SetDirect(true) - client, err = mgo.Connect(context.TODO(), opts) + client, err = mgo.Connect(ctx, opts) if err != nil { return nil, errors.Wrap(err, "connect to mongo replica set with direct") } - if err := client.Ping(context.TODO(), nil); err != nil { + if err := client.Ping(ctx, nil); err != nil { return nil, errors.Wrap(err, "ping mongo") } } - return client, nil + return mongo.ToInterface(client), nil } diff --git a/pkg/apis/psmdb/v1/psmdb_defaults.go b/pkg/apis/psmdb/v1/psmdb_defaults.go index 73ff776925..22718c8263 100644 --- a/pkg/apis/psmdb/v1/psmdb_defaults.go +++ b/pkg/apis/psmdb/v1/psmdb_defaults.go @@ -134,9 +134,7 @@ func (cr *PerconaServerMongoDB) CheckNSetDefaults(platform version.Platform, log return errors.New("mongos should be specified") } - if cr.Spec.Pause { - cr.Spec.Sharding.Mongos.Size = 0 - } else { + if !cr.Spec.Pause && cr.DeletionTimestamp == nil { if !cr.Spec.UnsafeConf && cr.Spec.Sharding.Mongos.Size < minSafeMongosSize { log.Info("Safe config set, updating mongos size", "oldSize", cr.Spec.Sharding.Mongos.Size, "newSize", minSafeMongosSize) @@ -443,20 +441,6 @@ func (cr *PerconaServerMongoDB) CheckNSetDefaults(platform version.Platform, log if err := replset.NonVoting.SetDefaults(cr, replset); err != nil { return errors.Wrap(err, "set nonvoting defaults") } - - if cr.Spec.Pause { - if cr.Status.State == AppStateStopping { - log.Info("Pausing cluster", "replset", replset.Name, "oldSize", replset.Size, "newSize", 0) - } - replset.Size = 0 - replset.Arbiter.Enabled = false - replset.NonVoting.Enabled = false - } - } - - // there is shouldn't be any backups while pause - if cr.Spec.Pause { - cr.Spec.Backup.Enabled = false } if cr.Spec.Backup.Enabled { @@ -564,7 +548,7 @@ func (rs *ReplsetSpec) SetDefaults(platform version.Platform, cr *PerconaServerM rs.Arbiter.MultiAZ.reconcileOpts(cr) } - if !cr.Spec.UnsafeConf && cr.DeletionTimestamp == nil { + if !cr.Spec.UnsafeConf && (cr.DeletionTimestamp == nil && !cr.Spec.Pause) { rs.setSafeDefaults(log) } diff --git a/pkg/controller/perconaservermongodb/backup.go b/pkg/controller/perconaservermongodb/backup.go index 72a5a16166..f6f0553faa 100644 --- a/pkg/controller/perconaservermongodb/backup.go +++ b/pkg/controller/perconaservermongodb/backup.go @@ -11,7 +11,7 @@ import ( "github.com/robfig/cron/v3" 
"go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" - batchv1beta1 "k8s.io/api/batch/v1beta1" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" k8sErrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" @@ -101,7 +101,7 @@ func (r *ReconcilePerconaServerMongoDB) deleteOldBackupTasks(ctx context.Context if cr.CompareVersion("1.13.0") < 0 { ls := backup.NewBackupCronJobLabels(cr.Name, cr.Spec.Backup.Labels) - tasksList := &batchv1beta1.CronJobList{} + tasksList := &batchv1.CronJobList{} err := r.client.List(ctx, tasksList, &client.ListOptions{ @@ -357,7 +357,7 @@ func (r *ReconcilePerconaServerMongoDB) updatePITR(ctx context.Context, cr *api. } } - val, err := pbm.C.GetConfigVar("pitr.enabled") + val, err := pbm.GetConfigVar("pitr.enabled") if err != nil { if !errors.Is(err, mongo.ErrNoDocuments) { return errors.Wrap(err, "get pitr.enabled") @@ -374,7 +374,7 @@ func (r *ReconcilePerconaServerMongoDB) updatePITR(ctx context.Context, cr *api. if enabled != cr.Spec.Backup.PITR.Enabled { val := strconv.FormatBool(cr.Spec.Backup.PITR.Enabled) log.Info("Setting pitr.enabled in PBM config", "enabled", val) - if err := pbm.C.SetConfigVar("pitr.enabled", val); err != nil { + if err := pbm.SetConfigVar("pitr.enabled", val); err != nil { return errors.Wrap(err, "update pitr.enabled") } } @@ -383,7 +383,7 @@ func (r *ReconcilePerconaServerMongoDB) updatePITR(ctx context.Context, cr *api. return nil } - val, err = pbm.C.GetConfigVar("pitr.oplogSpanMin") + val, err = pbm.GetConfigVar("pitr.oplogSpanMin") if err != nil { if !errors.Is(err, mongo.ErrNoDocuments) { return errors.Wrap(err, "get pitr.oplogSpanMin") @@ -399,12 +399,12 @@ func (r *ReconcilePerconaServerMongoDB) updatePITR(ctx context.Context, cr *api. if oplogSpanMin != cr.Spec.Backup.PITR.OplogSpanMin.Float64() { val := cr.Spec.Backup.PITR.OplogSpanMin.String() - if err := pbm.C.SetConfigVar("pitr.oplogSpanMin", val); err != nil { + if err := pbm.SetConfigVar("pitr.oplogSpanMin", val); err != nil { return errors.Wrap(err, "update pitr.oplogSpanMin") } } - val, err = pbm.C.GetConfigVar("pitr.compression") + val, err = pbm.GetConfigVar("pitr.compression") var compression = "" if err != nil { if errors.Is(err, mongo.ErrNoDocuments) { @@ -421,23 +421,23 @@ func (r *ReconcilePerconaServerMongoDB) updatePITR(ctx context.Context, cr *api. 
if compression != string(cr.Spec.Backup.PITR.CompressionType) { if string(cr.Spec.Backup.PITR.CompressionType) == "" { - if err := pbm.C.DeleteConfigVar("pitr.compression"); err != nil { + if err := pbm.DeleteConfigVar("pitr.compression"); err != nil { return errors.Wrap(err, "delete pitr.compression") } - } else if err := pbm.C.SetConfigVar("pitr.compression", string(cr.Spec.Backup.PITR.CompressionType)); err != nil { + } else if err := pbm.SetConfigVar("pitr.compression", string(cr.Spec.Backup.PITR.CompressionType)); err != nil { return errors.Wrap(err, "update pitr.compression") } -// PBM needs to disabling and enabling PITR to change compression type +// PBM requires disabling and re-enabling PITR to change the compression type - if err := pbm.C.SetConfigVar("pitr.enabled", "false"); err != nil { + if err := pbm.SetConfigVar("pitr.enabled", "false"); err != nil { return errors.Wrap(err, "disable pitr") } - if err := pbm.C.SetConfigVar("pitr.enabled", "true"); err != nil { + if err := pbm.SetConfigVar("pitr.enabled", "true"); err != nil { return errors.Wrap(err, "enable pitr") } } - val, err = pbm.C.GetConfigVar("pitr.compressionLevel") + val, err = pbm.GetConfigVar("pitr.compressionLevel") var compressionLevel *int = nil if err != nil { if errors.Is(err, mongo.ErrNoDocuments) { @@ -455,18 +455,18 @@ if !reflect.DeepEqual(compressionLevel, cr.Spec.Backup.PITR.CompressionLevel) { if cr.Spec.Backup.PITR.CompressionLevel == nil { - if err := pbm.C.DeleteConfigVar("pitr.compressionLevel"); err != nil { + if err := pbm.DeleteConfigVar("pitr.compressionLevel"); err != nil { return errors.Wrap(err, "delete pitr.compressionLevel") } - } else if err := pbm.C.SetConfigVar("pitr.compressionLevel", strconv.FormatInt(int64(*cr.Spec.Backup.PITR.CompressionLevel), 10)); err != nil { + } else if err := pbm.SetConfigVar("pitr.compressionLevel", strconv.FormatInt(int64(*cr.Spec.Backup.PITR.CompressionLevel), 10)); err != nil { return errors.Wrap(err, "update pitr.compressionLevel") } -// PBM needs to disabling and enabling PITR to change compression level +// PBM requires disabling and re-enabling PITR to change the compression level - if err := pbm.C.SetConfigVar("pitr.enabled", "false"); err != nil { + if err := pbm.SetConfigVar("pitr.enabled", "false"); err != nil { return errors.Wrap(err, "disable pitr") } - if err := pbm.C.SetConfigVar("pitr.enabled", "true"); err != nil { + if err := pbm.SetConfigVar("pitr.enabled", "true"); err != nil { return errors.Wrap(err, "enable pitr") } } diff --git a/pkg/controller/perconaservermongodb/balancer.go b/pkg/controller/perconaservermongodb/balancer.go index 21480d2633..3260cffae8 100644 --- a/pkg/controller/perconaservermongodb/balancer.go +++ b/pkg/controller/perconaservermongodb/balancer.go @@ -5,7 +5,6 @@ import ( "time" "github.com/percona/percona-server-mongodb-operator/pkg/psmdb" - "github.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" k8sErrors "k8s.io/apimachinery/pkg/api/errors" @@ -18,7 +17,7 @@ import ( func (r *ReconcilePerconaServerMongoDB) enableBalancerIfNeeded(ctx context.Context, cr *api.PerconaServerMongoDB) error { log := logf.FromContext(ctx) - if !cr.Spec.Sharding.Enabled || cr.Spec.Sharding.Mongos.Size == 0 || cr.Spec.Unmanaged { + if !cr.Spec.Sharding.Enabled || cr.Spec.Sharding.Mongos.Size == 0 || cr.Spec.Unmanaged || cr.DeletionTimestamp != nil || cr.Spec.Pause { return nil } @@ -87,13 +86,13 @@ func (r *ReconcilePerconaServerMongoDB) enableBalancerIfNeeded(ctx context.Conte } }() - run, err := mongo.IsBalancerRunning(ctx, mongosSession) + run, err
:= mongosSession.IsBalancerRunning(ctx) if err != nil { return errors.Wrap(err, "failed to check if balancer running") } if !run { - err := mongo.StartBalancer(ctx, mongosSession) + err := mongosSession.StartBalancer(ctx) if err != nil { return errors.Wrap(err, "failed to start balancer") } @@ -133,13 +132,13 @@ func (r *ReconcilePerconaServerMongoDB) disableBalancer(ctx context.Context, cr } }() - run, err := mongo.IsBalancerRunning(ctx, mongosSession) + run, err := mongosSession.IsBalancerRunning(ctx) if err != nil { return errors.Wrap(err, "failed to check if balancer running") } if run { - err := mongo.StopBalancer(ctx, mongosSession) + err := mongosSession.StopBalancer(ctx) if err != nil { return errors.Wrap(err, "failed to stop balancer") } diff --git a/pkg/controller/perconaservermongodb/connections.go b/pkg/controller/perconaservermongodb/connections.go index 9ceb62f7bf..3ab7f4eb5f 100644 --- a/pkg/controller/perconaservermongodb/connections.go +++ b/pkg/controller/perconaservermongodb/connections.go @@ -4,37 +4,66 @@ import ( "context" "github.com/pkg/errors" - mgo "go.mongodb.org/mongo-driver/mongo" + "sigs.k8s.io/controller-runtime/pkg/client" api "github.com/percona/percona-server-mongodb-operator/pkg/apis/psmdb/v1" "github.com/percona/percona-server-mongodb-operator/pkg/psmdb" + "github.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo" ) -func (r *ReconcilePerconaServerMongoDB) mongoClientWithRole(ctx context.Context, cr *api.PerconaServerMongoDB, rs api.ReplsetSpec, - role UserRole) (*mgo.Client, error) { +type MongoClientProvider interface { + Mongo(ctx context.Context, cr *api.PerconaServerMongoDB, rs api.ReplsetSpec, role UserRole) (mongo.Client, error) + Mongos(ctx context.Context, cr *api.PerconaServerMongoDB, role UserRole) (mongo.Client, error) + Standalone(ctx context.Context, cr *api.PerconaServerMongoDB, role UserRole, host string) (mongo.Client, error) +} + +func (r *ReconcilePerconaServerMongoDB) MongoClientProvider() MongoClientProvider { + if r.mongoClientProvider == nil { + return &mongoClientProvider{r.client} + } + return r.mongoClientProvider +} + +type mongoClientProvider struct { + k8sclient client.Client +} - c, err := r.getInternalCredentials(ctx, cr, role) +func (p *mongoClientProvider) Mongo(ctx context.Context, cr *api.PerconaServerMongoDB, rs api.ReplsetSpec, role UserRole) (mongo.Client, error) { + c, err := getInternalCredentials(ctx, p.k8sclient, cr, role) if err != nil { return nil, errors.Wrap(err, "failed to get credentials") } - return psmdb.MongoClient(ctx, r.client, cr, rs, c) + return psmdb.MongoClient(ctx, p.k8sclient, cr, rs, c) } -func (r *ReconcilePerconaServerMongoDB) mongosClientWithRole(ctx context.Context, cr *api.PerconaServerMongoDB, role UserRole) (*mgo.Client, error) { - c, err := r.getInternalCredentials(ctx, cr, role) +func (p *mongoClientProvider) Mongos(ctx context.Context, cr *api.PerconaServerMongoDB, role UserRole) (mongo.Client, error) { + c, err := getInternalCredentials(ctx, p.k8sclient, cr, role) if err != nil { return nil, errors.Wrap(err, "failed to get credentials") } - return psmdb.MongosClient(ctx, r.client, cr, c) + return psmdb.MongosClient(ctx, p.k8sclient, cr, c) } -func (r *ReconcilePerconaServerMongoDB) standaloneClientWithRole(ctx context.Context, cr *api.PerconaServerMongoDB, role UserRole, host string) (*mgo.Client, error) { - c, err := r.getInternalCredentials(ctx, cr, role) +func (p *mongoClientProvider) Standalone(ctx context.Context, cr *api.PerconaServerMongoDB, role UserRole, host 
string) (mongo.Client, error) { + c, err := getInternalCredentials(ctx, p.k8sclient, cr, role) if err != nil { return nil, errors.Wrap(err, "failed to get credentials") } - return psmdb.StandaloneClient(ctx, r.client, cr, c, host) + return psmdb.StandaloneClient(ctx, p.k8sclient, cr, c, host) +} + +func (r *ReconcilePerconaServerMongoDB) mongoClientWithRole(ctx context.Context, cr *api.PerconaServerMongoDB, rs api.ReplsetSpec, + role UserRole) (mongo.Client, error) { + return r.MongoClientProvider().Mongo(ctx, cr, rs, role) +} + +func (r *ReconcilePerconaServerMongoDB) mongosClientWithRole(ctx context.Context, cr *api.PerconaServerMongoDB, role UserRole) (mongo.Client, error) { + return r.MongoClientProvider().Mongos(ctx, cr, role) +} + +func (r *ReconcilePerconaServerMongoDB) standaloneClientWithRole(ctx context.Context, cr *api.PerconaServerMongoDB, role UserRole, host string) (mongo.Client, error) { + return r.MongoClientProvider().Standalone(ctx, cr, role, host) } diff --git a/pkg/controller/perconaservermongodb/connections_test.go b/pkg/controller/perconaservermongodb/connections_test.go new file mode 100644 index 0000000000..b2e87ff99d --- /dev/null +++ b/pkg/controller/perconaservermongodb/connections_test.go @@ -0,0 +1,518 @@ +package perconaservermongodb + +import ( + "context" + "fmt" + "io" + "testing" + "time" + + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + api "github.com/percona/percona-server-mongodb-operator/pkg/apis/psmdb/v1" + "github.com/percona/percona-server-mongodb-operator/pkg/psmdb" + "github.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo" + mongoFake "github.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo/fake" + "github.com/percona/percona-server-mongodb-operator/version" +) + +// TestConnectionLeaks aims to cover every initialization of a connection to the MongoDB database. +// Whenever we establish a connection to the MongoDB database, the "connectionCount" variable increments, +// and every time we call `Disconnect`, this variable decrements. If at the end of each reconciliation, +// this variable is not 0, it indicates that we have not closed the connection somewhere. 
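+// A minimal sketch of that counting pattern (illustrative only; it mirrors the
+// fakeMongoClientProvider wired in below, and the exact role constant is an
+// assumption):
+//
+//	connectionCount := new(int)
+//	provider := &fakeMongoClientProvider{cr: cr, connectionCount: connectionCount}
+//	client, _ := provider.Mongos(ctx, cr, roleClusterAdmin) // opens a connection: counter++
+//	_ = client.Disconnect(ctx)                              // closes it: counter--
+//	// a non-zero *connectionCount after Reconcile means a connection leaked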
+func TestConnectionLeaks(t *testing.T) { + logf.SetLogger(zap.New(zap.WriteTo(io.Discard))) + ctx := context.Background() + + q, err := resource.ParseQuantity("1Gi") + if err != nil { + t.Fatal(err) + } + volumeSpec := &api.VolumeSpec{ + PersistentVolumeClaim: api.PVCSpec{ + PersistentVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{ + Resources: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: q, + }, + }, + }, + }, + } + cr := &api.PerconaServerMongoDB{ + ObjectMeta: metav1.ObjectMeta{ + Name: "psmdb-mock", + Namespace: "psmdb", + Generation: 1, + }, + Spec: api.PerconaServerMongoDBSpec{ + Backup: api.BackupSpec{ + Enabled: false, + }, + CRVersion: version.Version, + Image: "percona/percona-server-mongodb:latest", + Replsets: []*api.ReplsetSpec{ + { + Name: "rs0", + Size: 3, + VolumeSpec: volumeSpec, + }, + }, + UpdateStrategy: api.SmartUpdateStatefulSetStrategyType, + UpgradeOptions: api.UpgradeOptions{ + SetFCV: true, + }, + Sharding: api.Sharding{Enabled: false}, + }, + Status: api.PerconaServerMongoDBStatus{ + MongoVersion: "4.2", + ObservedGeneration: 1, + State: api.AppStateReady, + MongoImage: "percona/percona-server-mongodb:4.0", + }, + } + + tests := []struct { + name string + cr *api.PerconaServerMongoDB + }{ + { + name: "not sharded", + cr: cr.DeepCopy(), + }, + { + name: "not sharded unmanaged", + cr: updateResource(cr.DeepCopy(), func(cr *api.PerconaServerMongoDB) { + cr.Spec.Unmanaged = true + cr.Spec.UpdateStrategy = appsv1.RollingUpdateStatefulSetStrategyType + }), + }, + { + name: "sharded", + cr: updateResource(cr.DeepCopy(), func(cr *api.PerconaServerMongoDB) { + cr.Spec.Sharding.Enabled = true + cr.Spec.Sharding.ConfigsvrReplSet = &api.ReplsetSpec{ + Size: 3, + VolumeSpec: volumeSpec, + } + cr.Spec.Sharding.Mongos = &api.MongosSpec{ + Size: 3, + } + }), + }, + { + name: "sharded unmanaged", + cr: updateResource(cr.DeepCopy(), func(cr *api.PerconaServerMongoDB) { + cr.Spec.Unmanaged = true + cr.Spec.UpdateStrategy = appsv1.RollingUpdateStatefulSetStrategyType + cr.Spec.Sharding.Enabled = true + cr.Spec.Sharding.ConfigsvrReplSet = &api.ReplsetSpec{ + Size: 3, + VolumeSpec: volumeSpec, + } + cr.Spec.Sharding.Mongos = &api.MongosSpec{ + Size: 3, + } + }), + }, + } + for _, tt := range tests { + cr := tt.cr + t.Run(tt.name, func(t *testing.T) { + updatedRevision := "some-revision" + + obj := []client.Object{} + obj = append(obj, cr, + fakeStatefulset(cr, cr.Spec.Replsets[0].Name, cr.Spec.Replsets[0].Size, updatedRevision), + fakeStatefulset(cr, "deleted-sts", 0, ""), + ) + + rsPods := fakePodsForRS(cr, cr.Spec.Replsets[0]) + allPods := append([]client.Object{}, rsPods...) + + if cr.Spec.Sharding.Enabled { + sts := psmdb.MongosStatefulset(cr) + sts.Spec = psmdb.MongosStatefulsetSpec(cr, corev1.PodTemplateSpec{}) + obj = append(obj, sts) + + allPods = append(allPods, fakePodsForMongos(cr)...) + + cr := cr.DeepCopy() + if err := cr.CheckNSetDefaults(version.PlatformKubernetes, logf.FromContext(ctx)); err != nil { + t.Fatal(err) + } + obj = append(obj, fakeStatefulset(cr, cr.Spec.Sharding.ConfigsvrReplSet.Name, cr.Spec.Sharding.ConfigsvrReplSet.Size, updatedRevision)) + allPods = append(allPods, fakePodsForRS(cr, cr.Spec.Sharding.ConfigsvrReplSet)...) + } + + obj = append(obj, allPods...) 
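+			// obj now collects the CR, its statefulsets, and every pod; buildFakeClient below seeds the fake API server with all of them.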
+ + if cr.Spec.Unmanaged { + cr := cr.DeepCopy() + if err := cr.CheckNSetDefaults(version.PlatformKubernetes, logf.FromContext(ctx)); err != nil { + t.Fatal(err) + } + obj = append(obj, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: cr.Spec.Secrets.Users, + Namespace: cr.Namespace, + }, + }) + } + + connectionCount := new(int) + + r := buildFakeClient(obj...) + r.mongoClientProvider = &fakeMongoClientProvider{pods: rsPods, cr: cr, connectionCount: connectionCount} + r.serverVersion = &version.ServerVersion{Platform: version.PlatformKubernetes} + + g, gCtx := errgroup.WithContext(ctx) + gCtx, cancel := context.WithCancel(gCtx) + g.Go(func() error { + return updatePodsForSmartUpdate(gCtx, r.client, cr, allPods, updatedRevision) + }) + g.Go(func() error { + defer cancel() + _, err := r.Reconcile(gCtx, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: cr.Namespace, + Name: cr.Name, + }, + }) + if *connectionCount != 0 { + return errors.Errorf("open connections: %d", *connectionCount) + } + if err != nil { + return err + } + // smart update sets status to initializing + // we need second reconcile to update status to ready + _, err = r.Reconcile(gCtx, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: cr.Namespace, + Name: cr.Name, + }, + }) + if *connectionCount != 0 { + return errors.Errorf("open connections: %d", *connectionCount) + } + if err != nil { + return err + } + + if err := updateUsersSecret(ctx, r.client, cr); err != nil { + return err + } + + // and third reconcile to have cr with ready status from the start + _, err = r.Reconcile(gCtx, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: cr.Namespace, + Name: cr.Name, + }, + }) + if *connectionCount != 0 { + return errors.Errorf("open connections: %d", *connectionCount) + } + return err + }, + ) + if err := g.Wait(); err != nil { + t.Fatal(err) + } + }) + } + +} + +func updateResource[T any](resource T, update func(T)) T { + update(resource) + return resource +} + +func updateUsersSecret(ctx context.Context, cl client.Client, cr *api.PerconaServerMongoDB) error { + cr = cr.DeepCopy() + if err := cr.CheckNSetDefaults(version.PlatformKubernetes, logf.FromContext(ctx)); err != nil { + return err + } + secret := corev1.Secret{} + err := cl.Get(ctx, + types.NamespacedName{ + Namespace: cr.Namespace, + Name: cr.Spec.Secrets.Users, + }, + &secret, + ) + if err != nil { + return err + } + if secret.Data == nil { + secret.Data = make(map[string][]byte) + } + secret.Data["MONGODB_CLUSTER_ADMIN_PASSWORD"] = []byte("new-password") + return cl.Update(ctx, &secret) +} + +func updatePodsForSmartUpdate(ctx context.Context, cl client.Client, cr *api.PerconaServerMongoDB, pods []client.Object, updatedRevision string) error { + for { + select { + case <-ctx.Done(): + return nil + default: + } + time.Sleep(time.Second) + + podList := corev1.PodList{} + err := cl.List(ctx, + &podList, + &client.ListOptions{ + Namespace: cr.Namespace, + }, + ) + if err != nil { + return err + } + if len(podList.Items) < len(pods) { + podNames := make(map[string]struct{}, len(pods)) + for _, pod := range podList.Items { + podNames[pod.GetName()] = struct{}{} + } + for _, pod := range pods { + if _, ok := podNames[pod.GetName()]; !ok { + pod := pod.DeepCopyObject().(*corev1.Pod) + pod.Labels["controller-revision-hash"] = updatedRevision + pod.ResourceVersion = "" + if err := cl.Create(ctx, pod); err != nil { + return err + } + } + } + } + } +} + +func fakePodsForRS(cr *api.PerconaServerMongoDB, rs 
*api.ReplsetSpec) []client.Object { + pods := []client.Object{} + ls := psmdb.RSLabels(cr, rs.Name) + + ls["app.kubernetes.io/component"] = "mongod" + if rs.Name == api.ConfigReplSetName { + ls["app.kubernetes.io/component"] = api.ConfigReplSetName + } + for i := 0; i < int(rs.Size); i++ { + pods = append(pods, fakePod(fmt.Sprintf("%s-%s-%d", cr.Name, rs.Name, i), cr.Namespace, ls, "mongod")) + } + return pods +} + +func fakePodsForMongos(cr *api.PerconaServerMongoDB) []client.Object { + pods := []client.Object{} + ls := psmdb.MongosLabels(cr) + ms := cr.Spec.Sharding.Mongos + for i := 0; i < int(ms.Size); i++ { + pods = append(pods, fakePod(fmt.Sprintf("%s-%s-%d", cr.Name, "mongos", i), cr.Namespace, ls, "mongos")) + } + return pods +} + +func fakePod(name, namespace string, ls map[string]string, containerName string) *corev1.Pod { + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: ls, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: containerName, + Ready: true, + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{ + StartedAt: metav1.Now(), + }, + }, + }, + }, + Conditions: []corev1.PodCondition{ + { + Type: corev1.ContainersReady, + Status: corev1.ConditionTrue, + }, + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } +} + +func fakeStatefulset(cr *api.PerconaServerMongoDB, rsName string, size int32, updateRevision string) client.Object { + return &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s", cr.Name, rsName), + Namespace: cr.Namespace, + Labels: psmdb.RSLabels(cr, rsName), + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: &size, + }, + Status: appsv1.StatefulSetStatus{ + UpdateRevision: updateRevision, + }, + } +} + +type fakeMongoClientProvider struct { + pods []client.Object + cr *api.PerconaServerMongoDB + connectionCount *int +} + +func (g *fakeMongoClientProvider) Mongo(ctx context.Context, cr *api.PerconaServerMongoDB, rs api.ReplsetSpec, role UserRole) (mongo.Client, error) { + *g.connectionCount++ + + fakeClient := mongoFake.NewClient() + return &fakeMongoClient{pods: g.pods, cr: g.cr, connectionCount: g.connectionCount, Client: fakeClient}, nil +} +func (g *fakeMongoClientProvider) Mongos(ctx context.Context, cr *api.PerconaServerMongoDB, role UserRole) (mongo.Client, error) { + *g.connectionCount++ + + fakeClient := mongoFake.NewClient() + return &fakeMongoClient{pods: g.pods, cr: g.cr, connectionCount: g.connectionCount, Client: fakeClient}, nil +} +func (g *fakeMongoClientProvider) Standalone(ctx context.Context, cr *api.PerconaServerMongoDB, role UserRole, host string) (mongo.Client, error) { + *g.connectionCount++ + + fakeClient := mongoFake.NewClient() + return &fakeMongoClient{pods: g.pods, cr: g.cr, connectionCount: g.connectionCount, Client: fakeClient}, nil +} + +type fakeMongoClient struct { + pods []client.Object + cr *api.PerconaServerMongoDB + connectionCount *int + mongo.Client +} + +func (c *fakeMongoClient) Disconnect(ctx context.Context) error { + *c.connectionCount-- + return nil +} + +func (c *fakeMongoClient) GetFCV(ctx context.Context) (string, error) { + return "4.0", nil +} + +func (c *fakeMongoClient) GetRole(ctx context.Context, role string) (*mongo.Role, error) { + return &mongo.Role{ + Role: string(roleClusterAdmin), + }, nil +} + +func (c *fakeMongoClient) GetUserInfo(ctx context.Context, username string) (*mongo.User, error) { + return 
&mongo.User{ + Roles: []map[string]interface{}{}, + }, nil +} + +func (c *fakeMongoClient) RSBuildInfo(ctx context.Context) (mongo.BuildInfo, error) { + return mongo.BuildInfo{ + Version: "4.2", + OKResponse: mongo.OKResponse{ + OK: 1, + }, + }, nil +} + +func (c *fakeMongoClient) RSStatus(ctx context.Context) (mongo.Status, error) { + log := logf.FromContext(ctx) + cr := c.cr.DeepCopy() + if err := cr.CheckNSetDefaults(version.PlatformKubernetes, log); err != nil { + return mongo.Status{}, err + } + members := []*mongo.Member{} + for key, pod := range c.pods { + host := psmdb.GetAddr(cr, pod.GetName(), cr.Spec.Replsets[0].Name) + state := mongo.MemberStateSecondary + if key == 0 { + state = mongo.MemberStatePrimary + } + member := mongo.Member{ + Id: key, + Name: host, + State: state, + } + members = append(members, &member) + } + return mongo.Status{ + Members: members, + OKResponse: mongo.OKResponse{ + OK: 1, + }, + }, nil +} + +func (c *fakeMongoClient) ReadConfig(ctx context.Context) (mongo.RSConfig, error) { + log := logf.FromContext(ctx) + cr := c.cr.DeepCopy() + if err := cr.CheckNSetDefaults(version.PlatformKubernetes, log); err != nil { + return mongo.RSConfig{}, err + } + members := []mongo.ConfigMember{} + for key, pod := range c.pods { + host := psmdb.GetAddr(cr, pod.GetName(), cr.Spec.Replsets[0].Name) + + member := mongo.ConfigMember{ + ID: key, + Host: host, + BuildIndexes: true, + Priority: mongo.DefaultPriority, + Votes: mongo.DefaultVotes, + } + + member.Tags = mongo.ReplsetTags{ + "podName": pod.GetName(), + "serviceName": cr.Name, + } + + members = append(members, member) + } + return mongo.RSConfig{ + Members: members, + }, nil +} + +func (c *fakeMongoClient) RemoveShard(ctx context.Context, shard string) (mongo.ShardRemoveResp, error) { + return mongo.ShardRemoveResp{ + State: mongo.ShardRemoveCompleted, + OKResponse: mongo.OKResponse{ + OK: 1, + }, + }, nil + +} + +func (c *fakeMongoClient) IsBalancerRunning(ctx context.Context) (bool, error) { + return true, nil +} + +func (c *fakeMongoClient) ListShard(ctx context.Context) (mongo.ShardList, error) { + return mongo.ShardList{ + OKResponse: mongo.OKResponse{ + OK: 1, + }, + }, nil +} diff --git a/pkg/controller/perconaservermongodb/fcv.go b/pkg/controller/perconaservermongodb/fcv.go index 30dcfbcc84..b20aca899d 100644 --- a/pkg/controller/perconaservermongodb/fcv.go +++ b/pkg/controller/perconaservermongodb/fcv.go @@ -5,7 +5,6 @@ import ( v "github.com/hashicorp/go-version" "github.com/pkg/errors" - mgo "go.mongodb.org/mongo-driver/mongo" logf "sigs.k8s.io/controller-runtime/pkg/log" api "github.com/percona/percona-server-mongodb-operator/pkg/apis/psmdb/v1" @@ -24,7 +23,7 @@ func (r *ReconcilePerconaServerMongoDB) getFCV(ctx context.Context, cr *api.Perc } }() - return mongo.GetFCV(ctx, c) + return c.GetFCV(ctx) } func (r *ReconcilePerconaServerMongoDB) setFCV(ctx context.Context, cr *api.PerconaServerMongoDB, version string) error { @@ -37,7 +36,7 @@ func (r *ReconcilePerconaServerMongoDB) setFCV(ctx context.Context, cr *api.Perc return errors.Wrap(err, "failed to get go semver") } - var cli *mgo.Client + var cli mongo.Client var connErr error if cr.Spec.Sharding.Enabled { @@ -56,5 +55,5 @@ func (r *ReconcilePerconaServerMongoDB) setFCV(ctx context.Context, cr *api.Perc } }() - return mongo.SetFCV(ctx, cli, MajorMinor(v)) + return cli.SetFCV(ctx, MajorMinor(v)) } diff --git a/pkg/controller/perconaservermongodb/finalizers.go b/pkg/controller/perconaservermongodb/finalizers.go index c559c9500d..7e5cff84f6 100644 --- 
a/pkg/controller/perconaservermongodb/finalizers.go +++ b/pkg/controller/perconaservermongodb/finalizers.go @@ -4,6 +4,7 @@ import ( "context" "github.com/pkg/errors" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" @@ -55,44 +56,106 @@ func (r *ReconcilePerconaServerMongoDB) checkFinalizers(ctx context.Context, cr } func (r *ReconcilePerconaServerMongoDB) deletePSMDBPods(ctx context.Context, cr *api.PerconaServerMongoDB) (err error) { - done := true - for _, rs := range cr.Spec.Replsets { - sts, err := r.getRsStatefulset(ctx, cr, rs.Name) - if err != nil { - if k8serrors.IsNotFound(err) { - continue + if cr.Spec.Sharding.Enabled { + cr.Spec.Sharding.Mongos.Size = 0 + + sts := new(appsv1.StatefulSet) + err := r.client.Get(ctx, cr.MongosNamespacedName(), sts) + if client.IgnoreNotFound(err) != nil { + return errors.Wrap(err, "failed to get mongos statefulset") + } + if sts.Spec.Replicas != nil && *sts.Spec.Replicas > 0 { + err = r.disableBalancer(ctx, cr) + if err != nil { + return errors.Wrap(err, "failed to disable balancer") } - return errors.Wrap(err, "get rs statefulset") } - - pods := &corev1.PodList{} - err = r.client.List(ctx, - pods, - &client.ListOptions{ - Namespace: cr.Namespace, - LabelSelector: labels.SelectorFromSet(sts.Spec.Selector.MatchLabels), - }, - ) + list, err := r.getMongosPods(ctx, cr) if err != nil { - if k8serrors.IsNotFound(err) { - continue - } - return errors.Wrap(err, "get rs statefulset") + return errors.Wrap(err, "get mongos pods") } - if len(pods.Items) > int(*sts.Spec.Replicas) { + if len(list.Items) != 0 { return errWaitingTermination } - if *sts.Spec.Replicas != 1 { - rs.Size = 1 - done = false + } + + replsetsDeleted := true + for _, rs := range cr.Spec.Replsets { + if err := r.deleteRSPods(ctx, cr, rs); err != nil { + if err == errWaitingTermination { + replsetsDeleted = false + continue + } + return err } } - if !done { + if !replsetsDeleted { return errWaitingTermination } + + if cr.Spec.Sharding.Enabled && cr.Spec.Sharding.ConfigsvrReplSet != nil { + if err := r.deleteRSPods(ctx, cr, cr.Spec.Sharding.ConfigsvrReplSet); err != nil { + return err + } + } return nil } +func (r *ReconcilePerconaServerMongoDB) deleteRSPods(ctx context.Context, cr *api.PerconaServerMongoDB, rs *api.ReplsetSpec) error { + sts, err := r.getRsStatefulset(ctx, cr, rs.Name) + if err != nil { + if k8serrors.IsNotFound(err) { + return nil + } + return errors.Wrap(err, "get rs statefulset") + } + + pods := &corev1.PodList{} + err = r.client.List(ctx, + pods, + &client.ListOptions{ + Namespace: cr.Namespace, + LabelSelector: labels.SelectorFromSet(sts.Spec.Selector.MatchLabels), + }, + ) + if err != nil { + if k8serrors.IsNotFound(err) { + return nil + } + return errors.Wrap(err, "get rs statefulset") + } + + rs.Size = 1 + + switch *sts.Spec.Replicas { + case 0: + rs.Size = 0 + if len(pods.Items) == 0 { + return nil + } + return errWaitingTermination + case 1: + // If there is one pod left, we should be sure that it's the primary + if len(pods.Items) != 1 { + return errWaitingTermination + } + + isPrimary, err := r.isPodPrimary(ctx, cr, pods.Items[0], rs) + if err != nil { + return errors.Wrap(err, "is pod primary") + } + if !isPrimary { + return errWaitingTermination + } + + // If true, we should resize the replset to 0 + rs.Size = 0 + return errWaitingTermination + default: + return errWaitingTermination + } +} + func (r *ReconcilePerconaServerMongoDB) deletePvcFinalizer(ctx context.Context, 
cr *api.PerconaServerMongoDB) error { err := r.deleteAllStatefulsets(ctx, cr) if err != nil { diff --git a/pkg/controller/perconaservermongodb/mgo.go b/pkg/controller/perconaservermongodb/mgo.go index 6e17238c88..b27e9194ba 100644 --- a/pkg/controller/perconaservermongodb/mgo.go +++ b/pkg/controller/perconaservermongodb/mgo.go @@ -10,7 +10,6 @@ import ( "time" "github.com/pkg/errors" - mgo "go.mongodb.org/mongo-driver/mongo" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -117,9 +116,14 @@ func (r *ReconcilePerconaServerMongoDB) reconcileCluster(ctx context.Context, cr return api.AppStateInit, nil } + defer func() { + if err := cli.Disconnect(ctx); err != nil { + log.Error(err, "failed to close connection") + } + }() if cr.Spec.Unmanaged { - status, err := mongo.RSStatus(ctx, cli) + status, err := cli.RSStatus(ctx) if err != nil { return api.AppStateError, errors.Wrap(err, "failed to get rs status") } @@ -133,12 +137,6 @@ func (r *ReconcilePerconaServerMongoDB) reconcileCluster(ctx context.Context, cr return api.AppStateInit, errors.Wrap(err, "create system users") } - defer func() { - if err := cli.Disconnect(ctx); err != nil { - log.Error(err, "failed to close connection") - } - }() - // this can happen if cluster is initialized but status update failed if !cr.Status.Replsets[replset.Name].Initialized { rs := cr.Status.Replsets[replset.Name] @@ -167,7 +165,7 @@ func (r *ReconcilePerconaServerMongoDB) reconcileCluster(ctx context.Context, cr cr.Status.Mongos != nil && cr.Status.Mongos.Status == api.AppStateReady && replset.ClusterRole == api.ClusterRoleShardSvr && - len(mongosPods) > 0 { + len(mongosPods) > 0 && cr.Spec.Sharding.Mongos.Size > 0 { mongosSession, err := r.mongosClientWithRole(ctx, cr, roleClusterAdmin) if err != nil { @@ -181,7 +179,7 @@ func (r *ReconcilePerconaServerMongoDB) reconcileCluster(ctx context.Context, cr } }() - err = mongo.SetDefaultRWConcern(ctx, mongosSession, mongo.DefaultReadConcern, mongo.DefaultWriteConcern) + err = mongosSession.SetDefaultRWConcern(ctx, mongo.DefaultReadConcern, mongo.DefaultWriteConcern) // SetDefaultRWConcern introduced in MongoDB 4.4 if err != nil && !strings.Contains(err.Error(), "CommandNotFound") { return api.AppStateError, errors.Wrap(err, "set default RW concern") @@ -194,7 +192,6 @@ func (r *ReconcilePerconaServerMongoDB) reconcileCluster(ctx context.Context, cr if !in { log.Info("adding rs to shard", "rs", replset.Name) - err := r.handleRsAddToShard(ctx, cr, replset, pods.Items[0], mongosPods[0]) if err != nil { return api.AppStateError, errors.Wrap(err, "add shard") @@ -211,7 +208,7 @@ func (r *ReconcilePerconaServerMongoDB) reconcileCluster(ctx context.Context, cr } if replset.Arbiter.Enabled && !cr.Spec.Sharding.Enabled { - err := mongo.SetDefaultRWConcern(ctx, cli, mongo.DefaultReadConcern, mongo.DefaultWriteConcern) + err := cli.SetDefaultRWConcern(ctx, mongo.DefaultReadConcern, mongo.DefaultWriteConcern) // SetDefaultRWConcern introduced in MongoDB 4.4 if err != nil && !strings.Contains(err.Error(), "CommandNotFound") { return api.AppStateError, errors.Wrap(err, "set default RW concern") @@ -230,7 +227,7 @@ func (r *ReconcilePerconaServerMongoDB) reconcileCluster(ctx context.Context, cr return api.AppStateInit, nil } -func (r *ReconcilePerconaServerMongoDB) updateConfigMembers(ctx context.Context, cli *mgo.Client, cr *api.PerconaServerMongoDB, rs *api.ReplsetSpec) (int, error) { +func (r *ReconcilePerconaServerMongoDB) updateConfigMembers(ctx context.Context, cli 
mongo.Client, cr *api.PerconaServerMongoDB, rs *api.ReplsetSpec) (int, error) { log := logf.FromContext(ctx) // Primary with a Secondary and an Arbiter (PSA) unsafePSA := cr.Spec.UnsafeConf && rs.Arbiter.Enabled && rs.Arbiter.Size == 1 && !rs.NonVoting.Enabled && rs.Size == 2 @@ -240,7 +237,7 @@ func (r *ReconcilePerconaServerMongoDB) updateConfigMembers(ctx context.Context, return 0, errors.Wrap(err, "get rs pods") } - cnf, err := mongo.ReadConfig(ctx, cli) + cnf, err := cli.ReadConfig(ctx) if err != nil { return 0, errors.Wrap(err, "get mongo config") } @@ -316,7 +313,7 @@ func (r *ReconcilePerconaServerMongoDB) updateConfigMembers(ctx context.Context, log.Info("Fixing member tags", "replset", rs.Name) - if err := mongo.WriteConfig(ctx, cli, cnf); err != nil { + if err := cli.WriteConfig(ctx, cnf); err != nil { return 0, errors.Wrap(err, "fix tags: write mongo config") } } @@ -326,7 +323,7 @@ func (r *ReconcilePerconaServerMongoDB) updateConfigMembers(ctx context.Context, log.Info("Fixing member hosts", "replset", rs.Name) - err = mongo.WriteConfig(ctx, cli, cnf) + err = cli.WriteConfig(ctx, cnf) if err != nil { return 0, errors.Wrap(err, "fix hosts: write mongo config") } @@ -337,7 +334,7 @@ func (r *ReconcilePerconaServerMongoDB) updateConfigMembers(ctx context.Context, log.Info("Removing old nodes", "replset", rs.Name) - err = mongo.WriteConfig(ctx, cli, cnf) + err = cli.WriteConfig(ctx, cnf) if err != nil { return 0, errors.Wrap(err, "delete: write mongo config") } @@ -348,7 +345,7 @@ func (r *ReconcilePerconaServerMongoDB) updateConfigMembers(ctx context.Context, log.Info("Adding new nodes", "replset", rs.Name) - err = mongo.WriteConfig(ctx, cli, cnf) + err = cli.WriteConfig(ctx, cnf) if err != nil { return 0, errors.Wrap(err, "add new: write mongo config") } @@ -359,7 +356,7 @@ func (r *ReconcilePerconaServerMongoDB) updateConfigMembers(ctx context.Context, log.Info("Updating external nodes", "replset", rs.Name) - err = mongo.WriteConfig(ctx, cli, cnf) + err = cli.WriteConfig(ctx, cnf) if err != nil { return 0, errors.Wrap(err, "update external nodes: write mongo config") } @@ -372,13 +369,13 @@ func (r *ReconcilePerconaServerMongoDB) updateConfigMembers(ctx context.Context, log.Info("Configuring member votes and priorities", "replset", rs.Name) - err := mongo.WriteConfig(ctx, cli, cnf) + err := cli.WriteConfig(ctx, cnf) if err != nil { return 0, errors.Wrap(err, "set votes: write mongo config") } } - rsStatus, err := mongo.RSStatus(ctx, cli) + rsStatus, err := cli.RSStatus(ctx) if err != nil { return 0, errors.Wrap(err, "unable to get replset members") } @@ -414,8 +411,8 @@ func (r *ReconcilePerconaServerMongoDB) updateConfigMembers(ctx context.Context, return membersLive, nil } -func inShard(ctx context.Context, client *mgo.Client, rsName string) (bool, error) { - shardList, err := mongo.ListShard(ctx, client) +func inShard(ctx context.Context, client mongo.Client, rsName string) (bool, error) { + shardList, err := client.ListShard(ctx) if err != nil { return false, errors.Wrap(err, "unable to get shard list") } @@ -460,7 +457,7 @@ func (r *ReconcilePerconaServerMongoDB) removeRSFromShard(ctx context.Context, c }() for { - resp, err := mongo.RemoveShard(ctx, cli, rsName) + resp, err := cli.RemoveShard(ctx, rsName) if err != nil { return errors.Wrap(err, "remove shard") } @@ -503,7 +500,7 @@ func (r *ReconcilePerconaServerMongoDB) handleRsAddToShard(ctx context.Context, } }() - err = mongo.AddShard(ctx, cli, replset.Name, host) + err = cli.AddShard(ctx, replset.Name, host) if err 
!= nil { return errors.Wrap(err, "failed to add shard") } @@ -572,7 +569,7 @@ func (r *ReconcilePerconaServerMongoDB) handleReplsetInit(ctx context.Context, c time.Sleep(time.Second * 5) - userAdmin, err := r.getInternalCredentials(ctx, cr, roleUserAdmin) + userAdmin, err := getInternalCredentials(ctx, r.client, cr, roleUserAdmin) if err != nil { return errors.Wrap(err, "failed to get userAdmin credentials") } @@ -667,17 +664,17 @@ func comparePrivileges(x []mongo.RolePrivilege, y []mongo.RolePrivilege) bool { return true } -func (r *ReconcilePerconaServerMongoDB) createOrUpdateSystemRoles(ctx context.Context, cli *mgo.Client, role string, privileges []mongo.RolePrivilege) error { - roleInfo, err := mongo.GetRole(ctx, cli, role) +func (r *ReconcilePerconaServerMongoDB) createOrUpdateSystemRoles(ctx context.Context, cli mongo.Client, role string, privileges []mongo.RolePrivilege) error { + roleInfo, err := cli.GetRole(ctx, role) if err != nil { return errors.Wrap(err, "mongo get role") } if roleInfo == nil { - err = mongo.CreateRole(ctx, cli, role, privileges, []interface{}{}) + err = cli.CreateRole(ctx, role, privileges, []interface{}{}) return errors.Wrapf(err, "create role %s", role) } if !comparePrivileges(privileges, roleInfo.Privileges) { - err = mongo.UpdateRole(ctx, cli, role, privileges, []interface{}{}) + err = cli.UpdateRole(ctx, role, privileges, []interface{}{}) return errors.Wrapf(err, "update role") } return nil @@ -747,24 +744,24 @@ func (r *ReconcilePerconaServerMongoDB) createOrUpdateSystemUsers(ctx context.Co } for _, role := range users { - creds, err := r.getInternalCredentials(ctx, cr, role) + creds, err := getInternalCredentials(ctx, r.client, cr, role) if err != nil { log.Error(err, "failed to get credentials", "role", role) continue } - user, err := mongo.GetUserInfo(ctx, cli, creds.Username) + user, err := cli.GetUserInfo(ctx, creds.Username) if err != nil { return errors.Wrap(err, "get user info") } if user == nil { - err = mongo.CreateUser(ctx, cli, creds.Username, creds.Password, getRoles(cr, role)...) + err = cli.CreateUser(ctx, creds.Username, creds.Password, getRoles(cr, role)...) 
if err != nil { return errors.Wrapf(err, "failed to create user %s", role) } continue } if !compareRoles(user.Roles, getRoles(cr, role)) { - err = mongo.UpdateUserRoles(ctx, cli, creds.Username, getRoles(cr, role)) + err = cli.UpdateUserRoles(ctx, creds.Username, getRoles(cr, role)) if err != nil { return errors.Wrapf(err, "failed to create user %s", role) } @@ -773,42 +770,6 @@ func (r *ReconcilePerconaServerMongoDB) createOrUpdateSystemUsers(ctx context.Co return nil } -func (r *ReconcilePerconaServerMongoDB) recoverReplsetNoPrimary(ctx context.Context, cr *api.PerconaServerMongoDB, replset *api.ReplsetSpec, pod corev1.Pod) error { - host, err := psmdb.MongoHost(ctx, r.client, cr, replset.Name, replset.Expose.Enabled, pod) - if err != nil { - return errors.Wrapf(err, "get mongo hostname for pod/%s", pod.Name) - } - - cli, err := r.standaloneClientWithRole(ctx, cr, roleClusterAdmin, host) - if err != nil { - return errors.Wrap(err, "get standalone client") - } - - cnf, err := mongo.ReadConfig(ctx, cli) - if err != nil { - return errors.Wrap(err, "get mongo config") - } - - for i := 0; i < len(cnf.Members); i++ { - tags := []mongo.ConfigMember(cnf.Members)[i].Tags - podName, ok := tags["podName"] - if !ok { - continue - } - - []mongo.ConfigMember(cnf.Members)[i].Host = replset.PodFQDNWithPort(cr, podName) - } - - cnf.Version++ - logf.FromContext(ctx).Info("Writing replicaset config", "config", cnf) - - if err := mongo.WriteConfig(ctx, cli, cnf); err != nil { - return errors.Wrap(err, "write mongo config") - } - - return nil -} - func (r *ReconcilePerconaServerMongoDB) restoreInProgress(ctx context.Context, cr *api.PerconaServerMongoDB, replset *api.ReplsetSpec) (bool, error) { sts := appsv1.StatefulSet{} stsName := cr.Name + "-" + replset.Name diff --git a/pkg/controller/perconaservermongodb/psmdb_controller.go b/pkg/controller/perconaservermongodb/psmdb_controller.go index af3d90a7a1..8ddba74a05 100644 --- a/pkg/controller/perconaservermongodb/psmdb_controller.go +++ b/pkg/controller/perconaservermongodb/psmdb_controller.go @@ -39,7 +39,6 @@ import ( api "github.com/percona/percona-server-mongodb-operator/pkg/apis/psmdb/v1" "github.com/percona/percona-server-mongodb-operator/pkg/psmdb" "github.com/percona/percona-server-mongodb-operator/pkg/psmdb/backup" - "github.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo" "github.com/percona/percona-server-mongodb-operator/pkg/psmdb/secret" "github.com/percona/percona-server-mongodb-operator/pkg/util" "github.com/percona/percona-server-mongodb-operator/version" @@ -84,6 +83,7 @@ func newReconciler(mgr manager.Manager) (reconcile.Reconciler, error) { reconcileIn: time.Second * 5, crons: NewCronRegistry(), lockers: newLockStore(), + newPBM: backup.NewPBM, initImage: initImage, @@ -167,10 +167,13 @@ type ReconcilePerconaServerMongoDB struct { client client.Client scheme *runtime.Scheme - crons CronRegistry - clientcmd *clientcmd.Client - serverVersion *version.ServerVersion - reconcileIn time.Duration + crons CronRegistry + clientcmd *clientcmd.Client + serverVersion *version.ServerVersion + reconcileIn time.Duration + mongoClientProvider MongoClientProvider + + newPBM backup.NewPBMFunc initImage string @@ -271,6 +274,11 @@ func (r *ReconcilePerconaServerMongoDB) Reconcile(ctx context.Context, request r } } + err = r.reconcilePause(ctx, cr) + if err != nil { + return reconcile.Result{}, err + } + err = r.checkConfiguration(ctx, cr) if err != nil { return reconcile.Result{}, err @@ -550,6 +558,44 @@ func (r *ReconcilePerconaServerMongoDB) 
Reconcile(ctx context.Context, request r return rr, nil } +func (r *ReconcilePerconaServerMongoDB) reconcilePause(ctx context.Context, cr *api.PerconaServerMongoDB) error { + if !cr.Spec.Pause || cr.DeletionTimestamp != nil { + return nil + } + + log := logf.FromContext(ctx) + + backupRunning, err := r.isBackupRunning(ctx, cr) + if err != nil { + return errors.Wrap(err, "check if backup is running") + } + if backupRunning { + cr.Spec.Pause = false + if err := cr.CheckNSetDefaults(r.serverVersion.Platform, log); err != nil { + return errors.Wrap(err, "failed to set defaults") + } + log.Info("cluster will pause after all backups finished") + return nil + } + + for _, rs := range cr.Spec.Replsets { + if cr.Status.State == api.AppStateStopping { + log.Info("Pausing cluster", "replset", rs.Name) + } + rs.Arbiter.Enabled = false + rs.NonVoting.Enabled = false + } + + if err := r.deletePSMDBPods(ctx, cr); err != nil { + if err == errWaitingTermination { + log.Info("pausing cluster", "error", err.Error()) + return nil + } + return errors.Wrap(err, "delete psmdb pods") + } + return nil +} + func (r *ReconcilePerconaServerMongoDB) setCRVersion(ctx context.Context, cr *api.PerconaServerMongoDB) error { if len(cr.Spec.CRVersion) > 0 { return nil @@ -676,7 +722,7 @@ func (r *ReconcilePerconaServerMongoDB) checkIfPossibleToRemove(ctx context.Cont } }() - list, err := mongo.ListDBs(ctx, client) + list, err := client.ListDBs(ctx) if err != nil { log.Error(err, "failed to list databases", "rs", rsName) return errors.Wrapf(err, "failed to list databases for rs %s", rsName) @@ -864,29 +910,18 @@ func (r *ReconcilePerconaServerMongoDB) upgradeFCVIfNeeded(ctx context.Context, } func (r *ReconcilePerconaServerMongoDB) deleteMongos(ctx context.Context, cr *api.PerconaServerMongoDB) error { - svcList, err := psmdb.GetMongosServices(ctx, r.client, cr) - if err != nil { - return errors.Wrap(err, "failed to list mongos services") - } - var mongos client.Object if cr.CompareVersion("1.12.0") >= 0 { mongos = psmdb.MongosStatefulset(cr) } else { mongos = psmdb.MongosDeployment(cr) } - err = r.client.Delete(ctx, mongos) + + err := r.client.Delete(ctx, mongos) if err != nil && !k8serrors.IsNotFound(err) { return errors.Wrap(err, "failed to delete mongos statefulset") } - for _, svc := range svcList.Items { - err = r.client.Delete(ctx, &svc) - if err != nil && !k8serrors.IsNotFound(err) { - return errors.Wrap(err, "failed to delete mongos services") - } - } - return nil } @@ -904,6 +939,18 @@ func (r *ReconcilePerconaServerMongoDB) deleteMongosIfNeeded(ctx context.Context return nil } + ss, err := psmdb.GetMongosServices(ctx, r.client, cr) + if err != nil { + return errors.Wrap(err, "failed to list mongos services") + } + + for _, svc := range ss.Items { + err = r.client.Delete(ctx, &svc) + if err != nil && !k8serrors.IsNotFound(err) { + return errors.Wrap(err, "failed to delete mongos services") + } + } + return r.deleteMongos(ctx, cr) } diff --git a/pkg/controller/perconaservermongodb/secrets.go b/pkg/controller/perconaservermongodb/secrets.go index 5e3297fd13..e2ca420489 100644 --- a/pkg/controller/perconaservermongodb/secrets.go +++ b/pkg/controller/perconaservermongodb/secrets.go @@ -10,6 +10,7 @@ import ( k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" api "github.com/percona/percona-server-mongodb-operator/pkg/apis/psmdb/v1" "github.com/percona/percona-server-mongodb-operator/pkg/psmdb" @@ 
-42,19 +43,19 @@ const ( roleBackup UserRole = "backup" ) -func (r *ReconcilePerconaServerMongoDB) getUserSecret(ctx context.Context, cr *api.PerconaServerMongoDB, name string) (corev1.Secret, error) { +func getUserSecret(ctx context.Context, cl client.Reader, cr *api.PerconaServerMongoDB, name string) (corev1.Secret, error) { secrets := corev1.Secret{} - err := r.client.Get(ctx, types.NamespacedName{Name: name, Namespace: cr.Namespace}, &secrets) + err := cl.Get(ctx, types.NamespacedName{Name: name, Namespace: cr.Namespace}, &secrets) return secrets, errors.Wrap(err, "get user secrets") } -func (r *ReconcilePerconaServerMongoDB) getInternalCredentials(ctx context.Context, cr *api.PerconaServerMongoDB, role UserRole) (psmdb.Credentials, error) { - return r.getCredentials(ctx, cr, api.UserSecretName(cr), role) +func getInternalCredentials(ctx context.Context, cl client.Reader, cr *api.PerconaServerMongoDB, role UserRole) (psmdb.Credentials, error) { + return getCredentials(ctx, cl, cr, api.UserSecretName(cr), role) } -func (r *ReconcilePerconaServerMongoDB) getCredentials(ctx context.Context, cr *api.PerconaServerMongoDB, name string, role UserRole) (psmdb.Credentials, error) { +func getCredentials(ctx context.Context, cl client.Reader, cr *api.PerconaServerMongoDB, name string, role UserRole) (psmdb.Credentials, error) { creds := psmdb.Credentials{} - usersSecret, err := r.getUserSecret(ctx, cr, name) + usersSecret, err := getUserSecret(ctx, cl, cr, name) if err != nil { return creds, errors.Wrap(err, "failed to get user secret") } diff --git a/pkg/controller/perconaservermongodb/smart.go b/pkg/controller/perconaservermongodb/smart.go index b3018ff25d..17201cc803 100644 --- a/pkg/controller/perconaservermongodb/smart.go +++ b/pkg/controller/perconaservermongodb/smart.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "sort" - "strings" "time" "github.com/pkg/errors" @@ -19,7 +18,6 @@ import ( api "github.com/percona/percona-server-mongodb-operator/pkg/apis/psmdb/v1" "github.com/percona/percona-server-mongodb-operator/pkg/psmdb" "github.com/percona/percona-server-mongodb-operator/pkg/psmdb/backup" - "github.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo" ) func (r *ReconcilePerconaServerMongoDB) smartUpdate(ctx context.Context, cr *api.PerconaServerMongoDB, sfs *appsv1.StatefulSet, @@ -98,7 +96,7 @@ func (r *ReconcilePerconaServerMongoDB) smartUpdate(ctx context.Context, cr *api return nil } - hasActiveJobs, err := backup.HasActiveJobs(ctx, r.client, cr, backup.Job{}, backup.NotPITRLock) + hasActiveJobs, err := backup.HasActiveJobs(ctx, r.newPBM, r.client, cr, backup.Job{}, backup.NotPITRLock) if err != nil { return errors.Wrap(err, "failed to check active jobs") } @@ -116,24 +114,6 @@ func (r *ReconcilePerconaServerMongoDB) smartUpdate(ctx context.Context, cr *api } } - client, err := r.mongoClientWithRole(ctx, cr, *replset, roleClusterAdmin) - if err != nil { - return fmt.Errorf("failed to get mongo client: %v", err) - } - - defer func() { - err := client.Disconnect(ctx) - if err != nil { - log.Error(err, "failed to close connection") - } - }() - - primary, err := psmdb.GetPrimaryPod(ctx, client) - if err != nil { - return fmt.Errorf("get primary pod: %v", err) - } - log.Info("Got primary pod", "name", primary) - waitLimit := int(replset.LivenessProbe.InitialDelaySeconds) sort.Slice(list.Items, func(i, j int) bool { @@ -142,19 +122,11 @@ func (r *ReconcilePerconaServerMongoDB) smartUpdate(ctx context.Context, cr *api var primaryPod corev1.Pod for _, pod := range list.Items { - if 
replset.Expose.Enabled { - host, err := psmdb.MongoHost(ctx, r.client, cr, replset.Name, replset.Expose.Enabled, pod) - if err != nil { - return errors.Wrapf(err, "get mongo host for pod %s", pod.Name) - } - - if host == primary { - primaryPod = pod - continue - } + isPrimary, err := r.isPodPrimary(ctx, cr, pod, replset) + if err != nil { + return errors.Wrap(err, "is pod primary") } - - if strings.HasPrefix(primary, fmt.Sprintf("%s.%s.%s", pod.Name, sfs.Name, sfs.Namespace)) { + if isPrimary { primaryPod = pod continue } @@ -181,7 +153,19 @@ func (r *ReconcilePerconaServerMongoDB) smartUpdate(ctx context.Context, cr *api if sfs.Labels["app.kubernetes.io/component"] != "nonVoting" && len(primaryPod.Name) > 0 { forceStepDown := replset.Size == 1 log.Info("doing step down...", "force", forceStepDown) - err = mongo.StepDown(ctx, client, forceStepDown) + client, err := r.mongoClientWithRole(ctx, cr, *replset, roleClusterAdmin) + if err != nil { + return fmt.Errorf("failed to get mongo client: %v", err) + } + + defer func() { + err := client.Disconnect(ctx) + if err != nil { + log.Error(err, "failed to close connection") + } + }() + + err = client.StepDown(ctx, forceStepDown) if err != nil { return errors.Wrap(err, "failed to do step down") } @@ -197,6 +181,32 @@ func (r *ReconcilePerconaServerMongoDB) smartUpdate(ctx context.Context, cr *api return nil } +func (r *ReconcilePerconaServerMongoDB) isPodPrimary(ctx context.Context, cr *api.PerconaServerMongoDB, pod corev1.Pod, rs *api.ReplsetSpec) (bool, error) { + log := logf.FromContext(ctx) + + host, err := psmdb.MongoHost(ctx, r.client, cr, rs.Name, rs.Expose.Enabled, pod) + if err != nil { + return false, errors.Wrap(err, "failed to get mongo host") + } + mgoClient, err := r.standaloneClientWithRole(ctx, cr, roleClusterAdmin, host) + if err != nil { + return false, errors.Wrap(err, "failed to create standalone client") + } + defer func() { + err := mgoClient.Disconnect(ctx) + if err != nil { + log.Error(err, "failed to close connection") + } + }() + + isMaster, err := mgoClient.IsMaster(ctx) + if err != nil { + return false, errors.Wrap(err, "is master") + } + + return isMaster.IsMaster, nil +} + func (r *ReconcilePerconaServerMongoDB) smartMongosUpdate(ctx context.Context, cr *api.PerconaServerMongoDB, sts *appsv1.StatefulSet) error { log := logf.FromContext(ctx) @@ -229,7 +239,7 @@ func (r *ReconcilePerconaServerMongoDB) smartMongosUpdate(ctx context.Context, c return nil } - hasActiveJobs, err := backup.HasActiveJobs(ctx, r.client, cr, backup.Job{}, backup.NotPITRLock) + hasActiveJobs, err := backup.HasActiveJobs(ctx, r.newPBM, r.client, cr, backup.Job{}, backup.NotPITRLock) if err != nil { return errors.Wrap(err, "failed to check active jobs") } diff --git a/pkg/controller/perconaservermongodb/status.go b/pkg/controller/perconaservermongodb/status.go index 7f3a326647..d8e3337406 100644 --- a/pkg/controller/perconaservermongodb/status.go +++ b/pkg/controller/perconaservermongodb/status.go @@ -204,6 +204,7 @@ func (r *ReconcilePerconaServerMongoDB) updateStatus(ctx context.Context, cr *ap } case !inProgress && replsetsReady == len(repls) && clusterState == api.AppStateReady && cr.Status.Host != "": state = api.AppStateReady + if cr.Spec.Sharding.Enabled && cr.Status.Mongos.Status != api.AppStateReady { state = cr.Status.Mongos.Status } @@ -307,15 +308,26 @@ func (r *ReconcilePerconaServerMongoDB) rsStatus(ctx context.Context, cr *api.Pe } func (r *ReconcilePerconaServerMongoDB) mongosStatus(ctx context.Context, cr *api.PerconaServerMongoDB) 
(api.MongosStatus, error) { + status := api.MongosStatus{ + Status: api.AppStateInit, + } + + sts := psmdb.MongosStatefulset(cr) + err := r.client.Get(ctx, types.NamespacedName{Name: sts.Name, Namespace: sts.Namespace}, sts) + if err != nil && k8serrors.IsNotFound(err) { + return status, nil + } + if err != nil && !k8serrors.IsNotFound(err) { + return api.MongosStatus{}, errors.Wrapf(err, "get statefulset %s", sts.Name) + } + list, err := r.getMongosPods(ctx, cr) if err != nil { + return api.MongosStatus{}, fmt.Errorf("get list: %v", err) } - status := api.MongosStatus{ - Size: len(list.Items), - Status: api.AppStateInit, - } + status.Size = len(list.Items) for _, pod := range list.Items { for _, cntr := range pod.Status.ContainerStatuses { @@ -344,7 +356,7 @@ func (r *ReconcilePerconaServerMongoDB) mongosStatus(ctx context.Context, cr *ap status.Status = api.AppStateStopping case cr.Spec.Pause: status.Status = api.AppStatePaused - case status.Size == status.Ready: + case status.Size > 0 && status.Size == status.Ready: status.Status = api.AppStateReady } diff --git a/pkg/controller/perconaservermongodb/status_test.go b/pkg/controller/perconaservermongodb/status_test.go index 9c690e79f7..eeba547941 100644 --- a/pkg/controller/perconaservermongodb/status_test.go +++ b/pkg/controller/perconaservermongodb/status_test.go @@ -4,24 +4,34 @@ import ( "context" "testing" - api "github.com/percona/percona-server-mongodb-operator/pkg/apis/psmdb/v1" - appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" // nolint + + api "github.com/percona/percona-server-mongodb-operator/pkg/apis/psmdb/v1" + fakeBackup "github.com/percona/percona-server-mongodb-operator/pkg/psmdb/backup/fake" ) // creates a fake client to mock API calls with the mock objects -func buildFakeClient(objs []runtime.Object) *ReconcilePerconaServerMongoDB { +func buildFakeClient(objs ...client.Object) *ReconcilePerconaServerMongoDB { s := scheme.Scheme - s.AddKnownTypes(api.SchemeGroupVersion, &api.PerconaServerMongoDB{}) + s.AddKnownTypes(api.SchemeGroupVersion, new(api.PerconaServerMongoDB)) + s.AddKnownTypes(api.SchemeGroupVersion, new(api.PerconaServerMongoDBBackup)) + s.AddKnownTypes(api.SchemeGroupVersion, new(api.PerconaServerMongoDBBackupList)) + s.AddKnownTypes(api.SchemeGroupVersion, new(api.PerconaServerMongoDBRestore)) + s.AddKnownTypes(api.SchemeGroupVersion, new(api.PerconaServerMongoDBRestoreList)) - cl := fake.NewFakeClientWithScheme(s, objs...) 
+ cl := fake.NewClientBuilder().WithScheme(s).WithObjects(objs...).WithStatusSubresource(objs...).Build() - return &ReconcilePerconaServerMongoDB{client: cl, scheme: s} + return &ReconcilePerconaServerMongoDB{ + client: cl, + scheme: s, + lockers: newLockStore(), + newPBM: fakeBackup.NewPBM, + } } func TestUpdateStatus(t *testing.T) { @@ -29,15 +39,15 @@ func TestUpdateStatus(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: "psmdb-mock", Namespace: "psmdb"}, Spec: api.PerconaServerMongoDBSpec{ CRVersion: "1.12.0", - Replsets: []*api.ReplsetSpec{{Name: "rs0", Size: 3}, {Name: "rs1", Size: 3}}, - Sharding: api.Sharding{Enabled: true, Mongos: &api.MongosSpec{Size: 3}}, + Replsets: []*api.ReplsetSpec{{Name: "rs0", Size: 3}, {Name: "rs1", Size: 3}}, + Sharding: api.Sharding{Enabled: true, Mongos: &api.MongosSpec{Size: 3}}, }, } rs0 := &appsv1.StatefulSet{ObjectMeta: metav1.ObjectMeta{Name: "psmdb-mock-rs0", Namespace: "psmdb"}} rs1 := &appsv1.StatefulSet{ObjectMeta: metav1.ObjectMeta{Name: "psmdb-mock-rs1", Namespace: "psmdb"}} - r := buildFakeClient([]runtime.Object{cr, rs0, rs1}) + r := buildFakeClient(cr, rs0, rs1) if err := r.updateStatus(context.TODO(), cr, nil, api.AppStateInit); err != nil { t.Error(err) diff --git a/pkg/controller/perconaservermongodb/users.go b/pkg/controller/perconaservermongodb/users.go index 6b4e7acbc9..c410ba111a 100644 --- a/pkg/controller/perconaservermongodb/users.go +++ b/pkg/controller/perconaservermongodb/users.go @@ -8,7 +8,6 @@ import ( "fmt" "github.com/pkg/errors" - mongod "go.mongodb.org/mongo-driver/mongo" "golang.org/x/sync/errgroup" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -327,13 +326,13 @@ func (r *ReconcilePerconaServerMongoDB) updateUsers(ctx context.Context, cr *api return grp.Wait() } -func (u *systemUser) updateMongo(ctx context.Context, c *mongod.Client) error { +func (u *systemUser) updateMongo(ctx context.Context, c mongo.Client) error { if bytes.Equal(u.currName, u.name) { - err := mongo.UpdateUserPass(ctx, c, string(u.name), string(u.pass)) + err := c.UpdateUserPass(ctx, string(u.name), string(u.pass)) return errors.Wrapf(err, "change password for user %s", u.name) } - err := mongo.UpdateUser(ctx, c, string(u.currName), string(u.name), string(u.pass)) + err := c.UpdateUser(ctx, string(u.currName), string(u.name), string(u.pass)) return errors.Wrapf(err, "update user %s -> %s", u.currName, u.name) } diff --git a/pkg/controller/perconaservermongodb/version.go b/pkg/controller/perconaservermongodb/version.go index 78534a45e9..2eafcb9c96 100644 --- a/pkg/controller/perconaservermongodb/version.go +++ b/pkg/controller/perconaservermongodb/version.go @@ -21,7 +21,6 @@ import ( api "github.com/percona/percona-server-mongodb-operator/pkg/apis/psmdb/v1" v1 "github.com/percona/percona-server-mongodb-operator/pkg/apis/psmdb/v1" "github.com/percona/percona-server-mongodb-operator/pkg/k8s" - "github.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo" ) func (r *ReconcilePerconaServerMongoDB) deleteEnsureVersion(cr *api.PerconaServerMongoDB, id int) { @@ -442,7 +441,7 @@ func (r *ReconcilePerconaServerMongoDB) fetchVersionFromMongo(ctx context.Contex } }() - info, err := mongo.RSBuildInfo(ctx, session) + info, err := session.RSBuildInfo(ctx) if err != nil { return errors.Wrap(err, "get build info") } diff --git a/pkg/controller/perconaservermongodbbackup/backup.go b/pkg/controller/perconaservermongodbbackup/backup.go index 6c19bc1531..1ece92dd61 100644 --- 
a/pkg/controller/perconaservermongodbbackup/backup.go +++ b/pkg/controller/perconaservermongodbbackup/backup.go @@ -21,7 +21,7 @@ const ( ) type Backup struct { - pbm *backup.PBM + pbm backup.PBM spec api.BackupSpec } @@ -59,7 +59,7 @@ func (b *Backup) Start(ctx context.Context, k8sclient client.Client, cluster *ap compLevel = &l } - err = b.pbm.C.SendCmd(pbm.Cmd{ + err = b.pbm.SendCmd(pbm.Cmd{ Cmd: pbm.CmdBackup, Backup: &pbm.BackupCmd{ Name: name, @@ -108,7 +108,7 @@ func (b *Backup) Start(ctx context.Context, k8sclient client.Client, cluster *ap func (b *Backup) Status(ctx context.Context, cr *api.PerconaServerMongoDBBackup) (api.PerconaServerMongoDBBackupStatus, error) { status := cr.Status - meta, err := b.pbm.C.GetBackupMeta(cr.Status.PBMname) + meta, err := b.pbm.GetBackupMeta(cr.Status.PBMname) if err != nil && !errors.Is(err, pbm.ErrNotFound) { return status, errors.Wrap(err, "get pbm backup meta") } diff --git a/pkg/controller/perconaservermongodbbackup/perconaservermongodbbackup_controller.go b/pkg/controller/perconaservermongodbbackup/perconaservermongodbbackup_controller.go index 36e8953c26..a8f7af8688 100644 --- a/pkg/controller/perconaservermongodbbackup/perconaservermongodbbackup_controller.go +++ b/pkg/controller/perconaservermongodbbackup/perconaservermongodbbackup_controller.go @@ -45,7 +45,11 @@ func Add(mgr manager.Manager) error { // newReconciler returns a new reconcile.Reconciler func newReconciler(mgr manager.Manager) reconcile.Reconciler { - return &ReconcilePerconaServerMongoDBBackup{client: mgr.GetClient(), scheme: mgr.GetScheme()} + return &ReconcilePerconaServerMongoDBBackup{ + client: mgr.GetClient(), + scheme: mgr.GetScheme(), + newPBMFunc: backup.NewPBM, + } } // add adds a new Controller to mgr with r as the reconcile.Reconciler @@ -82,6 +86,8 @@ type ReconcilePerconaServerMongoDBBackup struct { // that reads objects from the cache and writes to the apiserver client client.Client scheme *runtime.Scheme + + newPBMFunc backup.NewPBMFunc } // Reconcile reads that state of the cluster for a PerconaServerMongoDBBackup object and makes changes based on the state read @@ -207,7 +213,7 @@ func (r *ReconcilePerconaServerMongoDBBackup) reconcile( return status, errors.Wrap(err, "failed to run backup") } - cjobs, err := backup.HasActiveJobs(ctx, r.client, cluster, backup.NewBackupJob(cr.Name), backup.NotPITRLock) + cjobs, err := backup.HasActiveJobs(ctx, r.newPBMFunc, r.client, cluster, backup.NewBackupJob(cr.Name), backup.NotPITRLock) if err != nil { return status, errors.Wrap(err, "check for concurrent jobs") } @@ -340,7 +346,7 @@ func (r *ReconcilePerconaServerMongoDBBackup) deleteBackupFinalizer(ctx context. var err error if b.pbm != nil { - meta, err = b.pbm.C.GetBackupMeta(cr.Status.PBMname) + meta, err = b.pbm.GetBackupMeta(cr.Status.PBMname) if err != nil { if !errors.Is(err, pbm.ErrNotFound) { return errors.Wrap(err, "get backup meta") @@ -378,14 +384,14 @@ func (r *ReconcilePerconaServerMongoDBBackup) deleteBackupFinalizer(ctx context. 
if err != nil { return errors.Wrapf(err, "set backup config with storage %s", cr.Spec.StorageName) } - e := b.pbm.C.Logger().NewEvent(string(pbm.CmdDeleteBackup), "", "", primitive.Timestamp{}) + e := b.pbm.Logger().NewEvent(string(pbm.CmdDeleteBackup), "", "", primitive.Timestamp{}) // We should delete PITR oplog chunks until `LastWriteTS` of the backup, // as it's not possible to delete backup if it is a base for the PITR timeline err = r.deletePITR(ctx, b, meta.LastWriteTS, e) if err != nil { return errors.Wrap(err, "failed to delete PITR") } - err = b.pbm.C.DeleteBackup(cr.Status.PBMname, e) + err = b.pbm.DeleteBackup(cr.Status.PBMname, e) if err != nil { return errors.Wrap(err, "failed to delete backup") } @@ -396,12 +402,12 @@ func (r *ReconcilePerconaServerMongoDBBackup) deleteBackupFinalizer(ctx context. func (r *ReconcilePerconaServerMongoDBBackup) deletePITR(ctx context.Context, b *Backup, until primitive.Timestamp, e *pbmLog.Event) error { log := logf.FromContext(ctx) - stg, err := b.pbm.C.GetStorage(e) + stg, err := b.pbm.GetStorage(e) if err != nil { return errors.Wrap(err, "get storage") } - chunks, err := b.pbm.C.PITRGetChunksSlice("", primitive.Timestamp{}, until) + chunks, err := b.pbm.PITRGetChunksSlice("", primitive.Timestamp{}, until) if err != nil { return errors.Wrap(err, "get pitr chunks") } @@ -415,7 +421,7 @@ func (r *ReconcilePerconaServerMongoDBBackup) deletePITR(ctx context.Context, b return errors.Wrapf(err, "delete pitr chunk '%s' (%v) from storage", chnk.FName, chnk) } - _, err = b.pbm.C.Conn.Database(pbm.DB).Collection(pbm.PITRChunksCollection).DeleteOne( + _, err = b.pbm.Conn().Database(pbm.DB).Collection(pbm.PITRChunksCollection).DeleteOne( ctx, bson.D{ {Key: "rs", Value: chnk.RS}, diff --git a/pkg/controller/perconaservermongodbrestore/logical.go b/pkg/controller/perconaservermongodbrestore/logical.go index 464247276e..32664dcfe9 100644 --- a/pkg/controller/perconaservermongodbrestore/logical.go +++ b/pkg/controller/perconaservermongodbrestore/logical.go @@ -42,7 +42,7 @@ func (r *ReconcilePerconaServerMongoDBRestore) reconcileLogicalRestore(ctx conte return status, errors.Wrapf(err, "set defaults for %s/%s", cluster.Namespace, cluster.Name) } - cjobs, err := backup.HasActiveJobs(ctx, r.client, cluster, backup.NewRestoreJob(cr), backup.NotPITRLock) + cjobs, err := backup.HasActiveJobs(ctx, r.newPBMFunc, r.client, cluster, backup.NewRestoreJob(cr), backup.NotPITRLock) if err != nil { return status, errors.Wrap(err, "check for concurrent jobs") } @@ -112,7 +112,7 @@ func (r *ReconcilePerconaServerMongoDBRestore) reconcileLogicalRestore(ctx conte return status, err } - meta, err := pbmc.C.GetRestoreMeta(cr.Status.PBMname) + meta, err := pbmc.GetRestoreMeta(cr.Status.PBMname) if err != nil && !errors.Is(err, pbm.ErrNotFound) { return status, errors.Wrap(err, "get pbm metadata") } @@ -144,12 +144,12 @@ func (r *ReconcilePerconaServerMongoDBRestore) reconcileLogicalRestore(ctx conte return status, nil } -func reEnablePITR(pbm *backup.PBM, backup psmdbv1.BackupSpec) (err error) { +func reEnablePITR(pbm backup.PBM, backup psmdbv1.BackupSpec) (err error) { if !backup.IsEnabledPITR() { return } - err = pbm.C.SetConfigVar("pitr.enabled", "true") + err = pbm.SetConfigVar("pitr.enabled", "true") if err != nil { return } @@ -157,9 +157,9 @@ func reEnablePITR(pbm *backup.PBM, backup psmdbv1.BackupSpec) (err error) { return } -func runRestore(ctx context.Context, backup string, pbmc *backup.PBM, pitr *psmdbv1.PITRestoreSpec) (string, error) { - e := 
pbmc.C.Logger().NewEvent(string(pbm.CmdResync), "", "", primitive.Timestamp{}) - err := pbmc.C.ResyncStorage(e) +func runRestore(ctx context.Context, backup string, pbmc backup.PBM, pitr *psmdbv1.PITRestoreSpec) (string, error) { + e := pbmc.Logger().NewEvent(string(pbm.CmdResync), "", "", primitive.Timestamp{}) + err := pbmc.ResyncStorage(e) if err != nil { return "", errors.Wrap(err, "set resync backup list from the store") } @@ -207,7 +207,7 @@ func runRestore(ctx context.Context, backup string, pbmc *backup.PBM, pitr *psmd } } - if err = pbmc.C.SendCmd(cmd); err != nil { + if err = pbmc.SendCmd(cmd); err != nil { return "", errors.Wrap(err, "send restore cmd") } diff --git a/pkg/controller/perconaservermongodbrestore/perconaservermongodbrestore_controller.go b/pkg/controller/perconaservermongodbrestore/perconaservermongodbrestore_controller.go index 5b90a05a66..fd8ef0f0af 100644 --- a/pkg/controller/perconaservermongodbrestore/perconaservermongodbrestore_controller.go +++ b/pkg/controller/perconaservermongodbrestore/perconaservermongodbrestore_controller.go @@ -24,6 +24,7 @@ import ( "github.com/percona/percona-server-mongodb-operator/clientcmd" psmdbv1 "github.com/percona/percona-server-mongodb-operator/pkg/apis/psmdb/v1" + "github.com/percona/percona-server-mongodb-operator/pkg/psmdb/backup" ) // Add creates a new PerconaServerMongoDBRestore Controller and adds it to the Manager. The Manager will set fields on the Controller @@ -45,9 +46,10 @@ func newReconciler(mgr manager.Manager) (reconcile.Reconciler, error) { } return &ReconcilePerconaServerMongoDBRestore{ - client: mgr.GetClient(), - scheme: mgr.GetScheme(), - clientcmd: cli, + client: mgr.GetClient(), + scheme: mgr.GetScheme(), + clientcmd: cli, + newPBMFunc: backup.NewPBM, }, nil } @@ -85,6 +87,8 @@ type ReconcilePerconaServerMongoDBRestore struct { client client.Client scheme *runtime.Scheme clientcmd *clientcmd.Client + + newPBMFunc backup.NewPBMFunc } // Reconcile reads that state of the cluster for a PerconaServerMongoDBRestore object and makes changes based on the state read diff --git a/pkg/psmdb/backup/backup.go b/pkg/psmdb/backup/backup.go index 2ed648fd63..1bdc4c2d8f 100644 --- a/pkg/psmdb/backup/backup.go +++ b/pkg/psmdb/backup/backup.go @@ -45,7 +45,7 @@ func NewRestoreJob(cr *api.PerconaServerMongoDBRestore) Job { // HasActiveJobs returns true if there are running backups or restores // in given cluster and namespace -func HasActiveJobs(ctx context.Context, cl client.Client, cluster *api.PerconaServerMongoDB, current Job, allowLock ...LockHeaderPredicate) (bool, error) { +func HasActiveJobs(ctx context.Context, newPBMFunc NewPBMFunc, cl client.Client, cluster *api.PerconaServerMongoDB, current Job, allowLock ...LockHeaderPredicate) (bool, error) { l := log.FromContext(ctx) bcps := &api.PerconaServerMongoDBBackupList{} @@ -94,7 +94,7 @@ func HasActiveJobs(ctx context.Context, cl client.Client, cluster *api.PerconaSe } } - pbm, err := NewPBM(ctx, cl, cluster) + pbm, err := newPBMFunc(ctx, cl, cluster) if err != nil { return false, errors.Wrap(err, "getting PBM object") } diff --git a/pkg/psmdb/backup/fake/pbm.go b/pkg/psmdb/backup/fake/pbm.go new file mode 100644 index 0000000000..ab196ad25b --- /dev/null +++ b/pkg/psmdb/backup/fake/pbm.go @@ -0,0 +1,73 @@ +package fake + +import ( + "context" + + "github.com/percona/percona-backup-mongodb/pbm" + pbmLog "github.com/percona/percona-backup-mongodb/pbm/log" + "github.com/percona/percona-backup-mongodb/pbm/storage" + "go.mongodb.org/mongo-driver/bson/primitive" + 
"go.mongodb.org/mongo-driver/mongo" + "sigs.k8s.io/controller-runtime/pkg/client" + + api "github.com/percona/percona-server-mongodb-operator/pkg/apis/psmdb/v1" + "github.com/percona/percona-server-mongodb-operator/pkg/psmdb/backup" +) + +type fakePBM struct { +} + +func NewPBM(_ context.Context, _ client.Client, _ *api.PerconaServerMongoDB) (backup.PBM, error) { + return new(fakePBM), nil +} +func (p *fakePBM) Conn() *mongo.Client { + return nil +} +func (p *fakePBM) GetPITRChunkContains(ctx context.Context, unixTS int64) (*pbm.OplogChunk, error) { + return nil, nil +} +func (p *fakePBM) GetLatestTimelinePITR() (pbm.Timeline, error) { + return pbm.Timeline{}, nil +} +func (p *fakePBM) PITRGetChunksSlice(rs string, from, to primitive.Timestamp) ([]pbm.OplogChunk, error) { + return nil, nil +} +func (p *fakePBM) Logger() *pbmLog.Logger { + return nil +} +func (p *fakePBM) GetStorage(l *pbmLog.Event) (storage.Storage, error) { + return nil, nil +} +func (p *fakePBM) ResyncStorage(l *pbmLog.Event) error { + return nil +} +func (p *fakePBM) SendCmd(cmd pbm.Cmd) error { + return nil +} +func (p *fakePBM) Close(ctx context.Context) error { + return nil +} +func (p *fakePBM) HasLocks(predicates ...backup.LockHeaderPredicate) (bool, error) { + return false, nil +} +func (p *fakePBM) GetRestoreMeta(name string) (*pbm.RestoreMeta, error) { + return nil, nil +} +func (p *fakePBM) GetBackupMeta(name string) (*pbm.BackupMeta, error) { + return nil, nil +} +func (p *fakePBM) DeleteBackup(name string, l *pbmLog.Event) error { + return nil +} +func (p *fakePBM) SetConfig(ctx context.Context, k8sclient client.Client, cluster *api.PerconaServerMongoDB, stg api.BackupStorageSpec) error { + return nil +} +func (p *fakePBM) SetConfigVar(key, val string) error { + return nil +} +func (p *fakePBM) GetConfigVar(key string) (any, error) { + return nil, nil +} +func (p *fakePBM) DeleteConfigVar(key string) error { + return nil +} diff --git a/pkg/psmdb/backup/job.go b/pkg/psmdb/backup/job.go index 6a33b54174..73f955f159 100644 --- a/pkg/psmdb/backup/job.go +++ b/pkg/psmdb/backup/job.go @@ -6,7 +6,6 @@ import ( "time" batchv1 "k8s.io/api/batch/v1" - batchv1beta1 "k8s.io/api/batch/v1beta1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -15,11 +14,11 @@ import ( "github.com/pkg/errors" ) -func BackupCronJob(cr *api.PerconaServerMongoDB, task *api.BackupTaskSpec) (batchv1beta1.CronJob, error) { +func BackupCronJob(cr *api.PerconaServerMongoDB, task *api.BackupTaskSpec) (batchv1.CronJob, error) { backupSpec := cr.Spec.Backup containerArgs, err := newBackupCronJobContainerArgs(cr, task) if err != nil { - return batchv1beta1.CronJob{}, errors.Wrap(err, "cannot generate container arguments") + return batchv1.CronJob{}, errors.Wrap(err, "cannot generate container arguments") } backupPod := corev1.PodSpec{ @@ -50,9 +49,9 @@ func BackupCronJob(cr *api.PerconaServerMongoDB, task *api.BackupTaskSpec) (batc RuntimeClassName: backupSpec.RuntimeClassName, } - return batchv1beta1.CronJob{ + return batchv1.CronJob{ TypeMeta: metav1.TypeMeta{ - APIVersion: "batch/v1beta1", + APIVersion: "batch/v1", Kind: "CronJob", }, ObjectMeta: metav1.ObjectMeta{ @@ -61,10 +60,10 @@ func BackupCronJob(cr *api.PerconaServerMongoDB, task *api.BackupTaskSpec) (batc Labels: NewBackupCronJobLabels(cr.Name, backupSpec.Labels), Annotations: backupSpec.Annotations, }, - Spec: batchv1beta1.CronJobSpec{ + Spec: batchv1.CronJobSpec{ Schedule: task.Schedule, - ConcurrencyPolicy: batchv1beta1.ForbidConcurrent, - JobTemplate: 
diff --git a/pkg/psmdb/backup/job.go b/pkg/psmdb/backup/job.go
index 6a33b54174..73f955f159 100644
--- a/pkg/psmdb/backup/job.go
+++ b/pkg/psmdb/backup/job.go
@@ -6,7 +6,6 @@ import (
 	"time"

 	batchv1 "k8s.io/api/batch/v1"
-	batchv1beta1 "k8s.io/api/batch/v1beta1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -15,11 +14,11 @@ import (
 	"github.com/pkg/errors"
 )

-func BackupCronJob(cr *api.PerconaServerMongoDB, task *api.BackupTaskSpec) (batchv1beta1.CronJob, error) {
+func BackupCronJob(cr *api.PerconaServerMongoDB, task *api.BackupTaskSpec) (batchv1.CronJob, error) {
 	backupSpec := cr.Spec.Backup

 	containerArgs, err := newBackupCronJobContainerArgs(cr, task)
 	if err != nil {
-		return batchv1beta1.CronJob{}, errors.Wrap(err, "cannot generate container arguments")
+		return batchv1.CronJob{}, errors.Wrap(err, "cannot generate container arguments")
 	}

 	backupPod := corev1.PodSpec{
@@ -50,9 +49,9 @@ func BackupCronJob(cr *api.PerconaServerMongoDB, task *api.BackupTaskSpec) (batc
 		RuntimeClassName: backupSpec.RuntimeClassName,
 	}

-	return batchv1beta1.CronJob{
+	return batchv1.CronJob{
 		TypeMeta: metav1.TypeMeta{
-			APIVersion: "batch/v1beta1",
+			APIVersion: "batch/v1",
 			Kind:       "CronJob",
 		},
 		ObjectMeta: metav1.ObjectMeta{
@@ -61,10 +60,10 @@ func BackupCronJob(cr *api.PerconaServerMongoDB, task *api.BackupTaskSpec) (batc
 			Labels:      NewBackupCronJobLabels(cr.Name, backupSpec.Labels),
 			Annotations: backupSpec.Annotations,
 		},
-		Spec: batchv1beta1.CronJobSpec{
+		Spec: batchv1.CronJobSpec{
 			Schedule:          task.Schedule,
-			ConcurrencyPolicy: batchv1beta1.ForbidConcurrent,
-			JobTemplate: batchv1beta1.JobTemplateSpec{
+			ConcurrencyPolicy: batchv1.ForbidConcurrent,
+			JobTemplate: batchv1.JobTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{
 					Labels:      NewBackupCronJobLabels(cr.Name, backupSpec.Labels),
 					Annotations: backupSpec.Annotations,
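The job.go change is more than cosmetic: CronJob graduated to batch/v1 in Kubernetes 1.21 and the batch/v1beta1 version was removed entirely in 1.25, so scheduled backups would stop working on current clusters without it. Callers are unaffected apart from the returned type; a hedged usage sketch (ctx and cl are placeholders for a context and a controller-runtime client):

	// cj is now a batchv1.CronJob and can be applied like any other object.
	cj, err := backup.BackupCronJob(cr, task)
	if err != nil {
		return err
	}
	if err := cl.Create(ctx, &cj); err != nil {
		return err
	}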
diff --git a/pkg/psmdb/backup/pbm.go b/pkg/psmdb/backup/pbm.go
index 8b43275473..82355e42e0 100644
--- a/pkg/psmdb/backup/pbm.go
+++ b/pkg/psmdb/backup/pbm.go
@@ -9,6 +9,7 @@ import (
 	"time"

 	"github.com/percona/percona-backup-mongodb/pbm"
+	pbmLog "github.com/percona/percona-backup-mongodb/pbm/log"
 	"github.com/percona/percona-backup-mongodb/pbm/storage"
 	"github.com/percona/percona-backup-mongodb/pbm/storage/azure"
 	"github.com/percona/percona-backup-mongodb/pbm/storage/s3"
@@ -33,12 +34,36 @@ const (
 	AzureStorageAccountKeySecretKey = "AZURE_STORAGE_ACCOUNT_KEY"
 )

-type PBM struct {
-	C         *pbm.PBM
+type pbmC struct {
+	*pbm.PBM
 	k8c       client.Client
 	namespace string
 }

+type PBM interface {
+	Conn() *mongo.Client
+
+	GetPITRChunkContains(ctx context.Context, unixTS int64) (*pbm.OplogChunk, error)
+	GetLatestTimelinePITR() (pbm.Timeline, error)
+	PITRGetChunksSlice(rs string, from, to primitive.Timestamp) ([]pbm.OplogChunk, error)
+
+	Logger() *pbmLog.Logger
+	GetStorage(l *pbmLog.Event) (storage.Storage, error)
+	ResyncStorage(l *pbmLog.Event) error
+	SendCmd(cmd pbm.Cmd) error
+	Close(ctx context.Context) error
+	HasLocks(predicates ...LockHeaderPredicate) (bool, error)
+
+	GetRestoreMeta(name string) (*pbm.RestoreMeta, error)
+	GetBackupMeta(name string) (*pbm.BackupMeta, error)
+	DeleteBackup(name string, l *pbmLog.Event) error
+
+	SetConfig(ctx context.Context, k8sclient client.Client, cluster *api.PerconaServerMongoDB, stg api.BackupStorageSpec) error
+	SetConfigVar(key, val string) error
+	GetConfigVar(key string) (any, error)
+	DeleteConfigVar(key string) error
+}
+
 func getMongoUri(ctx context.Context, k8sclient client.Client, cr *api.PerconaServerMongoDB, addrs []string) (string, error) {
 	usersSecretName := api.UserSecretName(cr)
 	scr, err := getSecret(ctx, k8sclient, cr.Namespace, usersSecretName)
@@ -97,9 +122,11 @@ func getMongoUri(ctx context.Context, k8sclient client.Client, cr *api.PerconaSe
 	return murl, nil
 }

+type NewPBMFunc func(ctx context.Context, c client.Client, cluster *api.PerconaServerMongoDB) (PBM, error)
+
 // NewPBM creates a new connection to PBM.
 // It should be closed after the last use with Close().
-func NewPBM(ctx context.Context, c client.Client, cluster *api.PerconaServerMongoDB) (*PBM, error) {
+func NewPBM(ctx context.Context, c client.Client, cluster *api.PerconaServerMongoDB) (PBM, error) {
 	rs := cluster.Spec.Replsets[0]

 	pods, err := psmdb.GetRSPods(ctx, c, cluster, rs.Name, false)
@@ -128,8 +155,8 @@ func NewPBM(ctx context.Context, c client.Client, cluster *api.PerconaServerMong
 	pbmc.InitLogger("", "")

-	return &PBM{
-		C:         pbmc,
+	return &pbmC{
+		PBM:       pbmc,
 		k8c:       c,
 		namespace: cluster.Namespace,
 	}, nil
@@ -254,26 +281,29 @@ func GetPBMConfig(ctx context.Context, k8sclient client.Client, cluster *api.Per
 	return conf, nil
 }

+func (b *pbmC) Conn() *mongo.Client {
+	return b.PBM.Conn
+}
+
 // SetConfig sets the pbm config with storage defined in the cluster CR
 // by given storageName
-func (b *PBM) SetConfig(ctx context.Context, k8sclient client.Client, cluster *api.PerconaServerMongoDB, stg api.BackupStorageSpec) error {
+func (b *pbmC) SetConfig(ctx context.Context, k8sclient client.Client, cluster *api.PerconaServerMongoDB, stg api.BackupStorageSpec) error {
 	conf, err := GetPBMConfig(ctx, k8sclient, cluster, stg)
 	if err != nil {
 		return errors.Wrap(err, "get PBM config")
 	}

-	if err := b.C.SetConfig(conf); err != nil {
+	if err := b.PBM.SetConfig(conf); err != nil {
 		return errors.Wrap(err, "write config")
 	}

 	time.Sleep(11 * time.Second) // give time to init new storage
-
 	return nil
 }

 // Close closes the PBM connection
-func (b *PBM) Close(ctx context.Context) error {
-	return b.C.Conn.Disconnect(ctx)
+func (b *pbmC) Close(ctx context.Context) error {
+	return b.PBM.Conn.Disconnect(ctx)
 }

 func getSecret(ctx context.Context, cl client.Client, namespace, secretName string) (*corev1.Secret, error) {
@@ -316,8 +346,8 @@ func NotJobLock(j Job) LockHeaderPredicate {
 	}
 }

-func (b *PBM) HasLocks(predicates ...LockHeaderPredicate) (bool, error) {
-	locks, err := b.C.GetLocks(&pbm.LockHeader{})
+func (b *pbmC) HasLocks(predicates ...LockHeaderPredicate) (bool, error) {
+	locks, err := b.PBM.GetLocks(&pbm.LockHeader{})
 	if err != nil {
 		return false, errors.Wrap(err, "getting lock data")
 	}
@@ -342,13 +372,13 @@ func (b *PBM) HasLocks(predicates ...LockHeaderPredicate) (bool, error) {
 var errNoOplogsForPITR = errors.New("there is no oplogs that can cover the date/time or no oplogs at all")

-func (b *PBM) GetLastPITRChunk() (*pbm.OplogChunk, error) {
-	nodeInfo, err := b.C.GetNodeInfo()
+func (b *pbmC) GetLastPITRChunk() (*pbm.OplogChunk, error) {
+	nodeInfo, err := b.PBM.GetNodeInfo()
 	if err != nil {
 		return nil, errors.Wrap(err, "getting node information")
 	}

-	c, err := b.C.PITRLastChunkMeta(nodeInfo.SetName)
+	c, err := b.PBM.PITRLastChunkMeta(nodeInfo.SetName)
 	if err != nil {
 		if errors.Is(err, mongo.ErrNoDocuments) {
 			return nil, errNoOplogsForPITR
@@ -363,19 +393,19 @@ func (b *PBM) GetLastPITRChunk() (*pbm.OplogChunk, error) {
 	return c, nil
 }

-func (b *PBM) GetTimelinesPITR() ([]pbm.Timeline, error) {
+func (b *pbmC) GetTimelinesPITR() ([]pbm.Timeline, error) {
 	var (
 		now       = time.Now().UTC().Unix()
 		timelines [][]pbm.Timeline
 	)

-	shards, err := b.C.ClusterMembers()
+	shards, err := b.PBM.ClusterMembers()
 	if err != nil {
 		return nil, errors.Wrap(err, "getting cluster members")
 	}

 	for _, s := range shards {
-		rsTimelines, err := b.C.PITRGetValidTimelines(s.RS, primitive.Timestamp{T: uint32(now)})
+		rsTimelines, err := b.PBM.PITRGetValidTimelines(s.RS, primitive.Timestamp{T: uint32(now)})
 		if err != nil {
 			return nil, errors.Wrapf(err, "getting timelines for %s", s.RS)
 		}
@@ -386,7 +416,7 @@ func (b *PBM) GetTimelinesPITR() ([]pbm.Timeline, error) {
 	return pbm.MergeTimelines(timelines...), nil
 }

-func (b *PBM) GetLatestTimelinePITR() (pbm.Timeline, error) {
+func (b *pbmC) GetLatestTimelinePITR() (pbm.Timeline, error) {
 	timelines, err := b.GetTimelinesPITR()
 	if err != nil {
 		return pbm.Timeline{}, err
@@ -401,8 +431,8 @@ func (b *PBM) GetLatestTimelinePITR() (pbm.Timeline, error) {

 // PITRGetChunkContains returns a pitr slice chunk that belongs to the
 // given replica set and contains the given timestamp
-func (p *PBM) pitrGetChunkContains(ctx context.Context, rs string, ts primitive.Timestamp) (*pbm.OplogChunk, error) {
-	res := p.C.Conn.Database(pbm.DB).Collection(pbm.PITRChunksCollection).FindOne(
+func (p *pbmC) pitrGetChunkContains(ctx context.Context, rs string, ts primitive.Timestamp) (*pbm.OplogChunk, error) {
+	res := p.PBM.Conn.Database(pbm.DB).Collection(pbm.PITRChunksCollection).FindOne(
 		ctx,
 		bson.D{
 			{"rs", rs},
@@ -419,8 +449,8 @@ func (p *PBM) pitrGetChunkContains(ctx context.Context, rs string, ts primitive.
 	return chnk, errors.Wrap(err, "decode")
 }

-func (b *PBM) GetPITRChunkContains(ctx context.Context, unixTS int64) (*pbm.OplogChunk, error) {
-	nodeInfo, err := b.C.GetNodeInfo()
+func (b *pbmC) GetPITRChunkContains(ctx context.Context, unixTS int64) (*pbm.OplogChunk, error) {
+	nodeInfo, err := b.PBM.GetNodeInfo()
 	if err != nil {
 		return nil, errors.Wrap(err, "getting node information")
 	}
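A design note on the adapter: pbmC satisfies the PBM interface almost entirely through method promotion from the embedded *pbm.PBM; hand-written wrappers exist only where promotion cannot work, e.g. Conn is a struct field on pbm.PBM (so the interface's Conn() accessor must be written out) and SetConfig/Close layer extra behavior on top. A compile-time guard would pin that contract; a suggested addition, not part of this diff:

	// Fails to compile if pbmC ever stops satisfying PBM, e.g. after a
	// percona-backup-mongodb upgrade changes a promoted method signature.
	var _ PBM = (*pbmC)(nil)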
"k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" "sigs.k8s.io/controller-runtime/pkg/client" @@ -25,13 +24,13 @@ func clusterLabels(cr *api.PerconaServerMongoDB) map[string]string { } } -func rsLabels(cr *api.PerconaServerMongoDB, rsName string) map[string]string { +func RSLabels(cr *api.PerconaServerMongoDB, rsName string) map[string]string { lbls := clusterLabels(cr) lbls["app.kubernetes.io/replset"] = rsName return lbls } -func mongosLabels(cr *api.PerconaServerMongoDB) map[string]string { +func MongosLabels(cr *api.PerconaServerMongoDB) map[string]string { lbls := clusterLabels(cr) lbls["app.kubernetes.io/component"] = "mongos" return lbls @@ -43,7 +42,7 @@ func GetRSPods(ctx context.Context, k8sclient client.Client, cr *api.PerconaServ &pods, &client.ListOptions{ Namespace: cr.Namespace, - LabelSelector: labels.SelectorFromSet(rsLabels(cr, rsName)), + LabelSelector: labels.SelectorFromSet(RSLabels(cr, rsName)), }, ) if err != nil { @@ -103,8 +102,8 @@ func filterPodsByComponent(list corev1.PodList, component string) []corev1.Pod { return pods } -func GetPrimaryPod(ctx context.Context, client *mgo.Client) (string, error) { - status, err := mongo.RSStatus(ctx, client) +func GetPrimaryPod(ctx context.Context, mgoClient mongo.Client) (string, error) { + status, err := mgoClient.RSStatus(ctx) if err != nil { return "", errors.Wrap(err, "failed to get rs status") } @@ -118,7 +117,7 @@ func GetMongosPods(ctx context.Context, cl client.Client, cr *api.PerconaServerM &pods, &client.ListOptions{ Namespace: cr.Namespace, - LabelSelector: labels.SelectorFromSet(mongosLabels(cr)), + LabelSelector: labels.SelectorFromSet(MongosLabels(cr)), }, ) @@ -131,7 +130,7 @@ func GetMongosServices(ctx context.Context, cl client.Client, cr *api.PerconaSer list, &client.ListOptions{ Namespace: cr.Namespace, - LabelSelector: labels.SelectorFromSet(mongosLabels(cr)), + LabelSelector: labels.SelectorFromSet(MongosLabels(cr)), }, ) if err != nil { diff --git a/pkg/psmdb/mongo/fake/client.go b/pkg/psmdb/mongo/fake/client.go new file mode 100644 index 0000000000..8fbf16dbc5 --- /dev/null +++ b/pkg/psmdb/mongo/fake/client.go @@ -0,0 +1,149 @@ +package fake + +import ( + "context" + + "go.mongodb.org/mongo-driver/bson" + mgo "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + "go.mongodb.org/mongo-driver/mongo/readpref" + + "github.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo" +) + +type fakeMongoClient struct { +} + +func NewClient() mongo.Client { + return &fakeMongoClient{} +} + +func (c *fakeMongoClient) Disconnect(ctx context.Context) error { + return nil +} + +type fakeMongoClientDatabase struct { +} + +func (c *fakeMongoClientDatabase) RunCommand(ctx context.Context, runCommand interface{}, opts ...*options.RunCmdOptions) *mgo.SingleResult { + return singleResult(mongo.OKResponse{ + OK: 1, + }) +} + +func singleResult(doc any) *mgo.SingleResult { + bsonData, err := bson.Marshal(doc) + if err != nil { + return mgo.NewSingleResultFromDocument(bson.D{}, err, nil) + } + var bsonD bson.D + err = bson.Unmarshal(bsonData, &bsonD) + if err != nil { + return mgo.NewSingleResultFromDocument(bson.D{}, err, nil) + } + return mgo.NewSingleResultFromDocument(bsonD, nil, nil) +} + +func (c *fakeMongoClient) Database(name string, opts ...*options.DatabaseOptions) mongo.ClientDatabase { + return new(fakeMongoClientDatabase) +} + +func (c *fakeMongoClient) Ping(ctx context.Context, rp *readpref.ReadPref) error { + return nil +} + +func (c *fakeMongoClient) 
diff --git a/pkg/psmdb/getters.go b/pkg/psmdb/getters.go
index 8bbe2db422..aa3dcc147d 100644
--- a/pkg/psmdb/getters.go
+++ b/pkg/psmdb/getters.go
@@ -4,7 +4,6 @@ import (
 	"context"
 	"sort"

-	mgo "go.mongodb.org/mongo-driver/mongo"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -25,13 +24,13 @@ func clusterLabels(cr *api.PerconaServerMongoDB) map[string]string {
 	}
 }

-func rsLabels(cr *api.PerconaServerMongoDB, rsName string) map[string]string {
+func RSLabels(cr *api.PerconaServerMongoDB, rsName string) map[string]string {
 	lbls := clusterLabels(cr)
 	lbls["app.kubernetes.io/replset"] = rsName
 	return lbls
 }

-func mongosLabels(cr *api.PerconaServerMongoDB) map[string]string {
+func MongosLabels(cr *api.PerconaServerMongoDB) map[string]string {
 	lbls := clusterLabels(cr)
 	lbls["app.kubernetes.io/component"] = "mongos"
 	return lbls
@@ -43,7 +42,7 @@ func GetRSPods(ctx context.Context, k8sclient client.Client, cr *api.PerconaServ
 		&pods,
 		&client.ListOptions{
 			Namespace:     cr.Namespace,
-			LabelSelector: labels.SelectorFromSet(rsLabels(cr, rsName)),
+			LabelSelector: labels.SelectorFromSet(RSLabels(cr, rsName)),
 		},
 	)
 	if err != nil {
@@ -103,8 +102,8 @@ func filterPodsByComponent(list corev1.PodList, component string) []corev1.Pod {
 	return pods
 }

-func GetPrimaryPod(ctx context.Context, client *mgo.Client) (string, error) {
-	status, err := mongo.RSStatus(ctx, client)
+func GetPrimaryPod(ctx context.Context, mgoClient mongo.Client) (string, error) {
+	status, err := mgoClient.RSStatus(ctx)
 	if err != nil {
 		return "", errors.Wrap(err, "failed to get rs status")
 	}
@@ -118,7 +117,7 @@ func GetMongosPods(ctx context.Context, cl client.Client, cr *api.PerconaServerM
 		&pods,
 		&client.ListOptions{
 			Namespace:     cr.Namespace,
-			LabelSelector: labels.SelectorFromSet(mongosLabels(cr)),
+			LabelSelector: labels.SelectorFromSet(MongosLabels(cr)),
 		},
 	)

@@ -131,7 +130,7 @@ func GetMongosServices(ctx context.Context, cl client.Client, cr *api.PerconaSer
 		list,
 		&client.ListOptions{
 			Namespace:     cr.Namespace,
-			LabelSelector: labels.SelectorFromSet(mongosLabels(cr)),
+			LabelSelector: labels.SelectorFromSet(MongosLabels(cr)),
 		},
 	)
 	if err != nil {
diff --git a/pkg/psmdb/mongo/fake/client.go b/pkg/psmdb/mongo/fake/client.go
new file mode 100644
index 0000000000..8fbf16dbc5
--- /dev/null
+++ b/pkg/psmdb/mongo/fake/client.go
@@ -0,0 +1,149 @@
+package fake
+
+import (
+	"context"
+
+	"go.mongodb.org/mongo-driver/bson"
+	mgo "go.mongodb.org/mongo-driver/mongo"
+	"go.mongodb.org/mongo-driver/mongo/options"
+	"go.mongodb.org/mongo-driver/mongo/readpref"
+
+	"github.com/percona/percona-server-mongodb-operator/pkg/psmdb/mongo"
+)
+
+type fakeMongoClient struct {
+}
+
+func NewClient() mongo.Client {
+	return &fakeMongoClient{}
+}
+
+func (c *fakeMongoClient) Disconnect(ctx context.Context) error {
+	return nil
+}
+
+type fakeMongoClientDatabase struct {
+}
+
+func (c *fakeMongoClientDatabase) RunCommand(ctx context.Context, runCommand interface{}, opts ...*options.RunCmdOptions) *mgo.SingleResult {
+	return singleResult(mongo.OKResponse{
+		OK: 1,
+	})
+}
+
+func singleResult(doc any) *mgo.SingleResult {
+	bsonData, err := bson.Marshal(doc)
+	if err != nil {
+		return mgo.NewSingleResultFromDocument(bson.D{}, err, nil)
+	}
+
+	var bsonD bson.D
+	err = bson.Unmarshal(bsonData, &bsonD)
+	if err != nil {
+		return mgo.NewSingleResultFromDocument(bson.D{}, err, nil)
+	}
+
+	return mgo.NewSingleResultFromDocument(bsonD, nil, nil)
+}
+
+func (c *fakeMongoClient) Database(name string, opts ...*options.DatabaseOptions) mongo.ClientDatabase {
+	return new(fakeMongoClientDatabase)
+}
+
+func (c *fakeMongoClient) Ping(ctx context.Context, rp *readpref.ReadPref) error {
+	return nil
+}
+
+func (c *fakeMongoClient) SetDefaultRWConcern(ctx context.Context, readConcern, writeConcern string) error {
+	return nil
+}
+
+func (c *fakeMongoClient) ReadConfig(ctx context.Context) (mongo.RSConfig, error) {
+	return mongo.RSConfig{}, nil
+}
+
+func (c *fakeMongoClient) CreateRole(ctx context.Context, role string, privileges []mongo.RolePrivilege, roles []interface{}) error {
+	return nil
+}
+
+func (c *fakeMongoClient) UpdateRole(ctx context.Context, role string, privileges []mongo.RolePrivilege, roles []interface{}) error {
+	return nil
+}
+
+func (c *fakeMongoClient) GetRole(ctx context.Context, role string) (*mongo.Role, error) {
+	return nil, nil
+}
+
+func (c *fakeMongoClient) CreateUser(ctx context.Context, user, pwd string, roles ...map[string]interface{}) error {
+	return nil
+}
+
+func (c *fakeMongoClient) AddShard(ctx context.Context, rsName, host string) error {
+	return nil
+}
+
+func (c *fakeMongoClient) WriteConfig(ctx context.Context, cfg mongo.RSConfig) error {
+	return nil
+}
+
+func (c *fakeMongoClient) RSStatus(ctx context.Context) (mongo.Status, error) {
+	return mongo.Status{}, nil
+}
+
+func (c *fakeMongoClient) StartBalancer(ctx context.Context) error {
+	return nil
+}
+
+func (c *fakeMongoClient) StopBalancer(ctx context.Context) error {
+	return nil
+}
+
+func (c *fakeMongoClient) IsBalancerRunning(ctx context.Context) (bool, error) {
+	return false, nil
+}
+
+func (c *fakeMongoClient) GetFCV(ctx context.Context) (string, error) {
+	return "", nil
+}
+
+func (c *fakeMongoClient) SetFCV(ctx context.Context, version string) error {
+	return nil
+}
+
+func (c *fakeMongoClient) ListDBs(ctx context.Context) (mongo.DBList, error) {
+	return mongo.DBList{}, nil
+}
+
+func (c *fakeMongoClient) ListShard(ctx context.Context) (mongo.ShardList, error) {
+	return mongo.ShardList{}, nil
+}
+
+func (c *fakeMongoClient) RemoveShard(ctx context.Context, shard string) (mongo.ShardRemoveResp, error) {
+	return mongo.ShardRemoveResp{}, nil
+}
+
+func (c *fakeMongoClient) RSBuildInfo(ctx context.Context) (mongo.BuildInfo, error) {
+	return mongo.BuildInfo{}, nil
+}
+
+func (c *fakeMongoClient) StepDown(ctx context.Context, force bool) error {
+	return nil
+}
+
+func (c *fakeMongoClient) IsMaster(ctx context.Context) (*mongo.IsMasterResp, error) {
+	return nil, nil
+}
+
+func (c *fakeMongoClient) GetUserInfo(ctx context.Context, username string) (*mongo.User, error) {
+	return nil, nil
+}
+
+func (c *fakeMongoClient) UpdateUserRoles(ctx context.Context, username string, roles []map[string]interface{}) error {
+	return nil
+}
+
+func (c *fakeMongoClient) UpdateUserPass(ctx context.Context, name, pass string) error {
+	return nil
+}
+
+func (c *fakeMongoClient) UpdateUser(ctx context.Context, currName, newName, pass string) error {
+	return nil
+}
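Unlike the PBM stub, this fake does not only return zero values: its RunCommand serves a real {ok: 1} response by round-tripping a struct through bson.Marshal into a bson.D that mgo.NewSingleResultFromDocument can wrap, so production decoding paths run unchanged. A hedged sketch of serving and decoding such a canned payload from inside this package:

	// Anything bson-marshalable can be served back as a SingleResult.
	res := singleResult(mongo.OKResponse{OK: 1})

	var out mongo.OKResponse
	if err := res.Decode(&out); err != nil {
		// handle decode error
	}
	// out.OK == 1, exactly as a production caller would observe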
diff --git a/pkg/psmdb/mongo/mongo.go b/pkg/psmdb/mongo/mongo.go
index 99e75cd608..2d8c61ff25 100644
--- a/pkg/psmdb/mongo/mongo.go
+++ b/pkg/psmdb/mongo/mongo.go
@@ -27,7 +27,54 @@ type Config struct {
 	Direct bool
 }

-func Dial(conf *Config) (*mongo.Client, error) {
+type Client interface {
+	Disconnect(ctx context.Context) error
+	Database(name string, opts ...*options.DatabaseOptions) ClientDatabase
+	Ping(ctx context.Context, rp *readpref.ReadPref) error
+
+	SetDefaultRWConcern(ctx context.Context, readConcern, writeConcern string) error
+	ReadConfig(ctx context.Context) (RSConfig, error)
+	CreateRole(ctx context.Context, role string, privileges []RolePrivilege, roles []interface{}) error
+	UpdateRole(ctx context.Context, role string, privileges []RolePrivilege, roles []interface{}) error
+	GetRole(ctx context.Context, role string) (*Role, error)
+	CreateUser(ctx context.Context, user, pwd string, roles ...map[string]interface{}) error
+	AddShard(ctx context.Context, rsName, host string) error
+	WriteConfig(ctx context.Context, cfg RSConfig) error
+	RSStatus(ctx context.Context) (Status, error)
+	StartBalancer(ctx context.Context) error
+	StopBalancer(ctx context.Context) error
+	IsBalancerRunning(ctx context.Context) (bool, error)
+	GetFCV(ctx context.Context) (string, error)
+	SetFCV(ctx context.Context, version string) error
+	ListDBs(ctx context.Context) (DBList, error)
+	ListShard(ctx context.Context) (ShardList, error)
+	RemoveShard(ctx context.Context, shard string) (ShardRemoveResp, error)
+	RSBuildInfo(ctx context.Context) (BuildInfo, error)
+	StepDown(ctx context.Context, force bool) error
+	IsMaster(ctx context.Context) (*IsMasterResp, error)
+	GetUserInfo(ctx context.Context, username string) (*User, error)
+	UpdateUserRoles(ctx context.Context, username string, roles []map[string]interface{}) error
+	UpdateUserPass(ctx context.Context, name, pass string) error
+	UpdateUser(ctx context.Context, currName, newName, pass string) error
+}
+
+type ClientDatabase interface {
+	RunCommand(ctx context.Context, runCommand interface{}, opts ...*options.RunCmdOptions) *mongo.SingleResult
+}
+
+type mongoClient struct {
+	*mongo.Client
+}
+
+func (c *mongoClient) Database(name string, opts ...*options.DatabaseOptions) ClientDatabase {
+	return c.Client.Database(name, opts...)
+}
+
+func ToInterface(client *mongo.Client) Client {
+	return &mongoClient{client}
+}
+
+func Dial(conf *Config) (Client, error) {
 	ctx, connectcancel := context.WithTimeout(context.Background(), 10*time.Second)
 	defer connectcancel()
@@ -64,10 +111,10 @@ func Dial(conf *Config) (*mongo.Client, error) {
 		return nil, errors.Wrap(err, "ping mongo")
 	}

-	return client, nil
+	return ToInterface(client), nil
 }

-func SetDefaultRWConcern(ctx context.Context, client *mongo.Client, readConcern, writeConcern string) error {
+func (client *mongoClient) SetDefaultRWConcern(ctx context.Context, readConcern, writeConcern string) error {
 	cmd := bson.D{
 		{Key: "setDefaultRWConcern", Value: 1},
 		{Key: "defaultReadConcern", Value: bson.D{{Key: "level", Value: readConcern}}},
@@ -82,7 +129,7 @@ func SetDefaultRWConcern(ctx context.Context, client *mongo.Client, readConcern,
 	return nil
 }

-func ReadConfig(ctx context.Context, client *mongo.Client) (RSConfig, error) {
+func (client *mongoClient) ReadConfig(ctx context.Context) (RSConfig, error) {
 	resp := ReplSetGetConfig{}
 	res := client.Database("admin").RunCommand(ctx, bson.D{{Key: "replSetGetConfig", Value: 1}})
 	if res.Err() != nil {
@@ -99,7 +146,7 @@ func ReadConfig(ctx context.Context, client *mongo.Client) (RSConfig, error) {
 	return *resp.Config, nil
 }

-func CreateRole(ctx context.Context, client *mongo.Client, role string, privileges []RolePrivilege, roles []interface{}) error {
+func (client *mongoClient) CreateRole(ctx context.Context, role string, privileges []RolePrivilege, roles []interface{}) error {
 	resp := OKResponse{}

 	privilegesArr := bson.A{}
@@ -135,7 +182,7 @@ func CreateRole(ctx context.Context, client *mongo.Client, role string, privileg
 	return nil
 }

-func UpdateRole(ctx context.Context, client *mongo.Client, role string, privileges []RolePrivilege, roles []interface{}) error {
+func (client *mongoClient) UpdateRole(ctx context.Context, role string, privileges []RolePrivilege, roles []interface{}) error {
 	resp := OKResponse{}
 	privilegesArr := bson.A{}
@@ -171,7 +218,7 @@ func UpdateRole(ctx context.Context, client *mongo.Client, role string, privileg
 	return nil
 }

-func GetRole(ctx context.Context, client *mongo.Client, role string) (*Role, error) {
+func (client *mongoClient) GetRole(ctx context.Context, role string) (*Role, error) {
 	resp := RoleInfo{}

 	res := client.Database("admin").RunCommand(ctx, bson.D{
@@ -195,7 +242,7 @@ func GetRole(ctx context.Context, client *mongo.Client, role string) (*Role, err
 	return &resp.Roles[0], nil
 }

-func CreateUser(ctx context.Context, client *mongo.Client, user, pwd string, roles ...map[string]interface{}) error {
+func (client *mongoClient) CreateUser(ctx context.Context, user, pwd string, roles ...map[string]interface{}) error {
 	resp := OKResponse{}

 	res := client.Database("admin").RunCommand(ctx, bson.D{
@@ -219,7 +266,7 @@ func CreateUser(ctx context.Context, client *mongo.Client, user, pwd string, rol
 	return nil
 }

-func AddShard(ctx context.Context, client *mongo.Client, rsName, host string) error {
+func (client *mongoClient) AddShard(ctx context.Context, rsName, host string) error {
 	resp := OKResponse{}

 	res := client.Database("admin").RunCommand(ctx, bson.D{{Key: "addShard", Value: rsName + "/" + host}})
@@ -238,7 +285,7 @@ func AddShard(ctx context.Context, client *mongo.Client, rsName, host string) er
 	return nil
 }

-func WriteConfig(ctx context.Context, client *mongo.Client, cfg RSConfig) error {
+func (client *mongoClient) WriteConfig(ctx context.Context, cfg RSConfig) error {
+	log := logf.FromContext(ctx)
 	resp := OKResponse{}

 	log.V(1).Info("Running replSetReconfig config", "cfg", cfg)
@@ -259,7 +307,7 @@ func WriteConfig(ctx context.Context, client *mongo.Client, cfg RSConfig) error
 	return nil
 }

-func RSStatus(ctx context.Context, client *mongo.Client) (Status, error) {
+func (client *mongoClient) RSStatus(ctx context.Context) (Status, error) {
 	status := Status{}

 	resp := client.Database("admin").RunCommand(ctx, bson.D{{Key: "replSetGetStatus", Value: 1}})
@@ -278,15 +326,15 @@ func RSStatus(ctx context.Context, client *mongo.Client) (Status, error) {
 	return status, nil
 }

-func StartBalancer(ctx context.Context, client *mongo.Client) error {
+func (client *mongoClient) StartBalancer(ctx context.Context) error {
 	return switchBalancer(ctx, client, "balancerStart")
 }

-func StopBalancer(ctx context.Context, client *mongo.Client) error {
+func (client *mongoClient) StopBalancer(ctx context.Context) error {
 	return switchBalancer(ctx, client, "balancerStop")
 }

-func switchBalancer(ctx context.Context, client *mongo.Client, command string) error {
+func switchBalancer(ctx context.Context, client *mongoClient, command string) error {
 	res := OKResponse{}

 	resp := client.Database("admin").RunCommand(ctx, bson.D{{Key: command, Value: 1}})
@@ -305,7 +353,7 @@ func switchBalancer(ctx context.Context, client *mongo.Client, command string) e
 	return nil
 }

-func IsBalancerRunning(ctx context.Context, client *mongo.Client) (bool, error) {
+func (client *mongoClient) IsBalancerRunning(ctx context.Context) (bool, error) {
 	res := BalancerStatus{}

 	resp := client.Database("admin").RunCommand(ctx, bson.D{{Key: "balancerStatus", Value: 1}})
@@ -324,7 +372,7 @@ func IsBalancerRunning(ctx context.Context, client *mongo.Client) (bool, error)
 	return res.Mode == "full", nil
 }

-func GetFCV(ctx context.Context, client *mongo.Client) (string, error) {
+func (client *mongoClient) GetFCV(ctx context.Context) (string, error) {
 	res := FCV{}

 	resp := client.Database("admin").RunCommand(ctx, bson.D{
@@ -343,7 +391,7 @@ func GetFCV(ctx context.Context, client *mongo.Client) (string, error) {
 	return res.FCV.Version, nil
 }

-func SetFCV(ctx context.Context, client *mongo.Client, version string) error {
+func (client *mongoClient) SetFCV(ctx context.Context, version string) error {
 	res := OKResponse{}

 	command := "setFeatureCompatibilityVersion"
@@ -364,7 +412,7 @@ func SetFCV(ctx context.Context, client *mongo.Client, version string) error {
 	return nil
 }

-func ListDBs(ctx context.Context, client *mongo.Client) (DBList, error) {
+func (client *mongoClient) ListDBs(ctx context.Context) (DBList, error) {
 	dbList := DBList{}

 	resp := client.Database("admin").RunCommand(ctx, bson.D{{Key: "listDatabases", Value: 1}})
@@ -383,7 +431,7 @@ func ListDBs(ctx context.Context, client *mongo.Client) (DBList, error) {
 	return dbList, nil
 }

-func ListShard(ctx context.Context, client *mongo.Client) (ShardList, error) {
+func (client *mongoClient) ListShard(ctx context.Context) (ShardList, error) {
 	shardList := ShardList{}

 	resp := client.Database("admin").RunCommand(ctx, bson.D{{Key: "listShards", Value: 1}})
@@ -402,7 +450,7 @@ func ListShard(ctx context.Context, client *mongo.Client) (ShardList, error) {
 	return shardList, nil
 }

-func RemoveShard(ctx context.Context, client *mongo.Client, shard string) (ShardRemoveResp, error) {
+func (client *mongoClient) RemoveShard(ctx context.Context, shard string) (ShardRemoveResp, error) {
 	removeResp := ShardRemoveResp{}

 	resp := client.Database("admin").RunCommand(ctx, bson.D{{Key: "removeShard", Value: shard}})
@@ -421,7 +469,7 @@ func RemoveShard(ctx context.Context, client *mongo.Client, shard string) (Shard
 	return removeResp, nil
 }

-func RSBuildInfo(ctx context.Context, client *mongo.Client) (BuildInfo, error) {
+func (client *mongoClient) RSBuildInfo(ctx context.Context) (BuildInfo, error) {
 	bi := BuildInfo{}

 	resp := client.Database("admin").RunCommand(ctx, bson.D{{Key: "buildinfo", Value: 1}})
@@ -440,7 +488,7 @@ func RSBuildInfo(ctx context.Context, client *mongo.Client) (BuildInfo, error) {
 	return bi, nil
 }

-func StepDown(ctx context.Context, client *mongo.Client, force bool) error {
+func (client *mongoClient) StepDown(ctx context.Context, force bool) error {
 	resp := OKResponse{}

 	res := client.Database("admin").RunCommand(ctx, bson.D{{Key: "replSetStepDown", Value: 60}, {Key: "force", Value: force}})
@@ -465,7 +513,7 @@ func StepDown(ctx context.Context, client *mongo.Client, force bool) error {
 	return nil
 }

-func IsMaster(ctx context.Context, client *mongo.Client) (*IsMasterResp, error) {
+func (client *mongoClient) IsMaster(ctx context.Context) (*IsMasterResp, error) {
 	cur := client.Database("admin").RunCommand(ctx, bson.D{{Key: "isMaster", Value: 1}})
 	if cur.Err() != nil {
 		return nil, errors.Wrap(cur.Err(), "run isMaster")
@@ -483,7 +531,7 @@ func IsMaster(ctx context.Context, client *mongo.Client) (*IsMasterResp, error)
 	return &resp, nil
 }

-func GetUserInfo(ctx context.Context, client *mongo.Client, username string) (*User, error) {
+func (client *mongoClient) GetUserInfo(ctx context.Context, username string) (*User, error) {
 	resp := UsersInfo{}
 	res := client.Database("admin").RunCommand(ctx, bson.D{{Key: "usersInfo", Value: username}})
 	if res.Err() != nil {
@@ -503,18 +551,18 @@ func GetUserInfo(ctx context.Context, client *mongo.Client, username string) (*U
 	return &resp.Users[0], nil
 }

-func UpdateUserRoles(ctx context.Context, client *mongo.Client, username string, roles []map[string]interface{}) error {
+func (client *mongoClient) UpdateUserRoles(ctx context.Context, username string, roles []map[string]interface{}) error {
 	return client.Database("admin").RunCommand(ctx, bson.D{{Key: "updateUser", Value: username}, {Key: "roles", Value: roles}}).Err()
 }

 // UpdateUserPass updates user's password
-func UpdateUserPass(ctx context.Context, client *mongo.Client, name, pass string) error {
+func (client *mongoClient) UpdateUserPass(ctx context.Context, name, pass string) error {
 	return client.Database("admin").RunCommand(ctx, bson.D{{Key: "updateUser", Value: name}, {Key: "pwd", Value: pass}}).Err()
 }

 // UpdateUser recreates user with new name and password
 // should be used only when username was changed
-func UpdateUser(ctx context.Context, client *mongo.Client, currName, newName, pass string) error {
+func (client *mongoClient) UpdateUser(ctx context.Context, currName, newName, pass string) error {
 	mu := struct {
 		Users []struct {
 			Roles interface{} `bson:"roles"`
 		}
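The net effect across mongo.go: every admin command moves from a package-level function taking the driver's *mongo.Client to a method on the Client interface, so call sites shrink and become mockable. Schematically (illustrative only):

	// before: status, err := mongo.RSStatus(ctx, client)
	// after:
	status, err := client.RSStatus(ctx)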
diff --git a/pkg/psmdb/mongos.go b/pkg/psmdb/mongos.go
index 4054a84920..b714da18ab 100644
--- a/pkg/psmdb/mongos.go
+++ b/pkg/psmdb/mongos.go
@@ -24,7 +24,7 @@ func MongosStatefulset(cr *api.PerconaServerMongoDB) *appsv1.StatefulSet {
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      cr.MongosNamespacedName().Name,
 			Namespace: cr.MongosNamespacedName().Namespace,
-			Labels:    mongosLabels(cr),
+			Labels:    MongosLabels(cr),
 		},
 	}
 }
@@ -59,7 +59,7 @@ func MongosStatefulsetSpec(cr *api.PerconaServerMongoDB, template corev1.PodTemp
 	return appsv1.StatefulSetSpec{
 		Replicas: &cr.Spec.Sharding.Mongos.Size,
 		Selector: &metav1.LabelSelector{
-			MatchLabels: mongosLabels(cr),
+			MatchLabels: MongosLabels(cr),
 		},
 		Template:       template,
 		UpdateStrategy: updateStrategy,
@@ -71,7 +71,7 @@ func MongosDeploymentSpec(cr *api.PerconaServerMongoDB, template corev1.PodTempl
 	return appsv1.DeploymentSpec{
 		Replicas: &cr.Spec.Sharding.Mongos.Size,
 		Selector: &metav1.LabelSelector{
-			MatchLabels: mongosLabels(cr),
+			MatchLabels: MongosLabels(cr),
 		},
 		Template: template,
 		Strategy: appsv1.DeploymentStrategy{
@@ -84,7 +84,7 @@ func MongosDeploymentSpec(cr *api.PerconaServerMongoDB, template corev1.PodTempl
 }

 func MongosTemplateSpec(cr *api.PerconaServerMongoDB, initImage string, log logr.Logger, customConf CustomConfig, cfgInstances []string) (corev1.PodTemplateSpec, error) {
-	ls := mongosLabels(cr)
+	ls := MongosLabels(cr)

 	if cr.Spec.Sharding.Mongos.Labels != nil {
 		for k, v := range cr.Spec.Sharding.Mongos.Labels {
@@ -390,7 +390,7 @@ func MongosService(cr *api.PerconaServerMongoDB, name string) corev1.Service {
 		},
 	}
 	if cr.CompareVersion("1.12.0") >= 0 {
-		svc.Labels = mongosLabels(cr)
+		svc.Labels = MongosLabels(cr)
 	}

 	if cr.Spec.Sharding.Mongos != nil {
@@ -406,7 +406,7 @@ func MongosService(cr *api.PerconaServerMongoDB, name string) corev1.Service {
 }

 func MongosServiceSpec(cr *api.PerconaServerMongoDB, podName string) corev1.ServiceSpec {
-	ls := mongosLabels(cr)
+	ls := MongosLabels(cr)

 	if cr.Spec.Sharding.Mongos.Expose.ServicePerPod {
 		ls["statefulset.kubernetes.io/pod-name"] = podName
diff --git a/pkg/psmdb/service.go b/pkg/psmdb/service.go
index 0764abc39a..0d136b2d42 100644
--- a/pkg/psmdb/service.go
+++ b/pkg/psmdb/service.go
@@ -145,7 +145,7 @@ func GetServiceAddr(ctx context.Context, svc corev1.Service, pod corev1.Pod, cl
 		}

 	case corev1.ServiceTypeLoadBalancer:
-		host, err := getIngressPoint(ctx, pod, cl)
+		host, err := getIngressPoint(ctx, cl, types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace})
 		if err != nil {
 			return nil, errors.Wrap(err, "get ingress endpoint")
 		}
@@ -171,7 +171,7 @@ func GetServiceAddr(ctx context.Context, svc corev1.Service, pod corev1.Pod, cl

 var ErrNoIngressPoints = errors.New("ingress points not found")

-func getIngressPoint(ctx context.Context, pod corev1.Pod, cl client.Client) (string, error) {
+func getIngressPoint(ctx context.Context, cl client.Client, serviceNN types.NamespacedName) (string, error) {
 	var retries uint64 = 0
 	var ip string
@@ -188,7 +188,7 @@ func getIngressPoint(ctx context.Context, pod corev1.Pod, cl client.Client) (str
 		}

 		svc := &corev1.Service{}
-		err := cl.Get(ctx, types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, svc)
+		err := cl.Get(ctx, serviceNN, svc)
 		if err != nil {
 			return "", errors.Wrap(err, "failed to fetch service")
 		}
@@ -302,12 +302,14 @@ func MongosHost(ctx context.Context, cl client.Client, cr *api.PerconaServerMong
 	if cr.Spec.Sharding.Mongos.Expose.ServicePerPod {
 		svcName = pod.Name
 	}
+
+	nn := types.NamespacedName{
+		Namespace: cr.Namespace,
+		Name:      svcName,
+	}
+
 	svc := new(corev1.Service)
-	err := cl.Get(ctx,
-		types.NamespacedName{
-			Namespace: cr.Namespace,
-			Name:      svcName,
-		}, svc)
+	err := cl.Get(ctx, nn, svc)
 	if err != nil {
 		if k8serrors.IsNotFound(err) {
 			return "", nil
@@ -316,18 +318,16 @@ func MongosHost(ctx context.Context, cl client.Client, cr *api.PerconaServerMong
 		return "", errors.Wrap(err, "failed to get mongos service")
 	}

-	var host string
-	if mongos := cr.Spec.Sharding.Mongos; mongos.Expose.ExposeType == corev1.ServiceTypeLoadBalancer {
-		for _, i := range svc.Status.LoadBalancer.Ingress {
-			host = i.IP
-			if len(i.Hostname) > 0 {
-				host = i.Hostname
-			}
+	mongos := cr.Spec.Sharding.Mongos
+	if mongos.Expose.ExposeType == corev1.ServiceTypeLoadBalancer && svc.Spec.Type == corev1.ServiceTypeLoadBalancer {
+		host, err := getIngressPoint(ctx, cl, nn)
+		if err != nil {
+			return "", errors.Wrap(err, "get ingress endpoint")
 		}
-	} else {
-		host = svc.Name + "." + cr.Namespace + "." + cr.Spec.ClusterServiceDNSSuffix
+		return host, nil
 	}
-	return host, nil
+
+	return svc.Name + "." + cr.Namespace + "." + cr.Spec.ClusterServiceDNSSuffix, nil
 }

 func getExtAddr(ctx context.Context, cl client.Client, namespace string, pod corev1.Pod) (string, error) {
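The MongosHost rewrite improves the old behavior in two ways: the load-balancer path is taken only when the CR requests LoadBalancer exposure and the live Service actually is one, and resolution reuses getIngressPoint, which retries until an ingress point is published and prefers a hostname over an IP, instead of the old hand-rolled loop that could return an empty host. Condensed, the new resolution order looks like this (an illustrative sketch, not the verbatim function; wantLB is a placeholder for the CR's expose check):

	// 1) CR wants LoadBalancer AND the Service has one: use the ingress
	//    endpoint (hostname preferred over IP, with retries).
	// 2) Anything else: fall back to the in-cluster DNS name.
	if wantLB && svc.Spec.Type == corev1.ServiceTypeLoadBalancer {
		return getIngressPoint(ctx, cl, nn)
	}
	return svc.Name + "." + cr.Namespace + "." + cr.Spec.ClusterServiceDNSSuffix, nil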