diff --git a/e2e-tests/custom-users-roles-sharded/compare/role-one-50.json b/e2e-tests/custom-users-roles-sharded/compare/role-one-50.json new file mode 100644 index 0000000000..3d66df219e --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/compare/role-one-50.json @@ -0,0 +1,122 @@ +switched to db admin +{ + "_id" : "admin.role-one", + "role" : "role-one", + "db" : "admin", + "privileges" : [ + { + "resource" : { + "cluster" : true + }, + "actions" : [ + "addShard" + ] + }, + { + "resource" : { + "db" : "config", + "collection" : "" + }, + "actions" : [ + "find", + "insert", + "remove", + "update" + ] + } + ], + "roles" : [ + { + "role" : "read", + "db" : "admin" + } + ], + "authenticationRestrictions" : [ + [ + { + "clientSource" : [ + "127.0.0.1" + ], + "serverAddress" : [ + "127.0.0.1" + ] + } + ] + ], + "inheritedRoles" : [ + { + "role" : "read", + "db" : "admin" + } + ], + "inheritedPrivileges" : [ + { + "resource" : { + "cluster" : true + }, + "actions" : [ + "addShard" + ] + }, + { + "resource" : { + "db" : "config", + "collection" : "" + }, + "actions" : [ + "find", + "insert", + "remove", + "update" + ] + }, + { + "resource" : { + "db" : "admin", + "collection" : "" + }, + "actions" : [ + "changeStream", + "collStats", + "dbHash", + "dbStats", + "find", + "killCursors", + "listCollections", + "listIndexes", + "planCacheRead" + ] + }, + { + "resource" : { + "db" : "admin", + "collection" : "system.js" + }, + "actions" : [ + "changeStream", + "collStats", + "dbHash", + "dbStats", + "find", + "killCursors", + "listCollections", + "listIndexes", + "planCacheRead" + ] + } + ], + "inheritedAuthenticationRestrictions" : [ + [ + { + "clientSource" : [ + "127.0.0.1" + ], + "serverAddress" : [ + "127.0.0.1" + ] + } + ] + ], + "isBuiltin" : false +} +bye diff --git a/e2e-tests/custom-users-roles-sharded/compare/role-two-50.json b/e2e-tests/custom-users-roles-sharded/compare/role-two-50.json new file mode 100644 index 0000000000..d8bf5f535d --- /dev/null +++ 
b/e2e-tests/custom-users-roles-sharded/compare/role-two-50.json @@ -0,0 +1,78 @@ +switched to db admin +{ + "_id" : "admin.role-two", + "role" : "role-two", + "db" : "admin", + "privileges" : [ + { + "resource" : { + "db" : "config", + "collection" : "" + }, + "actions" : [ + "find" + ] + } + ], + "roles" : [ + { + "role" : "read", + "db" : "admin" + } + ], + "authenticationRestrictions" : [ ], + "inheritedRoles" : [ + { + "role" : "read", + "db" : "admin" + } + ], + "inheritedPrivileges" : [ + { + "resource" : { + "db" : "config", + "collection" : "" + }, + "actions" : [ + "find" + ] + }, + { + "resource" : { + "db" : "admin", + "collection" : "" + }, + "actions" : [ + "changeStream", + "collStats", + "dbHash", + "dbStats", + "find", + "killCursors", + "listCollections", + "listIndexes", + "planCacheRead" + ] + }, + { + "resource" : { + "db" : "admin", + "collection" : "system.js" + }, + "actions" : [ + "changeStream", + "collStats", + "dbHash", + "dbStats", + "find", + "killCursors", + "listCollections", + "listIndexes", + "planCacheRead" + ] + } + ], + "inheritedAuthenticationRestrictions" : [ ], + "isBuiltin" : false +} +bye diff --git a/e2e-tests/custom-users-roles-sharded/run b/e2e-tests/custom-users-roles-sharded/run index 78f8114bd0..22025b0854 100755 --- a/e2e-tests/custom-users-roles-sharded/run +++ b/e2e-tests/custom-users-roles-sharded/run @@ -8,6 +8,10 @@ compare() { local uri="$3" local target="$4" + if [[ $IMAGE_MONGOD =~ 5\.0 ]] && [ -f ${test_dir}/compare/$target-50.json ]; then + target=$target-50 + fi + run_mongos "use ${database}\n ${command}" "$uri" "mongodb" \ | egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' \ | $sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' \ diff --git a/e2e-tests/custom-users-roles/compare/role-one-50.json 
b/e2e-tests/custom-users-roles/compare/role-one-50.json new file mode 100644 index 0000000000..3d66df219e --- /dev/null +++ b/e2e-tests/custom-users-roles/compare/role-one-50.json @@ -0,0 +1,122 @@ +switched to db admin +{ + "_id" : "admin.role-one", + "role" : "role-one", + "db" : "admin", + "privileges" : [ + { + "resource" : { + "cluster" : true + }, + "actions" : [ + "addShard" + ] + }, + { + "resource" : { + "db" : "config", + "collection" : "" + }, + "actions" : [ + "find", + "insert", + "remove", + "update" + ] + } + ], + "roles" : [ + { + "role" : "read", + "db" : "admin" + } + ], + "authenticationRestrictions" : [ + [ + { + "clientSource" : [ + "127.0.0.1" + ], + "serverAddress" : [ + "127.0.0.1" + ] + } + ] + ], + "inheritedRoles" : [ + { + "role" : "read", + "db" : "admin" + } + ], + "inheritedPrivileges" : [ + { + "resource" : { + "cluster" : true + }, + "actions" : [ + "addShard" + ] + }, + { + "resource" : { + "db" : "config", + "collection" : "" + }, + "actions" : [ + "find", + "insert", + "remove", + "update" + ] + }, + { + "resource" : { + "db" : "admin", + "collection" : "" + }, + "actions" : [ + "changeStream", + "collStats", + "dbHash", + "dbStats", + "find", + "killCursors", + "listCollections", + "listIndexes", + "planCacheRead" + ] + }, + { + "resource" : { + "db" : "admin", + "collection" : "system.js" + }, + "actions" : [ + "changeStream", + "collStats", + "dbHash", + "dbStats", + "find", + "killCursors", + "listCollections", + "listIndexes", + "planCacheRead" + ] + } + ], + "inheritedAuthenticationRestrictions" : [ + [ + { + "clientSource" : [ + "127.0.0.1" + ], + "serverAddress" : [ + "127.0.0.1" + ] + } + ] + ], + "isBuiltin" : false +} +bye diff --git a/e2e-tests/custom-users-roles/compare/role-two-50.json b/e2e-tests/custom-users-roles/compare/role-two-50.json new file mode 100644 index 0000000000..d8bf5f535d --- /dev/null +++ b/e2e-tests/custom-users-roles/compare/role-two-50.json @@ -0,0 +1,78 @@ +switched to db admin +{ + "_id" : 
"admin.role-two", + "role" : "role-two", + "db" : "admin", + "privileges" : [ + { + "resource" : { + "db" : "config", + "collection" : "" + }, + "actions" : [ + "find" + ] + } + ], + "roles" : [ + { + "role" : "read", + "db" : "admin" + } + ], + "authenticationRestrictions" : [ ], + "inheritedRoles" : [ + { + "role" : "read", + "db" : "admin" + } + ], + "inheritedPrivileges" : [ + { + "resource" : { + "db" : "config", + "collection" : "" + }, + "actions" : [ + "find" + ] + }, + { + "resource" : { + "db" : "admin", + "collection" : "" + }, + "actions" : [ + "changeStream", + "collStats", + "dbHash", + "dbStats", + "find", + "killCursors", + "listCollections", + "listIndexes", + "planCacheRead" + ] + }, + { + "resource" : { + "db" : "admin", + "collection" : "system.js" + }, + "actions" : [ + "changeStream", + "collStats", + "dbHash", + "dbStats", + "find", + "killCursors", + "listCollections", + "listIndexes", + "planCacheRead" + ] + } + ], + "inheritedAuthenticationRestrictions" : [ ], + "isBuiltin" : false +} +bye diff --git a/e2e-tests/custom-users-roles/run b/e2e-tests/custom-users-roles/run index 92dc628aaf..965db48b65 100755 --- a/e2e-tests/custom-users-roles/run +++ b/e2e-tests/custom-users-roles/run @@ -8,6 +8,10 @@ compare() { local uri="$3" local target="$4" + if [[ $IMAGE_MONGOD =~ 5\.0 ]] && [ -f ${test_dir}/compare/$target-50.json ]; then + target=$target-50 + fi + run_mongo "use ${database}\n ${command}" "$uri" "mongodb" \ | egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' \ | $sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' \ diff --git a/e2e-tests/data-at-rest-encryption/run b/e2e-tests/data-at-rest-encryption/run index ff2c08b65d..44982a0247 100755 --- a/e2e-tests/data-at-rest-encryption/run +++ b/e2e-tests/data-at-rest-encryption/run @@ -57,7 +57,7 @@ sleep 5 desc "check backup 
and restore -- minio" backup_dest_minio=$(get_backup_dest "$backup_name_minio") -kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ +retry 3 8 kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://${backup_dest_minio}/rs0/ \ | grep myApp.test.gz diff --git a/e2e-tests/demand-backup-physical-sharded/run b/e2e-tests/demand-backup-physical-sharded/run index 1ae02ff797..4cfb61bc15 100755 --- a/e2e-tests/demand-backup-physical-sharded/run +++ b/e2e-tests/demand-backup-physical-sharded/run @@ -29,13 +29,13 @@ run_recovery_check() { local backup_name=$1 local compare_suffix=${2:-"_restore"} - wait_restore "${backup_name}" "${cluster}" "requested" "0" "1200" + wait_restore "${backup_name}" "${cluster}" "requested" "0" "3000" echo compare_kubectl "statefulset/${cluster}-rs0" ${compare_suffix} # we don't wait for cluster readiness here because the annotation gets removed then - wait_restore "${backup_name}" "${cluster}" "ready" "0" "1800" + wait_restore "${backup_name}" "${cluster}" "ready" "0" "3000" kubectl_bin get psmdb ${cluster} -o yaml if [ $(kubectl_bin get psmdb ${cluster} -o yaml | yq '.metadata.annotations."percona.com/resync-pbm"') == null ]; then echo "psmdb/${cluster} should be annotated with percona.com/resync-pbm after a physical restore" diff --git a/e2e-tests/demand-backup-physical/run b/e2e-tests/demand-backup-physical/run index 2c1a1cbedc..4ce2ea040d 100755 --- a/e2e-tests/demand-backup-physical/run +++ b/e2e-tests/demand-backup-physical/run @@ -29,7 +29,7 @@ run_recovery_check() { local backup_name=$1 local compare_suffix=${2:-"_restore"} - wait_restore "${backup_name}" "${cluster}" "requested" "0" "1200" + wait_restore "${backup_name}" "${cluster}" "requested" "0" "3000" echo compare_kubectl "statefulset/${cluster}-rs0" 
${compare_suffix} diff --git a/e2e-tests/ignore-labels-annotations/run b/e2e-tests/ignore-labels-annotations/run index 9c6666107a..ffda3d1151 100755 --- a/e2e-tests/ignore-labels-annotations/run +++ b/e2e-tests/ignore-labels-annotations/run @@ -61,7 +61,7 @@ check_service() { } }]' - sleep 5 # waiting for reconcile + sleep 7 # waiting for reconcile desc "check if annotations/labels are not deleted from service $svc_name" compare_kubectl "service/$svc_name" "-manual" @@ -78,7 +78,7 @@ check_service() { "value": ["ignoredLabel"] } ]' - sleep 5 + sleep 7 desc "check if annotations/labels are not deleted from service $svc_name" compare_kubectl "service/$svc_name" "-manual" diff --git a/e2e-tests/pvc-resize/run b/e2e-tests/pvc-resize/run index 47e3f088aa..7689e35f48 100755 --- a/e2e-tests/pvc-resize/run +++ b/e2e-tests/pvc-resize/run @@ -157,6 +157,11 @@ if [[ $EKS == 1 || -n ${OPENSHIFT} ]]; then else spinup_psmdb "${cluster}-rs0" "$test_dir/conf/$cluster.yml" fi + echo "Enabling PVC resize after recreating PSMDB cluster ${cluster} " + kubectl_bin patch psmdb "${cluster}" --type=json -p='[{"op": "add", "path": "/spec/enableVolumeExpansion", "value":true }]' + sleep 10 + + wait_cluster_consistency "$cluster" fi desc 'create resourcequota' diff --git a/e2e-tests/serviceless-external-nodes/compare/statefulset_mydb-rs0-oc.yml b/e2e-tests/serviceless-external-nodes/compare/statefulset_mydb-rs0-oc.yml new file mode 100644 index 0000000000..8ec308dcd1 --- /dev/null +++ b/e2e-tests/serviceless-external-nodes/compare/statefulset_mydb-rs0-oc.yml @@ -0,0 +1,217 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: {} + generation: 1 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: mydb + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: mydb-rs0 + ownerReferences: + - controller: true 
+ kind: PerconaServerMongoDB + name: mydb +spec: + podManagementPolicy: OrderedReady + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: mydb + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: mydb-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: mydb + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=keyFile + - --keyFile=/etc/mongodb-secrets/mongodb-key + - --tlsMode=allowTLS + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + - --quiet + command: + - /opt/percona/ps-entry.sh + env: + - name: SERVICE_NAME + value: mydb + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + envFrom: + - secretRef: + name: internal-mydb-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + 
readinessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongod + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 2 + resources: + limits: + cpu: 300m + memory: 500M + requests: + cpu: 300m + memory: 500M + securityContext: + runAsNonRoot: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: mydb-custom-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: mydb-custom-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + workingDir: /data/db + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 300m + memory: 500M + requests: + cpu: 300m + memory: 500M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 60 + volumes: + - name: mydb-custom-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: mydb-custom-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: mydb-rs0-mongod + optional: true + name: config + - name: mydb-custom-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: mydb-custom-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: 
false + secretName: mydb-custom-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: mydb-custom-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-mydb-users + updateStrategy: + rollingUpdate: + partition: 0 + type: RollingUpdate + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi + status: + phase: Pending diff --git a/e2e-tests/serviceless-external-nodes/run b/e2e-tests/serviceless-external-nodes/run index 2d87c765bf..b7a1272737 100755 --- a/e2e-tests/serviceless-external-nodes/run +++ b/e2e-tests/serviceless-external-nodes/run @@ -22,7 +22,7 @@ apply_cluster "$test_dir/conf/main.yml" wait_for_running "$cluster-rs0" 1 compare_kubectl statefulset/mydb-rs0 -secrets_count=$(kubectl_bin get secret -o yaml | yq '.items | length') +secrets_count=$(kubectl_bin get secret -o json | jq --arg pattern "$cluster" '[.items[] | select(.metadata.name | test($pattern))] | length') if [[ $secrets_count != 6 ]]; then echo "It's expected to have 6 secrets. Currently have $secrets_count" exit 1 @@ -41,7 +41,7 @@ apply_cluster "$test_dir/conf/external.yml" wait_pod ${cluster}-rs0-0 wait_pod ${cluster}-rs0-1 -secrets_count=$(kubectl_bin get secret -o yaml | yq '.items | length') +secrets_count=$(kubectl_bin get secret -o json | jq --arg pattern "$cluster" '[.items[] | select(.metadata.name | test($pattern))] | length') if [[ $secrets_count != 6 ]]; then echo "It's expected to have 6 secrets. 
Currently have $secrets_count" exit 1 diff --git a/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-cfg-1162.yml b/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-cfg-1190.yml similarity index 100% rename from e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-cfg-1162.yml rename to e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-cfg-1190.yml diff --git a/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-rs0-1162.yml b/e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-rs0-1190.yml similarity index 100% rename from e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-rs0-1162.yml rename to e2e-tests/upgrade-consistency-sharded-tls/compare/service_some-name-rs0-1190.yml diff --git a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1170.yml b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1170.yml index 6c10a1135c..a4aceed30f 100644 --- a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1170.yml +++ b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1170.yml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: annotations: {} - generation: 5 + generation: 3 labels: app.kubernetes.io/component: cfg app.kubernetes.io/instance: some-name diff --git a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1180.yml b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1180.yml index bfd388030a..6c10a1135c 100644 --- a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1180.yml +++ b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1180.yml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: annotations: {} - generation: 7 + generation: 5 labels: app.kubernetes.io/component: cfg app.kubernetes.io/instance: some-name diff 
--git a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1162-oc.yml b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1190-oc.yml similarity index 99% rename from e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1162-oc.yml rename to e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1190-oc.yml index e1b011548f..1640b7b614 100644 --- a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1162-oc.yml +++ b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1190-oc.yml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: annotations: {} - generation: 3 + generation: 7 labels: app.kubernetes.io/component: cfg app.kubernetes.io/instance: some-name diff --git a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1162.yml b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1190.yml similarity index 99% rename from e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1162.yml rename to e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1190.yml index a4aceed30f..bfd388030a 100644 --- a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1162.yml +++ b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-cfg-1190.yml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: annotations: {} - generation: 3 + generation: 7 labels: app.kubernetes.io/component: cfg app.kubernetes.io/instance: some-name diff --git a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1170.yml b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1170.yml index a0ce6a988b..abc93591b5 100644 --- a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1170.yml +++ 
b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1170.yml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: annotations: {} - generation: 5 + generation: 3 labels: app.kubernetes.io/component: mongod app.kubernetes.io/instance: some-name diff --git a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1180.yml b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1180.yml index 148a09afe6..a0ce6a988b 100644 --- a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1180.yml +++ b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1180.yml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: annotations: {} - generation: 7 + generation: 5 labels: app.kubernetes.io/component: mongod app.kubernetes.io/instance: some-name diff --git a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1162-oc.yml b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1190-oc.yml similarity index 99% rename from e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1162-oc.yml rename to e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1190-oc.yml index 87ad33b990..794034ad9b 100644 --- a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1162-oc.yml +++ b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1190-oc.yml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: annotations: {} - generation: 3 + generation: 7 labels: app.kubernetes.io/component: mongod app.kubernetes.io/instance: some-name diff --git a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1162.yml b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1190.yml similarity index 99% rename from 
e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1162.yml rename to e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1190.yml index abc93591b5..148a09afe6 100644 --- a/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1162.yml +++ b/e2e-tests/upgrade-consistency-sharded-tls/compare/statefulset_some-name-rs0-1190.yml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: annotations: {} - generation: 3 + generation: 7 labels: app.kubernetes.io/component: mongod app.kubernetes.io/instance: some-name diff --git a/e2e-tests/upgrade-consistency-sharded-tls/run b/e2e-tests/upgrade-consistency-sharded-tls/run index 3eed33edd1..4f7a0a88cf 100755 --- a/e2e-tests/upgrade-consistency-sharded-tls/run +++ b/e2e-tests/upgrade-consistency-sharded-tls/run @@ -24,7 +24,7 @@ main() { kubectl_bin apply -f "$conf_dir/client_with_tls.yml" deploy_cmctl - desc "create first PSMDB cluster 1.16.2 $CLUSTER" + desc "create first PSMDB cluster 1.17.0 $CLUSTER" apply_cluster "$test_dir/conf/${CLUSTER}.yml" desc 'check if Pod started' @@ -51,14 +51,14 @@ main() { compare_generation "3" "statefulset" "${CLUSTER}-cfg" desc 'check if service and statefulset created with expected config' - compare_kubectl service/${CLUSTER}-rs0 "-1162" - compare_kubectl service/${CLUSTER}-cfg "-1162" - compare_kubectl statefulset/${CLUSTER}-rs0 "-1162" - compare_kubectl statefulset/${CLUSTER}-cfg "-1162" + compare_kubectl service/${CLUSTER}-rs0 "-1170" + compare_kubectl service/${CLUSTER}-cfg "-1170" + compare_kubectl statefulset/${CLUSTER}-rs0 "-1170" + compare_kubectl statefulset/${CLUSTER}-cfg "-1170" - desc 'test 1.17.0' + desc 'test 1.18.0' kubectl_bin patch psmdb "${CLUSTER}" --type=merge --patch '{ - "spec": {"crVersion":"1.17.0"} + "spec": {"crVersion":"1.18.0"} }' # Wait for at least one reconciliation sleep 20 @@ -80,14 +80,14 @@ main() { compare_generation "5" "statefulset" "${CLUSTER}-cfg" desc 'check if service and 
statefulset created with expected config' - compare_kubectl service/${CLUSTER}-rs0 "-1170" - compare_kubectl service/${CLUSTER}-cfg "-1170" - compare_kubectl statefulset/${CLUSTER}-rs0 "-1170" - compare_kubectl statefulset/${CLUSTER}-cfg "-1170" + compare_kubectl service/${CLUSTER}-rs0 "-1180" + compare_kubectl service/${CLUSTER}-cfg "-1180" + compare_kubectl statefulset/${CLUSTER}-rs0 "-1180" + compare_kubectl statefulset/${CLUSTER}-cfg "-1180" - desc 'test 1.18.0' + desc 'test 1.19.0' kubectl_bin patch psmdb "${CLUSTER}" --type=merge --patch '{ - "spec": {"crVersion":"1.18.0"} + "spec": {"crVersion":"1.19.0"} }' # Wait for at least one reconciliation sleep 20 @@ -109,10 +109,10 @@ main() { compare_generation "7" "statefulset" "${CLUSTER}-cfg" desc 'check if service and statefulset created with expected config' - compare_kubectl service/${CLUSTER}-rs0 "-1180" - compare_kubectl service/${CLUSTER}-cfg "-1180" - compare_kubectl statefulset/${CLUSTER}-rs0 "-1180" - compare_kubectl statefulset/${CLUSTER}-cfg "-1180" + compare_kubectl service/${CLUSTER}-rs0 "-1190" + compare_kubectl service/${CLUSTER}-cfg "-1190" + compare_kubectl statefulset/${CLUSTER}-rs0 "-1190" + compare_kubectl statefulset/${CLUSTER}-cfg "-1190" destroy "$namespace" diff --git a/e2e-tests/upgrade-consistency/compare/service_some-name-rs0-1162.yml b/e2e-tests/upgrade-consistency/compare/service_some-name-rs0-1190.yml similarity index 100% rename from e2e-tests/upgrade-consistency/compare/service_some-name-rs0-1162.yml rename to e2e-tests/upgrade-consistency/compare/service_some-name-rs0-1190.yml diff --git a/e2e-tests/upgrade-consistency/compare/statefulset_some-name-rs0-1162-oc.yml b/e2e-tests/upgrade-consistency/compare/statefulset_some-name-rs0-1190-oc.yml similarity index 100% rename from e2e-tests/upgrade-consistency/compare/statefulset_some-name-rs0-1162-oc.yml rename to e2e-tests/upgrade-consistency/compare/statefulset_some-name-rs0-1190-oc.yml diff --git 
a/e2e-tests/upgrade-consistency/compare/statefulset_some-name-rs0-1162.yml b/e2e-tests/upgrade-consistency/compare/statefulset_some-name-rs0-1190.yml similarity index 100% rename from e2e-tests/upgrade-consistency/compare/statefulset_some-name-rs0-1162.yml rename to e2e-tests/upgrade-consistency/compare/statefulset_some-name-rs0-1190.yml diff --git a/e2e-tests/upgrade-consistency/conf/some-name-rs0.yml b/e2e-tests/upgrade-consistency/conf/some-name-rs0.yml index 67dec005fa..00f375a790 100644 --- a/e2e-tests/upgrade-consistency/conf/some-name-rs0.yml +++ b/e2e-tests/upgrade-consistency/conf/some-name-rs0.yml @@ -3,7 +3,7 @@ kind: PerconaServerMongoDB metadata: name: some-name spec: - crVersion: 1.16.2 + crVersion: 1.17.0 #platform: openshift image: imagePullPolicy: Always diff --git a/e2e-tests/upgrade-consistency/run b/e2e-tests/upgrade-consistency/run index fcd87a6fb6..b2021a2184 100755 --- a/e2e-tests/upgrade-consistency/run +++ b/e2e-tests/upgrade-consistency/run @@ -12,22 +12,21 @@ main() { create_infra $namespace desc 'create secrets and start client' - kubectl_bin apply -f "${conf_dir}/client.yml" \ - -f "${conf_dir}/secrets.yml" + kubectl_bin apply -f "${conf_dir}/client.yml" -f "${conf_dir}/secrets.yml" - desc "create first PSMDB cluster 1.16.2 $CLUSTER" + desc "create first PSMDB cluster 1.17.0 $CLUSTER" apply_cluster "$test_dir/conf/${CLUSTER}-rs0.yml" desc 'check if Pod started' wait_for_running "${CLUSTER}-rs0" "3" "true" desc 'check if service and statefulset created with expected config' - compare_kubectl service/${CLUSTER}-rs0 "-1162" - compare_kubectl statefulset/${CLUSTER}-rs0 "-1162" + compare_kubectl service/${CLUSTER}-rs0 "-1170" + compare_kubectl statefulset/${CLUSTER}-rs0 "-1170" - desc 'test 1.17.0' + desc 'test 1.18.0' kubectl_bin patch psmdb "${CLUSTER}" --type=merge --patch '{ - "spec": {"crVersion":"1.17.0"} + "spec": {"crVersion":"1.18.0"} }' # Wait for at least one reconciliation sleep 10 @@ -35,12 +34,12 @@ main() { wait_for_running 
"${CLUSTER}-rs0" "3" "true" desc 'check if service and statefulset created with expected config' - compare_kubectl service/${CLUSTER}-rs0 "-1170" - compare_kubectl statefulset/${CLUSTER}-rs0 "-1170" + compare_kubectl service/${CLUSTER}-rs0 "-1180" + compare_kubectl statefulset/${CLUSTER}-rs0 "-1180" - desc 'test 1.18.0' + desc 'test 1.19.0' kubectl_bin patch psmdb "${CLUSTER}" --type=merge --patch '{ - "spec": {"crVersion":"1.18.0"} + "spec": {"crVersion":"1.19.0"} }' # Wait for at least one reconciliation sleep 10 @@ -48,8 +47,8 @@ main() { wait_for_running "${CLUSTER}-rs0" "3" "true" desc 'check if service and statefulset created with expected config' - compare_kubectl service/${CLUSTER}-rs0 "-1180" - compare_kubectl statefulset/${CLUSTER}-rs0 "-1180" + compare_kubectl service/${CLUSTER}-rs0 "-1190" + compare_kubectl statefulset/${CLUSTER}-rs0 "-1190" destroy $namespace diff --git a/go.mod b/go.mod index e278e20256..6351e0c9a2 100644 --- a/go.mod +++ b/go.mod @@ -14,8 +14,8 @@ require ( github.com/go-openapi/validate v0.24.0 github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 github.com/hashicorp/go-version v1.7.0 - github.com/onsi/ginkgo/v2 v2.21.0 - github.com/onsi/gomega v1.35.1 + github.com/onsi/ginkgo/v2 v2.19.0 + github.com/onsi/gomega v1.33.1 github.com/percona/percona-backup-mongodb v1.8.1-0.20241002124601-957ac501f939 github.com/pkg/errors v0.9.1 github.com/robfig/cron/v3 v3.0.1 @@ -40,7 +40,7 @@ require ( github.com/go-asn1-ber/asn1-ber v1.5.6 // indirect github.com/go-ldap/ldap/v3 v3.4.8 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect - github.com/google/pprof v0.0.0-20241101162523-b92577c0c142 // indirect + github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/x448/float16 v0.8.4 // indirect diff --git a/go.sum b/go.sum index cbb39e8f16..d78c651e8a 100644 --- a/go.sum +++ b/go.sum @@ -255,8 +255,8 @@ 
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20241101162523-b92577c0c142 h1:sAGdeJj0bnMgUNVeUpp6AYlVdCt3/GdI3pGRqsNSQLs= -github.com/google/pprof v0.0.0-20241101162523-b92577c0c142/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -399,15 +399,15 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= -github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= diff --git a/pkg/controller/perconaservermongodb/psmdb_controller.go b/pkg/controller/perconaservermongodb/psmdb_controller.go index 3d49b471bd..17e113b538 100644 --- a/pkg/controller/perconaservermongodb/psmdb_controller.go +++ b/pkg/controller/perconaservermongodb/psmdb_controller.go @@ -25,15 +25,13 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/client/config" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" "github.com/percona/percona-server-mongodb-operator/clientcmd" api "github.com/percona/percona-server-mongodb-operator/pkg/apis/psmdb/v1" @@ -134,21 +132,14 @@ func getOperatorPodImage(ctx context.Context) (string, error) { // add adds a new Controller 
to mgr with r as the reconcile.Reconciler func add(mgr manager.Manager, r reconcile.Reconciler) error { - // Create a new controller - c, err := controller.New("psmdb-controller", mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } - - // Watch for changes to primary resource PerconaServerMongoDB - err = c.Watch(source.Kind(mgr.GetCache(), &api.PerconaServerMongoDB{}, &handler.TypedEnqueueRequestForObject[*api.PerconaServerMongoDB]{})) - if err != nil { - return err - } - - return nil + return builder.ControllerManagedBy(mgr). + For(&api.PerconaServerMongoDB{}). + Named("psmdb-controller"). + Complete(r) } +var _ reconcile.Reconciler = &ReconcilePerconaServerMongoDB{} + type CronRegistry struct { crons *cron.Cron ensureVersionJobs *sync.Map @@ -178,8 +169,6 @@ func NewCronRegistry() CronRegistry { return c } -var _ reconcile.Reconciler = &ReconcilePerconaServerMongoDB{} - // ReconcilePerconaServerMongoDB reconciles a PerconaServerMongoDB object type ReconcilePerconaServerMongoDB struct { // This client, initialized using mgr.Client() above, is a split client diff --git a/pkg/controller/perconaservermongodbbackup/perconaservermongodbbackup_controller.go b/pkg/controller/perconaservermongodbbackup/perconaservermongodbbackup_controller.go index fe2fe90651..7e861939f4 100644 --- a/pkg/controller/perconaservermongodbbackup/perconaservermongodbbackup_controller.go +++ b/pkg/controller/perconaservermongodbbackup/perconaservermongodbbackup_controller.go @@ -14,20 +14,18 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" pbmBackup 
"github.com/percona/percona-backup-mongodb/pbm/backup" pbmErrors "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/storage" "github.com/percona/percona-backup-mongodb/pbm/storage/azure" "github.com/percona/percona-backup-mongodb/pbm/storage/s3" - "github.com/percona/percona-server-mongodb-operator/clientcmd" psmdbv1 "github.com/percona/percona-server-mongodb-operator/pkg/apis/psmdb/v1" "github.com/percona/percona-server-mongodb-operator/pkg/naming" @@ -67,29 +65,20 @@ func newReconciler(mgr manager.Manager) (reconcile.Reconciler, error) { } // add adds a new Controller to mgr with r as the reconcile.Reconciler -func add(mgr manager.Manager, r reconcile.Reconciler) error { - // Create a new controller - c, err := controller.New("psmdbbackup-controller", mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } - - // Watch for changes to primary resource PerconaServerMongoDBBackup - err = c.Watch(source.Kind(mgr.GetCache(), &psmdbv1.PerconaServerMongoDBBackup{}, &handler.TypedEnqueueRequestForObject[*psmdbv1.PerconaServerMongoDBBackup]{})) - if err != nil { - return err - } - // TODO(user): Modify this to be the types you create that are owned by the primary resource - // Watch for changes to secondary resource Pods and requeue the owner PerconaServerMongoDBBackup - err = c.Watch(source.Kind(mgr.GetCache(), &corev1.Pod{}, handler.TypedEnqueueRequestForOwner[*corev1.Pod]( - mgr.GetScheme(), mgr.GetRESTMapper(), &psmdbv1.PerconaServerMongoDBBackup{}, handler.OnlyControllerOwner(), - ))) - if err != nil { - return err - } - - return nil +func add(mgr manager.Manager, r reconcile.Reconciler) error { + return builder.ControllerManagedBy(mgr). + Named("psmdbbackup-controller"). + For(&psmdbv1.PerconaServerMongoDBBackup{}). 
+ Watches( + &corev1.Pod{}, + handler.EnqueueRequestForOwner( + mgr.GetScheme(), mgr.GetRESTMapper(), + &psmdbv1.PerconaServerMongoDBBackup{}, + handler.OnlyControllerOwner(), + ), + ). + Complete(r) } var _ reconcile.Reconciler = &ReconcilePerconaServerMongoDBBackup{} @@ -177,15 +166,6 @@ func (r *ReconcilePerconaServerMongoDBBackup) Reconcile(ctx context.Context, req if err != nil { return rr, errors.Wrapf(err, "set defaults for %s/%s", cluster.Namespace, cluster.Name) } - // TODO: Remove after 1.15 - if cluster.CompareVersion("1.12.0") >= 0 && cr.Spec.ClusterName == "" { - cr.Spec.ClusterName = cr.Spec.PSMDBCluster - cr.Spec.PSMDBCluster = "" - err = r.client.Update(ctx, cr) - if err != nil { - return rr, errors.Wrap(err, "failed to update clusterName") - } - } } bcp, err := r.newBackup(ctx, cluster) diff --git a/pkg/controller/perconaservermongodbrestore/perconaservermongodbrestore_controller.go b/pkg/controller/perconaservermongodbrestore/perconaservermongodbrestore_controller.go index ac2d8c784b..8cff8f259e 100644 --- a/pkg/controller/perconaservermongodbrestore/perconaservermongodbrestore_controller.go +++ b/pkg/controller/perconaservermongodbrestore/perconaservermongodbrestore_controller.go @@ -13,13 +13,12 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" "github.com/percona/percona-backup-mongodb/pbm/defs" @@ -56,29 +55,21 @@ func newReconciler(mgr manager.Manager) (reconcile.Reconciler, error) { }, nil } -// add adds a new Controller to mgr with r as the reconcile.Reconciler -func add(mgr manager.Manager, r reconcile.Reconciler) 
error { - // Create a new controller - c, err := controller.New("psmdbrestore-controller", mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } +// add adds a new Controller to mgr with r as the reconcile.Reconciler - // Watch for changes to primary resource PerconaServerMongoDBRestore - err = c.Watch(source.Kind(mgr.GetCache(), &psmdbv1.PerconaServerMongoDBRestore{}, &handler.TypedEnqueueRequestForObject[*psmdbv1.PerconaServerMongoDBRestore]{})) - if err != nil { - return err - } - - // Watch for changes to secondary resource Pods and requeue the owner PerconaServerMongoDBRestore - err = c.Watch(source.Kind(mgr.GetCache(), &corev1.Pod{}, handler.TypedEnqueueRequestForOwner[*corev1.Pod]( - mgr.GetScheme(), mgr.GetRESTMapper(), &psmdbv1.PerconaServerMongoDBRestore{}, handler.OnlyControllerOwner(), - ))) - if err != nil { - return err - } - - return nil +func add(mgr manager.Manager, r reconcile.Reconciler) error { + return builder.ControllerManagedBy(mgr). + Named("psmdbrestore-controller"). + For(&psmdbv1.PerconaServerMongoDBRestore{}). + Watches( + &corev1.Pod{}, + handler.EnqueueRequestForOwner( + mgr.GetScheme(), mgr.GetRESTMapper(), + &psmdbv1.PerconaServerMongoDBRestore{}, + handler.OnlyControllerOwner(), + ), + ). + Complete(r) } var _ reconcile.Reconciler = &ReconcilePerconaServerMongoDBRestore{}