diff --git a/deploy/cr.yaml b/deploy/cr.yaml index b9ae82c0bc..de0620a07c 100644 --- a/deploy/cr.yaml +++ b/deploy/cr.yaml @@ -297,24 +297,13 @@ spec: # nodeSelector: # disktype: ssd # schedulerName: "default" - resources: - limits: - cpu: "300m" - memory: "0.5G" - requests: - cpu: "300m" - memory: "0.5G" - volumeSpec: -# emptyDir: {} -# hostPath: -# path: /data -# type: Directory - persistentVolumeClaim: -# storageClassName: standard -# accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 3Gi + resources: + limits: + cpu: "300m" + memory: "0.5G" + requests: + cpu: "300m" + memory: "0.5G" # hostAliases: # - ip: "10.10.0.2" # hostnames: diff --git a/e2e-tests/data-sharded/conf/some-name.yml b/e2e-tests/data-sharded/conf/some-name.yml index b587de2ba4..29570fda43 100644 --- a/e2e-tests/data-sharded/conf/some-name.yml +++ b/e2e-tests/data-sharded/conf/some-name.yml @@ -94,7 +94,7 @@ spec: requests: storage: 1Gi - size: 3 + size: 4 configuration: | net: tls: @@ -119,6 +119,9 @@ spec: journalCompressor: snappy indexConfig: prefixCompression: true + arbiter: + enabled: true + size: 1 - name: rs2 affinity: antiAffinityTopologyKey: none @@ -159,6 +162,28 @@ spec: journalCompressor: snappy indexConfig: prefixCompression: true + nonvoting: + enabled: true + size: 3 + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 0.1G + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 2Gi + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 1Gi secrets: users: some-users diff --git a/e2e-tests/data-sharded/run b/e2e-tests/data-sharded/run index 7e29411f88..c37339d9c3 100755 --- a/e2e-tests/data-sharded/run +++ b/e2e-tests/data-sharded/run @@ -6,96 +6,138 @@ test_dir=$(realpath "$(dirname "$0")") . 
"${test_dir}/../functions" set_debug -if [[ ${IMAGE_MONGOD} == *"percona-server-mongodb-operator"* ]]; then - MONGO_VER=$(echo -n "${IMAGE_MONGOD}" | $sed -r 's/.*([0-9].[0-9])$/\1/') -else - MONGO_VER=$(echo -n "${IMAGE_MONGOD}" | $sed -r 's/.*:([0-9]+\.[0-9]+).*$/\1/') -fi - -deploy_cert_manager -create_infra "$namespace" - -desc 'create secrets and start client' -kubectl_bin apply -f "$conf_dir/secrets.yml" -kubectl_bin apply -f "$conf_dir/client_with_tls.yml" - -cluster="some-name" -desc "create first PSMDB cluster $cluster" -apply_cluster "$test_dir/conf/$cluster.yml" - -desc 'check if all Pods started' -wait_for_running $cluster-cfg 3 -wait_for_running $cluster-rs0 3 -wait_for_running $cluster-rs1 3 -wait_for_running $cluster-rs2 3 -wait_for_running $cluster-mongos 3 - -desc 'create user' -run_mongos \ - 'db.createUser({user:"user",pwd:"pass",roles:[{db:"app",role:"readWrite"}]})' \ - "userAdmin:userAdmin123456@$cluster-mongos.$namespace" "mongodb" ".svc.cluster.local" \ - "--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls" -sleep 2 - -desc 'set chunk size to 32 MB' -run_mongos \ - "use config\n db.settings.save( { _id:\"chunksize\", value: 32 } )" \ - "clusterAdmin:clusterAdmin123456@$cluster-mongos.$namespace" "mongodb" ".svc.cluster.local" \ - "--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls" -sleep 2 - -desc 'write data' -run_script_mongos "${test_dir}/data.js" "user:pass@$cluster-mongos.$namespace" "mongodb" ".svc.cluster.local" \ - "--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls" - -desc 'shard collection' -run_mongos \ - 'sh.enableSharding("app")' \ - "clusterAdmin:clusterAdmin123456@$cluster-mongos.$namespace" "mongodb" ".svc.cluster.local" \ - "--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls" -sleep 2 - -run_mongos \ - 'sh.shardCollection("app.city", { _id: 1 } )' \ - "clusterAdmin:clusterAdmin123456@$cluster-mongos.$namespace" "mongodb" ".svc.cluster.local" \ - "--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls" -sleep 120 - -desc 'check chunks' -chunks_param1="ns" -chunks_param2='"app.city"' - -if [[ ${MONGO_VER} == "6.0" || ${MONGO_VER} == "5.0" ]]; then - chunks_param1="uuid" - chunks_param2=$(run_mongos \ - "use app\n db.getCollectionInfos({ \"name\": \"city\" })[0].info.uuid" \ - "user:pass@$cluster-mongos.$namespace" \ - '' \ - '' \ - "--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls" \ - | grep "switched to db app" -A 1 | grep -v "switched to db app") -fi - -shards=0 -for i in "rs0" "rs1" "rs2"; do - out=$(run_mongos \ - "use config\n db.chunks.count({\"${chunks_param1}\": ${chunks_param2}, \"shard\": \"$i\"})" \ +check_rs_proper_component_deletion() { + local cluster="$1" + local rs_name="$2" + + rs_idx=$(kubectl_bin get psmdb ${cluster} -ojson | jq --arg RS $rs_name '.spec.replsets | map(.name == $RS) | index(true)') + kubectl_bin patch psmdb ${cluster} --type=json -p="[{'op': 'remove', 'path': '/spec/replsets/$rs_idx'}]" + + echo -n "Deleting replset $rs_name" + until [[ $(kubectl_bin get sts -l app.kubernetes.io/instance=${cluster},app.kubernetes.io/replset=${rs_name} -ojson | jq '.items | length') -eq 0 ]]; do + let retry+=1 + if [ $retry -ge 70 ]; then + sts_count=$(kubectl_bin get sts -l app.kubernetes.io/instance=${cluster},app.kubernetes.io/replset=${rs_name} -ojson | jq '.items | length') + echo "Replset $rs_name not properly removed, expected sts count of 0 but got $sts_count. 
Exiting after $retry tries..." + exit 1 + fi + echo -n . + sleep 30 + done + + echo "OK" +} + +main() { + if [[ ${IMAGE_MONGOD} == *"percona-server-mongodb-operator"* ]]; then + MONGO_VER=$(echo -n "${IMAGE_MONGOD}" | $sed -r 's/.*([0-9].[0-9])$/\1/') + else + MONGO_VER=$(echo -n "${IMAGE_MONGOD}" | $sed -r 's/.*:([0-9]+\.[0-9]+).*$/\1/') + fi + + deploy_cert_manager + create_infra "$namespace" + + desc 'create secrets and start client' + kubectl_bin apply -f "$conf_dir/secrets.yml" + kubectl_bin apply -f "$conf_dir/client_with_tls.yml" + + cluster="some-name" + desc "create first PSMDB cluster $cluster" + apply_cluster "$test_dir/conf/$cluster.yml" + + desc 'check if all Pods started' + wait_for_running $cluster-cfg 3 + wait_for_running $cluster-rs0 3 + wait_for_running $cluster-rs1 3 + wait_for_running $cluster-rs2 3 + wait_for_running $cluster-mongos 3 + + desc 'create user' + run_mongos \ + 'db.createUser({user:"user",pwd:"pass",roles:[{db:"app",role:"readWrite"}]})' \ + "userAdmin:userAdmin123456@$cluster-mongos.$namespace" "mongodb" ".svc.cluster.local" \ + "--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls" + sleep 2 + + desc 'set chunk size to 32 MB' + run_mongos \ + "use config\n db.settings.save( { _id:\"chunksize\", value: 32 } )" \ "clusterAdmin:clusterAdmin123456@$cluster-mongos.$namespace" "mongodb" ".svc.cluster.local" \ - "--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls" \ - | grep "switched to db config" -A 1 | grep -v "switched to db config") + "--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls" + sleep 2 - desc "$i has $out chunks" + desc 'write data' + run_script_mongos "${test_dir}/data.js" "user:pass@$cluster-mongos.$namespace" "mongodb" ".svc.cluster.local" \ + "--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls" + + desc 'shard collection' + run_mongos \ + 'sh.enableSharding("app")' \ + "clusterAdmin:clusterAdmin123456@$cluster-mongos.$namespace" "mongodb" ".svc.cluster.local" \ + "--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls" + sleep 2 - if [[ $out -ne 0 ]]; then - ((shards = shards + 1)) + run_mongos \ + 'sh.shardCollection("app.city", { _id: 1 } )' \ + "clusterAdmin:clusterAdmin123456@$cluster-mongos.$namespace" "mongodb" ".svc.cluster.local" \ + "--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls" + sleep 120 + + desc 'check chunks' + chunks_param1="ns" + chunks_param2='"app.city"' + + if [[ ${MONGO_VER} == "6.0" || ${MONGO_VER} == "5.0" ]]; then + chunks_param1="uuid" + chunks_param2=$(run_mongos \ + "use app\n db.getCollectionInfos({ \"name\": \"city\" })[0].info.uuid" \ + "user:pass@$cluster-mongos.$namespace" \ + '' \ + '' \ + "--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls" \ + | grep "switched to db app" -A 1 | grep -v "switched to db app") fi -done -if [[ $shards -lt 3 ]]; then - echo "data is only on some of the shards, maybe sharding is not working" - exit 1 -fi + shards=0 + for i in "rs0" "rs1" "rs2"; do + out=$(run_mongos \ + "use config\n db.chunks.count({\"${chunks_param1}\": ${chunks_param2}, \"shard\": \"$i\"})" \ + "clusterAdmin:clusterAdmin123456@$cluster-mongos.$namespace" "mongodb" ".svc.cluster.local" \ + "--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls" \ + | grep "switched to db config" -A 1 | grep -v "switched to db config") + + desc "$i has $out chunks" + + if [[ $out -ne 0 ]]; then + ((shards = 
shards + 1)) + fi + done + + if [[ $shards -lt 3 ]]; then + echo "data is only on some of the shards, maybe sharding is not working" + exit 1 + fi + + # Drop non system database so we can remove shards + res=$(run_mongos \ + "use app\n db.dropDatabase()" \ + "clusterAdmin:clusterAdmin123456@$cluster-mongos.$namespace" "mongodb" ".svc.cluster.local" \ + "--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls") + if ! echo $res | grep -q '"ok" : 1'; then + echo "app database not dropped. Exiting.." + exit 1 + fi + + desc 'check if rs1 and all its related stateful sets are properly removed' + check_rs_proper_component_deletion $cluster rs1 + + desc 'check if rs2 and all its related stateful sets are properly removed' + check_rs_proper_component_deletion $cluster rs2 + + destroy "$namespace" -destroy "$namespace" + desc 'test passed' +} -desc 'test passed' +main diff --git a/e2e-tests/split-horizon/run b/e2e-tests/split-horizon/run index 0c8c2b7008..eef56b1a53 100755 --- a/e2e-tests/split-horizon/run +++ b/e2e-tests/split-horizon/run @@ -43,7 +43,7 @@ sleep 15 set -o xtrace -run_mongo "rs.conf().members.map(function(member) { return member.horizons })" \ +run_mongo "rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))" \ "clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz" \ mongodb "" "--quiet" >${tmp_dir}/horizons-3.json diff $test_dir/compare/horizons-3.json $tmp_dir/horizons-3.json @@ -78,7 +78,7 @@ kubectl_bin patch psmdb ${cluster} \ wait_for_running "${cluster}-rs0" 5 wait_cluster_consistency ${cluster} -run_mongo "rs.conf().members.map(function(member) { return member.horizons })" \ +run_mongo "rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))" \ "clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz" \ mongodb "" "--quiet" >${tmp_dir}/horizons-5.json diff $test_dir/compare/horizons-5.json $tmp_dir/horizons-5.json @@ -89,7 +89,7 @@ kubectl_bin patch psmdb ${cluster} \ wait_for_running "${cluster}-rs0" 3 wait_cluster_consistency ${cluster} -run_mongo "rs.conf().members.map(function(member) { return member.horizons })" \ +run_mongo "rs.conf().members.map(function(member) { return member.horizons }).sort((a, b) => a.external.localeCompare(b.external))" \ "clusterAdmin:clusterAdmin123456@some-name-rs0-0.clouddemo.xyz,some-name-rs0-1.clouddemo.xyz,some-name-rs0-2.clouddemo.xyz" \ mongodb "" "--quiet" >${tmp_dir}/horizons.json diff $test_dir/compare/horizons-3.json $tmp_dir/horizons-3.json @@ -98,4 +98,4 @@ apply_cluster ${test_dir}/conf/${cluster}.yml wait_for_running "${cluster}-rs0" 3 wait_cluster_consistency ${cluster} -destroy ${namespace} \ No newline at end of file +destroy ${namespace} diff --git a/pkg/controller/perconaservermongodb/mgo.go b/pkg/controller/perconaservermongodb/mgo.go index e259b472b5..1c1f5c82ba 100644 --- a/pkg/controller/perconaservermongodb/mgo.go +++ b/pkg/controller/perconaservermongodb/mgo.go @@ -50,7 +50,7 @@ func (r *ReconcilePerconaServerMongoDB) reconcileCluster(ctx context.Context, cr return api.AppStateReady, nil } - pods, err := psmdb.GetRSPods(ctx, r.client, cr, replset.Name, false) + pods, err := psmdb.GetRSPods(ctx, r.client, cr, replset.Name) if err != nil { return api.AppStateInit, errors.Wrap(err, "failed to get replset pods") } @@ -238,7 +238,7 @@ func 
(r *ReconcilePerconaServerMongoDB) updateConfigMembers(ctx context.Context, // Primary with a Secondary and an Arbiter (PSA) unsafePSA := cr.Spec.UnsafeConf && rs.Arbiter.Enabled && rs.Arbiter.Size == 1 && !rs.NonVoting.Enabled && rs.Size == 2 - pods, err := psmdb.GetRSPods(ctx, r.client, cr, rs.Name, false) + pods, err := psmdb.GetRSPods(ctx, r.client, cr, rs.Name) if err != nil { return 0, errors.Wrap(err, "get rs pods") } diff --git a/pkg/controller/perconaservermongodb/psmdb_controller.go b/pkg/controller/perconaservermongodb/psmdb_controller.go index 9feb2996bd..152ef9d78d 100644 --- a/pkg/controller/perconaservermongodb/psmdb_controller.go +++ b/pkg/controller/perconaservermongodb/psmdb_controller.go @@ -315,25 +315,30 @@ func (r *ReconcilePerconaServerMongoDB) Reconcile(ctx context.Context, request r } } - removed, err := r.getRemovedSfs(ctx, cr) + removed, err := r.getSTSforRemoval(ctx, cr) if err != nil { return reconcile.Result{}, err } - for _, v := range removed { - rsName := v.Labels["app.kubernetes.io/replset"] + for _, sts := range removed { + rsName := sts.Labels["app.kubernetes.io/replset"] + + log.Info("Deleting STS component from replst", "sts", sts.Name, "rs", rsName) err = r.checkIfPossibleToRemove(ctx, cr, rsName) if err != nil { return reconcile.Result{}, errors.Wrapf(err, "check remove posibility for rs %s", rsName) } - err = r.removeRSFromShard(ctx, cr, rsName) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to remove rs %s", rsName) + if sts.Labels["app.kubernetes.io/component"] == "mongod" { + log.Info("Removing RS from shard", "rs", rsName) + err = r.removeRSFromShard(ctx, cr, rsName) + if err != nil { + return reconcile.Result{}, errors.Wrapf(err, "failed to remove rs %s", rsName) + } } - err = r.client.Delete(ctx, &v) + err = r.client.Delete(ctx, &sts) if err != nil { return reconcile.Result{}, errors.Wrapf(err, "failed to remove rs %s", rsName) } @@ -402,7 +407,7 @@ func (r *ReconcilePerconaServerMongoDB) Reconcile(ctx context.Context, request r "app.kubernetes.io/component": "mongod", } - pods, err := psmdb.GetRSPods(ctx, r.client, cr, replset.Name, false) + pods, err := psmdb.GetRSPods(ctx, r.client, cr, replset.Name) if err != nil { err = errors.Errorf("get pods list for replset %s: %v", replset.Name, err) return reconcile.Result{}, err @@ -667,11 +672,11 @@ func (r *ReconcilePerconaServerMongoDB) safeDownscale(ctx context.Context, cr *a return isDownscale, nil } -func (r *ReconcilePerconaServerMongoDB) getRemovedSfs(ctx context.Context, cr *api.PerconaServerMongoDB) ([]appsv1.StatefulSet, error) { +func (r *ReconcilePerconaServerMongoDB) getSTSforRemoval(ctx context.Context, cr *api.PerconaServerMongoDB) ([]appsv1.StatefulSet, error) { removed := make([]appsv1.StatefulSet, 0) - sfsList := appsv1.StatefulSetList{} - if err := r.client.List(ctx, &sfsList, + stsList := appsv1.StatefulSetList{} + if err := r.client.List(ctx, &stsList, &client.ListOptions{ Namespace: cr.Namespace, LabelSelector: labels.SelectorFromSet(map[string]string{ @@ -683,23 +688,24 @@ func (r *ReconcilePerconaServerMongoDB) getRemovedSfs(ctx context.Context, cr *a } appliedRSNames := make(map[string]struct{}, len(cr.Spec.Replsets)) - for _, v := range cr.Spec.Replsets { - appliedRSNames[cr.Name+"-"+v.Name] = struct{}{} + + for _, rs := range cr.Spec.Replsets { + appliedRSNames[rs.Name] = struct{}{} } - for _, v := range sfsList.Items { - if v.Name == cr.Name+"-"+api.ConfigReplSetName { + for _, sts := range stsList.Items { + component := 
sts.Labels["app.kubernetes.io/component"] + if component == "mongos" || sts.Name == cr.Name+"-"+api.ConfigReplSetName { continue } - component := v.Labels["app.kubernetes.io/component"] - if component == "arbiter" || component == "nonVoting" || component == "mongos" { + rsName := sts.Labels["app.kubernetes.io/replset"] + + if _, ok := appliedRSNames[rsName]; ok { continue } - if _, ok := appliedRSNames[v.Name]; !ok { - removed = append(removed, v) - } + removed = append(removed, sts) } return removed, nil @@ -1166,7 +1172,7 @@ func (r *ReconcilePerconaServerMongoDB) reconcileMongos(ctx context.Context, cr return errors.Wrap(err, "check if mongos custom configuration exists") } - cfgPods, err := psmdb.GetRSPods(ctx, r.client, cr, api.ConfigReplSetName, false) + cfgPods, err := psmdb.GetRSPods(ctx, r.client, cr, api.ConfigReplSetName) if err != nil { return errors.Wrap(err, "get configsvr pods") } diff --git a/pkg/controller/perconaservermongodb/smart.go b/pkg/controller/perconaservermongodb/smart.go index c4d3d45e6b..829fd32d5f 100644 --- a/pkg/controller/perconaservermongodb/smart.go +++ b/pkg/controller/perconaservermongodb/smart.go @@ -70,7 +70,7 @@ func (r *ReconcilePerconaServerMongoDB) smartUpdate(ctx context.Context, cr *api if err != nil { return errors.Wrapf(err, "get config statefulset %s/%s", cr.Namespace, cr.Name+"-"+api.ConfigReplSetName) } - cfgList, err := psmdb.GetRSPods(ctx, r.client, cr, api.ConfigReplSetName, false) + cfgList, err := psmdb.GetRSPods(ctx, r.client, cr, api.ConfigReplSetName) if err != nil { return errors.Wrap(err, "get cfg pod list") } diff --git a/pkg/controller/perconaservermongodb/status.go b/pkg/controller/perconaservermongodb/status.go index 2f5f9954b4..80ca64e80c 100644 --- a/pkg/controller/perconaservermongodb/status.go +++ b/pkg/controller/perconaservermongodb/status.go @@ -256,7 +256,7 @@ func (r *ReconcilePerconaServerMongoDB) writeStatus(ctx context.Context, cr *api } func (r *ReconcilePerconaServerMongoDB) rsStatus(ctx context.Context, cr *api.PerconaServerMongoDB, rsSpec *api.ReplsetSpec) (api.ReplsetStatus, error) { - list, err := psmdb.GetRSPods(ctx, r.client, cr, rsSpec.Name, false) + list, err := psmdb.GetRSPods(ctx, r.client, cr, rsSpec.Name) if err != nil { return api.ReplsetStatus{}, fmt.Errorf("get list: %v", err) } diff --git a/pkg/controller/perconaservermongodb/users.go b/pkg/controller/perconaservermongodb/users.go index 82fd5b94bb..6bf8db5a3e 100644 --- a/pkg/controller/perconaservermongodb/users.go +++ b/pkg/controller/perconaservermongodb/users.go @@ -113,7 +113,7 @@ func (r *ReconcilePerconaServerMongoDB) reconcileUsers(ctx context.Context, cr * pods = append(pods, mongosList.Items...) 
- cfgPodlist, err := psmdb.GetRSPods(ctx, r.client, cr, api.ConfigReplSetName, false) + cfgPodlist, err := psmdb.GetRSPods(ctx, r.client, cr, api.ConfigReplSetName) if err != nil { return errors.Wrap(err, "failed to get mongos pods") } diff --git a/pkg/controller/perconaservermongodbrestore/physical.go b/pkg/controller/perconaservermongodbrestore/physical.go index fce9063a1a..e1b9a6b921 100644 --- a/pkg/controller/perconaservermongodbrestore/physical.go +++ b/pkg/controller/perconaservermongodbrestore/physical.go @@ -591,7 +591,7 @@ func (r *ReconcilePerconaServerMongoDBRestore) prepareReplsetsForPhysicalRestore for _, rs := range replsets { log.Info("Preparing replset for physical restore", "replset", rs.Name) - podList, err := psmdb.GetRSPods(ctx, r.client, cluster, rs.Name, false) + podList, err := psmdb.GetRSPods(ctx, r.client, cluster, rs.Name) if err != nil { return errors.Wrapf(err, "get pods of replset %s", rs.Name) } diff --git a/pkg/psmdb/backup/pbm.go b/pkg/psmdb/backup/pbm.go index e8f87283a6..51c13eb64a 100644 --- a/pkg/psmdb/backup/pbm.go +++ b/pkg/psmdb/backup/pbm.go @@ -129,7 +129,7 @@ type NewPBMFunc func(ctx context.Context, c client.Client, cluster *api.PerconaS func NewPBM(ctx context.Context, c client.Client, cluster *api.PerconaServerMongoDB) (PBM, error) { rs := cluster.Spec.Replsets[0] - pods, err := psmdb.GetRSPods(ctx, c, cluster, rs.Name, false) + pods, err := psmdb.GetRSPods(ctx, c, cluster, rs.Name) if err != nil { return nil, errors.Wrapf(err, "get pods list for replset %s", rs.Name) } diff --git a/pkg/psmdb/client.go b/pkg/psmdb/client.go index f08ef3d9a1..edcb932bfa 100644 --- a/pkg/psmdb/client.go +++ b/pkg/psmdb/client.go @@ -17,7 +17,7 @@ type Credentials struct { } func MongoClient(ctx context.Context, k8sclient client.Client, cr *api.PerconaServerMongoDB, rs api.ReplsetSpec, c Credentials) (mongo.Client, error) { - pods, err := GetRSPods(ctx, k8sclient, cr, rs.Name, false) + pods, err := GetRSPods(ctx, k8sclient, cr, rs.Name) if err != nil { return nil, errors.Wrapf(err, "get pods list for replset %s", rs.Name) } diff --git a/pkg/psmdb/getters.go b/pkg/psmdb/getters.go index aa3dcc147d..2a5df0b070 100644 --- a/pkg/psmdb/getters.go +++ b/pkg/psmdb/getters.go @@ -2,8 +2,8 @@ package psmdb import ( "context" - "sort" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" "sigs.k8s.io/controller-runtime/pkg/client" @@ -36,70 +36,41 @@ func MongosLabels(cr *api.PerconaServerMongoDB) map[string]string { return lbls } -func GetRSPods(ctx context.Context, k8sclient client.Client, cr *api.PerconaServerMongoDB, rsName string, includeOutdated bool) (corev1.PodList, error) { - pods := corev1.PodList{} - err := k8sclient.List(ctx, - &pods, +func GetRSPods(ctx context.Context, k8sclient client.Client, cr *api.PerconaServerMongoDB, rsName string) (corev1.PodList, error) { + rsPods := corev1.PodList{} + + stsList := appsv1.StatefulSetList{} // All statefulsets related to replset `rsName` + if err := k8sclient.List(ctx, &stsList, &client.ListOptions{ - Namespace: cr.Namespace, - LabelSelector: labels.SelectorFromSet(RSLabels(cr, rsName)), + Namespace: cr.Namespace, + LabelSelector: labels.SelectorFromSet(map[string]string{ + "app.kubernetes.io/instance": cr.Name, + "app.kubernetes.io/replset": rsName, + }), }, - ) - if err != nil { - return pods, errors.Wrap(err, "failed to list pods") - } - - if includeOutdated { - return pods, nil - } - - sort.Slice(pods.Items, func(i, j int) bool { - return pods.Items[i].Name < pods.Items[j].Name - 
}) - - rs := cr.Spec.Replset(rsName) - if rs == nil { - return pods, errors.Errorf("replset %s is not found", rsName) + ); err != nil { + return rsPods, errors.Wrapf(err, "failed to get statefulset list related to replset %s", rsName) } - var rsPods []corev1.Pod - if rs.ClusterRole == api.ClusterRoleConfigSvr { - rsPods = filterPodsByComponent(pods, api.ConfigReplSetName) - } else { - rsPods = filterPodsByComponent(pods, "mongod") - } - - arbiterPods := filterPodsByComponent(pods, "arbiter") - nvPods := filterPodsByComponent(pods, "nonVoting") + for _, sts := range stsList.Items { + lbls := RSLabels(cr, rsName) + lbls["app.kubernetes.io/component"] = sts.Labels["app.kubernetes.io/component"] + compPods := corev1.PodList{} + err := k8sclient.List(ctx, + &compPods, + &client.ListOptions{ + Namespace: cr.Namespace, + LabelSelector: labels.SelectorFromSet(lbls), + }, + ) + if err != nil { + return rsPods, errors.Wrap(err, "failed to list pods") + } - if len(rsPods) >= int(rs.Size) { - rsPods = rsPods[:rs.Size] - } - if len(arbiterPods) >= int(rs.Arbiter.Size) { - arbiterPods = arbiterPods[:rs.Arbiter.Size] - } - if len(nvPods) >= int(rs.NonVoting.Size) { - nvPods = nvPods[:rs.NonVoting.Size] + rsPods.Items = append(rsPods.Items, compPods.Items...) } - pods.Items = rsPods - pods.Items = append(pods.Items, arbiterPods...) - pods.Items = append(pods.Items, nvPods...) - return pods, nil -} - -func filterPodsByComponent(list corev1.PodList, component string) []corev1.Pod { - var pods []corev1.Pod - for _, pod := range list.Items { - v, ok := pod.Labels["app.kubernetes.io/component"] - if !ok { - continue - } - if v == component { - pods = append(pods, pod) - } - } - return pods + return rsPods, nil } func GetPrimaryPod(ctx context.Context, mgoClient mongo.Client) (string, error) {
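Reviewer note (not part of the patch): the reworked psmdb.GetRSPods above no longer takes the includeOutdated flag. It now lists the statefulsets labelled with the replset name and then collects the pods of each component (mongod, arbiter, nonVoting) via their labels. The sketch below is a hypothetical, standalone illustration of that behaviour using controller-runtime's fake client; the cluster name, namespace, fixture labels and the assumption that RSLabels only needs the CR's name are illustrative and are not code from this PR.

package main

import (
	"context"
	"fmt"
	"strings"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	api "github.com/percona/percona-server-mongodb-operator/pkg/apis/psmdb/v1"
	"github.com/percona/percona-server-mongodb-operator/pkg/psmdb"
)

func main() {
	// Minimal CR stub; GetRSPods only reads the name and namespace from it.
	cr := &api.PerconaServerMongoDB{
		ObjectMeta: metav1.ObjectMeta{Name: "some-name", Namespace: "test"},
	}

	// One statefulset and one pod per component of rs0. Labels are built with
	// the exported RSLabels helper (as used in getters.go) plus the component
	// label, so they match what the new GetRSPods selects on.
	objs := []client.Object{}
	for _, comp := range []string{"mongod", "arbiter", "nonVoting"} {
		lbls := psmdb.RSLabels(cr, "rs0")
		lbls["app.kubernetes.io/component"] = comp
		name := "some-name-rs0-" + strings.ToLower(comp) // lowercased to keep a valid object name
		objs = append(objs,
			&appsv1.StatefulSet{ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "test", Labels: lbls}},
			&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: name + "-0", Namespace: "test", Labels: lbls}},
		)
	}

	// Fake client pre-loaded with the fixtures; scheme.Scheme already knows apps/v1 and core/v1.
	cl := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(objs...).Build()

	// New signature: no includeOutdated flag. Expect one pod per component, i.e. 3.
	pods, err := psmdb.GetRSPods(context.TODO(), cl, cr, "rs0")
	if err != nil {
		panic(err)
	}
	fmt.Println("rs0 pods:", len(pods.Items))
}

Because membership is now derived from the per-component statefulsets rather than from a sorted, size-trimmed pod list, the includeOutdated argument no longer has a meaning, which is why every call site touched by this diff drops the trailing false.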