diff --git a/e2e-tests/custom-replset-name/compare/find-2nd.json b/e2e-tests/custom-replset-name/compare/find-2nd.json
new file mode 100644
index 0000000000..d0a0868e32
--- /dev/null
+++ b/e2e-tests/custom-replset-name/compare/find-2nd.json
@@ -0,0 +1,4 @@
+switched to db myApp
+{ "_id" : , "x" : 100500 }
+{ "_id" : , "x" : 100501 }
+bye
diff --git a/e2e-tests/custom-replset-name/compare/find.json b/e2e-tests/custom-replset-name/compare/find.json
new file mode 100644
index 0000000000..74495091bf
--- /dev/null
+++ b/e2e-tests/custom-replset-name/compare/find.json
@@ -0,0 +1,3 @@
+switched to db myApp
+{ "_id" : , "x" : 100500 }
+bye
diff --git a/e2e-tests/custom-replset-name/conf/backup-aws-s3.yml b/e2e-tests/custom-replset-name/conf/backup-aws-s3.yml
new file mode 100644
index 0000000000..d7601c50e0
--- /dev/null
+++ b/e2e-tests/custom-replset-name/conf/backup-aws-s3.yml
@@ -0,0 +1,7 @@
+apiVersion: psmdb.percona.com/v1
+kind: PerconaServerMongoDBBackup
+metadata:
+  name: backup-aws-s3
+spec:
+  psmdbCluster: some-name
+  storageName: aws-s3
diff --git a/e2e-tests/custom-replset-name/conf/backup-gcp-cs.yml b/e2e-tests/custom-replset-name/conf/backup-gcp-cs.yml
new file mode 100644
index 0000000000..f176bdd05b
--- /dev/null
+++ b/e2e-tests/custom-replset-name/conf/backup-gcp-cs.yml
@@ -0,0 +1,7 @@
+apiVersion: psmdb.percona.com/v1
+kind: PerconaServerMongoDBBackup
+metadata:
+  name: backup-gcp-cs
+spec:
+  psmdbCluster: some-name
+  storageName: gcp-cs
diff --git a/e2e-tests/custom-replset-name/conf/backup-minio.yml b/e2e-tests/custom-replset-name/conf/backup-minio.yml
new file mode 100644
index 0000000000..adfc0ba899
--- /dev/null
+++ b/e2e-tests/custom-replset-name/conf/backup-minio.yml
@@ -0,0 +1,7 @@
+apiVersion: psmdb.percona.com/v1
+kind: PerconaServerMongoDBBackup
+metadata:
+  name: backup-minio
+spec:
+  psmdbCluster: some-name
+  storageName: minio
diff --git a/e2e-tests/custom-replset-name/conf/restore.yml b/e2e-tests/custom-replset-name/conf/restore.yml
new file mode 100644
index 0000000000..32ab3c4b9a
--- /dev/null
+++ b/e2e-tests/custom-replset-name/conf/restore.yml
@@ -0,0 +1,7 @@
+apiVersion: psmdb.percona.com/v1
+kind: PerconaServerMongoDBRestore
+metadata:
+  name:
+spec:
+  clusterName: some-name
+  backupName:
diff --git a/e2e-tests/custom-replset-name/conf/some-name.yml b/e2e-tests/custom-replset-name/conf/some-name.yml
new file mode 100644
index 0000000000..4ac3efb960
--- /dev/null
+++ b/e2e-tests/custom-replset-name/conf/some-name.yml
@@ -0,0 +1,199 @@
+apiVersion: psmdb.percona.com/v1
+kind: PerconaServerMongoDB
+metadata:
+  name: some-name
+spec:
+  crVersion: 1.14.0
+  allowUnsafeConfigurations: true
+  backup:
+    enabled: true
+    image: percona/percona-backup-mongodb:2.0.4
+    pitr:
+      enabled: false
+    serviceAccountName: percona-server-mongodb-operator
+    storages:
+      aws-s3:
+        type: s3
+        s3:
+          credentialsSecret: aws-s3-secret
+          region: us-east-1
+          bucket: operator-testing
+          prefix: psmdb
+      minio:
+        type: s3
+        s3:
+          credentialsSecret: minio-secret
+          region: us-east-1
+          bucket: operator-testing
+          endpointUrl: http://minio-service:9000/
+      gcp-cs:
+        type: s3
+        s3:
+          credentialsSecret: gcp-cs-secret
+          region: us-east-1
+          bucket: operator-testing
+          prefix: psmdb
+          endpointUrl: https://storage.googleapis.com
+  image: percona/percona-server-mongodb:4.4.10-11
+  imagePullPolicy: Always
+  pmm:
+    enabled: false
+  replsets:
+  - affinity:
+      antiAffinityTopologyKey: topology.kubernetes.io/zone
+    arbiter:
+      affinity:
+        antiAffinityTopologyKey: topology.kubernetes.io/zone
+      enabled: false
+      size: 1
+    configuration: |
+      storage:
+        directoryPerDB: true
+        wiredTiger:
+          engineConfig:
+            directoryForIndexes: true
+    expose:
+      enabled: true
+      exposeType: ClusterIP
+      serviceAnnotations:
+        networking.gke.io/load-balancer-type: Internal
+    name: shard1
+    podDisruptionBudget:
+      maxUnavailable: 1
+    size: 3
+    volumeSpec:
+      persistentVolumeClaim:
+        resources:
+          requests:
+            storage: 2Gi
+        storageClassName: standard-rwo
+  - affinity:
+      antiAffinityTopologyKey: topology.kubernetes.io/zone
+    arbiter:
+      affinity:
+        antiAffinityTopologyKey: topology.kubernetes.io/zone
+      enabled: false
+      size: 1
+    configuration: |
+      storage:
+        directoryPerDB: true
+        wiredTiger:
+          engineConfig:
+            directoryForIndexes: true
+    expose:
+      enabled: true
+      exposeType: ClusterIP
+      serviceAnnotations:
+        networking.gke.io/load-balancer-type: Internal
+    name: shard3
+    podDisruptionBudget:
+      maxUnavailable: 1
+    size: 3
+    volumeSpec:
+      persistentVolumeClaim:
+        resources:
+          requests:
+            storage: 2Gi
+        storageClassName: standard-rwo
+  - affinity:
+      antiAffinityTopologyKey: topology.kubernetes.io/zone
+    arbiter:
+      affinity:
+        antiAffinityTopologyKey: topology.kubernetes.io/zone
+      enabled: false
+      size: 1
+    configuration: |
+      storage:
+        directoryPerDB: true
+        wiredTiger:
+          engineConfig:
+            directoryForIndexes: true
+    expose:
+      enabled: true
+      exposeType: ClusterIP
+      serviceAnnotations:
+        networking.gke.io/load-balancer-type: Internal
+    name: shard5
+    podDisruptionBudget:
+      maxUnavailable: 1
+    size: 3
+    volumeSpec:
+      persistentVolumeClaim:
+        resources:
+          requests:
+            storage: 2Gi
+        storageClassName: standard-rwo
+  - affinity:
+      antiAffinityTopologyKey: topology.kubernetes.io/zone
+    arbiter:
+      affinity:
+        antiAffinityTopologyKey: topology.kubernetes.io/zone
+      enabled: false
+      size: 1
+    configuration: |
+      storage:
+        directoryPerDB: true
+        wiredTiger:
+          engineConfig:
+            directoryForIndexes: true
+    expose:
+      enabled: true
+      exposeType: ClusterIP
+      serviceAnnotations:
+        networking.gke.io/load-balancer-type: Internal
+    name: shard7
+    podDisruptionBudget:
+      maxUnavailable: 1
+    size: 3
+    volumeSpec:
+      persistentVolumeClaim:
+        resources:
+          requests:
+            storage: 2Gi
+        storageClassName: standard-rwo
+  secrets:
+    users: some-users
+  sharding:
+    configsvrReplSet:
+      affinity:
+        antiAffinityTopologyKey: topology.kubernetes.io/zone
+      configuration: |
+        replication:
+          replSetName: csReplSet
+        storage:
+          directoryPerDB: true
+          wiredTiger:
+            engineConfig:
+              directoryForIndexes: true
+      expose:
+        enabled: true
+        exposeType: ClusterIP
+        serviceAnnotations:
+          networking.gke.io/load-balancer-type: Internal
+      podDisruptionBudget:
+        maxUnavailable: 1
+      size: 3
+      volumeSpec:
+        persistentVolumeClaim:
+          resources:
+            requests:
+              storage: 5Gi
+          storageClassName: standard-rwo
+    enabled: true
+    mongos:
+      affinity:
+        antiAffinityTopologyKey: kubernetes.io/hostname
+      expose:
+        exposeType: LoadBalancer
+        serviceAnnotations:
+          networking.gke.io/load-balancer-type: Internal
+      podDisruptionBudget:
+        maxUnavailable: 1
+      size: 3
+  unmanaged: false
+  updateStrategy: SmartUpdate
+  upgradeOptions:
+    apply: Disabled
+    schedule: 0 2 * * *
+    setFCV: false
+    versionServiceEndpoint: https://check.percona.com
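Note: in the test CR above, only the config server replica set overrides its name through the `configuration` block (`replication.replSetName: csReplSet`); the shard replsets (`shard1`, `shard3`, `shard5`, `shard7`) set storage options only, so they keep the names from the CR spec. The sketch below is a standalone re-implementation, not operator code, of the parsing that the new `ReplsetSpec.CustomReplsetName()` helper introduced later in this diff performs, fed with the two kinds of `configuration` blocks used by this CR:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// parseReplSetName mirrors the lookup done by the new
// ReplsetSpec.CustomReplsetName() helper in pkg/apis/psmdb/v1/psmdb_types.go.
func parseReplSetName(configuration string) (string, error) {
	var cfg struct {
		Replication struct {
			ReplSetName string `yaml:"replSetName,omitempty"`
		} `yaml:"replication,omitempty"`
	}
	if err := yaml.Unmarshal([]byte(configuration), &cfg); err != nil {
		return "", err
	}
	if cfg.Replication.ReplSetName == "" {
		return "", fmt.Errorf("replSetName is not configured")
	}
	return cfg.Replication.ReplSetName, nil
}

func main() {
	// configsvrReplSet block from some-name.yml: the override applies.
	fmt.Println(parseReplSetName("replication:\n  replSetName: csReplSet\nstorage:\n  directoryPerDB: true\n"))
	// shard blocks set storage options only: callers fall back to the CR name.
	fmt.Println(parseReplSetName("storage:\n  directoryPerDB: true\n"))
}
```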
diff --git a/e2e-tests/custom-replset-name/run b/e2e-tests/custom-replset-name/run
new file mode 100755
index 0000000000..7d10accef3
--- /dev/null
+++ b/e2e-tests/custom-replset-name/run
@@ -0,0 +1,75 @@
+#!/bin/bash
+
+set -o errexit
+set -o xtrace
+
+test_dir=$(realpath $(dirname $0))
+. ${test_dir}/../functions
+
+create_namespace $namespace
+deploy_operator
+apply_s3_storage_secrets
+deploy_minio
+
+kubectl_bin apply -f $conf_dir/secrets.yml -f $conf_dir/client.yml -f $conf_dir/minio-secret.yml
+cluster="some-name"
+
+desc 'create first PSMDB cluster'
+apply_cluster $test_dir/conf/${cluster}.yml
+wait_cluster_consistency $cluster
+
+desc 'write data, read from all'
+run_mongos \
+	'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' \
+	"userAdmin:userAdmin123456@$cluster-mongos.$namespace"
+sleep 2
+run_mongos \
+	'use myApp\n db.test.insert({ x: 100500 })' \
+	"myApp:myPass@$cluster-mongos.$namespace"
+# compare_mongos_cmd "find" "myApp:myPass@$cluster-mongos.$namespace"
+
+wait_backup_agent $cluster-shard1-0
+wait_backup_agent $cluster-shard1-1
+wait_backup_agent $cluster-shard1-2
+wait_backup_agent $cluster-shard3-0
+wait_backup_agent $cluster-shard3-1
+wait_backup_agent $cluster-shard3-2
+wait_backup_agent $cluster-shard5-0
+wait_backup_agent $cluster-shard5-1
+wait_backup_agent $cluster-shard5-2
+wait_backup_agent $cluster-shard7-0
+wait_backup_agent $cluster-shard7-1
+wait_backup_agent $cluster-shard7-2
+backup_name_aws="backup-aws-s3"
+backup_name_minio="backup-minio"
+backup_name_gcp="backup-gcp-cs"
+
+desc 'run backups'
+run_backup minio
+wait_backup "$backup_name_minio"
+
+if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then
+	run_backup aws-s3
+	run_backup gcp-cs
+
+	wait_backup "$backup_name_aws"
+	wait_backup "$backup_name_gcp"
+fi
+
+if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then
+	desc 'check backup and restore -- aws-s3'
+	run_restore $backup_name_aws 3 1 "-mongos"
+	wait_restore $backup_name_aws $cluster "ready"
+
+	desc 'check backup and restore -- gcp-cs'
+	run_restore $backup_name_gcp 3 1 "-mongos"
+	wait_restore $backup_name_gcp $cluster "ready"
+fi
+
+desc 'check backup and restore -- minio'
+run_restore $backup_name_minio 3 1 "-mongos"
+
+sleep 120
+wait_restore $backup_name_minio $cluster "ready"
+
+destroy $namespace
diff --git a/e2e-tests/init-deploy/compare/backup-44.json b/e2e-tests/init-deploy/compare/backup-44.json
index 99be5f1e7d..3d371a8940 100644
--- a/e2e-tests/init-deploy/compare/backup-44.json
+++ b/e2e-tests/init-deploy/compare/backup-44.json
@@ -388,7 +388,6 @@
 			},
 			"actions": [
 				"appendOplogNote",
-				"applyOps",
 				"checkFreeMonitoringStatus",
 				"connPoolStats",
 				"forceUUID",
diff --git a/e2e-tests/init-deploy/compare/clusterAdmin-44.json b/e2e-tests/init-deploy/compare/clusterAdmin-44.json
index 134049ab79..462ed06e56 100644
--- a/e2e-tests/init-deploy/compare/clusterAdmin-44.json
+++ b/e2e-tests/init-deploy/compare/clusterAdmin-44.json
@@ -142,23 +142,6 @@
 				"update"
 			]
 		},
-		{
-			"resource": {
-				"db": "local",
-				"collection": "system.healthlog"
-			},
-			"actions": [
-				"changeStream",
-				"collStats",
-				"dbHash",
-				"dbStats",
-				"find",
-				"killCursors",
-				"listCollections",
-				"listIndexes",
-				"planCacheRead"
-			]
-		},
 		{
 			"resource": {
 				"db": "local",
@@ -245,14 +228,6 @@
 				"unlock",
 				"useUUID"
 			]
-		},
-		{
-			"resource": {
-				"anyResource": true
-			},
-			"actions": [
-				"dbCheck"
-			]
 		}
 	]
 }
diff --git a/e2e-tests/run-distro.csv b/e2e-tests/run-distro.csv
index a54525bff8..d73d497ffc 100644
--- a/e2e-tests/run-distro.csv
+++ b/e2e-tests/run-distro.csv
@@ -1,5 +1,6 @@
 arbiter
 balancer
+custom-replset-name
 data-at-rest-encryption
 data-sharded
 default-cr
diff --git a/e2e-tests/run-minikube.csv b/e2e-tests/run-minikube.csv
index 214373bee7..988f90ba10 100644
--- a/e2e-tests/run-minikube.csv
+++ b/e2e-tests/run-minikube.csv
@@ -1,5 +1,6 @@
 arbiter
 balancer
+custom-replset-name
 default-cr
 demand-backup
 demand-backup-physical
diff --git a/e2e-tests/run-pr.csv b/e2e-tests/run-pr.csv
index a3a83ce35f..38b46419bb 100644
--- a/e2e-tests/run-pr.csv
+++ b/e2e-tests/run-pr.csv
@@ -1,5 +1,6 @@
 arbiter
 balancer
+custom-replset-name
 cross-site-sharded
 data-at-rest-encryption
 data-sharded
diff --git a/e2e-tests/run-release.csv b/e2e-tests/run-release.csv
index bda706adb8..3cc5378a5e 100644
--- a/e2e-tests/run-release.csv
+++ b/e2e-tests/run-release.csv
@@ -1,5 +1,6 @@
 arbiter
 balancer
+custom-replset-name
 cross-site-sharded
 data-at-rest-encryption
 data-sharded
diff --git a/pkg/apis/psmdb/v1/psmdb_types.go b/pkg/apis/psmdb/v1/psmdb_types.go
index 914769b5b0..d1f3735484 100644
--- a/pkg/apis/psmdb/v1/psmdb_types.go
+++ b/pkg/apis/psmdb/v1/psmdb_types.go
@@ -6,13 +6,12 @@ import (
 	"strconv"
 	"strings"
 
-	"gopkg.in/yaml.v2"
-
 	"github.com/go-logr/logr"
 	v "github.com/hashicorp/go-version"
 	"github.com/percona/percona-backup-mongodb/pbm"
 	"github.com/percona/percona-backup-mongodb/pbm/compress"
 	"github.com/pkg/errors"
+	"gopkg.in/yaml.v2"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -530,6 +529,25 @@ func (r *ReplsetSpec) PodFQDNWithPort(cr *PerconaServerMongoDB, podName string)
 	return fmt.Sprintf("%s:%d", r.PodFQDN(cr, podName), DefaultMongodPort)
 }
 
+func (r ReplsetSpec) CustomReplsetName() (string, error) {
+	var cfg struct {
+		Replication struct {
+			ReplSetName string `yaml:"replSetName,omitempty"`
+		} `yaml:"replication,omitempty"`
+	}
+
+	err := yaml.Unmarshal([]byte(r.Configuration), &cfg)
+	if err != nil {
+		return cfg.Replication.ReplSetName, errors.Wrap(err, "unmarshal configuration")
+	}
+
+	if len(cfg.Replication.ReplSetName) == 0 {
+		return cfg.Replication.ReplSetName, errors.New("replSetName is not configured")
+	}
+
+	return cfg.Replication.ReplSetName, nil
+}
+
 type LivenessProbeExtended struct {
 	corev1.Probe        `json:",inline"`
 	StartupDelaySeconds int `json:"startupDelaySeconds,omitempty"`
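The `if name, err := replset.CustomReplsetName(); err == nil { rsName = name }` fallback is repeated verbatim by the hunks below in mgo.go, psmdb_controller.go, client.go, container.go, and mongos.go; it could be captured once next to the helper. A hypothetical companion method, not part of this PR, that deliberately keeps the PR's semantics, where any error (including unparsable YAML) silently falls back to the CR name:

```go
// ReplsetNameOrDefault is a hypothetical helper consolidating the fallback
// pattern repeated by the callers below. It would live next to
// CustomReplsetName in pkg/apis/psmdb/v1/psmdb_types.go.
func (r ReplsetSpec) ReplsetNameOrDefault() string {
	if name, err := r.CustomReplsetName(); err == nil {
		return name
	}
	// No replication.replSetName override (or the configuration did not
	// parse): keep the name from the CR spec.
	return r.Name
}
```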
diff --git a/pkg/controller/perconaservermongodb/mgo.go b/pkg/controller/perconaservermongodb/mgo.go
index c77bbbf69d..0a18bb5707 100644
--- a/pkg/controller/perconaservermongodb/mgo.go
+++ b/pkg/controller/perconaservermongodb/mgo.go
@@ -185,19 +185,25 @@ func (r *ReconcilePerconaServerMongoDB) reconcileCluster(ctx context.Context, cr
 		return api.AppStateError, errors.Wrap(err, "set default RW concern")
 	}
 
-	in, err := inShard(ctx, mongosSession, replset.Name)
+	rsName := replset.Name
+	name, err := replset.CustomReplsetName()
+	if err == nil {
+		rsName = name
+	}
+
+	in, err := inShard(ctx, mongosSession, rsName)
 	if err != nil {
 		return api.AppStateError, errors.Wrap(err, "get shard")
 	}
 
 	if !in {
-		log.Info("adding rs to shard", "rs", replset.Name)
+		log.Info("adding rs to shard", "rs", rsName)
 		err := r.handleRsAddToShard(ctx, cr, replset, pods.Items[0], mongosPods[0])
 		if err != nil {
 			return api.AppStateError, errors.Wrap(err, "add shard")
 		}
 
-		log.Info("added to shard", "rs", replset.Name)
+		log.Info("added to shard", "rs", rsName)
 	}
 
 	rs := cr.Status.Replsets[replset.Name]
@@ -500,7 +506,13 @@ func (r *ReconcilePerconaServerMongoDB) handleRsAddToShard(ctx context.Context,
 		}
 	}()
 
-	err = cli.AddShard(ctx, replset.Name, host)
+	rsName := replset.Name
+	name, err := replset.CustomReplsetName()
+	if err == nil {
+		rsName = name
+	}
+
+	err = cli.AddShard(ctx, rsName, host)
 	if err != nil {
 		return errors.Wrap(err, "failed to add shard")
 	}
@@ -520,7 +532,13 @@ func (r *ReconcilePerconaServerMongoDB) handleReplsetInit(ctx context.Context, c
 			continue
 		}
 
-		log.Info("initiating replset", "replset", replset.Name, "pod", pod.Name)
+		replsetName := replset.Name
+		name, err := replset.CustomReplsetName()
+		if err == nil {
+			replsetName = name
+		}
+
+		log.Info("initiating replset", "replset", replsetName, "pod", pod.Name)
 
 		host, err := psmdb.MongoHost(ctx, r.client, cr, replset.Name, replset.Expose.Enabled, pod)
 		if err != nil {
@@ -557,7 +575,7 @@ func (r *ReconcilePerconaServerMongoDB) handleReplsetInit(ctx context.Context, c
 			}
 		)
 		EOF
-	`, mongoCmd, replset.Name, host),
+	`, mongoCmd, replsetName, host),
 	}
 
 	errb.Reset()
@@ -582,7 +600,7 @@ func (r *ReconcilePerconaServerMongoDB) handleReplsetInit(ctx context.Context, c
 		return fmt.Errorf("exec add admin user: %v / %s / %s", err, outb.String(), errb.String())
 	}
 
-	log.Info("replset initialized", "replset", replset.Name, "pod", pod.Name)
+	log.Info("replset initialized", "replset", replsetName, "pod", pod.Name)
 
 	return nil
 }
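A subtlety in the `handleReplsetInit` hunks above: `psmdb.MongoHost` is still called with `replset.Name`, because pod and Service DNS names derive from the CR spec name; only the replica set `_id` interpolated into the initiate script switches to the custom name. Schematic illustration with assumed values for the config server replset of the test CR (the operator's default CR name for it is `cfg`):

```go
// Schematic only; which name goes where after this patch.
func exampleNames() (crName, customName string) {
	crName = "cfg"           // k8s side: StatefulSet/Service DNS, e.g. some-name-cfg-0.some-name-cfg.<ns>
	customName = "csReplSet" // mongod side: rs.initiate({_id: ...}), cli.AddShard, --replSet
	return crName, customName
}
```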
diff --git a/pkg/controller/perconaservermongodb/psmdb_controller.go b/pkg/controller/perconaservermongodb/psmdb_controller.go
index 054af288a2..be8b718edc 100644
--- a/pkg/controller/perconaservermongodb/psmdb_controller.go
+++ b/pkg/controller/perconaservermongodb/psmdb_controller.go
@@ -1560,8 +1560,11 @@ func (r *ReconcilePerconaServerMongoDB) reconcileStatefulSet(
 	}
 
 	if cr.Spec.Backup.Enabled {
-		agentC := backup.AgentContainer(cr, replset.Name)
-		sfsSpec.Template.Spec.Containers = append(sfsSpec.Template.Spec.Containers, agentC)
+		rsName := replset.Name
+		if name, err := replset.CustomReplsetName(); err == nil {
+			rsName = name
+		}
+		sfsSpec.Template.Spec.Containers = append(sfsSpec.Template.Spec.Containers, backup.AgentContainer(cr, rsName))
 	}
 
 	pmmC := psmdb.AddPMMContainer(ctx, cr, secret, cr.Spec.PMM.MongodParams)
diff --git a/pkg/psmdb/client.go b/pkg/psmdb/client.go
index 2837560e2e..24694d2b27 100644
--- a/pkg/psmdb/client.go
+++ b/pkg/psmdb/client.go
@@ -27,8 +27,14 @@ func MongoClient(ctx context.Context, k8sclient client.Client, cr *api.PerconaSe
 		return nil, errors.Wrap(err, "get replset addr")
 	}
 
+	rsName := rs.Name
+	name, err := rs.CustomReplsetName()
+	if err == nil {
+		rsName = name
+	}
+
 	conf := &mongo.Config{
-		ReplSetName: rs.Name,
+		ReplSetName: rsName,
 		Hosts:       rsAddrs,
 		Username:    c.Username,
 		Password:    c.Password,
diff --git a/pkg/psmdb/container.go b/pkg/psmdb/container.go
index 1e445d1bc6..c28b75c4a3 100644
--- a/pkg/psmdb/container.go
+++ b/pkg/psmdb/container.go
@@ -80,6 +80,12 @@ func container(ctx context.Context, cr *api.PerconaServerMongoDB, replset *api.R
 			MountPath: "/etc/users-secret",
 		})
 	}
+
+	rsName := replset.Name
+	if name, err := replset.CustomReplsetName(); err == nil {
+		rsName = name
+	}
+
 	container := corev1.Container{
 		Name:            name,
 		Image:           cr.Spec.Image,
@@ -107,7 +113,7 @@ func container(ctx context.Context, cr *api.PerconaServerMongoDB, replset *api.R
 			},
 			{
 				Name:  "MONGODB_REPLSET",
-				Value: replset.Name,
+				Value: rsName,
 			},
 		},
 		EnvFrom: []corev1.EnvFromSource{
@@ -150,8 +156,7 @@ func container(ctx context.Context, cr *api.PerconaServerMongoDB, replset *api.R
 }
 
 // containerArgs returns the args to pass to the mSpec container
-func containerArgs(ctx context.Context, cr *api.PerconaServerMongoDB, replset *api.ReplsetSpec, resources corev1.ResourceRequirements,
-	useConfigFile bool) []string {
+func containerArgs(ctx context.Context, cr *api.PerconaServerMongoDB, replset *api.ReplsetSpec, resources corev1.ResourceRequirements, useConfigFile bool) []string {
 	// TODO(andrew): in the safe mode `sslAllowInvalidCertificates` should be set only with the external services
 	args := []string{
 		"--bind_ip_all",
@@ -164,6 +169,11 @@ func containerArgs(ctx context.Context, cr *api.PerconaServerMongoDB, replset *a
 		"--sslAllowInvalidCertificates",
 	}
 
+	name, err := replset.CustomReplsetName()
+	if err == nil {
+		args[4] = "--replSet=" + name
+	}
+
 	if cr.Spec.UnsafeConf {
 		args = append(args,
 			"--clusterAuthMode=keyFile",
diff --git a/pkg/psmdb/mongos.go b/pkg/psmdb/mongos.go
index b714da18ab..cc4467b6ab 100644
--- a/pkg/psmdb/mongos.go
+++ b/pkg/psmdb/mongos.go
@@ -239,6 +239,11 @@ func mongosContainer(cr *api.PerconaServerMongoDB, useConfigFile bool, cfgInstan
 		container.Command = []string{BinMountPath + "/ps-entry.sh"}
 	}
 
+	if cr.CompareVersion("1.15.0") >= 0 {
+		container.LivenessProbe.Exec.Command[0] = "/opt/percona/mongodb-healthcheck"
+		container.ReadinessProbe.Exec.Command[0] = "/opt/percona/mongodb-healthcheck"
+	}
+
 	return container, nil
 }
 
@@ -246,9 +251,15 @@ func mongosContainerArgs(cr *api.PerconaServerMongoDB, resources corev1.Resource
 	msSpec := cr.Spec.Sharding.Mongos
 	cfgRs := cr.Spec.Sharding.ConfigsvrReplSet
 
+	cfgRsName := cfgRs.Name
+	name, err := cfgRs.CustomReplsetName()
+	if err == nil {
+		cfgRsName = name
+	}
+
 	// sort config instances to prevent unnecessary updates
 	sort.Strings(cfgInstances)
-	configDB := fmt.Sprintf("%s/%s", cfgRs.Name, strings.Join(cfgInstances, ","))
+	configDB := fmt.Sprintf("%s/%s", cfgRsName, strings.Join(cfgInstances, ","))
 
 	args := []string{
 		"mongos",
 		"--bind_ip_all",
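The section ends mid-hunk here. For context, the effect of that last change: with the test CR's `csReplSet` override, the `--configdb` string handed to mongos must be prefixed with the replica set name the config servers actually report, not the CR name `cfg`, or mongos cannot reach the config replset. A runnable sketch with assumed instance addresses:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// Assumed config server addresses; the operator computes these at runtime.
	cfgInstances := []string{
		"some-name-cfg-1.some-name-cfg.test.svc.cluster.local:27017",
		"some-name-cfg-0.some-name-cfg.test.svc.cluster.local:27017",
		"some-name-cfg-2.some-name-cfg.test.svc.cluster.local:27017",
	}
	// Same stabilization as mongosContainerArgs, to avoid spurious updates.
	sort.Strings(cfgInstances)

	// Before this patch the prefix was cfgRs.Name ("cfg"); now it is the
	// custom name parsed from the configsvr configuration block.
	configDB := fmt.Sprintf("%s/%s", "csReplSet", strings.Join(cfgInstances, ","))
	fmt.Println("--configdb", configDB)
}
```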