diff --git a/README.md b/README.md
index 721f50c7ef..a46b99aabf 100644
--- a/README.md
+++ b/README.md
@@ -85,7 +85,7 @@ You can get early access to new product features, invite-only ”ask me anything
 
 # Roadmap
 
-We have an experimental public roadmap which can be found [here](https://github.com/percona/roadmap/projects/1). Please feel free to contribute and propose new features by following the roadmap [guidelines](https://github.com/percona/roadmap).
+We have a public roadmap which can be found [here](https://github.com/orgs/percona/projects/10). Please feel free to contribute and propose new features by following the roadmap [guidelines](https://github.com/percona/roadmap).
 
 # Submitting Bug Reports
 
diff --git a/e2e-tests/demand-backup-eks-credentials-irsa/compare/find-2nd.json b/e2e-tests/demand-backup-eks-credentials-irsa/compare/find-2nd.json
new file mode 100644
index 0000000000..d0a0868e32
--- /dev/null
+++ b/e2e-tests/demand-backup-eks-credentials-irsa/compare/find-2nd.json
@@ -0,0 +1,4 @@
+switched to db myApp
+{ "_id" : , "x" : 100500 }
+{ "_id" : , "x" : 100501 }
+bye
diff --git a/e2e-tests/demand-backup-eks-credentials-irsa/compare/find-3nd.json b/e2e-tests/demand-backup-eks-credentials-irsa/compare/find-3nd.json
new file mode 100644
index 0000000000..a5e665128c
--- /dev/null
+++ b/e2e-tests/demand-backup-eks-credentials-irsa/compare/find-3nd.json
@@ -0,0 +1,4 @@
+switched to db myApp
+{ "_id" : , "x" : 100500 }
+{ "_id" : , "x" : 100502 }
+bye
diff --git a/e2e-tests/demand-backup-eks-credentials-irsa/compare/find.json b/e2e-tests/demand-backup-eks-credentials-irsa/compare/find.json
new file mode 100644
index 0000000000..74495091bf
--- /dev/null
+++ b/e2e-tests/demand-backup-eks-credentials-irsa/compare/find.json
@@ -0,0 +1,3 @@
+switched to db myApp
+{ "_id" : , "x" : 100500 }
+bye
diff --git a/e2e-tests/demand-backup-eks-credentials-irsa/compare/statefulset_some-name-rs0.yml b/e2e-tests/demand-backup-eks-credentials-irsa/compare/statefulset_some-name-rs0.yml
new file mode 100644
index 0000000000..7d0ed280db
--- /dev/null
+++ b/e2e-tests/demand-backup-eks-credentials-irsa/compare/statefulset_some-name-rs0.yml
@@ -0,0 +1,269 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  annotations: {}
+  generation: 1
+  labels:
+    app.kubernetes.io/component: mongod
+    app.kubernetes.io/instance: some-name
+    app.kubernetes.io/managed-by: percona-server-mongodb-operator
+    app.kubernetes.io/name: percona-server-mongodb
+    app.kubernetes.io/part-of: percona-server-mongodb
+    app.kubernetes.io/replset: rs0
+  name: some-name-rs0
+  ownerReferences:
+  - controller: true
+    kind: PerconaServerMongoDB
+    name: some-name
+spec:
+  podManagementPolicy: OrderedReady
+  replicas: 3
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      app.kubernetes.io/component: mongod
+      app.kubernetes.io/instance: some-name
+      app.kubernetes.io/managed-by: percona-server-mongodb-operator
+      app.kubernetes.io/name: percona-server-mongodb
+      app.kubernetes.io/part-of: percona-server-mongodb
+      app.kubernetes.io/replset: rs0
+  serviceName: some-name-rs0
+  template:
+    metadata:
+      annotations: {}
+      labels:
+        app.kubernetes.io/component: mongod
+        app.kubernetes.io/instance: some-name
+        app.kubernetes.io/managed-by: percona-server-mongodb-operator
+        app.kubernetes.io/name: percona-server-mongodb
+        app.kubernetes.io/part-of: percona-server-mongodb
+        app.kubernetes.io/replset: rs0
+    spec:
+      containers:
+      - args:
+        - --bind_ip_all
+        - --auth
+        - --dbpath=/data/db
+        - --port=27017
+        - --replSet=rs0
+        - --storageEngine=wiredTiger
+        - --relaxPermChecks
+        - --sslAllowInvalidCertificates
+        - --clusterAuthMode=x509
+        - --tlsMode=preferTLS
+        - --enableEncryption
+        - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key
+        - --wiredTigerCacheSizeGB=0.25
+        - --wiredTigerIndexPrefixCompression=true
+        - --config=/etc/mongodb-config/mongod.conf
+        - --quiet
+        command:
+        - /opt/percona/ps-entry.sh
+        env:
+        - name: SERVICE_NAME
+          value: some-name
+        - name: MONGODB_PORT
+          value: "27017"
+        - name: MONGODB_REPLSET
+          value: rs0
+        envFrom:
+        - secretRef:
+            name: internal-some-name-users
+            optional: false
+        imagePullPolicy: Always
+        livenessProbe:
+          exec:
+            command:
+            - /opt/percona/mongodb-healthcheck
+            - k8s
+            - liveness
+            - --ssl
+            - --sslInsecure
+            - --sslCAFile
+            - /etc/mongodb-ssl/ca.crt
+            - --sslPEMKeyFile
+            - /tmp/tls.pem
+            - --startupDelaySeconds
+            - "7200"
+          failureThreshold: 4
+          initialDelaySeconds: 60
+          periodSeconds: 30
+          successThreshold: 1
+          timeoutSeconds: 10
+        name: mongod
+        ports:
+        - containerPort: 27017
+          name: mongodb
+          protocol: TCP
+        readinessProbe:
+          exec:
+            command:
+            - /opt/percona/mongodb-healthcheck
+            - k8s
+            - readiness
+            - --component
+            - mongod
+          failureThreshold: 8
+          initialDelaySeconds: 10
+          periodSeconds: 3
+          successThreshold: 1
+          timeoutSeconds: 2
+        resources:
+          limits:
+            cpu: 500m
+            memory: 1G
+          requests:
+            cpu: 100m
+            memory: 100M
+        securityContext:
+          runAsNonRoot: true
+          runAsUser: 1001
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+        volumeMounts:
+        - mountPath: /data/db
+          name: mongod-data
+        - mountPath: /etc/mongodb-secrets
+          name: some-name-mongodb-keyfile
+          readOnly: true
+        - mountPath: /etc/mongodb-ssl
+          name: ssl
+          readOnly: true
+        - mountPath: /etc/mongodb-ssl-internal
+          name: ssl-internal
+          readOnly: true
+        - mountPath: /etc/mongodb-config
+          name: config
+        - mountPath: /opt/percona
+          name: bin
+        - mountPath: /etc/mongodb-encryption
+          name: some-name-mongodb-encryption-key
+          readOnly: true
+        - mountPath: /etc/users-secret
+          name: users-secret-file
+        workingDir: /data/db
+      - args:
+        - pbm-agent-entrypoint
+        command:
+        - /opt/percona/pbm-entry.sh
+        env:
+        - name: PBM_AGENT_MONGODB_USERNAME
+          valueFrom:
+            secretKeyRef:
+              key: MONGODB_BACKUP_USER
+              name: internal-some-name-users
+              optional: false
+        - name: PBM_AGENT_MONGODB_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              key: MONGODB_BACKUP_PASSWORD
+              name: internal-some-name-users
+              optional: false
+        - name: PBM_MONGODB_REPLSET
+          value: rs0
+        - name: PBM_MONGODB_PORT
+          value: "27017"
+        - name: PBM_AGENT_SIDECAR
+          value: "true"
+        - name: PBM_AGENT_SIDECAR_SLEEP
+          value: "5"
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.name
+        - name: PBM_MONGODB_URI
+          value: mongodb://$(PBM_AGENT_MONGODB_USERNAME):$(PBM_AGENT_MONGODB_PASSWORD)@$(POD_NAME)
+        - name: PBM_AGENT_TLS_ENABLED
+          value: "true"
+        imagePullPolicy: Always
+        name: backup-agent
+        resources: {}
+        securityContext:
+          runAsNonRoot: true
+          runAsUser: 1001
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+        volumeMounts:
+        - mountPath: /etc/mongodb-ssl
+          name: ssl
+          readOnly: true
+        - mountPath: /opt/percona
+          name: bin
+          readOnly: true
+        - mountPath: /data/db
+          name: mongod-data
+      dnsPolicy: ClusterFirst
+      initContainers:
+      - command:
+        - /init-entrypoint.sh
+        imagePullPolicy: Always
+        name: mongo-init
+        resources:
+          limits:
+            cpu: 500m
+            memory: 1G
+          requests:
+            cpu: 100m
+            memory: 100M
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+        volumeMounts:
+        - mountPath: /data/db
+          name: mongod-data
+        - mountPath: /opt/percona
+          name: bin
+      restartPolicy: Always
+      schedulerName: default-scheduler
+      securityContext:
+        fsGroup: 1001
+      serviceAccount: default
+      serviceAccountName: default
+      terminationGracePeriodSeconds: 60
+      volumes:
+      - name: some-name-mongodb-keyfile
+        secret:
+          defaultMode: 288
+          optional: false
+          secretName: some-name-mongodb-keyfile
+      - emptyDir: {}
+        name: bin
+      - configMap:
+          defaultMode: 420
+          name: some-name-rs0-mongod
+          optional: true
+        name: config
+      - name: some-name-mongodb-encryption-key
+        secret:
+          defaultMode: 288
+          optional: false
+          secretName: some-name-mongodb-encryption-key
+      - name: ssl
+        secret:
+          defaultMode: 288
+          optional: false
+          secretName: some-name-ssl
+      - name: ssl-internal
+        secret:
+          defaultMode: 288
+          optional: true
+          secretName: some-name-ssl-internal
+      - name: users-secret-file
+        secret:
+          defaultMode: 420
+          secretName: internal-some-name-users
+  updateStrategy:
+    rollingUpdate:
+      partition: 0
+    type: RollingUpdate
+  volumeClaimTemplates:
+  - metadata:
+      name: mongod-data
+    spec:
+      accessModes:
+      - ReadWriteOnce
+      resources:
+        requests:
+          storage: 1Gi
+    status:
+      phase: Pending
diff --git a/e2e-tests/demand-backup-eks-credentials-irsa/conf/backup-aws-s3.yml b/e2e-tests/demand-backup-eks-credentials-irsa/conf/backup-aws-s3.yml
new file mode 100644
index 0000000000..469bc6df52
--- /dev/null
+++ b/e2e-tests/demand-backup-eks-credentials-irsa/conf/backup-aws-s3.yml
@@ -0,0 +1,9 @@
+apiVersion: psmdb.percona.com/v1
+kind: PerconaServerMongoDBBackup
+metadata:
+  finalizers:
+  - percona.com/delete-backup
+  name: backup-aws-s3
+spec:
+  clusterName: some-name
+  storageName: aws-s3
diff --git a/e2e-tests/demand-backup-eks-credentials-irsa/conf/pitr.yml b/e2e-tests/demand-backup-eks-credentials-irsa/conf/pitr.yml
new file mode 100644
index 0000000000..28279f5871
--- /dev/null
+++ b/e2e-tests/demand-backup-eks-credentials-irsa/conf/pitr.yml
@@ -0,0 +1,11 @@
+apiVersion: psmdb.percona.com/v1
+kind: PerconaServerMongoDBRestore
+metadata:
+  name:
+spec:
+  clusterName: some-name
+  backupName:
+  storageName: aws-s3
+  pitr:
+    type: date
+    date:
\ No newline at end of file
diff --git a/e2e-tests/demand-backup-eks-credentials-irsa/conf/restore.yml b/e2e-tests/demand-backup-eks-credentials-irsa/conf/restore.yml
new file mode 100644
index 0000000000..32ab3c4b9a
--- /dev/null
+++ b/e2e-tests/demand-backup-eks-credentials-irsa/conf/restore.yml
@@ -0,0 +1,7 @@
+apiVersion: psmdb.percona.com/v1
+kind: PerconaServerMongoDBRestore
+metadata:
+  name:
+spec:
+  clusterName: some-name
+  backupName:
diff --git a/e2e-tests/demand-backup-eks-credentials-irsa/conf/role-trust-policy.json b/e2e-tests/demand-backup-eks-credentials-irsa/conf/role-trust-policy.json
new file mode 100644
index 0000000000..b3bc5aa9f9
--- /dev/null
+++ b/e2e-tests/demand-backup-eks-credentials-irsa/conf/role-trust-policy.json
@@ -0,0 +1,17 @@
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Principal": {
+                "Federated": "arn:aws:iam::119175775298:oidc-provider/oidc.eks.eu-west-3.amazonaws.com/id/D3BF3A9D31066A3A7AB57C03F9543A3C"
+            },
+            "Action": "sts:AssumeRoleWithWebIdentity",
+            "Condition": {
+                "StringEquals": {
+                    "oidc.eks.eu-west-3.amazonaws.com/id/D3BF3A9D31066A3A7AB57C03F9543A3C:aud": "sts.amazonaws.com"
+                }
+            }
+        }
+    ]
+}
diff --git a/e2e-tests/demand-backup-eks-credentials-irsa/conf/s3-bucket-policy.json b/e2e-tests/demand-backup-eks-credentials-irsa/conf/s3-bucket-policy.json
new file mode 100644
index 0000000000..1d9c26c3cf
--- /dev/null
+++ b/e2e-tests/demand-backup-eks-credentials-irsa/conf/s3-bucket-policy.json
@@ -0,0 +1,15 @@
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": [
+                "s3:*"
+            ],
+            "Resource": [
+                "arn:aws:s3:::operator-testing",
+                "arn:aws:s3:::operator-testing/*"
+            ]
+        }
+    ]
+}
diff --git a/e2e-tests/demand-backup-eks-credentials-irsa/conf/some-name.yml b/e2e-tests/demand-backup-eks-credentials-irsa/conf/some-name.yml
new file mode 100644
index 0000000000..dfb7977493
--- /dev/null
+++ b/e2e-tests/demand-backup-eks-credentials-irsa/conf/some-name.yml
@@ -0,0 +1,70 @@
+apiVersion: psmdb.percona.com/v1
+kind: PerconaServerMongoDB
+metadata:
+  name: some-name
+spec:
+  #platform: openshift
+  image:
+  imagePullPolicy: Always
+  backup:
+    enabled: true
+    image: perconalab/percona-server-mongodb-operator:1.1.0-backup
+    pitr:
+      compressionLevel: 6
+      compressionType: gzip
+      enabled: true
+      oplogOnly: false
+      oplogSpanMin: 1
+    storages:
+      aws-s3:
+        type: s3
+        s3:
+          region: us-east-1
+          bucket: operator-testing
+          prefix: psmdb-demand-backup-eks-irsa
+    tasks:
+      - name: weekly
+        enabled: true
+        schedule: "0 0 * * 0"
+        compressionType: gzip
+        storageName: aws-s3
+  replsets:
+    - name: rs0
+      affinity:
+        antiAffinityTopologyKey: none
+      resources:
+        limits:
+          cpu: 500m
+          memory: 1G
+        requests:
+          cpu: 100m
+          memory: 0.1G
+      volumeSpec:
+        persistentVolumeClaim:
+          resources:
+            requests:
+              storage: 1Gi
+      size: 3
+      configuration: |
+        operationProfiling:
+          mode: slowOp
+          slowOpThresholdMs: 100
+        security:
+          enableEncryption: true
+          redactClientLogData: false
+        setParameter:
+          ttlMonitorSleepSecs: 60
+          wiredTigerConcurrentReadTransactions: 128
+          wiredTigerConcurrentWriteTransactions: 128
+        storage:
+          engine: wiredTiger
+          wiredTiger:
+            collectionConfig:
+              blockCompressor: snappy
+            engineConfig:
+              directoryForIndexes: false
+              journalCompressor: snappy
+            indexConfig:
+              prefixCompression: true
+  secrets:
+    users: some-users
diff --git a/e2e-tests/demand-backup-eks-credentials-irsa/conf/template.json b/e2e-tests/demand-backup-eks-credentials-irsa/conf/template.json
new file mode 100644
index 0000000000..e60aa5c2f6
--- /dev/null
+++ b/e2e-tests/demand-backup-eks-credentials-irsa/conf/template.json
@@ -0,0 +1,16 @@
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Principal": {
+                "Federated": "arn:aws:iam::119175775298:oidc-provider/${eks_cluster_oidc}"
+            },
+            "Action": "sts:AssumeRoleWithWebIdentity",
+            "Condition": {
+                "StringEquals": {
+                }
+            }
+        }
+    ]
+}
\ No newline at end of file
diff --git a/e2e-tests/demand-backup-eks-credentials-irsa/run b/e2e-tests/demand-backup-eks-credentials-irsa/run
new file mode 100755
index 0000000000..d9c79bcf47
--- /dev/null
+++ b/e2e-tests/demand-backup-eks-credentials-irsa/run
@@ -0,0 +1,153 @@
+#!/bin/bash
+
+set -o errexit
+
+test_dir=$(realpath $(dirname $0))
+. ${test_dir}/../functions
+set_debug
+# The EKS cluster must run without the AmazonS3FullAccess policy:
+# with that policy attached, the test would pass even when IRSA is broken.
+
+if [ $EKS -ne 1 ]; then
+	echo "Skipping the test: it is intended for EKS only"
+	exit 0
+fi
+
+cluster="some-name"
+
+desc "get cluster oidc"
+eks_cluster=$(kubectl config view --minify -o jsonpath='{.contexts[0].context.cluster}' | awk -F/ '{print $NF}')
+IFS='.' read -r eks_cluster_name eks_cluster_region _ <<<"$eks_cluster"
+
+eks_cluster_oidc=$(aws eks describe-cluster --name $eks_cluster_name --region=$eks_cluster_region --query "cluster.identity.oidc.issuer" --output text | sed 's|https://||')
+policy_arn="arn:aws:iam::119175775298:policy/operator-testing-access-s3"
+role_name="$cluster-psmdb-access-s3-bucket"
+
+desc "delete role"
+echo $role_name
+echo $policy_arn
+aws iam detach-role-policy --role-name "$role_name" --policy-arn "$policy_arn" || true
+aws iam delete-role --role-name "$role_name" || true
+
+# Create the policy. It already exists, so we skip recreating it on every run, but the command is kept to document the full setup.
+#aws iam create-policy --policy-name operator-testing-allow-access-s3 --policy-document file://conf/s3-bucket-policy.json
+
+desc "create role"
+jq --arg eks_cluster_oidc "$eks_cluster_oidc" \
+	'.Statement[0].Principal.Federated = "arn:aws:iam::119175775298:oidc-provider/\($eks_cluster_oidc)" |
+	.Statement[0].Condition.StringEquals[($eks_cluster_oidc + ":aud")] = "sts.amazonaws.com"' \
+	$test_dir/conf/template.json >$test_dir/conf/role-trust-policy.json
+
+role_arn=$(aws iam create-role \
+	--role-name "$role_name" \
+	--assume-role-policy-document file://$test_dir/conf/role-trust-policy.json \
+	--description "Allow access to s3 bucket" \
+	--query "Role.Arn" \
+	--output text)
+
+desc "connect role and policy"
+aws iam attach-role-policy --role-name "$role_name" --policy-arn $policy_arn
+
+create_infra "$namespace"
+
+desc "create secrets and start client"
+kubectl_bin apply \
+	-f "$conf_dir/secrets.yml" \
+	-f "$conf_dir/client.yml"
+
+desc "create PSMDB cluster $cluster"
+apply_cluster $test_dir/conf/$cluster.yml
+
+desc 'check if all 3 Pods started'
+wait_for_running $cluster-rs0 3
+
+desc 'check if service and statefulset created with expected config'
+compare_kubectl statefulset/$cluster-rs0
+
+desc "update service accounts for operator and default (our cluster uses this one)"
+
+kubectl_bin annotate serviceaccount default \
+	eks.amazonaws.com/role-arn="$role_arn" \
+	--overwrite
+
+kubectl_bin annotate serviceaccount percona-server-mongodb-operator \
+	eks.amazonaws.com/role-arn="$role_arn" \
+	--overwrite
+
+desc "restart operator and cluster"
+operator_pod=$(get_operator_pod)
+kubectl_bin delete pod $operator_pod
+
+kubectl_bin delete pod "$cluster-rs0-0"
+kubectl_bin delete pod "$cluster-rs0-1"
+kubectl_bin delete pod "$cluster-rs0-2"
+
+wait_for_running $cluster-rs0 3
+
+kubectl exec $cluster-rs0-0 -c backup-agent -- sh -c 'if [ -z "$AWS_ROLE_ARN" ]; then echo "Variable AWS_ROLE_ARN not set" && exit 1; else echo "Variable AWS_ROLE_ARN is set"; fi'
+
+desc 'create user'
+run_mongo \
+	'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' \
+	"userAdmin:userAdmin123456@$cluster-rs0.$namespace"
+sleep 2
+
+desc 'write data, read from all'
+run_mongo \
+	'use myApp\n db.test.insert({ x: 100500 })' \
+	"myApp:myPass@$cluster-rs0.$namespace"
+
+desc "compare mongo cmd"
+compare_mongo_cmd "find" "myApp:myPass@$cluster-rs0-0.$cluster-rs0.$namespace"
+compare_mongo_cmd "find" "myApp:myPass@$cluster-rs0-1.$cluster-rs0.$namespace"
+compare_mongo_cmd "find" "myApp:myPass@$cluster-rs0-2.$cluster-rs0.$namespace"
+
+desc "wait backup agent"
+wait_backup_agent $cluster-rs0-0
+wait_backup_agent $cluster-rs0-1
+wait_backup_agent $cluster-rs0-2
+
+backup_name_aws="backup-aws-s3"
+
+desc 'run backups'
+run_backup aws-s3
+wait_backup "$backup_name_aws"
+sleep 5
+
+desc 'check backup and restore -- aws-s3'
+backup_dest_aws=$(get_backup_dest "$backup_name_aws")
+curl -s "https://s3.amazonaws.com/${backup_dest_aws}/rs0/myApp.test.gz" | gunzip >/dev/null
+run_mongo 'use myApp\n db.test.insert({ x: 100501 })' "myApp:myPass@$cluster-rs0.$namespace"
+compare_mongo_cmd "find" "myApp:myPass@$cluster-rs0-0.$cluster-rs0.$namespace" "-2nd"
+compare_mongo_cmd "find" "myApp:myPass@$cluster-rs0-1.$cluster-rs0.$namespace" "-2nd"
+compare_mongo_cmd "find" "myApp:myPass@$cluster-rs0-2.$cluster-rs0.$namespace" "-2nd"
+
+run_restore "$backup_name_aws"
+
+wait_restore "$backup_name_aws" "${cluster}"
+compare_mongo_cmd "find" "myApp:myPass@$cluster-rs0-0.$cluster-rs0.$namespace"
+compare_mongo_cmd "find" "myApp:myPass@$cluster-rs0-1.$cluster-rs0.$namespace"
+compare_mongo_cmd "find" "myApp:myPass@$cluster-rs0-2.$cluster-rs0.$namespace"
+desc 'delete backup and check if it is removed from bucket -- aws-s3'
+kubectl_bin delete psmdb-backup --all
+
+desc 'check pitr -- aws-s3'
+
+backup_name_aws="backup-aws-s3-pitr"
+backup_dest_aws=$(get_backup_dest "$backup_name_aws")
+run_backup aws-s3 "${backup_name_aws}" "logical"
+desc "wait backup"
+wait_backup "$backup_name_aws"
+sleep 5
+
+run_mongo 'use myApp\n db.test.insert({ x: 100502 })' "myApp:myPass@$cluster-rs0.$namespace"
+desc 'compare'
+compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-0.$cluster-rs0.$namespace" "-3nd"
+compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-1.$cluster-rs0.$namespace" "-3nd"
+compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0-2.$cluster-rs0.$namespace" "-3nd"
+
+run_pitr_check "${backup_name_aws}" "${cluster}" "-3nd"
+
+destroy $namespace
+
+desc 'test passed'
diff --git a/e2e-tests/demand-backup-fs/run b/e2e-tests/demand-backup-fs/run
index 044837f026..a19931b205 100755
--- a/e2e-tests/demand-backup-fs/run
+++ b/e2e-tests/demand-backup-fs/run
@@ -33,57 +33,6 @@ run_recovery_check() {
 	compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0.${namespace}" "${find_prefix_after}" ".svc.cluster.local" "myApp" "test"
 }
 
-get_latest_oplog_chunk_ts() {
-	local cluster=$1
-	echo $(kubectl_bin exec ${cluster}-rs0-0 -c backup-agent -- pbm status -o json | jq '.backups.pitrChunks.pitrChunks | last | .range.end')
-}
-
-format_date() {
-	local timestamp=$1
-	echo $(TZ=UTC $date -d@${timestamp} '+%Y-%m-%d %H:%M:%S')
-}
-
-wait_for_oplogs() {
-	local cluster1=$1
-
-	local backup_last_write=$(kubectl_bin exec ${cluster}-rs0-0 -c backup-agent -- pbm status -o json | jq .backups.snapshot[0].restoreTo)
-
-	local retries=0
-	local last_chunk=$(get_latest_oplog_chunk_ts ${cluster})
-	until [[ ${last_chunk} -gt ${backup_last_write} ]]; do
-		if [[ $retries -gt 30 ]]; then
-			log "Last oplog chunk ($(format_date ${last_chunk})) is not greater than last write ($(format_date ${backup_last_write}))"
-			exit 1
-		fi
-		last_chunk=$(get_latest_oplog_chunk_ts ${cluster})
-		retries=$((retries + 1))
-		log "Waiting for last oplog chunk ($(format_date ${last_chunk})) to be greater than last write ($(format_date ${backup_last_write}))"
-		sleep 10
-	done
-}
-
-run_pitr_check() {
-	local backup=$1
-	local cluster=$2
-	local find_prefix=$3
-
-	wait_for_oplogs "${cluster}"
-	local target_time=$(format_date $(get_latest_oplog_chunk_ts ${cluster}))
-
-	log "dropping test collection"
-	run_mongo 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-rs0.${namespace}"
-
-	log "checking pitr... backup: ${backup} target: ${target_time}"
-	cat $test_dir/conf/pitr.yml \
-		| yq eval ".metadata.name = \"restore-${backup}\"" \
-		| yq eval ".spec.backupName = \"${backup}\"" \
-		| yq eval ".spec.pitr.date = \"${target_time}\"" \
-		| kubectl_bin apply -f -
-
-	wait_restore "${backup}" "${cluster}"
-	compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0.${namespace}" "${find_prefix}" ".svc.cluster.local" "myApp" "test"
-}
-
 write_data() {
 	local x=$1
 	local find_prefix=$2
diff --git a/e2e-tests/functions b/e2e-tests/functions
index 90921a8444..ad78834815 100755
--- a/e2e-tests/functions
+++ b/e2e-tests/functions
@@ -341,7 +341,7 @@ wait_restore() {
 	local cluster_name=$2
 	local target_state=${3:-"ready"}
 	local wait_cluster_consistency=${4:-1}
-	local wait_time=${5:-780}
+	local wait_time=${5:-1780}
 
 	set +o xtrace
 	retry=0
@@ -1607,3 +1607,55 @@ wait_for_cluster_state() {
 	done
 	echo
 }
+
+get_latest_oplog_chunk_ts() {
+	local cluster=$1
+	echo $(kubectl_bin exec ${cluster}-rs0-0 -c backup-agent -- pbm status -o json | jq '.backups.pitrChunks.pitrChunks | last | .range.end')
+}
+
+format_date() {
+	local timestamp=$1
+	echo $(TZ=UTC $date -d@${timestamp} '+%Y-%m-%d %H:%M:%S')
+}
+
+run_pitr_check() {
+	local backup=$1
+	local cluster=$2
+	local find_prefix=$3
+
+	wait_for_oplogs "${cluster}"
+	local target_time=$(format_date $(get_latest_oplog_chunk_ts ${cluster}))
+
+	log "dropping test collection"
+	run_mongo 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-rs0.${namespace}"
+
+	log "checking pitr... backup: ${backup} target: ${target_time}"
+	cat $test_dir/conf/pitr.yml \
+		| yq eval ".metadata.name = \"restore-${backup}\"" \
+		| yq eval ".spec.backupName = \"${backup}\"" \
+		| yq eval ".spec.pitr.date = \"${target_time}\"" \
+		| kubectl_bin apply -f -
+
+	wait_restore "${backup}" "${cluster}"
+	compare_mongo_cmd "find" "myApp:myPass@${cluster}-rs0.${namespace}" "${find_prefix}" ".svc.cluster.local" "myApp" "test"
+}
+
+wait_for_oplogs() {
+	local cluster=$1
+
+	local backup_last_write=$(kubectl_bin exec ${cluster}-rs0-0 -c backup-agent -- pbm status -o json | jq .backups.snapshot[0].restoreTo)
+
+	local retries=0
+	local last_chunk=$(get_latest_oplog_chunk_ts ${cluster})
+	until [[ ${last_chunk} -gt ${backup_last_write} ]]; do
+		if [[ $retries -gt 30 ]]; then
+			log "Last oplog chunk ($(format_date ${last_chunk})) is not greater than last write ($(format_date ${backup_last_write}))"
+			exit 1
+		fi
+		last_chunk=$(get_latest_oplog_chunk_ts ${cluster})
+		retries=$((retries + 1))
+		log "Waiting for last oplog chunk ($(format_date ${last_chunk})) to be greater than last write ($(format_date ${backup_last_write}))"
+		sleep 10
+	done
+}
+
diff --git a/e2e-tests/run b/e2e-tests/run
index d36d445f53..e90e6b3ff1 100755
--- a/e2e-tests/run
+++ b/e2e-tests/run
@@ -17,6 +17,7 @@ fail() {
 "$dir/data-sharded/run" || fail "data-sharded"
 "$dir/default-cr/run" || fail "default-cr"
 "$dir/demand-backup-eks-credentials/run" || fail "demand-backup-eks-credentials"
+"$dir/demand-backup-eks-credentials-irsa/run" || fail "demand-backup-eks-credentials-irsa"
 "$dir/demand-backup-sharded/run" || fail "demand-backup-sharded"
 "$dir/demand-backup/run" || fail "demand-backup"
 "$dir/demand-backup-physical/run" || fail "demand-backup-physical"
diff --git a/e2e-tests/run-pr.csv b/e2e-tests/run-pr.csv
index 3eb32cd924..8b3b46e4e6 100644
--- a/e2e-tests/run-pr.csv
+++ b/e2e-tests/run-pr.csv
@@ -9,7 +9,7 @@ data-at-rest-encryption
 data-sharded
 demand-backup
 demand-backup-fs
-demand-backup-eks-credentials
+demand-backup-eks-credentials-irsa
 demand-backup-physical
 demand-backup-physical-sharded
 demand-backup-sharded
diff --git a/e2e-tests/run-release.csv b/e2e-tests/run-release.csv
index c49fb773cd..dcf177adee 100644
--- a/e2e-tests/run-release.csv
+++ b/e2e-tests/run-release.csv
@@ -10,7 +10,7 @@ data-sharded
 default-cr
 demand-backup
 demand-backup-fs
-demand-backup-eks-credentials
+demand-backup-eks-credentials-irsa
 demand-backup-physical
 demand-backup-physical-sharded
 demand-backup-sharded
diff --git a/go.mod b/go.mod
index 4a5ad3c4a6..e278e20256 100644
--- a/go.mod
+++ b/go.mod
@@ -5,7 +5,7 @@ go 1.22.6
 require (
 	github.com/Percona-Lab/percona-version-service v0.0.0-20230216094301-f9489c81b52a
 	github.com/alecthomas/kingpin v2.2.6+incompatible
-	github.com/cert-manager/cert-manager v1.16.1
+	github.com/cert-manager/cert-manager v1.16.2
 	github.com/go-logr/logr v1.4.2
 	github.com/go-openapi/errors v0.22.0
 	github.com/go-openapi/runtime v0.28.0
diff --git a/go.sum b/go.sum
index 8e3a57e225..cbb39e8f16 100644
--- a/go.sum
+++ b/go.sum
@@ -64,8 +64,8 @@ github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb
 github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
 github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cert-manager/cert-manager v1.16.1 h1:1ceFMqTtwiqY2vyfaRT85CNiVmK7pJjt3GebYCx9awY=
-github.com/cert-manager/cert-manager v1.16.1/go.mod h1:MfLVTL45hFZsqmaT1O0+b2ugaNNQQZttSFV9hASHUb0=
+github.com/cert-manager/cert-manager v1.16.2 h1:c9UU2E+8XWGruyvC/mdpc1wuLddtgmNr8foKdP7a8Jg=
+github.com/cert-manager/cert-manager v1.16.2/go.mod h1:MfLVTL45hFZsqmaT1O0+b2ugaNNQQZttSFV9hASHUb0=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
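
When the new test fails, the IRSA wiring it exercises can be spot-checked by hand. The snippet below is a minimal sketch, not part of the patch: it assumes kubectl access to the test namespace and an aws CLI on the workstation; the pod, container, and role names follow the values used in the run script above, and the session name is arbitrary.

    # The EKS pod identity webhook injects both variables and mounts a
    # projected OIDC token once the service account carries the
    # eks.amazonaws.com/role-arn annotation and the pods have been recreated.
    kubectl exec some-name-rs0-0 -c backup-agent -- \
        printenv AWS_ROLE_ARN AWS_WEB_IDENTITY_TOKEN_FILE

    # Exchange the projected token for temporary credentials the same way the
    # AWS SDK inside pbm-agent does; a response with a Credentials.Expiration
    # confirms the trust policy rendered from conf/template.json is accepted.
    token=$(kubectl exec some-name-rs0-0 -c backup-agent -- \
        cat /var/run/secrets/eks.amazonaws.com/serviceaccount/token)
    aws sts assume-role-with-web-identity \
        --role-arn "arn:aws:iam::119175775298:role/some-name-psmdb-access-s3-bucket" \
        --role-session-name irsa-manual-check \
        --web-identity-token "$token" \
        --query 'Credentials.Expiration'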