K8SPSMDB-807: Allow customizing replset name with custom config #1252
Changes from 11 commits
ca74211
4553100
2d990c0
27e25ba
3931224
eed68cb
7465ee7
e7915cd
aac7e59
b75021b
86f1941
5d9ea86
c1a7588
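K8SPSMDB-807 lets a user override a replica set's name through the per-replset configuration block; the test CR further down sets replSetName: csReplSet for the config server replset. As a rough, illustrative sketch only (the cluster name "some-name" and $namespace are placeholders taken from the test, and a merge patch is just one way to apply such a change), the same override could be applied to an existing cluster like this:

#!/bin/bash
# Illustrative only: set a custom replset name for the config server replset
# of an existing PerconaServerMongoDB cluster via its `configuration` field.
kubectl -n "$namespace" patch psmdb some-name --type=merge -p \
  '{"spec":{"sharding":{"configsvrReplSet":{"configuration":"replication:\n  replSetName: csReplSet\n"}}}}'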
@@ -0,0 +1,4 @@
switched to db myApp
{ "_id" : , "x" : 100500 }
{ "_id" : , "x" : 100501 }
bye
@@ -0,0 +1,3 @@
switched to db myApp
{ "_id" : , "x" : 100500 }
bye
@@ -0,0 +1,7 @@
apiVersion: psmdb.percona.com/v1
kind: PerconaServerMongoDBBackup
metadata:
  name: backup-aws-s3
spec:
  psmdbCluster: some-name
  storageName: aws-s3
@@ -0,0 +1,7 @@
apiVersion: psmdb.percona.com/v1
kind: PerconaServerMongoDBBackup
metadata:
  name: backup-gcp-cs
spec:
  psmdbCluster: some-name
  storageName: gcp-cs
@@ -0,0 +1,7 @@
apiVersion: psmdb.percona.com/v1
kind: PerconaServerMongoDBBackup
metadata:
  name: backup-minio
spec:
  psmdbCluster: some-name
  storageName: minio
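The three PerconaServerMongoDBBackup objects above are applied by the test's run_backup/wait_backup helpers. Outside the harness, triggering one by hand and waiting for it to complete might look roughly like this; the jsonpath wait and the "ready" state are assumptions based on how the operator usually reports backup status, and the file name is illustrative:

#!/bin/bash
# Hedged sketch: apply the minio backup CR manually and wait until the
# operator marks it ready. Requires kubectl >= 1.23 for jsonpath waits.
kubectl -n "$namespace" apply -f backup-minio.yml
kubectl -n "$namespace" wait psmdb-backup/backup-minio \
  --for=jsonpath='{.status.state}'=ready --timeout=600s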
@@ -0,0 +1,7 @@
apiVersion: psmdb.percona.com/v1
kind: PerconaServerMongoDBRestore
metadata:
  name:
spec:
  clusterName: some-name
  backupName:
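The blank name and backupName fields above mark this file as a template; in the test it is instantiated by the run_restore helper, whose implementation is not part of this diff. A purely hypothetical way to fill such a template by hand (file name and sed expressions are illustrative, not the actual helper):

#!/bin/bash
# Hypothetical template instantiation -- not the actual run_restore helper.
backup_name="backup-minio"
sed -e "s|name:$|name: restore-${backup_name}|" \
    -e "s|backupName:$|backupName: ${backup_name}|" \
    restore.yml | kubectl -n "$namespace" apply -f -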
@@ -0,0 +1,230 @@
apiVersion: psmdb.percona.com/v1
kind: PerconaServerMongoDB
metadata:
  name: some-name
spec:
  crVersion: 1.14.0
  allowUnsafeConfigurations: true
  backup:
    enabled: true
    image: percona/percona-backup-mongodb:2.0.4
    pitr:
      enabled: false
    serviceAccountName: percona-server-mongodb-operator
    storages:
      aws-s3:
        type: s3
        s3:
          credentialsSecret: aws-s3-secret
          region: us-east-1
          bucket: operator-testing
          prefix: psmdb
      minio:
        type: s3
        s3:
          credentialsSecret: minio-secret
          region: us-east-1
          bucket: operator-testing
          endpointUrl: http://minio-service:9000/
      gcp-cs:
        type: s3
        s3:
          credentialsSecret: gcp-cs-secret
          region: us-east-1
          bucket: operator-testing
          prefix: psmdb
          endpointUrl: https://storage.googleapis.com
  image: percona/percona-server-mongodb:4.4.10-11
  imagePullPolicy: Always
  mongod:
    net:
      hostPort: 0
      port: 27017
    operationProfiling:
      mode: slowOp
      rateLimit: 100
      slowOpThresholdMs: 100
    security:
      enableEncryption: false
      encryptionCipherMode: AES256-CBC
      encryptionKeySecret: tee-pro-mongodb-encryption-key
      redactClientLogData: false
    setParameter:
      ttlMonitorSleepSecs: 60
      wiredTigerConcurrentReadTransactions: 128
      wiredTigerConcurrentWriteTransactions: 128
    storage:
      engine: wiredTiger
      inMemory:
        engineConfig:
          inMemorySizeRatio: 0.9
      wiredTiger:
        collectionConfig:
          blockCompressor: snappy
        engineConfig:
          cacheSizeRatio: 0.5
          directoryForIndexes: false
          journalCompressor: snappy
        indexConfig:
          prefixCompression: true
  pmm:
    enabled: false
  replsets:
  - affinity:
      antiAffinityTopologyKey: topology.kubernetes.io/zone
    arbiter:
      affinity:
        antiAffinityTopologyKey: topology.kubernetes.io/zone
      enabled: false
      size: 1
    configuration: |
      storage:
        directoryPerDB: true
        wiredTiger:
          engineConfig:
            directoryForIndexes: true
    expose:
      enabled: true
      exposeType: ClusterIP
      serviceAnnotations:
        networking.gke.io/load-balancer-type: Internal
    name: shard1
    podDisruptionBudget:
      maxUnavailable: 1
    size: 3
    volumeSpec:
      persistentVolumeClaim:
        resources:
          requests:
            storage: 2Gi
        storageClassName: standard-rwo
  - affinity:
      antiAffinityTopologyKey: topology.kubernetes.io/zone
    arbiter:
      affinity:
        antiAffinityTopologyKey: topology.kubernetes.io/zone
      enabled: false
      size: 1
    configuration: |
      storage:
        directoryPerDB: true
        wiredTiger:
          engineConfig:
            directoryForIndexes: true
    expose:
      enabled: true
      exposeType: ClusterIP
      serviceAnnotations:
        networking.gke.io/load-balancer-type: Internal
    name: shard3
    podDisruptionBudget:
      maxUnavailable: 1
    size: 3
    volumeSpec:
      persistentVolumeClaim:
        resources:
          requests:
            storage: 2Gi
        storageClassName: standard-rwo
  - affinity:
      antiAffinityTopologyKey: topology.kubernetes.io/zone
    arbiter:
      affinity:
        antiAffinityTopologyKey: topology.kubernetes.io/zone
      enabled: false
      size: 1
    configuration: |
      storage:
        directoryPerDB: true
        wiredTiger:
          engineConfig:
            directoryForIndexes: true
    expose:
      enabled: true
      exposeType: ClusterIP
      serviceAnnotations:
        networking.gke.io/load-balancer-type: Internal
    name: shard5
    podDisruptionBudget:
      maxUnavailable: 1
    size: 3
    volumeSpec:
      persistentVolumeClaim:
        resources:
          requests:
            storage: 2Gi
        storageClassName: standard-rwo
  - affinity:
      antiAffinityTopologyKey: topology.kubernetes.io/zone
    arbiter:
      affinity:
        antiAffinityTopologyKey: topology.kubernetes.io/zone
      enabled: false
      size: 1
    configuration: |
      storage:
        directoryPerDB: true
        wiredTiger:
          engineConfig:
            directoryForIndexes: true
    expose:
      enabled: true
      exposeType: ClusterIP
      serviceAnnotations:
        networking.gke.io/load-balancer-type: Internal
    name: shard7
    podDisruptionBudget:
      maxUnavailable: 1
    size: 3
    volumeSpec:
      persistentVolumeClaim:
        resources:
          requests:
            storage: 2Gi
        storageClassName: standard-rwo
  secrets:
    users: some-users
  sharding:
    configsvrReplSet:
      affinity:
        antiAffinityTopologyKey: topology.kubernetes.io/zone
      configuration: |
        replication:
          replSetName: csReplSet
        storage:
          directoryPerDB: true
          wiredTiger:
            engineConfig:
              directoryForIndexes: true
      expose:
        enabled: true
        exposeType: ClusterIP
        serviceAnnotations:
          networking.gke.io/load-balancer-type: Internal
      podDisruptionBudget:
        maxUnavailable: 1
      size: 3
      volumeSpec:
        persistentVolumeClaim:
          resources:
            requests:
              storage: 5Gi
          storageClassName: standard-rwo
    enabled: true
    mongos:
      affinity:
        antiAffinityTopologyKey: kubernetes.io/hostname
      expose:
        exposeType: LoadBalancer
        serviceAnnotations:
          networking.gke.io/load-balancer-type: Internal
      podDisruptionBudget:
        maxUnavailable: 1
      size: 3
  unmanaged: false
  updateStrategy: SmartUpdate
  upgradeOptions:
    apply: Disabled
    schedule: 0 2 * * *
    setFCV: false
    versionServiceEndpoint: https://check.percona.com
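Since the point of the custom configuration above is that the config server replset comes up as csReplSet instead of the default name, a quick manual check could look like the sketch below. The pod name (${cluster}-cfg-0) and the clusterAdmin credentials follow the operator's usual e2e conventions and are assumptions, not something this diff guarantees:

#!/bin/bash
# Hedged verification sketch: print the replset name the config server
# actually runs under; it should be "csReplSet" with the CR above.
# Depending on the cluster's TLS settings, --tls options may also be needed.
cluster="some-name"
kubectl -n "$namespace" exec "${cluster}-cfg-0" -c mongod -- \
  mongo "mongodb://clusterAdmin:clusterAdmin123456@localhost:27017/admin" \
  --quiet --eval 'rs.conf()._id'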
@@ -0,0 +1,75 @@
#!/bin/bash

set -o errexit
set -o xtrace

test_dir=$(realpath $(dirname $0))
. ${test_dir}/../functions

create_namespace $namespace
deploy_operator
apply_s3_storage_secrets
deploy_minio

kubectl_bin apply -f $conf_dir/secrets.yml -f $conf_dir/client.yml -f $conf_dir/minio-secret.yml
cluster="some-name"

desc 'create first PSMDB cluster'
apply_cluster $test_dir/conf/${cluster}.yml
wait_cluster_consistency $cluster

desc 'write data, read from all'
run_mongos \
    'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' \
    "userAdmin:userAdmin123456@$cluster-mongos.$namespace"
sleep 2
run_mongos \
    'use myApp\n db.test.insert({ x: 100500 })' \
    "myApp:myPass@$cluster-mongos.$namespace"
# compare_mongos_cmd "find" "myApp:myPass@$cluster-mongos.$namespace"

wait_backup_agent $cluster-shard1-0
wait_backup_agent $cluster-shard1-1
wait_backup_agent $cluster-shard1-2
wait_backup_agent $cluster-shard3-0
wait_backup_agent $cluster-shard3-1
wait_backup_agent $cluster-shard3-2
wait_backup_agent $cluster-shard5-0
wait_backup_agent $cluster-shard5-1
wait_backup_agent $cluster-shard5-2
wait_backup_agent $cluster-shard7-0
wait_backup_agent $cluster-shard7-1
wait_backup_agent $cluster-shard7-2
backup_name_aws="backup-aws-s3"
[review thread on the shard1/shard3/shard5/shard7 replset names]
- Why do we use only odd shard numbers?
- The CR is from a customer environment; the test follows their setup.
- I remember it, OK. Could we add a comment about it?
- Sure, what do you want to see in the comment?
- As for me, it would be nice to add an explanation of why we use this shard configuration.
backup_name_minio="backup-minio"
backup_name_gcp="backup-gcp-cs"

desc 'run backups'
run_backup minio
wait_backup "$backup_name_minio"

if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then
    run_backup aws-s3
    run_backup gcp-cs

    wait_backup "$backup_name_aws"
    wait_backup "$backup_name_gcp"
fi

if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then
    desc 'check backup and restore -- aws-s3'
    run_restore $backup_name_aws 3 1 "-mongos"
    wait_restore $backup_name_aws $cluster "ready"

    desc 'check backup and restore -- gcp-cs'
    run_restore $backup_name_gcp 3 1 "-mongos"
    wait_restore $backup_name_gcp $cluster "ready"
fi

desc 'check backup and restore -- minio'
run_restore $backup_name_minio 3 1 "-mongos"

sleep 120
wait_restore $backup_name_minio $cluster "ready"

destroy $namespace
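If the request in the review thread above is addressed, the explanation could be as small as a header comment in the test script; the wording below is only a suggestion:

# NOTE: the replset names shard1/shard3/shard5/shard7 intentionally mirror the
# customer environment this CR was taken from; the gaps in the numbering carry
# no special meaning for the test itself.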
@@ -7,10 +7,6 @@ OPERATOR_VERSION="1.15.0"
 IMAGE=${IMAGE:-"perconalab/percona-server-mongodb-operator:${GIT_BRANCH}"}
 IMAGE_PMM=${IMAGE_PMM:-"perconalab/pmm-client:dev-latest"}
 IMAGE_MONGOD=${IMAGE_MONGOD:-"perconalab/percona-server-mongodb-operator:main-mongod5.0"}
-IMAGE_MONGOD_CHAIN=${IMAGE_MONGOD_CHAIN:-$'
-perconalab/percona-server-mongodb-operator:main-mongod4.4
-perconalab/percona-server-mongodb-operator:main-mongod5.0
-perconalab/percona-server-mongodb-operator:main-mongod6.0'}
[review comment] We use this var in the mongod-major-upgrade test.
 IMAGE_BACKUP=${IMAGE_BACKUP:-"perconalab/percona-server-mongodb-operator:main-backup"}
 SKIP_BACKUPS_TO_AWS_GCP_AZURE=${SKIP_BACKUPS_TO_AWS_GCP_AZURE:-1}
 PMM_SERVER_VER=${PMM_SERVER_VER:-"9.9.9"}
[review comment] @egegunes, we do not have mongod at all now. That is why you have a problem with this test.
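One way to address the concern about IMAGE_MONGOD_CHAIN (purely a suggestion, not something this PR does) would be to keep the removed default as an overridable fallback inside the test that still needs it:

#!/bin/bash
# Hypothetical fallback for the mongod-major-upgrade test if the global
# default is dropped: keep the upgrade chain overridable via the environment.
IMAGE_MONGOD_CHAIN=${IMAGE_MONGOD_CHAIN:-$'
perconalab/percona-server-mongodb-operator:main-mongod4.4
perconalab/percona-server-mongodb-operator:main-mongod5.0
perconalab/percona-server-mongodb-operator:main-mongod6.0'}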