Skip to content

Commit

Permalink
Merge branch 'main' into K8SPSMDB-813-healthcheck-tls
Browse files Browse the repository at this point in the history
  • Loading branch information
t-yrka authored Sep 7, 2023
2 parents 1abca95 + 9727623 commit 44150f6
Show file tree
Hide file tree
Showing 67 changed files with 4,407 additions and 431 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -116,6 +116,7 @@ percona-server-mongodb-operator
mongodb-healthcheck

!cmd/percona-server-mongodb-operator
!cmd/mongodb-healthcheck

# End of https://www.gitignore.io/api/go,vim,emacs,visualstudiocode

Expand Down
19 changes: 12 additions & 7 deletions cmd/mongodb-healthcheck/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,10 @@ package main
import (
"context"
"os"
"os/signal"
"strconv"
"strings"
"syscall"

uzap "go.uber.org/zap"
"go.uber.org/zap/zapcore"
Expand All @@ -36,6 +38,9 @@ var (
)

func main() {
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGTERM, os.Interrupt)
defer stop()

app := tool.New("Performs health and readiness checks for MongoDB", GitCommit, GitBranch)

k8sCmd := app.Command("k8s", "Performs liveness check for MongoDB on Kubernetes")
Expand Down Expand Up @@ -77,14 +82,14 @@ func main() {
os.Exit(1)
}

client, err := db.Dial(cnf)
client, err := db.Dial(ctx, cnf)
if err != nil {
log.Error(err, "connection error")
os.Exit(1)
}

defer func() {
if err := client.Disconnect(context.TODO()); err != nil {
if err := client.Disconnect(ctx); err != nil {
log.Error(err, "failed to disconnect")
os.Exit(1)
}
Expand All @@ -99,7 +104,7 @@ func main() {
case "mongod":
memberState, err := healthcheck.HealthCheckMongodLiveness(client, int64(*startupDelaySeconds))
if err != nil {
client.Disconnect(context.TODO()) // nolint:golint,errcheck
client.Disconnect(ctx) // nolint:golint,errcheck
log.Error(err, "Member failed Kubernetes liveness check")
os.Exit(1)
}
Expand All @@ -108,7 +113,7 @@ func main() {
case "mongos":
err := healthcheck.HealthCheckMongosLiveness(client)
if err != nil {
client.Disconnect(context.TODO()) // nolint:golint,errcheck
client.Disconnect(ctx) // nolint:golint,errcheck
log.Error(err, "Member failed Kubernetes liveness check")
os.Exit(1)
}
Expand All @@ -120,14 +125,14 @@ func main() {
switch *component {

case "mongod":
client.Disconnect(context.TODO()) // nolint:golint,errcheck
client.Disconnect(ctx) // nolint:golint,errcheck
log.Error(err, "readiness check for mongod is not implemented")
os.Exit(1)

case "mongos":
err := healthcheck.MongosReadinessCheck(client)
err := healthcheck.MongosReadinessCheck(ctx, client)
if err != nil {
client.Disconnect(context.TODO()) // nolint:golint,errcheck
client.Disconnect(ctx) // nolint:golint,errcheck
log.Error(err, "Member failed Kubernetes readiness check")
os.Exit(1)
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ spec:
cpu: 100m
memory: 0.1G
expose:
exposeType: ClusterIP
exposeType: LoadBalancer
configsvrReplSet:
affinity:
antiAffinityTopologyKey: none
Expand Down
19 changes: 19 additions & 0 deletions e2e-tests/demand-backup-physical-sharded/run
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,15 @@ run_recovery_check() {
set -o xtrace
}

# Verify that the mongos endpoint recorded in the PSMDB custom resource status
# still matches the LoadBalancer host captured before the restore.
#   $1 - expected host (the LB ingress IP saved earlier in the test).
# Relies on $cluster and kubectl_bin provided by the surrounding test script.
check_exported_mongos_service_endpoint() {
	local host=$1

	# "$cluster" is quoted to prevent word splitting/globbing of the CR name.
	if [ "$host" != "$(kubectl_bin get psmdb "$cluster" -o=jsonpath='{.status.host}')" ]; then
		echo "Exported host is not correct after the restore"
		exit 1
	fi
}

create_infra "${namespace}"

deploy_minio
Expand All @@ -68,6 +77,13 @@ wait_for_running ${cluster}-cfg 3
wait_for_running ${cluster}-mongos 3
wait_cluster_consistency ${cluster}

# Capture the LoadBalancer ingress IP of the mongos service; the e2e test later
# re-checks this endpoint to confirm it survives backup/restore cycles.
lbEndpoint=$(kubectl_bin get svc "$cluster-mongos" -o=jsonpath='{.status}' \
	| jq -r 'select(.loadBalancer != null and .loadBalancer.ingress != null and .loadBalancer.ingress != []) | .loadBalancer.ingress[0].ip')
# "$lbEndpoint" must be quoted: with an unquoted empty expansion, [ -z ]
# degenerates to testing the literal string "-z" rather than the variable.
if [ -z "$lbEndpoint" ]; then
	echo "mongos service not exported correctly"
	exit 1
fi

run_mongos \
'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' \
"userAdmin:userAdmin123456@${cluster}-mongos.${namespace}"
Expand Down Expand Up @@ -102,18 +118,21 @@ if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then
echo 'check backup and restore -- aws-s3'
run_restore ${backup_name_aws} "_restore_sharded"
run_recovery_check ${backup_name_aws} "_restore_sharded"
check_exported_mongos_service_endpoint "$lbEndpoint"

echo "drop collection"
run_mongos 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-mongos.${namespace}"
echo 'check backup and restore -- gcp-cs'
run_restore ${backup_name_gcp} "_restore_sharded"
run_recovery_check ${backup_name_gcp} "_restore_sharded"
check_exported_mongos_service_endpoint "$lbEndpoint"

echo "drop collection"
run_mongos 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-mongos.${namespace}"
echo 'check backup and restore -- azure-blob'
run_restore ${backup_name_azure} "_restore_sharded"
run_recovery_check ${backup_name_azure} "_restore_sharded"
check_exported_mongos_service_endpoint "$lbEndpoint"
fi

echo "drop collection"
Expand Down
6 changes: 3 additions & 3 deletions e2e-tests/demand-backup-sharded/run
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,7 @@ if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then
insert_data "100501"
check_data "-2nd"
run_restore "$backup_name_gcp"
wait_restore "$backup_name_aws" "$cluster"
wait_restore "$backup_name_gcp" "$cluster"
check_data

desc 'check backup and restore -- azure-blob'
Expand All @@ -142,7 +142,7 @@ if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then
insert_data "100501"
check_data "-2nd"
run_restore "$backup_name_azure"
wait_restore "$backup_name_aws" "$cluster"
wait_restore "$backup_name_azure" "$cluster"
check_data
fi

Expand All @@ -155,7 +155,7 @@ kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
insert_data "100501"
check_data "-2nd"
run_restore "$backup_name_minio"
wait_restore "$backup_name_aws" "$cluster"
wait_restore "$backup_name_minio" "$cluster"
check_data

desc 'delete backup and check if it is removed from bucket -- minio'
Expand Down
85 changes: 85 additions & 0 deletions e2e-tests/finalizer/conf/some-name.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,85 @@
apiVersion: psmdb.percona.com/v1
kind: PerconaServerMongoDB
metadata:
  name: some-name
  # Finalizers under test: ordered pod deletion and PVC cleanup on CR delete.
  finalizers:
    - delete-psmdb-pods-in-order
    - delete-psmdb-pvc
spec:
  # platform: openshift
  # image is intentionally left empty; the e2e harness injects it at apply time.
  image:
  imagePullPolicy: Always
  allowUnsafeConfigurations: false
  updateStrategy: SmartUpdate
  secrets:
    users: some-users
  replsets:
    - name: rs0
      size: 3
      affinity:
        antiAffinityTopologyKey: "kubernetes.io/hostname"
      podDisruptionBudget:
        maxUnavailable: 1
      expose:
        enabled: true
        exposeType: ClusterIP
      resources:
        limits:
          cpu: "500m"
          memory: "0.5G"
        requests:
          cpu: "100m"
          memory: "0.1G"
      volumeSpec:
        persistentVolumeClaim:
          resources:
            requests:
              storage: 1Gi
  sharding:
    enabled: true

    configsvrReplSet:
      size: 3
      affinity:
        antiAffinityTopologyKey: "kubernetes.io/hostname"
      podDisruptionBudget:
        maxUnavailable: 1
      expose:
        enabled: true
        exposeType: ClusterIP
      resources:
        limits:
          cpu: "300m"
          memory: "0.5G"
        requests:
          cpu: "300m"
          memory: "0.5G"
      volumeSpec:
        persistentVolumeClaim:
          resources:
            requests:
              storage: 3Gi

    mongos:
      size: 3
      affinity:
        antiAffinityTopologyKey: "kubernetes.io/hostname"
      podDisruptionBudget:
        maxUnavailable: 1
      resources:
        limits:
          cpu: "300m"
          memory: "0.5G"
        requests:
          cpu: "300m"
          memory: "0.5G"
      expose:
        exposeType: ClusterIP
        servicePerPod: true

  backup:
    enabled: false
    image: perconalab/percona-server-mongodb-operator:main-backup
    serviceAccountName: percona-server-mongodb-operator
    pitr:
      enabled: false
30 changes: 30 additions & 0 deletions e2e-tests/finalizer/run
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
#!/bin/bash

# e2e test: verify that the delete-psmdb-pods-in-order and delete-psmdb-pvc
# finalizers clean up the cluster pods and PVCs when the CR is deleted.

set -o errexit
set -o xtrace

test_dir=$(realpath "$(dirname "$0")")
. "${test_dir}/../functions"

create_infra "$namespace"
cluster="some-name"

apply_cluster "$test_dir/conf/$cluster.yml"
desc 'check if all 3 Pods started'
wait_for_running "$cluster-rs0" 3

# Deleting the CR triggers the finalizers declared in conf/some-name.yml.
kubectl_bin delete psmdb "$cluster"

desc "Wait for delete cluster $cluster"
wait_for_delete "psmdb/$cluster"

# delete-psmdb-pvc must remove the data volumes of both the config servers
# and the rs0 replica set.
desc "Wait for delete PVCs"
wait_for_delete "pvc/mongod-data-$cluster-cfg-0"
wait_for_delete "pvc/mongod-data-$cluster-cfg-1"
wait_for_delete "pvc/mongod-data-$cluster-cfg-2"
wait_for_delete "pvc/mongod-data-$cluster-rs0-0"
wait_for_delete "pvc/mongod-data-$cluster-rs0-1"
wait_for_delete "pvc/mongod-data-$cluster-rs0-2"

desc "Test passed"
destroy "$namespace"
2 changes: 1 addition & 1 deletion e2e-tests/functions
Original file line number Diff line number Diff line change
Expand Up @@ -621,6 +621,7 @@ compare_kubectl() {
local new_result="${tmp_dir}/${resource//\//_}.yml"

if [ -n "$OPENSHIFT" -a -f ${expected_result//.yml/-oc.yml} ]; then
desc "OPENSHIFT"
expected_result=${expected_result//.yml/-oc.yml}
if [ "$OPENSHIFT" = 4 -a -f ${expected_result//-oc.yml/-4-oc.yml} ]; then
expected_result=${expected_result//-oc.yml/-4-oc.yml}
Expand Down Expand Up @@ -674,7 +675,6 @@ compare_kubectl() {
yq -i eval 'del(.metadata.generation)' ${new_result}
fi
fi

diff -u "$expected_result" "$new_result"
}

Expand Down
18 changes: 0 additions & 18 deletions e2e-tests/mongod-major-upgrade-sharded/run
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,6 @@ function main() {
create_infra "${namespace}"

apply_s3_storage_secrets
deploy_minio

kubectl_bin apply -f "${conf_dir}/client.yml" \
-f "${conf_dir}/secrets.yml"
Expand Down Expand Up @@ -93,11 +92,6 @@ function main() {

target_generation=2
for version in ${versions_to_verify[@]}; do

backup_name_minio="backup-minio-${target_generation}"
run_backup minio ${backup_name_minio}
wait_backup ${backup_name_minio}

kubectl_bin patch psmdb/${cluster%%-rs0} \
--type=json \
-p='[
Expand All @@ -119,18 +113,6 @@ function main() {
'use myApp\n db.test.insert({ x: 10050'${target_generation}' })' \
"myApp:myPass@${cluster}-mongos.${namespace}"
compare_mongos_cmd "find" "myApp:myPass@${cluster}-mongos.${namespace}" "-${target_generation}"

backup_dest_minio=$(get_backup_dest "${backup_name_minio}")
kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
/usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \
/usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/${backup_dest_minio}/rs0/ \
| grep myApp.test.gz

run_mongos 'use myApp\n db.test.insert({ x: 100600 })' "myApp:myPass@${cluster}-mongos.${namespace}"
run_restore "${backup_name_minio}"
wait_restore "${backup_name_minio}" "${cluster}"
compare_mongos_cmd "find" "myApp:myPass@${cluster}-mongos.${namespace}"

target_generation=$((target_generation + 1))
done

Expand Down
2 changes: 2 additions & 0 deletions e2e-tests/run
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ fail() {
"$dir/expose-sharded/run" || fail "expose-sharded"
"$dir/ignore-labels-annotations/run" || fail "ignore-labels-annotations"
"$dir/init-deploy/run" || fail "init-deploy"
"$dir/finalizer/run" || fail "finalizer"
"$dir/limits/run" || fail "limits"
"$dir/liveness/run" || fail "liveness"
"$dir/multi-cluster-service/run" || fail "multi-cluster-service"
Expand All @@ -46,6 +47,7 @@ fail() {
"$dir/smart-update/run" || fail "smart-update"
"$dir/storage/run" || fail "storage"
"$dir/upgrade-consistency/run" || fail "upgrade-consistency"
"$dir/upgrade-consistency-sharded/run" || fail "upgrade-consistency-sharded"
"$dir/upgrade-sharded/run" || fail "upgrade-sharded"
"$dir/upgrade/run" || fail "upgrade"
"$dir/users/run" || fail "users"
Expand Down
2 changes: 2 additions & 0 deletions e2e-tests/run-minikube.csv
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ arbiter
default-cr
demand-backup
demand-backup-physical
finalizer
limits
liveness
mongod-major-upgrade
Expand All @@ -14,5 +15,6 @@ security-context
self-healing-chaos
smart-update
upgrade-consistency
upgrade-consistency-sharded
users
version-service
2 changes: 2 additions & 0 deletions e2e-tests/run-pr.csv
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ demand-backup-sharded
expose-sharded
ignore-labels-annotations
init-deploy
finalizer
limits
liveness
mongod-major-upgrade
Expand All @@ -33,6 +34,7 @@ smart-update
storage
upgrade
upgrade-consistency
upgrade-consistency-sharded
upgrade-sharded
users
version-service
Loading

0 comments on commit 44150f6

Please sign in to comment.