K8SPS-110 certify openshift #857

Open · wants to merge 21 commits into base: main
Changes from all commits

build/orc-entrypoint.sh: 10 changes (3 additions, 7 deletions)

@@ -5,9 +5,9 @@ set -o xtrace
 
 OPERATOR_BINDIR=/opt/percona
 ORC_CONF_PATH=${ORC_CONF_PATH:-/etc/orchestrator}
-ORC_CONF_FILE=${ORC_CONF_FILE:-"${ORC_CONF_PATH}/orchestrator.conf.json"}
+ORC_CONF_FILE=${ORC_CONF_FILE:-"${ORC_CONF_PATH}/config/orchestrator.conf.json"}
 TOPOLOGY_USER=${ORC_TOPOLOGY_USER:-orchestrator}
-CUSTOM_CONF_FILE=${ORC_CONF_PATH}/config/orchestrator.conf.json
+CUSTOM_CONF_FILE=${ORC_CONF_PATH}/custom/orchestrator.conf.json
 
 if [ -f ${OPERATOR_BINDIR}/orchestrator.conf.json ]; then
     cp "${OPERATOR_BINDIR}/orchestrator.conf.json" "${ORC_CONF_FILE}"
@@ -39,15 +39,11 @@ if [ -f "$PATH_TO_SECRET/$TOPOLOGY_USER" ]; then
     TOPOLOGY_PASSWORD=$(<"${PATH_TO_SECRET}/${TOPOLOGY_USER}")
 fi
 
-if [ ! -d "/var/lib/orchestrator" ]; then
-    mkdir /var/lib/orchestrator
-fi
-
 set +o xtrace
 temp=$(mktemp)
 sed -r "s|^[#]?user=.*$|user=${TOPOLOGY_USER}|" "${ORC_CONF_PATH}/orc-topology.cnf" >"${temp}"
 sed -r "s|^[#]?password=.*$|password=${TOPOLOGY_PASSWORD:-$ORC_TOPOLOGY_PASSWORD}|" "${ORC_CONF_PATH}/orc-topology.cnf" >"${temp}"
-cat "${temp}" >"${ORC_CONF_PATH}/orc-topology.cnf"
+cat "${temp}" >"${ORC_CONF_PATH}/config/orc-topology.cnf"
 rm "${temp}"
 set -o xtrace
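
Note: the thread running through this file is that every path the entrypoint writes to now lives under ${ORC_CONF_PATH}/config, which is volume-backed, because OpenShift runs containers under an arbitrary non-root UID that cannot mkdir /var/lib/orchestrator at runtime. A minimal sketch of a startup probe that would catch a misconfigured mount early (the probe itself is not part of this PR; the variable and path follow the script above):

    # Sketch only, not in this PR: fail fast if the consolidated writable
    # directory is not actually writable under the arbitrary UID OpenShift assigns.
    DATA_DIR="${ORC_CONF_PATH:-/etc/orchestrator}/config"
    if ! touch "${DATA_DIR}/.rw-probe" 2>/dev/null; then
        echo "ERROR: ${DATA_DIR} is not writable; mount a volume there for OpenShift" >&2
        exit 1
    fi
    rm -f "${DATA_DIR}/.rw-probe"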
build/orchestrator.conf.json: 6 changes (3 additions, 3 deletions)

@@ -12,7 +12,7 @@
     "ListenAddress": ":3000",
     "MasterFailoverDetachReplicaMasterHost": true,
     "MySQLHostnameResolveMethod": "@@report_host",
-    "MySQLTopologyCredentialsConfigFile": "/etc/orchestrator/orc-topology.cnf",
+    "MySQLTopologyCredentialsConfigFile": "/etc/orchestrator/config/orc-topology.cnf",
     "OnFailureDetectionProcesses": [
         "echo 'Detected {failureType} on {failureCluster}. Affected replicas: {countSlaves}'"
     ],
@@ -37,7 +37,7 @@
     "ProcessesShellCommand": "sh",
     "RaftAdvertise": "127.0.0.1",
     "RaftBind": "0.0.0.0",
-    "RaftDataDir": "/var/lib/orchestrator",
+    "RaftDataDir": "/etc/orchestrator/config",
     "RaftEnabled": true,
     "RaftNodes": [
         "127.0.0.1"
@@ -52,7 +52,7 @@
     "RecoveryIgnoreHostnameFilters": [],
     "RecoveryPeriodBlockSeconds": 5,
     "RemoveTextFromHostnameDisplay": ":3306",
-    "SQLite3DataFile": "/var/lib/orchestrator/orc.db",
+    "SQLite3DataFile": "/etc/orchestrator/config/orc.db",
     "UnseenInstanceForgetHours": 1,
     "StatusEndpoint": "/api/status",
     "UseSuperReadOnly": true,
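
Note: with the entrypoint change above, every writable path in this config (credentials file, Raft data dir, SQLite database) now points under /etc/orchestrator/config. A hedged in-container sanity check, assuming jq is available in the image:

    # Sketch: list the writable paths from the rendered config and flag any
    # that escape the mounted directory.
    jq -r '.RaftDataDir, .SQLite3DataFile, .MySQLTopologyCredentialsConfigFile' \
        /etc/orchestrator/config/orchestrator.conf.json \
        | grep -v '^/etc/orchestrator/config' \
        && echo "WARNING: path outside the writable mount" || :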
deploy/cr.yaml: 3 changes (2 additions, 1 deletion)

@@ -42,12 +42,13 @@ spec:
     image: perconalab/percona-server-mysql-operator:main-psmysql
     imagePullPolicy: Always
     # initImage: perconalab/percona-server-mysql-operator:main
 
     size: 3
 
     # env:
     # - name: BOOTSTRAP_READ_TIMEOUT
     #   value: "600"
+    # - name: MYSQLSH_USER_CONFIG_HOME
+    #   value: "/tmp"
     resources:
       requests:
         memory: 1G
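
Note: the new commented-out MYSQLSH_USER_CONFIG_HOME example matters on OpenShift. MySQL Shell writes history and logs under its user config home, which defaults to a directory under $HOME, and the arbitrary UID OpenShift assigns usually has no writable home. A hedged illustration of the effect (paths are illustrative):

    # Illustrative only: give mysqlsh a writable config home so it does not
    # try to create ~/.mysqlsh under a home directory the random UID cannot write.
    export MYSQLSH_USER_CONFIG_HOME=/tmp
    mysqlsh --version    # shell state now goes under /tmp instead of $HOME/.mysqlsh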
e2e-tests/functions: 53 changes (30 additions, 23 deletions)

@@ -5,6 +5,10 @@ ROOT_REPO=${ROOT_REPO:-$(realpath ../../..)}
 test_name=$(basename "$(pwd)")
 source "${ROOT_REPO}/e2e-tests/vars.sh"
 
+if oc get projects 2>/dev/null; then
+    OPENSHIFT=4
+fi
+
 init_temp_dir() {
     rm -rf "$TEMP_DIR"
     mkdir -p "$TEMP_DIR"
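
Note: the guard works because the projects resource exists only on OpenShift; on plain Kubernetes the command fails and OPENSHIFT stays unset. One hedged refinement worth considering: the command's stdout (the project list) is not redirected, so it leaks into test logs. A quieter variant, not in this PR:

    # Sketch: only the exit code matters for platform detection,
    # so suppress stdout as well.
    if oc get projects >/dev/null 2>&1; then
        OPENSHIFT=4
    fi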
@@ -13,14 +17,14 @@ init_temp_dir() {
 create_namespace() {
     local namespace=$1
 
-    if [[ $OPENSHIFT ]]; then
+    if [[ -n $OPENSHIFT ]]; then
         set -o pipefail
         if [[ $OPERATOR_NS ]] && (oc get project "$OPERATOR_NS" -o json >/dev/null 2>&1 | jq -r '.metadata.name' >/dev/null 2>&1); then
             oc delete --grace-period=0 --force=true project "$namespace" && sleep 120 || :
         else
             oc delete project "$namespace" && sleep 40 || :
         fi
-        wait_for_delete "project/$namespace"
+        wait_for_delete "project/$namespace" || :
 
         oc new-project "$namespace"
         oc project "$namespace"
@@ -87,38 +91,37 @@ apply_s3_storage_secrets() {
 }
 
 deploy_pmm_server() {
-    if [[ $OPENSHIFT ]]; then
-        oc create sa pmm-server -n "${NAMESPACE}" || :
-        oc adm policy add-scc-to-user privileged -z pmm-server -n "${NAMESPACE}" || :
-        oc create rolebinding pmm-ps-operator-namespace-only --role percona-server-for-mysql-operator-role --serviceaccount=$NAMESPACE:pmm-server -n "${NAMESPACE}" || :
-        oc patch role/percona-server-for-mysql-operator-role --type json -p='[{"op":"add","path": "/rules/-","value":{"apiGroups":["security.openshift.io"],"resources":["securitycontextconstraints"],"verbs":["use"],"resourceNames":["privileged"]}}]' -n "${NAMESPACE}" || :
-
-        local additional_params="--set platform=openshift --set sa=pmm-server --set supresshttp2=false"
-    fi
-
     helm uninstall -n "${NAMESPACE}" monitoring || :
     helm repo remove percona || :
     kubectl delete clusterrole monitoring --ignore-not-found
     kubectl delete clusterrolebinding monitoring --ignore-not-found
 
     helm repo add percona https://percona.github.io/percona-helm-charts/
-    helm install monitoring percona/pmm -n "${NAMESPACE}" \
+    helm repo update
+
+    if [[ -n $OPENSHIFT ]]; then
+        platform=openshift
+        oc create sa pmm-server -n "${NAMESPACE}" || :
+        oc adm policy add-scc-to-user privileged -z pmm-server -n "${NAMESPACE}" || :
+
+        if [[ $OPERATOR_NS ]]; then
+            timeout 30 oc delete clusterrolebinding $(kubectl get clusterrolebinding | grep 'pmm-ps-operator-' | awk '{print $1}') || :
+            oc create clusterrolebinding pmm-ps-operator-cluster-wide --clusterrole=percona-server-mysql-operator --serviceaccount=$NAMESPACE:pmm-server -n "$NAMESPACE"
+            oc patch clusterrole/percona-server-mysql-operator --type json -p='[{"op":"add","path": "/rules/-","value":{"apiGroups":["security.openshift.io"],"resources":["securitycontextconstraints"],"verbs":["use"],"resourceNames":["privileged"]}}]' ${OPERATOR_NS:+-n $OPERATOR_NS} || :
+        else
+            oc create rolebinding pmm-ps-operator-namespace-only --role percona-server-mysql-operator --serviceaccount=$NAMESPACE:pmm-server -n "$NAMESPACE"
+            oc patch role/percona-server-mysql-operator --type json -p='[{"op":"add","path": "/rules/-","value":{"apiGroups":["security.openshift.io"],"resources":["securitycontextconstraints"],"verbs":["use"],"resourceNames":["privileged"]}}]' -n "$NAMESPACE" || :
+        fi
+        local additional_params="--set platform=openshift --set supresshttp2=false --set serviceAccount.create=false --set serviceAccount.name=pmm-server"
+    fi
+
+    retry 10 120 helm install monitoring percona/pmm -n "${NAMESPACE}" \
         --set fullnameOverride=monitoring \
         --version ${PMM_SERVER_VERSION} \
         --set imageTag=${IMAGE_PMM_SERVER#*:} \
         --set imageRepo=${IMAGE_PMM_SERVER%:*} \
        --set service.type=LoadBalancer \
         $additional_params \
         --force
-
-    until kubectl -n "${NAMESPACE}" exec monitoring-0 -- bash -c "ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null"; do
-        echo "Retry $retry"
-        sleep 5
-        let retry+=1
-        if [ $retry -ge 20 ]; then
-            echo "Max retry count $retry reached. Pmm-server can't start"
-            exit 1
-        fi
-    done
 }
 
 get_pmm_server_token() {
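
Note: the hand-rolled until loop that polled for a postgres process inside monitoring-0 is replaced by wrapping helm install in retry, and the OpenShift role setup now targets the operator's actual role name (percona-server-mysql-operator) instead of the stale percona-server-for-mysql-operator-role. A hedged sketch of a compatible retry helper, assuming the (attempts, delay) argument order used at the call site; the repo may already define retry elsewhere:

    # Sketch, assuming `retry <attempts> <delay> <command...>` semantics.
    retry() {
        local max=$1 delay=$2 n=0
        shift 2
        until "$@"; do
            n=$((n + 1))
            if [ "$n" -ge "$max" ]; then
                echo "command failed after ${n} attempts: $*" >&2
                return 1
            fi
            sleep "$delay"
        done
    }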
@@ -736,6 +739,7 @@ check_passwords_leak() {
             echo logs saved in: ${TEMP_DIR}/logs_output-$p-$c.txt
             for pass in $passwords; do
                 count=$(grep -c --fixed-strings -- "$pass" ${TEMP_DIR}/logs_output-$p-$c.txt || :)
+                count=$(echo "$count" | awk '{if ($1 ~ /^[0-9]+$/) print $1; else print 0}')
                 if [[ $count != 0 ]]; then
                     echo leaked passwords are found in log ${TEMP_DIR}/logs_output-$p-$c.txt
                     false
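
Note: the added awk line normalizes anything non-numeric, for example an empty string if grep is interrupted, to 0, so the comparison below cannot misfire. A self-contained illustration of the guard:

    # Illustration: an empty count collapses to 0 instead of breaking the check.
    count=""
    count=$(echo "$count" | awk '{if ($1 ~ /^[0-9]+$/) print $1; else print 0}')
    echo "$count"    # prints 0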
@@ -762,6 +766,9 @@ deploy_chaos_mesh() {
     else
         helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=${NAMESPACE} --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version 2.5.1
     fi
+    if [[ -n $OPENSHIFT ]]; then
+        oc adm policy add-scc-to-user privileged -z chaos-daemon --namespace=${NAMESPACE}
+    fi
     sleep 10
 }
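
Note: chaos-daemon needs host-level access (the container runtime socket), so on OpenShift its service account must be allowed to use the privileged SCC. A hedged way to confirm the grant took effect; the exact field layout may vary by OpenShift version:

    # Sketch: verify the chaos-daemon service account appears in the privileged SCC.
    oc get scc privileged -o jsonpath='{.users}' \
        | grep "system:serviceaccount:${NAMESPACE}:chaos-daemon" \
        || echo "grant not recorded in the privileged SCC users list"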
e2e-tests/tests/auto-config/01-assert.yaml: 4 changes (4 additions, 0 deletions)

@@ -17,6 +17,8 @@ spec:
         name: bin
       - mountPath: /var/lib/mysql
         name: datadir
+      - mountPath: /.mysqlsh
+        name: mysqlsh
       - mountPath: /etc/mysql/mysql-users-secret
         name: users
       - mountPath: /etc/mysql/mysql-tls-secret
@@ -44,6 +46,8 @@
       volumes:
       - emptyDir: {}
         name: bin
+      - emptyDir: {}
+        name: mysqlsh
       - name: users
         secret:
           defaultMode: 420
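
Note: the new emptyDir mounted at /.mysqlsh gives mysqlsh a writable config home at the filesystem root, which is where it looks when HOME resolves to / under OpenShift's arbitrary UID. A hedged spot-check against a deployed cluster; the statefulset name is assumed from this kuttl test's name:

    # Sketch: confirm the mount is rendered into the pod template.
    kubectl get sts auto-config-mysql -n "${NAMESPACE}" \
        -o jsonpath='{.spec.template.spec.containers[0].volumeMounts[?(@.name=="mysqlsh")].mountPath}'
    # expected: /.mysqlsh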
e2e-tests/tests/config-router/01-assert.yaml: 6 changes (5 additions, 1 deletion)

@@ -17,6 +17,8 @@ spec:
         name: bin
       - mountPath: /var/lib/mysql
         name: datadir
+      - mountPath: /.mysqlsh
+        name: mysqlsh
       - mountPath: /etc/mysql/mysql-users-secret
         name: users
       - mountPath: /etc/mysql/mysql-tls-secret
@@ -36,6 +38,8 @@
       volumes:
       - emptyDir: {}
         name: bin
+      - emptyDir: { }
+        name: mysqlsh
       - name: users
         secret:
           defaultMode: 420
@@ -82,4 +86,4 @@ status:
   observedGeneration: 1
   readyReplicas: 3
   replicas: 3
-  updatedReplicas: 3
\ No newline at end of file
+  updatedReplicas: 3
e2e-tests/tests/config/01-assert.yaml: 4 changes (4 additions, 0 deletions)

@@ -17,6 +17,8 @@ spec:
         name: bin
       - mountPath: /var/lib/mysql
         name: datadir
+      - mountPath: /.mysqlsh
+        name: mysqlsh
       - mountPath: /etc/mysql/mysql-users-secret
         name: users
       - mountPath: /etc/mysql/mysql-tls-secret
@@ -44,6 +46,8 @@
       volumes:
       - emptyDir: {}
         name: bin
+      - emptyDir: { }
+        name: mysqlsh
       - name: users
         secret:
           defaultMode: 420
Test-step file (name not captured): 1 addition

@@ -6,6 +6,7 @@ commands:
       set -o xtrace
 
       source ../../functions
+
       init_temp_dir # do this only in the first TestStep
 
       apply_s3_storage_secrets
e2e-tests/tests/gr-security-context/02-assert.yaml: 14 changes (0 additions, 14 deletions)

@@ -15,21 +15,14 @@
       command:
       - /opt/percona/ps-entrypoint.sh
       name: mysql
-      securityContext:
-        privileged: true
     - command:
       - /opt/percona/sidecar
       name: xtrabackup
-      securityContext:
-        privileged: false
     initContainers:
     - command:
       - /opt/percona-server-mysql-operator/ps-init-entrypoint.sh
       name: mysql-init
-      securityContext:
-        privileged: true
     securityContext:
-      fsGroup: 1001
       supplementalGroups:
       - 1001
       - 1002
@@ -55,23 +48,16 @@ spec:
       command:
       - /opt/percona/haproxy-entrypoint.sh
       name: haproxy
-      securityContext:
-        privileged: true
     - args:
       - /opt/percona/peer-list
       - -on-change=/opt/percona/haproxy_add_mysql_nodes.sh
       - -service=$(MYSQL_SERVICE)
       name: mysql-monit
-      securityContext:
-        privileged: true
     initContainers:
     - command:
       - /opt/percona-server-mysql-operator/ps-init-entrypoint.sh
       name: haproxy-init
-      securityContext:
-        privileged: true
     securityContext:
-      fsGroup: 1001
       supplementalGroups:
       - 1001
       - 1002
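
Note: these assertions are relaxed because on OpenShift the SCC admission controller rewrites pod security contexts: fsGroup comes from the namespace's UID range and privileged is governed by the SCC, so hard-coded values cannot hold on both platforms. A hedged way to inspect what was actually assigned, using the statefulset name from this test:

    # Sketch: print the pod-level securityContext the platform actually assigned.
    kubectl get sts gr-security-context-mysql -n "${NAMESPACE}" \
        -o jsonpath='{.spec.template.spec.securityContext}'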
e2e-tests/tests/gr-security-context/02-create-cluster.yaml: 59 changes (50 additions, 9 deletions)

@@ -1,6 +1,5 @@
 apiVersion: kuttl.dev/v1beta1
 kind: TestStep
-timeout: 10
 commands:
   - script: |-
       set -o errexit
@@ -9,23 +8,65 @@ commands:
       source ../../functions
 
       get_cr \
-        | yq eval '.spec.backup.storages.minio.type="s3"' - \
+        | yq eval '.spec.backup.storages.minio.type="s3"' - \
         | yq eval '.spec.backup.storages.minio.s3.bucket="operator-testing"' - \
         | yq eval '.spec.backup.storages.minio.s3.credentialsSecret="minio-secret"' - \
         | yq eval ".spec.backup.storages.minio.s3.endpointUrl=\"http://minio-service.${NAMESPACE}:9000\"" - \
         | yq eval '.spec.backup.storages.minio.s3.region="us-east-1"' - \
-        | yq eval ".spec.backup.storages.minio.containerSecurityContext.privileged=true" - \
-        | yq eval ".spec.backup.storages.minio.podSecurityContext.fsGroup=1001" - \
         | yq eval ".spec.backup.storages.minio.podSecurityContext.supplementalGroups |= [1001, 1002, 1003]" - \
-        | yq eval ".spec.backup.containerSecurityContext.privileged=false" - \
         | yq eval '.spec.mysql.clusterType="group-replication"' - \
-        | yq eval ".spec.mysql.containerSecurityContext.privileged=true" - \
-        | yq eval ".spec.mysql.podSecurityContext.fsGroup=1001" - \
         | yq eval ".spec.mysql.podSecurityContext.supplementalGroups |= [1001, 1002, 1003]" - \
         | yq eval ".spec.proxy.router.enabled=false" - \
         | yq eval ".spec.proxy.haproxy.enabled=true" - \
-        | yq eval ".spec.proxy.haproxy.containerSecurityContext.privileged=true" - \
-        | yq eval ".spec.proxy.haproxy.podSecurityContext.fsGroup=1001" - \
         | yq eval ".spec.proxy.haproxy.podSecurityContext.supplementalGroups |= [1001, 1002, 1003]" - \
+        | (
+            if [[ -z $OPENSHIFT ]]; then
+                yq eval ".spec.backup.storages.minio.containerSecurityContext.privileged=true" - \
+                    | yq eval ".spec.backup.storages.minio.podSecurityContext.fsGroup=1001" - \
+                    | yq eval ".spec.backup.containerSecurityContext.privileged=false" - \
+                    | yq eval ".spec.mysql.containerSecurityContext.privileged=true" - \
+                    | yq eval ".spec.mysql.podSecurityContext.fsGroup=1001" - \
+                    | yq eval ".spec.proxy.haproxy.containerSecurityContext.privileged=true" - \
+                    | yq eval ".spec.proxy.haproxy.podSecurityContext.fsGroup=1001" -
+            else
+                cat -
+            fi
+        ) \
         | kubectl -n "${NAMESPACE}" apply -f -
 
+      sleep 100
+
+      if [[ -z $OPENSHIFT ]]; then
+          # For HAProxy
+          FS_GROUP_HAPROXY=$(kubectl get statefulset gr-security-context-haproxy -n "${NAMESPACE}" -o jsonpath='{.spec.template.spec.securityContext.fsGroup}')
+          if [[ "$FS_GROUP_HAPROXY" != "1001" ]]; then
+              echo "Test Failed: fsGroup is not set to 1001 (found: $FS_GROUP_HAPROXY)."
+              exit 1
+          fi
+
+          # Check that all containers (including initContainers) have privileged: true
+          PRIVILEGED_CONTAINERS_HAPROXY=$(kubectl get statefulset gr-security-context-haproxy -n "${NAMESPACE}" -o jsonpath='{.spec.template.spec.containers[*].securityContext.privileged}')
+          PRIVILEGED_INIT_CONTAINERS_HAPROXY=$(kubectl get statefulset gr-security-context-haproxy -n "${NAMESPACE}" -o jsonpath='{.spec.template.spec.initContainers[*].securityContext.privileged}')
+
+          if [[ "$PRIVILEGED_CONTAINERS_HAPROXY" != "true true" || "$PRIVILEGED_INIT_CONTAINERS_HAPROXY" != "true" ]]; then
+              echo "Test Failed: Some containers are missing privileged: true."
+              exit 1
+          fi
+
+          # For mysql
+          FS_GROUP_MYSQL=$(kubectl get statefulset gr-security-context-mysql -n "${NAMESPACE}" -o jsonpath='{.spec.template.spec.securityContext.fsGroup}')
+          if [[ "$FS_GROUP_MYSQL" != "1001" ]]; then
+              echo "Test Failed: fsGroup is not set to 1001 (found: $FS_GROUP_MYSQL)."
+              exit 1
+          fi
+
+          # Check container securityContexts: mysql must be privileged, xtrabackup must not
+          PRIVILEGED_CONTAINERS_MYSQL=$(kubectl get statefulset gr-security-context-mysql -n "${NAMESPACE}" -o jsonpath='{.spec.template.spec.containers[*].securityContext.privileged}')
+          PRIVILEGED_INIT_CONTAINERS_MYSQL=$(kubectl get statefulset gr-security-context-mysql -n "${NAMESPACE}" -o jsonpath='{.spec.template.spec.initContainers[*].securityContext.privileged}')
+
+          if [[ "$PRIVILEGED_CONTAINERS_MYSQL" != "true false" || "$PRIVILEGED_INIT_CONTAINERS_MYSQL" != "true" ]]; then
+              echo "Test Failed: Some containers are missing privileged: true."
+              exit 1
+          fi
+      fi
+timeout: 120
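
Note: the parenthesized subshell is the core idiom here. It lets one branch of the pipeline apply the fsGroup/privileged overrides only off OpenShift, while cat - passes the manifest through untouched when SCC admission would reject those fields. A minimal standalone illustration of the same pattern (input and expression are placeholders):

    # Sketch of the conditional-pipeline idiom: transform stdin only when the
    # OPENSHIFT flag is unset, otherwise forward it unchanged.
    echo '{"spec": {}}' \
        | (
            if [[ -z $OPENSHIFT ]]; then
                yq eval '.spec.podSecurityContext.fsGroup = 1001' -
            else
                cat -
            fi
        )
    # off OpenShift this prints the document with fsGroup added; on OpenShift, unchanged.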
e2e-tests/tests/gr-security-context/04-assert.yaml: 5 changes (0 additions, 5 deletions)

@@ -36,8 +36,6 @@ spec:
       imagePullPolicy: Always
       name: xtrabackup
       resources: {}
-      securityContext:
-        privileged: true
       terminationMessagePath: /dev/termination-log
       terminationMessagePolicy: File
       volumeMounts:
@@ -54,8 +52,6 @@
       imagePullPolicy: Always
       name: xtrabackup-init
       resources: {}
-      securityContext:
-        privileged: true
       terminationMessagePath: /dev/termination-log
       terminationMessagePolicy: File
       volumeMounts:
@@ -64,7 +60,6 @@
     restartPolicy: Never
     schedulerName: default-scheduler
     securityContext:
-      fsGroup: 1001
       supplementalGroups:
       - 1001
       - 1002