ci: add test to verify omap deletion
this commit adds a CI test to verify omap deletion: it creates a stale
subvolume via a Retain StorageClass and deletes it with the kubectl rook-ceph plugin

Signed-off-by: yati1998 <[email protected]>
yati1998 committed Mar 18, 2024
1 parent 403b294 commit 40198f5
Showing 2 changed files with 52 additions and 4 deletions.
12 changes: 8 additions & 4 deletions .github/workflows/go-test.yaml
@@ -78,11 +78,13 @@ jobs:
run: |
set -ex
kubectl rook-ceph ceph fs subvolume create myfs test-subvol group-a
kubectl rook-ceph ceph fs subvolume create myfs test-subvol-1 group-a
kubectl rook-ceph subvolume ls
kubectl rook-ceph subvolume ls --stale
kubectl rook-ceph subvolume delete myfs test-subvol group-a
kubectl rook-ceph subvolume delete myfs test-subvol-1
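# create a stale subvolume via a Retain StorageClass, then locate and delete it with the plugin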
tests/github-action-helper.sh create_sc_with_retain_policy
tests/github-action-helper.sh create_stale_subvolume
subVol=$(kubectl rook-ceph subvolume ls --stale | awk '{print $2}' | grep csi-vol)
kubectl rook-ceph subvolume delete myfs "$subVol"
- name: Get mon endpoints
run: |
@@ -234,12 +236,14 @@ jobs:
- name: Subvolume command
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster ceph fs subvolume create myfs test-subvol-1 group-a
kubectl rook-ceph --operator-namespace test-operator -n test-cluster ceph fs subvolume create myfs test-subvol group-a
kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume ls
kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume ls --stale
kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume delete myfs test-subvol group-a
kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume delete myfs test-subvol-1
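# same stale-subvolume flow, but against the custom operator and cluster namespaces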
tests/github-action-helper.sh create_sc_with_retain_policy_custom_ns test-operator test-cluster
tests/github-action-helper.sh create_stale_subvolume
subVol=$(kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume ls --stale | awk '{print $2}' | grep csi-vol)
kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume delete myfs "$subVol"
- name: Get mon endpoints
run: |
44 changes: 44 additions & 0 deletions tests/github-action-helper.sh
@@ -31,6 +31,7 @@ deploy_rook() {
kubectl create -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/filesystem-test.yaml
kubectl create -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/subvolumegroup.yaml
kubectl create -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/toolbox.yaml
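# cephfs storageclass backing the cephfs PVC used by the subvolume tests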
kubectl create -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/csi/cephfs/storageclass.yaml
kubectl create -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/csi/rbd/storageclass-test.yaml
kubectl create -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/csi/rbd/pvc.yaml
kubectl create -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/csi/cephfs/pvc.yaml
@@ -57,6 +58,9 @@ deploy_rook_in_custom_namespace() {
curl https://raw.githubusercontent.com/rook/rook/master/deploy/examples/csi/rbd/storageclass-test.yaml -o storageclass-test.yaml
sed -i "s|provisioner: rook-ceph.rbd.csi.ceph.com |provisioner: test-operator.rbd.csi.ceph.com |g" storageclass-test.yaml
deploy_with_custom_ns "$1" "$2" storageclass-test.yaml
curl https://raw.githubusercontent.com/rook/rook/master/deploy/examples/csi/cephfs/storageclass.yaml -o storageclass.yaml
sed -i "s|provisioner: rook-ceph.cephfs.csi.ceph.com |provisioner: test-operator.cephfs.csi.ceph.com |g" storageclass.yaml
deploy_with_custom_ns "$1" "$2" storageclass.yaml
kubectl create -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/csi/rbd/pvc.yaml
curl -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/filesystem-test.yaml -o filesystem.yaml
deploy_with_custom_ns "$1" "$2" filesystem.yaml
@@ -68,12 +72,52 @@ deploy_rook_in_custom_namespace() {
deploy_with_custom_ns "$1" "$2" toolbox.yaml
}

create_sc_with_retain_policy(){
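# create a CephFS StorageClass named rook-cephfs-retain with reclaimPolicy Retain,
# so the backing subvolume survives deletion of its PVC and PV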
curl https://raw.githubusercontent.com/rook/rook/master/deploy/examples/csi/cephfs/storageclass.yaml -o storageclass.yaml
sed -i "s|name: rook-cephfs|name: rook-cephfs-retain|g" storageclass.yaml
sed -i "s|reclaimPolicy: Delete|reclaimPolicy: Retain|g" storageclass.yaml
kubectl create -f storageclass.yaml
}

create_sc_with_retain_policy_custom_ns(){
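# same as create_sc_with_retain_policy, but rewrites the provisioner and namespaces
# for the custom operator/cluster namespace deployment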
export OPERATOR_NS=$1
export CLUSTER_NS=$2

curl https://raw.githubusercontent.com/rook/rook/master/deploy/examples/csi/cephfs/storageclass.yaml -o storageclass.yaml
sed -i "s|name: rook-cephfs|name: rook-cephfs-retain|g" storageclass.yaml
sed -i "s|reclaimPolicy: Delete|reclaimPolicy: Retain|g" storageclass.yaml
sed -i "s|provisioner: rook-ceph.cephfs.csi.ceph.com |provisioner: test-operator.cephfs.csi.ceph.com |g" storageclass.yaml
deploy_with_custom_ns "$OPERATOR_NS" "$CLUSTER_NS" storageclass.yaml
}

create_stale_subvolume() {
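# create a PVC on the retain StorageClass, wait for it to bind, then delete the PVC and
# its retained PV; the volume is never deleted through the CSI driver, so the CephFS
# subvolume is left behind as a stale subvolume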
curl https://raw.githubusercontent.com/rook/rook/master/deploy/examples/csi/cephfs/pvc.yaml -o pvc.yaml
sed -i "s|name: cephfs-pvc|name: cephfs-pvc-retain|g" pvc.yaml
sed -i "s|storageClassName: rook-cephfs|storageClassName: rook-cephfs-retain|g" pvc.yaml
kubectl create -f pvc.yaml
kubectl get pvc cephfs-pvc-retain
: "${PVNAME:=$(kubectl get pvc cephfs-pvc-retain -o=jsonpath='{.spec.volumeName}')}"
wait_for_pvc_to_be_bound_state_default
kubectl get pvc cephfs-pvc-retain
kubectl delete pvc cephfs-pvc-retain
kubectl delete pv "$PVNAME"
}

deploy_with_custom_ns() {
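# substitute the operator ($1) and cluster ($2) namespaces into the given manifest ($3) and create it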
sed -i "s|rook-ceph # namespace:operator|$1 # namespace:operator|g" "$3"
sed -i "s|rook-ceph # namespace:cluster|$2 # namespace:cluster|g" "$3"
kubectl create -f "$3"
}

wait_for_pvc_to_be_bound_state_default() {
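# wait up to 100s for the cephfs-pvc-retain PVC to report phase Bound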
timeout 100 bash <<-'EOF'
until [ "$(kubectl get pvc cephfs-pvc-retain -o jsonpath='{.status.phase}')" == "Bound" ]; do
echo "waiting for the pvc to be in bound state"
sleep 1
done
EOF
}

wait_for_pod_to_be_ready_state_default() {
timeout 200 bash <<-'EOF'
until [ $(kubectl get pod -l app=rook-ceph-osd -n rook-ceph -o jsonpath='{.items[*].metadata.name}' -o custom-columns=READY:status.containerStatuses[*].ready | grep -c true) -eq 1 ]; do
