From eb8cc302e00a606431386c80ae91195e38d279c8 Mon Sep 17 00:00:00 2001
From: riya-singhal31
Date: Thu, 20 Apr 2023 15:21:43 +0530
Subject: [PATCH] ci: fix shell check failures

Signed-off-by: riya-singhal31
(cherry picked from commit 44612fe34c8336dce39c96e211775579aac46905)
---
 scripts/install-helm.sh           | 20 ++++++++++----------
 scripts/install-snapshot.sh       | 12 ++++++------
 troubleshooting/tools/tracevol.py |  2 +-
 3 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/scripts/install-helm.sh b/scripts/install-helm.sh
index ef71a7c3278..41c18bd4539 100755
--- a/scripts/install-helm.sh
+++ b/scripts/install-helm.sh
@@ -148,7 +148,7 @@ install_cephcsi_helm_charts() {
         NAMESPACE="default"
     fi
 
-    kubectl_retry create namespace ${NAMESPACE}
+    kubectl_retry create namespace "${NAMESPACE}"
 
     # label the nodes uniformly for domain information
     for node in $(kubectl_retry get node -o jsonpath='{.items[*].metadata.name}'); do
@@ -170,19 +170,19 @@ install_cephcsi_helm_charts() {
     # install ceph-csi-cephfs and ceph-csi-rbd charts
     # shellcheck disable=SC2086
     "${HELM}" install --namespace ${NAMESPACE} --set provisioner.fullnameOverride=csi-cephfsplugin-provisioner --set nodeplugin.fullnameOverride=csi-cephfsplugin --set configMapName=ceph-csi-config --set provisioner.replicaCount=1 --set-json='commonLabels={"app.kubernetes.io/name": "ceph-csi-cephfs", "app.kubernetes.io/managed-by": "helm"}' ${SET_SC_TEMPLATE_VALUES} ${CEPHFS_SECRET_TEMPLATE_VALUES} ${CEPHFS_CHART_NAME} "${SCRIPT_DIR}"/../charts/ceph-csi-cephfs
 
-    check_deployment_status app=ceph-csi-cephfs ${NAMESPACE}
-    check_daemonset_status app=ceph-csi-cephfs ${NAMESPACE}
+    check_deployment_status app=ceph-csi-cephfs "${NAMESPACE}"
+    check_daemonset_status app=ceph-csi-cephfs "${NAMESPACE}"
 
     # deleting configmaps as a workaround to avoid configmap already present
     # issue when installing ceph-csi-rbd
-    kubectl_retry delete cm ceph-csi-config --namespace ${NAMESPACE}
-    kubectl_retry delete cm ceph-config --namespace ${NAMESPACE}
+    kubectl_retry delete cm ceph-csi-config --namespace "${NAMESPACE}"
+    kubectl_retry delete cm ceph-config --namespace "${NAMESPACE}"
 
     # shellcheck disable=SC2086
     "${HELM}" install --namespace ${NAMESPACE} --set provisioner.fullnameOverride=csi-rbdplugin-provisioner --set nodeplugin.fullnameOverride=csi-rbdplugin --set configMapName=ceph-csi-config --set provisioner.replicaCount=1 --set-json='commonLabels={"app.kubernetes.io/name": "ceph-csi-rbd", "app.kubernetes.io/managed-by": "helm"}' ${SET_SC_TEMPLATE_VALUES} ${RBD_SECRET_TEMPLATE_VALUES} ${RBD_CHART_NAME} "${SCRIPT_DIR}"/../charts/ceph-csi-rbd --set topology.enabled=true --set topology.domainLabels="{${NODE_LABEL_REGION},${NODE_LABEL_ZONE}}" --set provisioner.maxSnapshotsOnImage=3 --set provisioner.minSnapshotsOnImage=2
 
-    check_deployment_status app=ceph-csi-rbd ${NAMESPACE}
-    check_daemonset_status app=ceph-csi-rbd ${NAMESPACE}
+    check_deployment_status app=ceph-csi-rbd "${NAMESPACE}"
+    check_daemonset_status app=ceph-csi-rbd "${NAMESPACE}"
 
 }
 
@@ -197,9 +197,9 @@ cleanup_cephcsi_helm_charts() {
     if [ -z "$NAMESPACE" ]; then
         NAMESPACE="default"
     fi
-    "${HELM}" uninstall ${CEPHFS_CHART_NAME} --namespace ${NAMESPACE}
-    "${HELM}" uninstall ${RBD_CHART_NAME} --namespace ${NAMESPACE}
-    kubectl_retry delete namespace ${NAMESPACE}
+    "${HELM}" uninstall ${CEPHFS_CHART_NAME} --namespace "${NAMESPACE}"
+    "${HELM}" uninstall ${RBD_CHART_NAME} --namespace "${NAMESPACE}"
+    kubectl_retry delete namespace "${NAMESPACE}"
 }
 
 helm_reset() {
diff --git a/scripts/install-snapshot.sh b/scripts/install-snapshot.sh
index aa296310466..eb72b2ae2d2 100755
--- a/scripts/install-snapshot.sh
+++ b/scripts/install-snapshot.sh
@@ -27,21 +27,21 @@ function install_snapshot_controller() {
         namespace="kube-system"
     fi
 
-    create_or_delete_resource "create" ${namespace}
+    create_or_delete_resource "create" "${namespace}"
 
-    pod_ready=$(kubectl get pods -l app=snapshot-controller -n ${namespace} -o jsonpath='{.items[0].status.containerStatuses[0].ready}')
+    pod_ready=$(kubectl get pods -l app=snapshot-controller -n "${namespace}" -o jsonpath='{.items[0].status.containerStatuses[0].ready}')
     INC=0
     until [[ "${pod_ready}" == "true" || $INC -gt 20 ]]; do
         sleep 10
         ((++INC))
-        pod_ready=$(kubectl get pods -l app=snapshot-controller -n ${namespace} -o jsonpath='{.items[0].status.containerStatuses[0].ready}')
+        pod_ready=$(kubectl get pods -l app=snapshot-controller -n "${namespace}" -o jsonpath='{.items[0].status.containerStatuses[0].ready}')
         echo "snapshotter pod status: ${pod_ready}"
     done
 
     if [ "${pod_ready}" != "true" ]; then
         echo "snapshotter controller creation failed"
-        kubectl get pods -l app=snapshot-controller -n ${namespace}
-        kubectl describe po -l app=snapshot-controller -n ${namespace}
+        kubectl get pods -l app=snapshot-controller -n "${namespace}"
+        kubectl describe po -l app=snapshot-controller -n "${namespace}"
         exit 1
     fi
 
@@ -53,7 +53,7 @@ function cleanup_snapshot_controller() {
     if [ -z "${namespace}" ]; then
         namespace="kube-system"
     fi
-    create_or_delete_resource "delete" ${namespace}
+    create_or_delete_resource "delete" "${namespace}"
 }
 
 function create_or_delete_resource() {
diff --git a/troubleshooting/tools/tracevol.py b/troubleshooting/tools/tracevol.py
index 45d96099bfd..d18f0e5c458 100755
--- a/troubleshooting/tools/tracevol.py
+++ b/troubleshooting/tools/tracevol.py
@@ -363,7 +363,7 @@ def get_tool_box_pod_name(arg):
         print("failed to pod %s", err)
     return ""
 
-#pylint: disable=too-many-branches
+#pylint: disable=too-many-branches, E0012, W0719
 def get_pool_name(arg, vol_id, is_rbd):
     """
     get pool name from ceph backend