diff --git a/workloads/kube-burner-ocp-wrapper/run.sh b/workloads/kube-burner-ocp-wrapper/run.sh
index 81eb488c..3941cb80 100755
--- a/workloads/kube-burner-ocp-wrapper/run.sh
+++ b/workloads/kube-burner-ocp-wrapper/run.sh
@@ -13,6 +13,12 @@ GC=${GC:-true}
 EXTRA_FLAGS=${EXTRA_FLAGS:-}
 UUID=${UUID:-$(uuidgen)}
 KUBE_DIR=${KUBE_DIR:-/tmp}
+# Optional per-availability-zone worker counts. When set, the worker
+# machineset in that AZ is scaled to this many replicas before the run.
+US_WEST_2A=${US_WEST_2A:-}
+US_WEST_2B=${US_WEST_2B:-}
+US_WEST_2C=${US_WEST_2C:-}
+US_WEST_2D=${US_WEST_2D:-}
 
 download_binary(){
   KUBE_BURNER_URL=https://github.com/cloud-bulldozer/kube-burner/releases/download/v${KUBE_BURNER_VERSION}/kube-burner-V${KUBE_BURNER_VERSION}-linux-x86_64.tar.gz
@@ -116,6 +122,31 @@ fi
 # Capture the exit code of the run, but don't exit the script if it fails.
 set +e
 
+# scale machineset
+for machineset_name in $(oc get -n openshift-machine-api machineset --no-headers -o custom-columns=":.metadata.name" | grep -i worker); do
+  region=$(oc get -n openshift-machine-api machineset --no-headers -o custom-columns=":.spec.template.spec.providerSpec.value.placement.availabilityZone" "$machineset_name")
+  # region will be of the form us-west-2a. We need to match it to user provided var i.e replace "-" with '_' and then convert it to upper case.
+  # For example us-west-2a will be converted to US_WEST_2A.
+  region_var=$(echo "$region" | tr '-' '_' | tr '[:lower:]' '[:upper:]')
+  # desired_replicas will be the value stored in US_WEST_2A (if provided by user)
+  desired_replicas=${!region_var}
+  if [[ "${desired_replicas}" != "" ]]; then
+    echo "scale the ${machineset_name} to ${desired_replicas}"
+    oc scale -n openshift-machine-api machineset "$machineset_name" --replicas="${desired_replicas}"
+    # wait for 1 hour (720 polls x 5s)
+    for ((i = 1; i <= 720; i++)); do
+      available_replicas=$(oc get -n openshift-machine-api -o template machineset "$machineset_name" --template='{{.status.availableReplicas}}')
+      if [ "$available_replicas" -eq "$desired_replicas" ]; then
+        echo "Desired number of replicas ($desired_replicas) reached."
+        break
+      fi
+      sleep 5
+    done
+
+  fi
+done
+
+
 # Label workers with ovnic. Metrics from only these workers are pulled.
 # node-desnity-cni on 500 nodes runs for 2 hours 15 minutes. Scraping metrics from 500 nodes for the duration of 2 hours 15 minutes is overkill.
 # So we scrape from only 10 worker nodes if the worker node count is more than 120.