From 313ccab796d4abe370376efd909d1b08be6d0dd8 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 2 Mar 2023 17:59:39 +0000 Subject: [PATCH 1/2] env.sh - added the following vars VM_NODE_SELECTOR,NODE_VM_DENSITY_IMAGE run.sh - added the node-vm-density case added folder node-vm-density - VM template and settings --- workloads/kube-burner/env.sh | 2 + workloads/kube-burner/run.sh | 9 ++ .../node-vm-density/node-vm-density.yml | 87 +++++++++++++++++++ .../workloads/node-vm-density/vm.yml | 50 +++++++++++ 4 files changed, 148 insertions(+) create mode 100644 workloads/kube-burner/workloads/node-vm-density/node-vm-density.yml create mode 100644 workloads/kube-burner/workloads/node-vm-density/vm.yml diff --git a/workloads/kube-burner/env.sh b/workloads/kube-burner/env.sh index ef2c9d09..1e289120 100644 --- a/workloads/kube-burner/env.sh +++ b/workloads/kube-burner/env.sh @@ -17,6 +17,7 @@ export BURST=${BURST:-20} export MAX_WAIT_TIMEOUT=${MAX_WAIT_TIMEOUT:-1h} export CLEANUP=${CLEANUP:-true} export POD_NODE_SELECTOR=${POD_NODE_SELECTOR:-'{node-role.kubernetes.io/worker: }'} +export VM_NODE_SELECTOR=${VM_NODE_SELECTOR:-'{kubernetes.io/hostname: }'} export WORKER_NODE_LABEL=${WORKER_NODE_LABEL:-"node-role.kubernetes.io/worker"} export WAIT_WHEN_FINISHED=true export POD_WAIT=${POD_WAIT:-false} @@ -35,6 +36,7 @@ export JOB_PAUSE=${JOB_PAUSE:-1m} # kube-burner workload defaults export NODE_POD_DENSITY_IMAGE=${NODE_POD_DENSITY_IMAGE:-gcr.io/google_containers/pause:3.1} +export NODE_VM_DENSITY_IMAGE=${NODE_VM_DENSITY_IMAGE:-quay.io/kubevirt/fedora-container-disk-images:35} # kube-burner churn enablement export CHURN=${CHURN:-false} diff --git a/workloads/kube-burner/run.sh b/workloads/kube-burner/run.sh index cc05c69b..b1817fd3 100755 --- a/workloads/kube-burner/run.sh +++ b/workloads/kube-burner/run.sh @@ -20,6 +20,15 @@ case ${WORKLOAD} in label_node_with_label $label find_running_pods_num regular ;; + node-vm-density) + 
WORKLOAD_TEMPLATE=workloads/node-vm-density/node-vm-density.yml + METRICS_PROFILE=${METRICS_PROFILE:-metrics-profiles/metrics.yaml} + NODE_COUNT=${NODE_COUNT:-$(kubectl get node -l ${WORKER_NODE_LABEL},node-role.kubernetes.io/infra!=,node-role.kubernetes.io/workload!= -o name | wc -l)} + PODS_PER_NODE=${PODS_PER_NODE:-245} + label="node-density=enabled" + label_node_with_label $label + find_running_pods_num regular + ;; node-density-heavy) WORKLOAD_TEMPLATE=workloads/node-density-heavy/node-density-heavy.yml METRICS_PROFILE=${METRICS_PROFILE:-metrics-profiles/metrics.yaml} diff --git a/workloads/kube-burner/workloads/node-vm-density/node-vm-density.yml b/workloads/kube-burner/workloads/node-vm-density/node-vm-density.yml new file mode 100644 index 00000000..14ff07da --- /dev/null +++ b/workloads/kube-burner/workloads/node-vm-density/node-vm-density.yml @@ -0,0 +1,87 @@ +--- +global: + writeToFile: false + indexerConfig: + enabled: {{.INDEXING}} + esServers: ["{{.ES_SERVER}}"] + insecureSkipVerify: true + defaultIndex: {{.ES_INDEX}} + type: elastic + measurements: + - name: podLatency + esIndex: {{.ES_INDEX}} + thresholds: + - conditionType: Ready + metric: P99 + threshold: {{.POD_READY_THRESHOLD}} + - name: vmiLatency +{{ if eq .PPROF_COLLECTION "True" }} + - name: pprof + pprofInterval: {{ .PPROF_COLLECTION_INTERVAL }} + pprofDirectory: /tmp/pprof-data + pprofTargets: + - name: kube-apiserver-cpu + namespace: "openshift-kube-apiserver" + labelSelector: {app: openshift-kube-apiserver} + bearerToken: {{ .BEARER_TOKEN }} + url: https://localhost:6443/debug/pprof/profile?timeout=30 + - name: kube-apiserver-heap + namespace: "openshift-kube-apiserver" + labelSelector: {app: openshift-kube-apiserver} + bearerToken: {{ .BEARER_TOKEN }} + url: https://localhost:6443/debug/pprof/heap + - name: kube-controller-manager-heap + namespace: "openshift-kube-controller-manager" + labelSelector: {app: kube-controller-manager} + bearerToken: {{ .BEARER_TOKEN }} + url: 
https://localhost:10257/debug/pprof/heap + + - name: kube-controller-manager-cpu + namespace: "openshift-kube-controller-manager" + labelSelector: {app: kube-controller-manager} + bearerToken: {{ .BEARER_TOKEN }} + url: https://localhost:10257/debug/pprof/profile?timeout=30 + + - name: etcd-heap + namespace: "openshift-etcd" + labelSelector: {app: etcd} + cert: {{ .CERTIFICATE }} + key: {{ .PRIVATE_KEY }} + url: https://localhost:2379/debug/pprof/heap + + - name: etcd-cpu + namespace: "openshift-etcd" + labelSelector: {app: etcd} + cert: {{ .CERTIFICATE }} + key: {{ .PRIVATE_KEY }} + url: https://localhost:2379/debug/pprof/profile?timeout=30 +{{ end }} +jobs: + - name: node-vm-density + jobIterations: {{.TEST_JOB_ITERATIONS}} + qps: {{.QPS}} + burst: {{.BURST}} + namespacedIterations: false + namespace: {{.UUID}} + podWait: {{.POD_WAIT}} + cleanup: {{.CLEANUP}} + waitFor: {{.WAIT_FOR}} + waitWhenFinished: {{.WAIT_WHEN_FINISHED}} + verifyObjects: {{.VERIFY_OBJECTS}} + errorOnVerify: {{.ERROR_ON_VERIFY}} + maxWaitTimeout: {{.MAX_WAIT_TIMEOUT}} + preLoadImages: {{.PRELOAD_IMAGES}} + preLoadPeriod: {{.PRELOAD_PERIOD}} + namespaceLabels: + security.openshift.io/scc.podSecurityLabelSync: false + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/audit: privileged + pod-security.kubernetes.io/warn: privileged + objects: + + - objectTemplate: vm.yml + replicas: 1 + inputVars: + containerImage: {{.NODE_VM_DENSITY_IMAGE}} + nodeSelector: "{{.VM_NODE_SELECTOR}}" + #nodeSelector: "{{.POD_NODE_SELECTOR}}" diff --git a/workloads/kube-burner/workloads/node-vm-density/vm.yml b/workloads/kube-burner/workloads/node-vm-density/vm.yml new file mode 100644 index 00000000..c9e3a4d0 --- /dev/null +++ b/workloads/kube-burner/workloads/node-vm-density/vm.yml @@ -0,0 +1,50 @@ +apiVersion: kubevirt.io/v1 +kind: VirtualMachine +metadata: + labels: + name: {{.JobName}} + name: {{.JobName}}-{{.Iteration}} +spec: + running: true + template: + metadata: + labels: + 
kubevirt-vm: {{.JobName}}-{{.Iteration}} + spec: + domain: + cpu: + cores: 1 + sockets: 1 + threads: 1 + devices: + disks: + - disk: + bus: virtio + name: node-vm-density + machine: + type: pc-q35-rhel8.4.0 + resources: + requests: + memory: 256Mi + cpu: 100m + limits: + cpu: 100m + #memory: + #hugepages: + #pageSize: 1Gi + nodeSelector: {{.nodeSelector}} + terminationGracePeriodSeconds: 0 + volumes: + - containerDisk: + image: {{.containerImage}} + name: node-vm-density + - cloudInitNoCloud: + userData: |- + #cloud-config + password: password + chpasswd: { expire: False } + runcmd: + - sed -i -e "s/PasswordAuthentication.*/PasswordAuthentication yes/" /etc/ssh/sshd_config + - systemctl restart sshd + name: cloudinitdisk +status: {} From fdfe8a391ac2a310d916418015a17636c6fc6163 Mon Sep 17 00:00:00 2001 From: Boaz Ben Shabat <64833692+bbenshab@users.noreply.github.com> Date: Thu, 2 Mar 2023 20:10:14 +0200 Subject: [PATCH 2/2] Update README.md added node-vm-density description --- workloads/kube-burner/README.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/workloads/kube-burner/README.md b/workloads/kube-burner/README.md index 57856922..26060f5a 100644 --- a/workloads/kube-burner/README.md +++ b/workloads/kube-burner/README.md @@ -85,6 +85,20 @@ Each iteration creates the following objects: - 10 secrets containing 2048 character random string - 10 configMaps containing a 2048 character random string +### node-vm-density variables + +The `node-vm-density`workloads support the following environment variables: + +- **NODE_COUNT**: Number of worker nodes to deploy the pods on. During the workload nodes will be labeled with `node-density=enabled`. Defaults to the number of worker nodes across the cluster (Nodes resulting of the expression `oc get node -o name --no-headers -l node-role.kubernetes.io/workload!="",node-role.kubernetes.io/infra!="",${WORKER_NODE_LABEL}` +- **PODS_PER_NODE**: Define the maximum number of VM to deploy on each labeled node. 
Defaults to 245
+- **NODE_VM_DENSITY_IMAGE**: Image to use as node-vm-density workload. Defaults to `quay.io/kubevirt/fedora-container-disk-images:35`.
+
+This workload creates the following objects:
+
+- **node-vm-density**: Creates a single namespace with a number of VirtualMachines proportional to the calculated number of pods.
+Each iteration of this workload creates the following object:
+ - 1 VirtualMachine (containerDisk image with cloud-init)
+
 ### Node-density and Node-density-heavy variables
 
 The `node-density` and `node-density-heavy` workloads support the following environment variables: