From 25160ff414d2c8b164ebd3fd3e657b673606f684 Mon Sep 17 00:00:00 2001
From: cyclinder
Date: Thu, 18 Apr 2024 18:29:18 +0800
Subject: [PATCH] DRA: Fix the latest feedback

---
 charts/spiderpool/templates/configmap.yaml |   4 +-
 docs/reference/crd-spiderclaimparameter.md |   8 +-
 docs/usage/dra.md                          |  87 +++++--------
 docs/usage/dra_zh_CN.md                    |  36 +-----
 test/Makefile                              |  11 +-
 test/doc/dra.md                            |   3 +-
 test/e2e/common/resourceclaim.go           |  24 ++++
 test/e2e/common/spiderpool.go              |  14 +++
 test/e2e/dra/dra_test.go                   | 135 +++++++++++++++++----
 9 files changed, 203 insertions(+), 119 deletions(-)

diff --git a/charts/spiderpool/templates/configmap.yaml b/charts/spiderpool/templates/configmap.yaml
index 10b74f83a2..5ebd0e48fe 100644
--- a/charts/spiderpool/templates/configmap.yaml
+++ b/charts/spiderpool/templates/configmap.yaml
@@ -26,12 +26,10 @@ data:
     {{- else}}
     clusterSubnetDefaultFlexibleIPNumber: 0
     {{- end }}
-    {{- if .Values.dra.enabled }}
     dra:
-      enabled: true
+      enabled: {{ .Values.dra.enabled }}
       cdiRootPath: {{ .Values.dra.cdiRootPath }}
       libraryPath: {{ .Values.dra.libraryPath }}
-    {{- end }}
     {{- if .Values.multus.multusCNI.install }}
 ---
 kind: ConfigMap
diff --git a/docs/reference/crd-spiderclaimparameter.md b/docs/reference/crd-spiderclaimparameter.md
index bc608d559e..296c208fb7 100644
--- a/docs/reference/crd-spiderclaimparameter.md
+++ b/docs/reference/crd-spiderclaimparameter.md
@@ -13,7 +13,10 @@ metadata:
   annotations:
     dra.spidernet.io/cdi-version: 0.6.0
 spec:
-  rdmaAcc: false
+  netResources:
+    spidernet.io/shared-rdma-device: 1
+  ippools:
+  - pool
 ```

 ## Spidercoordinators definition
@@ -30,4 +33,5 @@ This is the Spidercoordinators spec for users to configure.

 | Field        | Description                                                   | Schema            | Validation | Values     | Default |
 |--------------|---------------------------------------------------------------|-------------------|------------|------------|---------|
-| rdmaAcc      | TODO                                                          | bool              | optional   | true,false | false   |
+| netResources | Device-plugin resources declared for the Pod                  | map[string]string | optional   | nil        | nil     |
+| ippools      | A list of IP pools (subnets) the Pod will use, for scheduling | []string          | optional   | []string{} | empty   |
diff --git a/docs/usage/dra.md b/docs/usage/dra.md
index 0b8e775ce8..2ff39a80d5 100644
--- a/docs/usage/dra.md
+++ b/docs/usage/dra.md
@@ -5,8 +5,6 @@
 Dynamic-Resource-Allocation (DRA) is a new feature introduced by Kubernetes that puts resource scheduling in the hands of third-party developers. It provides an API more akin to a storage persistent volume, instead of the countable model (e.g., "nvidia.com/gpu: 2") that device-plugin used to request access to resources. The main benefit is a more flexible and dynamic allocation of hardware resources, which improves resource utilization and enhances resource scheduling, enabling Pods to be scheduled onto the most suitable nodes. DRA is currently available as an alpha feature in Kubernetes 1.26 (December 2022 release), driven by Nvidia and Intel.

 Spiderpool currently integrates with the DRA framework, which allows for the following, but not limited to:

-* Enabling RDMA hardware resources.
-* Enables the use and scheduling of RDMA hardware resources, mounting key linux so(shared object) files and setting environment variables.
 * Automatically scheduling Pods to appropriate nodes based on their subnets and NICs, to prevent Pods from failing to start after being scheduled to a node.
 * Unifying the resource declarations of multiple device-plugins.
 * Continuously updated; see the [RoadMap](../develop/roadmap.md) for details.
@@ -65,11 +63,8 @@ Spiderpool currently integrates with the DRA framework, which allows for the fol
    ```
    helm repo add spiderpool https://spidernet-io.github.io/spiderpool
    helm repo update spiderpool
-   helm install spiderpool spiderpool/spiderpool --namespace kube-system --set dra.enabled=true \
-     --set dra.librarypath="/usr/lib/libtest.so"
+   helm install spiderpool spiderpool/spiderpool --namespace kube-system --set dra.enabled=true
    ```

-   > Specify the path to the so file via dra.librarypath, which will be mounted to the Pod's container via CDI. Note that this so file needs to exist on the host.
-
 4. Verify the installation

    Check that the Spiderpool pod is running correctly, and check for the presence of the resourceclass resource:
@@ -131,48 +126,46 @@ Spiderpool currently integrates with the DRA framework, which allows for the fol
    ```
    ~# export NAME=demo
    apiVersion: spiderpool.spidernet.io/v2beta1
    kind: SpiderClaimParameter
-   metadata.
+   metadata:
      name: ${NAME}
-   spec:
-     rdmaAcc: true
    ---
-   ApiVersion: resource.k8s.io/v1alpha2
+   apiVersion: resource.k8s.io/v1alpha2
    kind: ResourceClaimTemplate
-   metadata: ${NAME}
+   metadata:
+     name: ${NAME}
    spec:
      spec:
-       resourceClassName: netresources.k8s.io/valpha2
+       resourceClassName: netresources.spidernet.io
        parametersRef:
          apiGroup: spiderpool.spidernet.io
          kind: SpiderClaimParameter
          name: ${NAME}
    ---
    apiVersion: apps/v1
    kind: Deployment
+   metadata:
+     name: ${NAME}
    spec:
      replicas: 2
      selector:
        matchLabels:
          app: ${NAME}
      template:
        metadata:
          annotations:
            v1.multus-cni.io/default-network: kube-system/macvlan-conf
          labels:
            app: ${NAME}
        spec:
          containers:
          - name: ctr
            image: nginx
            resources:
              claims:
              - name: ${NAME}
          resourceClaims:
          - name: ${NAME}
            source:
              resourceClaimTemplateName: ${NAME}
    ```
@@ -190,39 +183,23 @@ Spiderpool currently integrates with the DRA framework, which allows for the fol
    ```
    ~# kubectl get resourceclaim
    NAME                               RESOURCECLASSNAME           ALLOCATIONMODE         STATE                AGE
    demo-745fb4c498-72g7g-demo-7d458   netresources.spidernet.io   WaitForFirstConsumer   allocated,reserved   20d

    ~# cat /var/run/cdi/k8s.netresources.spidernet.io-claim_1e15705a-62fe-4694-8535-93a5f0ccf996.yaml
    ---
    cdiVersion: 0.6.0
    containerEdits: {}
    devices:
    - containerEdits:
        env:
        - DRA_CLAIM_UID=1e15705a-62fe-4694-8535-93a5f0ccf996
-       - LD_PRELOAD=libtest.so
-       mounts:
-       - containerPath: /usr/lib/libtest.so
-         hostPath: /usr/lib/libtest.so
-         options:
-         - ro
-         - nosuid
-         - nodev
-         - bind
-       - containerPath: /usr/lib64/libtest.so
-         hostPath: /usr/lib/libtest.so
-         options:
-         - ro
-         - nosuid
-         - nodev
-         - bind
      name: 1e15705a-62fe-4694-8535-93a5f0ccf996
    kind: k8s.netresources.spidernet.io/claim
    ```

    This shows that the ResourceClaim has been created, and STATE shows allocated and reserved, indicating that it has been used by the pod. Spiderpool has also generated a CDI file for the ResourceClaim, which describes the files and environment variables to be mounted.

-   Check that the pod is Running and verify that the so file is mounted and the environment variable (LD_PRELOAD) is declared.
+   Check that the pod is Running and verify that the environment variable (DRA_CLAIM_UID) is declared.
@@ -230,13 +207,11 @@ Spiderpool currently integrates with the DRA framework, which allows for the fol
    ```
    ~# kubectl get po
    nginx-745fb4c498-72g7g   1/1     Running   0          20m
    nginx-745fb4c498-s92qr   1/1     Running   0          20m
    ~# kubectl exec -it nginx-745fb4c498-72g7g sh
-   ~# ls /usr/lib/libtest.so
-   /usr/lib/libtest.so
-   ~# printenv LD_PRELOAD
-   libtest.so
+   ~# printenv DRA_CLAIM_UID
+   1e15705a-62fe-4694-8535-93a5f0ccf996
    ```

-   You can see that the Pod's containers have correctly mounted the so files and environment variables, and your containers are ready to use the so files you have mounted.
+   You can see that the Pod's containers have the environment variable declared correctly, which shows that DRA is working.
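+
+   As an additional sanity check, an application in the Pod can read the injected variable itself. A minimal, illustrative Go sketch (only the DRA_CLAIM_UID variable name comes from the CDI file above; the program itself is hypothetical):
+
+   ```go
+   package main
+
+   import (
+       "fmt"
+       "os"
+   )
+
+   func main() {
+       // DRA_CLAIM_UID is injected through the CDI containerEdits shown above.
+       uid, ok := os.LookupEnv("DRA_CLAIM_UID")
+       if !ok {
+           fmt.Println("DRA_CLAIM_UID not set: the container was not started with a DRA claim")
+           os.Exit(1)
+       }
+       fmt.Printf("running with ResourceClaim %s\n", uid)
+   }
+   ```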
 ## Welcome to try it out
diff --git a/docs/usage/dra_zh_CN.md b/docs/usage/dra_zh_CN.md
index 55729f7429..8c98f2ced6 100644
--- a/docs/usage/dra_zh_CN.md
+++ b/docs/usage/dra_zh_CN.md
@@ -6,7 +6,6 @@
 目前 Spiderpool 已经集成 DRA 框架，基于该功能可实现以下但不限于的能力：

-* 实现 RDMA 硬件资源的使用和调度，挂载关键 linux so(shared object) 文件及设置环境变量
 * 可根据 Pod 使用的子网和网卡信息，自动调度到合适的节点，避免 Pod 调度到节点之后无法启动
 * 统一多个 device-plugin 的资源声明方式
 * 持续更新, 详见 [RoadMap](../develop/roadmap.md)
@@ -68,12 +67,9 @@
     helm repo update spiderpool
-    helm install spiderpool spiderpool/spiderpool --namespace kube-system --set dra.enabled=true \
-      --set dra.librarypath="/usr/lib/libtest.so"
+    helm install spiderpool spiderpool/spiderpool --namespace kube-system --set dra.enabled=true
     ```

-    > 通过 dra.librarypath 指定 so 文件的路径，这将会通过 CDI 挂载到 Pod 的容器中. 注意此 so 文件需要存在于主机上。
-
 4. 验证安装

     检查 Spiderpool pod 是否正常 running, 并检查是否存在 resourceclass 资源：
@@ -137,8 +133,6 @@
     kind: SpiderClaimParameter
     metadata:
       name: ${NAME}
-    spec:
-      rdmaAcc: true
     ---
     apiVersion: resource.k8s.io/v1alpha2
     kind: ResourceClaimTemplate
@@ -182,7 +176,7 @@

     > 创建一个 ResourceClaimTemplate, K8s 将会根据这个 ResourceClaimTemplate 为每个 Pod 创建自己独有的 Resourceclaim。该 Resourceclaim 的生命周期与该 Pod 保持一致。
     >
-    > SpiderClaimParameter 用于扩展 ResourceClaim 的配置参数，将会影响 ResourceClaim 的调度以及其 CDI 文件的生成。本例子中，设置 rdmaAcc 为 true，将会影响是否挂载配置的 so 文件。
+    > SpiderClaimParameter 用于扩展 ResourceClaim 的配置参数，将会影响 ResourceClaim 的调度以及其 CDI 文件的生成。
     >
     > Pod 的 container 通过在 Resources 中声明 claims 的使用，这将影响 containerd 所需要的资源。容器运行时会将该 claim 对应的 CDI 文件翻译为 OCI Spec 配置，从而决定 container 的创建。
     >
@@ -204,29 +198,13 @@
         - containerEdits:
             env:
             - DRA_CLAIM_UID=1e15705a-62fe-4694-8535-93a5f0ccf996
-            - LD_PRELOAD=libtest.so
-            mounts:
-            - containerPath: /usr/lib/libtest.so
-              hostPath: /usr/lib/libtest.so
-              options:
-              - ro
-              - nosuid
-              - nodev
-              - bind
-            - containerPath: /usr/lib64/libtest.so
-              hostPath: /usr/lib/libtest.so
-              options:
-              - ro
-              - nosuid
-              - nodev
-              - bind
         name: 1e15705a-62fe-4694-8535-93a5f0ccf996
     kind: k8s.netresources.spidernet.io/claim
     ```

     这里显示 ResourceClaim 已经被创建，并且 STATE 显示 allocated 和 reserved，说明已经被 pod 使用。并且 spiderpool 已经为该 ResourceClaim 生成了对应的 CDI 文件。CDI 文件描述了需要挂载的文件和环境变量等。

-    检查 Pod 是否 Running，并且验证是否挂载 so 文件以及声明环境变量(LD_PRELOAD)：
+    检查 Pod 是否 Running，并且验证 Pod 是否指定了环境变量 `DRA_CLAIM_UID`：
@@ -234,13 +212,11 @@
     nginx-745fb4c498-72g7g   1/1     Running   0          20m
     nginx-745fb4c498-s92qr   1/1     Running   0          20m
     ~# kubectl exec -it nginx-745fb4c498-72g7g sh
-    ~# ls /usr/lib/libtest.so
-    /usr/lib/libtest.so
-    ~# printenv LD_PRELOAD
-    libtest.so
+    ~# printenv DRA_CLAIM_UID
+    1e15705a-62fe-4694-8535-93a5f0ccf996
     ```

-    可以看到 Pod 的容器已经正确挂载 so 文件和环境变量，您的容器已经可以正常使用你挂载的 so 文件。
+    可以看到 Pod 的容器已经正确写入环境变量，说明 DRA 工作正常。

 ## 欢迎试用
diff --git a/test/Makefile b/test/Makefile
index 433fba1032..4b48a30fc3 100644
--- a/test/Makefile
+++ b/test/Makefile
@@ -162,8 +162,8 @@ setup_kind:
 		if [ "${E2E_SPIDERPOOL_ENABLE_DRA}" == "true" ]; then \
 			sed -i '$$ a\  DynamicResourceAllocation: true' $${NEW_KIND_YAML} ; \
 			printf 'containerdConfigPatches: \n# Enable CDI as described in https://tags.cncf.io/container-device-interface#containerd-configuration\n- |-\n  [plugins."io.containerd.grpc.v1.cri"]\n    enable_cdi = true\n  ' >> $${NEW_KIND_YAML} ; \
-		fi \
-		fi
+		fi ; \
+	fi ; \
 	$(QUIET) cat $(CLUSTER_DIR)/$(E2E_CLUSTER_NAME)/kind-config.yaml ; \
 	echo "-------------" ; \
 	KIND_OPTION="" ; \
@@ -468,9 +468,12 @@ e2e_test:
 		export INSTALL_OVERLAY_CNI=$(INSTALL_OVERLAY_CNI) ; \
 		export E2E_SPIDERSUBNET_ENABLED=$(E2E_SPIDERPOOL_ENABLE_SUBNET) ; \
-		K8S_VERSION=` kubectl version -o json --kubeconfig $(E2E_KUBECONFIG) | jq '.serverVersion.gitVersion' ` ; \
-		echo "k8s version: $${K8S_VERSION}" ; \
-		if [ $$(echo -e "$${K8S_VERSION}\nv1.29.0" | sort -V | head -n1) == "v1.29.0" ] && [ "${E2E_SPIDERPOOL_ENABLE_DRA}" == "true" ]; then \
+		K8S_VERSION=` kubectl version -o json --kubeconfig $(E2E_KUBECONFIG) | jq -r '.serverVersion.gitVersion' ` ; \
+		if [ $$(echo -e "$${K8S_VERSION}\nv1.29.0" | sort -V | head -n1) = "v1.29.0" ] && [ "${E2E_SPIDERPOOL_ENABLE_DRA}" == "true" ]; then \
+			echo "k8s version $${K8S_VERSION} >= v1.29.0, enabling the DRA e2e tests" ; \
 			export E2E_SPIDERPOOL_ENABLE_DRA=true ; \
+		else \
+			echo "k8s version $${K8S_VERSION} < v1.29.0, disabling the DRA e2e tests" ; \
+			export E2E_SPIDERPOOL_ENABLE_DRA=false ; \
 		fi ; \
 		rm -f $(E2E_LOG_FILE) || true ; \
 		echo "=========== before test `date` ===========" >> $(E2E_LOG_FILE) ; \
diff --git a/test/doc/dra.md b/test/doc/dra.md
index 889b5c9d25..ffbc23c8b8 100644
--- a/test/doc/dra.md
+++ b/test/doc/dra.md
@@ -2,4 +2,5 @@

 | Case ID | Title                                                        | Priority | Smoke | Status | Other |
 | ------- | ------------------------------------------------------------ | -------- | ----- | ------ | ----- |
-| Q00001  | Creating a Pod to verify DRA if works                        | p1       | true  | done   |       |
+| Q00001  | Creating a Pod to verify DRA works with rdmaAcc set to true  | p1       | true  | done   |       |
+| Q00002  | Creating a Pod to verify DRA works with rdmaAcc set to false | p1       | true  | done   |       |
diff --git a/test/e2e/common/resourceclaim.go b/test/e2e/common/resourceclaim.go
index 00ce049a1a..bed7206c3a 100644
--- a/test/e2e/common/resourceclaim.go
+++ b/test/e2e/common/resourceclaim.go
@@ -10,9 +10,33 @@ import (
 	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	apitypes "k8s.io/apimachinery/pkg/types"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )

+// ListResourceClaim lists the ResourceClaims in the cluster that match the given options.
+func ListResourceClaim(f *frame.Framework, opts ...client.ListOption) (*resourcev1alpha2.ResourceClaimList, error) {
+	list := resourcev1alpha2.ResourceClaimList{}
+	if err := f.ListResource(&list, opts...); err != nil {
+		return nil, err
+	}
+
+	return &list, nil
+}
+
+// GetResourceClaim returns the ResourceClaim with the given name in the given namespace.
+func GetResourceClaim(f *frame.Framework, name, ns string) (*resourcev1alpha2.ResourceClaim, error) {
+	if name == "" || f == nil {
+		return nil, errors.New("wrong input")
+	}
+
+	v := apitypes.NamespacedName{Name: name, Namespace: ns}
+	existing := &resourcev1alpha2.ResourceClaim{}
+	e := f.GetResource(v, existing)
+	if e != nil {
+		return nil, e
+	}
+	return existing, nil
+}
+
 func CreateResourceClaimTemplate(f *frame.Framework, rct *resourcev1alpha2.ResourceClaimTemplate, opts ...client.CreateOption) error {
 	if f == nil || rct == nil {
 		return fmt.Errorf("invalid parameters")
diff --git a/test/e2e/common/spiderpool.go b/test/e2e/common/spiderpool.go
index 17cd3d4a59..7b30285db7 100644
--- a/test/e2e/common/spiderpool.go
+++ b/test/e2e/common/spiderpool.go
@@ -45,6 +45,20 @@ type SpiderConfigMap struct {
 	ClusterSubnetDefaultFlexibleIPNum int `yaml:"clusterSubnetDefaultFlexibleIPNumber"`
 }

+// GetSpiderClaimParameter returns the SpiderClaimParameter with the given name in the given namespace.
+func GetSpiderClaimParameter(f *frame.Framework, name, ns string) (*v1.SpiderClaimParameter, error) {
+	if name == "" || f == nil {
+		return nil, errors.New("wrong input")
+	}
+
+	v := apitypes.NamespacedName{Name: name, Namespace: ns}
+	existing := &v1.SpiderClaimParameter{}
+	e := f.GetResource(v, existing)
+	if e != nil {
+		return nil, e
+	}
+	return existing, nil
+}
+
 func CreateSpiderClaimParameter(f *frame.Framework, scp *v1.SpiderClaimParameter, opts ...client.CreateOption) error {
 	if f == nil || scp == nil {
 		return fmt.Errorf("invalid parameters")
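The new `ListResourceClaim` and `GetResourceClaim` helpers above are consumed by the e2e spec below. A minimal usage sketch, assuming the suite's existing Ginkgo/Gomega context (the claim name and namespace in the last lines are hypothetical):

```go
// Sketch: collect the UID of every ResourceClaim in the cluster, so a test
// can match them against the DRA_CLAIM_UID env var injected into each Pod,
// as the Q00002 case below does.
rcList, err := common.ListResourceClaim(frame)
Expect(err).NotTo(HaveOccurred())

claimUIDs := make(map[string]struct{}, len(rcList.Items))
for _, rc := range rcList.Items {
	claimUIDs[string(rc.UID)] = struct{}{}
}

// A single claim can also be fetched directly by name and namespace:
rc, err := common.GetResourceClaim(frame, "demo-claim", "default") // hypothetical name/namespace
Expect(err).NotTo(HaveOccurred())
GinkgoWriter.Printf("claim %s has UID %s\n", rc.Name, rc.UID)
```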
diff --git a/test/e2e/dra/dra_test.go b/test/e2e/dra/dra_test.go
index f852fa399d..070510702f 100644
--- a/test/e2e/dra/dra_test.go
+++ b/test/e2e/dra/dra_test.go
@@ -83,6 +83,29 @@ var _ = Describe("dra", Label("dra"), func() {
 		}
 		Expect(frame.CreateSpiderMultusInstance(nad)).NotTo(HaveOccurred())

+		DeferCleanup(func() {
+			GinkgoWriter.Printf("delete spiderMultusConfig %v/%v. \n", namespace, multusNadName)
+			//Expect(frame.DeleteSpiderMultusInstance(namespace, multusNadName)).NotTo(HaveOccurred())
+
+			GinkgoWriter.Printf("delete namespace %v. \n", namespace)
+			//Expect(frame.DeleteNamespace(namespace)).NotTo(HaveOccurred())
+
+			if frame.Info.IpV4Enabled {
+				GinkgoWriter.Printf("delete v4 ippool %v. \n", v4PoolName)
+				//Expect(common.DeleteIPPoolByName(frame, v4PoolName)).NotTo(HaveOccurred())
+			}
+			if frame.Info.IpV6Enabled {
+				GinkgoWriter.Printf("delete v6 ippool %v. \n", v6PoolName)
+				//Expect(common.DeleteIPPoolByName(frame, v6PoolName)).NotTo(HaveOccurred())
+			}
+
+			//Expect(
+			//	common.DeleteSpiderClaimParameter(frame, spiderClaimName, namespace),
+			//).NotTo(HaveOccurred())
+		})
+	})
+
+	It("Creating a Pod to verify DRA works with rdmaAcc set to true", Label("Q00001"), func() {
 		Expect(common.CreateSpiderClaimParameter(frame, &spiderpoolv2beta1.SpiderClaimParameter{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      spiderClaimName,
@@ -99,29 +122,6 @@ var _ = Describe("dra", Label("dra"), func() {
 			},
 		})).NotTo(HaveOccurred())

-		DeferCleanup(func() {
-			GinkgoWriter.Printf("delete spiderMultusConfig %v/%v. \n", namespace, multusNadName)
-			Expect(frame.DeleteSpiderMultusInstance(namespace, multusNadName)).NotTo(HaveOccurred())
-
-			GinkgoWriter.Printf("delete namespace %v. \n", namespace)
-			Expect(frame.DeleteNamespace(namespace)).NotTo(HaveOccurred())
-
-			if frame.Info.IpV4Enabled {
-				GinkgoWriter.Printf("delete v4 ippool %v. \n", v4PoolName)
-				Expect(common.DeleteIPPoolByName(frame, v4PoolName)).NotTo(HaveOccurred())
-			}
-			if frame.Info.IpV6Enabled {
-				GinkgoWriter.Printf("delete v6 ippool %v. \n", v6PoolName)
-				Expect(common.DeleteIPPoolByName(frame, v6PoolName)).NotTo(HaveOccurred())
-			}
-
-			Expect(
-				common.DeleteSpiderClaimParameter(frame, spiderClaimName, namespace),
-			).NotTo(HaveOccurred())
-		})
-	})
-
-	It("Creating a Pod to verify DRA if works", Label("Q00001"), func() {
 		// create resourceclaimtemplate
 		Expect(
 			common.CreateResourceClaimTemplate(frame, &v1alpha2.ResourceClaimTemplate{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      spiderClaimName,
 					Namespace: namespace,
 				},
 				Spec: v1alpha2.ResourceClaimTemplateSpec{
 					Spec: v1alpha2.ResourceClaimSpec{
 						ResourceClassName: constant.DRADriverName,
 						ParametersRef: &v1alpha2.ResourceClaimParametersReference{
 							APIGroup: constant.SpiderpoolAPIGroup,
 							Kind:     constant.KindSpiderClaimParameter,
 							Name:     spiderClaimName,
 						},
 					},
 				},
 			})).NotTo(HaveOccurred())
@@ -209,5 +209,94 @@ var _ = Describe("dra", Label("dra"), func() {
 				Expect(string(executeCommandResult)).To(Equal(soBaseName), "unexpected result: %s", executeCommandResult)
 			}
 		})
+
+		It("Creating a Pod to verify DRA works with rdmaAcc set to false", Label("Q00002"), func() {
+			Expect(common.CreateSpiderClaimParameter(frame, &spiderpoolv2beta1.SpiderClaimParameter{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      spiderClaimName,
+					Namespace: namespace,
+					// kind k8s v1.29.0 -> containerd v1.7.1 -> CDI v0.5.4.
+					// v0.5.4 doesn't support CDI spec version 0.6.0, so pin the
+					// CDI version through the annotation.
+					Annotations: map[string]string{
+						constant.AnnoDraCdiVersion: "0.5.0",
+					},
+				},
+				Spec: spiderpoolv2beta1.ClaimParameterSpec{
+					RdmaAcc: false,
+				},
+			})).NotTo(HaveOccurred())
+
+			// create resourceclaimtemplate
+			Expect(
+				common.CreateResourceClaimTemplate(frame, &v1alpha2.ResourceClaimTemplate{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      spiderClaimName,
+						Namespace: namespace,
+					},
+					Spec: v1alpha2.ResourceClaimTemplateSpec{
+						Spec: v1alpha2.ResourceClaimSpec{
+							ResourceClassName: constant.DRADriverName,
+							ParametersRef: &v1alpha2.ResourceClaimParametersReference{
+								APIGroup: constant.SpiderpoolAPIGroup,
+								Kind:     constant.KindSpiderClaimParameter,
+								Name:     spiderClaimName,
+							},
+						},
+					},
+				})).NotTo(HaveOccurred())
+
+			podIppoolsAnno := types.AnnoPodIPPoolsValue{
+				types.AnnoIPPoolItem{
+					NIC: common.NIC1,
+				},
+				types.AnnoIPPoolItem{
+					NIC: common.NIC2,
+				},
+			}
+			if frame.Info.IpV4Enabled {
+				podIppoolsAnno[0].IPv4Pools = []string{common.SpiderPoolIPv4PoolDefault}
+				podIppoolsAnno[1].IPv4Pools = []string{v4PoolName}
+			}
+			if frame.Info.IpV6Enabled {
+				podIppoolsAnno[0].IPv6Pools = []string{common.SpiderPoolIPv6PoolDefault}
+				podIppoolsAnno[1].IPv6Pools = []string{v6PoolName}
+			}
+			podAnnoMarshal, err := json.Marshal(podIppoolsAnno)
+			Expect(err).NotTo(HaveOccurred())
+			var annotations = make(map[string]string)
+			annotations[common.MultusNetworks] = fmt.Sprintf("%s/%s", namespace, multusNadName)
+			annotations[constant.AnnoPodIPPools] = string(podAnnoMarshal)
+			deployObject := common.GenerateDraDeploymentYaml(depName, spiderClaimName, namespace, int32(1))
+			deployObject.Spec.Template.Annotations = annotations
+			Expect(frame.CreateDeployment(deployObject)).NotTo(HaveOccurred())
+
+			ctx, cancel := context.WithTimeout(context.Background(), common.PodStartTimeout)
+			defer cancel()
+			depObject, err := frame.WaitDeploymentReady(depName, namespace, ctx)
+			Expect(err).NotTo(HaveOccurred(), "waiting for deploy ready failed: %v ", err)
+			podList, err := frame.GetPodListByLabel(depObject.Spec.Template.Labels)
+			Expect(err).NotTo(HaveOccurred(), "failed to get podList: %v ", err)
+
+			rcList, err := common.ListResourceClaim(frame)
+			Expect(err).NotTo(HaveOccurred(), "failed to get resourceclaim list: %v ", err)
+
+			resourceClaimUidMap := make(map[string]struct{}, len(rcList.Items))
+			for _, rc := range rcList.Items {
+				resourceClaimUidMap[string(rc.ObjectMeta.UID)] = struct{}{}
+			}
+
+			GinkgoWriter.Printf("resourceClaimUidMap: %v\n", resourceClaimUidMap)
+			var executeCommandResult []byte
+			for _, pod := range podList.Items {
+				checkEnvCommand := "printenv DRA_CLAIM_UID"
+				executeCommandResult, err = frame.ExecCommandInPod(pod.Name, pod.Namespace, checkEnvCommand, ctx)
+				Expect(err).NotTo(HaveOccurred(), "failed to check the value of env DRA_CLAIM_UID: %v", err)
+
+				executeCommandResult = bytes.TrimSuffix(executeCommandResult, []byte("\n"))
+				_, ok := resourceClaimUidMap[string(executeCommandResult)]
+				Expect(ok).To(BeTrue(), "the value of DRA_CLAIM_UID does not match any ResourceClaim UID.")
+			}
+		})
 	})
 })
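The Q00002 case above verifies the claim from inside the Pod. The node-side artifact can be checked as well; a minimal, illustrative sketch (the /var/run/cdi root and the claim-UID file naming come from the docs in this patch, while the program itself is hypothetical and not part of the change):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// findClaimCDIFile searches the CDI root for the spec that Spiderpool generates
// for a ResourceClaim, e.g. /var/run/cdi/k8s.netresources.spidernet.io-claim_<uid>.yaml.
func findClaimCDIFile(cdiRoot, claimUID string) (string, error) {
	entries, err := os.ReadDir(cdiRoot)
	if err != nil {
		return "", err
	}
	for _, e := range entries {
		if strings.Contains(e.Name(), claimUID) {
			return filepath.Join(cdiRoot, e.Name()), nil
		}
	}
	return "", fmt.Errorf("no CDI spec for claim %s under %s", claimUID, cdiRoot)
}

func main() {
	if len(os.Args) != 2 {
		fmt.Fprintln(os.Stderr, "usage: cdicheck <resourceclaim-uid>")
		os.Exit(2)
	}
	path, err := findClaimCDIFile("/var/run/cdi", os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("found CDI spec:", path)
}
```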