From 58b0b16e58447b334c497b49b023a585b9a1bfc3 Mon Sep 17 00:00:00 2001 From: Zespre Chang Date: Sun, 21 Jan 2024 23:59:37 +0800 Subject: [PATCH 1/3] test: add ippool tests Signed-off-by: Zespre Chang --- go.mod | 3 + pkg/controller/ippool/common.go | 253 ++++++++++++++++++ pkg/controller/ippool/controller.go | 134 +--------- pkg/controller/ippool/controller_test.go | 219 +++++++++++++++ pkg/util/fakeclient/ippool.go | 64 +++++ .../fakeclient/networkattachmentdefinition.go | 61 +++++ pkg/util/fakeclient/pod.go | 64 +++++ 7 files changed, 665 insertions(+), 133 deletions(-) create mode 100644 pkg/controller/ippool/common.go create mode 100644 pkg/controller/ippool/controller_test.go create mode 100644 pkg/util/fakeclient/ippool.go create mode 100644 pkg/util/fakeclient/networkattachmentdefinition.go create mode 100644 pkg/util/fakeclient/pod.go diff --git a/go.mod b/go.mod index e1209a6..fa084e9 100644 --- a/go.mod +++ b/go.mod @@ -18,6 +18,7 @@ require ( github.com/rancher/wrangler v1.1.1 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.7.0 + github.com/stretchr/testify v1.8.4 golang.org/x/sync v0.3.0 k8s.io/api v0.27.9 k8s.io/apiextensions-apiserver v0.27.9 @@ -26,6 +27,8 @@ require ( kubevirt.io/api v1.1.0 ) +require github.com/pmezard/go-difflib v1.0.0 // indirect + require ( github.com/gorilla/mux v1.8.0 github.com/rancher/dynamiclistener v0.3.5 // indirect diff --git a/pkg/controller/ippool/common.go b/pkg/controller/ippool/common.go new file mode 100644 index 0000000..bb1df99 --- /dev/null +++ b/pkg/controller/ippool/common.go @@ -0,0 +1,253 @@ +package ippool + +import ( + "encoding/json" + "fmt" + "net" + + "github.com/rancher/wrangler/pkg/kv" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/harvester/vm-dhcp-controller/pkg/apis/network.harvesterhci.io" + networkv1 "github.com/harvester/vm-dhcp-controller/pkg/apis/network.harvesterhci.io/v1alpha1" + "github.com/harvester/vm-dhcp-controller/pkg/config" +) + +func prepareAgentPod( + ipPool *networkv1.IPPool, + noDHCP bool, + agentNamespace string, + clusterNetwork string, + agentServiceAccountName string, + agentImage *config.Image, +) *corev1.Pod { + name := fmt.Sprintf("%s-%s-agent", ipPool.Namespace, ipPool.Name) + + nadNamespace, nadName := kv.RSplit(ipPool.Spec.NetworkName, "/") + networks := []Network{ + { + Namespace: nadNamespace, + Name: nadName, + InterfaceName: "eth1", + }, + } + networksStr, _ := json.Marshal(networks) + + _, ipNet, _ := net.ParseCIDR(ipPool.Spec.IPv4Config.CIDR) + prefixLength, _ := ipNet.Mask.Size() + + args := []string{ + "--ippool-ref", + fmt.Sprintf("%s/%s", ipPool.Namespace, ipPool.Name), + } + if noDHCP { + args = append(args, "--dry-run") + } + + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + multusNetworksAnnotationKey: string(networksStr), + }, + Labels: map[string]string{ + vmDHCPControllerLabelKey: "agent", + ipPoolNamespaceLabelKey: ipPool.Namespace, + ipPoolNameLabelKey: ipPool.Name, + }, + Name: name, + Namespace: agentNamespace, + }, + Spec: corev1.PodSpec{ + Affinity: &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: network.GroupName + "/" + clusterNetwork, + Operator: corev1.NodeSelectorOpIn, + Values: []string{ + "true", + }, + }, + }, + }, + }, + }, + }, + }, 
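+			// The ip-setter init container below runs with NET_ADMIN so it can
+			// assign the pool's server IP (ServerIP/prefixLength) to the pod's
+			// Multus-attached interface before the agent container starts.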
+ ServiceAccountName: agentServiceAccountName, + InitContainers: []corev1.Container{ + { + Name: "ip-setter", + Image: "busybox", + Command: []string{ + "/bin/sh", + "-c", + fmt.Sprintf(setIPAddrScript, ipPool.Spec.IPv4Config.ServerIP, prefixLength), + }, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &runAsUserID, + RunAsGroup: &runAsGroupID, + Capabilities: &corev1.Capabilities{ + Add: []corev1.Capability{ + "NET_ADMIN", + }, + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Name: "agent", + Image: agentImage.String(), + Args: args, + Env: []corev1.EnvVar{ + { + Name: "VM_DHCP_AGENT_NAME", + Value: name, + }, + }, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &runAsUserID, + RunAsGroup: &runAsGroupID, + Capabilities: &corev1.Capabilities{ + Add: []corev1.Capability{ + "NET_ADMIN", + }, + }, + }, + LivenessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/healthz", + Port: intstr.FromInt(8080), + }, + }, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/readyz", + Port: intstr.FromInt(8080), + }, + }, + }, + }, + }, + }, + } +} + +func setRegisteredCondition(ipPool *networkv1.IPPool, status corev1.ConditionStatus, reason, message string) { + networkv1.Registered.SetStatus(ipPool, string(status)) + networkv1.Registered.Reason(ipPool, reason) + networkv1.Registered.Message(ipPool, message) +} + +func setCacheReadyCondition(ipPool *networkv1.IPPool, status corev1.ConditionStatus, reason, message string) { + networkv1.CacheReady.SetStatus(ipPool, string(status)) + networkv1.CacheReady.Reason(ipPool, reason) + networkv1.CacheReady.Message(ipPool, message) +} + +func setAgentReadyCondition(ipPool *networkv1.IPPool, status corev1.ConditionStatus, reason, message string) { + networkv1.AgentReady.SetStatus(ipPool, string(status)) + networkv1.AgentReady.Reason(ipPool, reason) + networkv1.AgentReady.Message(ipPool, message) +} + +func setDisabledCondition(ipPool *networkv1.IPPool, status corev1.ConditionStatus, reason, message string) { + networkv1.Disabled.SetStatus(ipPool, string(status)) + networkv1.Disabled.Reason(ipPool, reason) + networkv1.Disabled.Message(ipPool, message) +} + +type ipPoolBuilder struct { + ipPool *networkv1.IPPool +} + +func newIPPoolBuilder(namespace, name string) *ipPoolBuilder { + return &ipPoolBuilder{ + ipPool: &networkv1.IPPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + }, + } +} + +func (b *ipPoolBuilder) NetworkName(networkName string) *ipPoolBuilder { + b.ipPool.Spec.NetworkName = networkName + return b +} + +func (b *ipPoolBuilder) Paused() *ipPoolBuilder { + paused := true + b.ipPool.Spec.Paused = &paused + return b +} + +func (b *ipPoolBuilder) UnPaused() *ipPoolBuilder { + paused := false + b.ipPool.Spec.Paused = &paused + return b +} + +func (b *ipPoolBuilder) ServerIP(serverIP string) *ipPoolBuilder { + b.ipPool.Spec.IPv4Config.ServerIP = serverIP + return b +} + +func (b *ipPoolBuilder) CIDR(cidr string) *ipPoolBuilder { + b.ipPool.Spec.IPv4Config.CIDR = cidr + return b +} + +func (b *ipPoolBuilder) PoolRange(start, end string) *ipPoolBuilder { + b.ipPool.Spec.IPv4Config.Pool.Start = start + b.ipPool.Spec.IPv4Config.Pool.End = end + return b +} + +func (b *ipPoolBuilder) Exclude(exclude []string) *ipPoolBuilder { + b.ipPool.Spec.IPv4Config.Pool.Exclude = exclude + return b +} + +func (b *ipPoolBuilder) AgentPodRef(namespace, name string) *ipPoolBuilder { + if 
b.ipPool.Status.AgentPodRef == nil { + b.ipPool.Status.AgentPodRef = new(networkv1.PodReference) + } + b.ipPool.Status.AgentPodRef.Namespace = namespace + b.ipPool.Status.AgentPodRef.Name = name + return b +} + +func (b *ipPoolBuilder) RegisteredCondition(status corev1.ConditionStatus, reason, message string) *ipPoolBuilder { + setRegisteredCondition(b.ipPool, status, reason, message) + return b +} + +func (b *ipPoolBuilder) CacheReadyCondition(status corev1.ConditionStatus, reason, message string) *ipPoolBuilder { + setCacheReadyCondition(b.ipPool, status, reason, message) + return b +} + +func (b *ipPoolBuilder) AgentReadyCondition(status corev1.ConditionStatus, reason, message string) *ipPoolBuilder { + setAgentReadyCondition(b.ipPool, status, reason, message) + return b +} + +func (b *ipPoolBuilder) DisabledCondition(status corev1.ConditionStatus, reason, message string) *ipPoolBuilder { + setDisabledCondition(b.ipPool, status, reason, message) + return b +} + +func (p *ipPoolBuilder) Build() *networkv1.IPPool { + return p.ipPool +} diff --git a/pkg/controller/ippool/controller.go b/pkg/controller/ippool/controller.go index e7bdc95..ad3c446 100644 --- a/pkg/controller/ippool/controller.go +++ b/pkg/controller/ippool/controller.go @@ -2,9 +2,7 @@ package ippool import ( "context" - "encoding/json" "fmt" - "net" "reflect" "github.com/rancher/wrangler/pkg/kv" @@ -15,7 +13,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" "github.com/harvester/vm-dhcp-controller/pkg/apis/network.harvesterhci.io" networkv1 "github.com/harvester/vm-dhcp-controller/pkg/apis/network.harvesterhci.io/v1alpha1" @@ -283,10 +280,7 @@ func (h *Handler) DeployAgent(ipPool *networkv1.IPPool, status networkv1.IPPoolS return status, fmt.Errorf("could not find clusternetwork for nad %s", ipPool.Spec.NetworkName) } - agent, err := h.prepareAgentPod(ipPool, clusterNetwork) - if err != nil { - return status, err - } + agent := prepareAgentPod(ipPool, h.noDHCP, h.agentNamespace, clusterNetwork, h.agentServiceAccountName, h.agentImage) agentPod, err := h.podClient.Create(agent) if err != nil { @@ -388,132 +382,6 @@ func (h *Handler) MonitorAgent(ipPool *networkv1.IPPool, status networkv1.IPPool return status, nil } -func (h *Handler) prepareAgentPod(ipPool *networkv1.IPPool, clusterNetwork string) (*corev1.Pod, error) { - name := fmt.Sprintf("%s-%s-agent", ipPool.Namespace, ipPool.Name) - - nadNamespace, nadName := kv.RSplit(ipPool.Spec.NetworkName, "/") - networks := []Network{ - { - Namespace: nadNamespace, - Name: nadName, - InterfaceName: "eth1", - }, - } - networksStr, err := json.Marshal(networks) - if err != nil { - return nil, err - } - - _, ipNet, err := net.ParseCIDR(ipPool.Spec.IPv4Config.CIDR) - if err != nil { - return nil, err - } - prefixLength, _ := ipNet.Mask.Size() - - args := []string{ - "--ippool-ref", - fmt.Sprintf("%s/%s", ipPool.Namespace, ipPool.Name), - } - if h.noDHCP { - args = append(args, "--dry-run") - } - - return &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - multusNetworksAnnotationKey: string(networksStr), - }, - Labels: map[string]string{ - vmDHCPControllerLabelKey: "agent", - ipPoolNamespaceLabelKey: ipPool.Namespace, - ipPoolNameLabelKey: ipPool.Name, - }, - Name: name, - Namespace: h.agentNamespace, - }, - Spec: corev1.PodSpec{ - Affinity: &corev1.Affinity{ - NodeAffinity: &corev1.NodeAffinity{ - 
RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ - NodeSelectorTerms: []corev1.NodeSelectorTerm{ - { - MatchExpressions: []corev1.NodeSelectorRequirement{ - { - Key: network.GroupName + "/" + clusterNetwork, - Operator: corev1.NodeSelectorOpIn, - Values: []string{ - "true", - }, - }, - }, - }, - }, - }, - }, - }, - ServiceAccountName: h.agentServiceAccountName, - InitContainers: []corev1.Container{ - { - Name: "ip-setter", - Image: "busybox", - Command: []string{ - "/bin/sh", - "-c", - fmt.Sprintf(setIPAddrScript, ipPool.Spec.IPv4Config.ServerIP, prefixLength), - }, - SecurityContext: &corev1.SecurityContext{ - RunAsUser: &runAsUserID, - RunAsGroup: &runAsGroupID, - Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{ - "NET_ADMIN", - }, - }, - }, - }, - }, - Containers: []corev1.Container{ - { - Name: "agent", - Image: h.agentImage.String(), - Args: args, - Env: []corev1.EnvVar{ - { - Name: "VM_DHCP_AGENT_NAME", - Value: name, - }, - }, - SecurityContext: &corev1.SecurityContext{ - RunAsUser: &runAsUserID, - RunAsGroup: &runAsGroupID, - Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{ - "NET_ADMIN", - }, - }, - }, - LivenessProbe: &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - HTTPGet: &corev1.HTTPGetAction{ - Path: "/healthz", - Port: intstr.FromInt(8080), - }, - }, - }, - ReadinessProbe: &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - HTTPGet: &corev1.HTTPGetAction{ - Path: "/readyz", - Port: intstr.FromInt(8080), - }, - }, - }, - }, - }, - }, - }, nil -} - func isPodReady(pod *corev1.Pod) bool { for _, c := range pod.Status.Conditions { if c.Type == corev1.PodReady { diff --git a/pkg/controller/ippool/controller_test.go b/pkg/controller/ippool/controller_test.go new file mode 100644 index 0000000..75dc934 --- /dev/null +++ b/pkg/controller/ippool/controller_test.go @@ -0,0 +1,219 @@ +package ippool + +import ( + "testing" + + cniv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + "github.com/rancher/wrangler/pkg/genericcondition" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + k8sfake "k8s.io/client-go/kubernetes/fake" + + networkv1 "github.com/harvester/vm-dhcp-controller/pkg/apis/network.harvesterhci.io/v1alpha1" + "github.com/harvester/vm-dhcp-controller/pkg/cache" + "github.com/harvester/vm-dhcp-controller/pkg/config" + "github.com/harvester/vm-dhcp-controller/pkg/generated/clientset/versioned/fake" + "github.com/harvester/vm-dhcp-controller/pkg/ipam" + "github.com/harvester/vm-dhcp-controller/pkg/metrics" + "github.com/harvester/vm-dhcp-controller/pkg/util/fakeclient" +) + +func TestHandler_OnChange(t *testing.T) { + type input struct { + key string + ipPool *networkv1.IPPool + pods []*corev1.Pod + } + + type output struct { + ipPool *networkv1.IPPool + pods []*corev1.Pod + err error + } + + testCases := []struct { + name string + given input + expected output + }{ + { + name: "pause ippool", + given: input{ + key: "default/ippool-1", + ipPool: newIPPoolBuilder("default", "ippool-1"). + Paused(). + AgentPodRef("default", "default-ippool-1-agent"). + Build(), + pods: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "default-ippool-1-agent", + }, + }, + }, + }, + expected: output{ + ipPool: newIPPoolBuilder("default", "ippool-1"). + Paused(). 
+ DisabledCondition(corev1.ConditionTrue, "", ""). + Build(), + }, + }, + } + + for _, tc := range testCases { + clientset := fake.NewSimpleClientset() + err := clientset.Tracker().Add(tc.given.ipPool) + if err != nil { + t.Fatal(err) + } + + var pods []runtime.Object + for _, pod := range tc.given.pods { + pods = append(pods, pod) + } + k8sclientset := k8sfake.NewSimpleClientset(pods...) + handler := Handler{ + agentNamespace: "default", + agentImage: &config.Image{ + Repository: "rancher/harvester-vm-dhcp-controller", + Tag: "main", + }, + cacheAllocator: cache.New(), + ipAllocator: ipam.New(), + metricsAllocator: metrics.New(), + ippoolClient: fakeclient.IPPoolClient(clientset.NetworkV1alpha1().IPPools), + podClient: fakeclient.PodClient(k8sclientset.CoreV1().Pods), + } + + var actual output + + actual.ipPool, actual.err = handler.OnChange(tc.given.key, tc.given.ipPool) + assert.Nil(t, actual.err) + + emptyConditionsTimestamp(tc.expected.ipPool.Status.Conditions) + emptyConditionsTimestamp(actual.ipPool.Status.Conditions) + assert.Equal(t, tc.expected.ipPool, actual.ipPool, tc.name) + + assert.Equal(t, tc.expected.pods, actual.pods) + } +} + +func TestHandler_DeployAgent(t *testing.T) { + type input struct { + key string + ipPool *networkv1.IPPool + nad *cniv1.NetworkAttachmentDefinition + } + + type output struct { + ipPoolStatus networkv1.IPPoolStatus + pod *corev1.Pod + err error + } + + testCases := []struct { + name string + given input + expected output + }{ + { + name: "resume ippool", + given: input{ + key: "default/ippool-1", + ipPool: newIPPoolBuilder("default", "ippool-1"). + ServerIP("192.168.0.2"). + CIDR("192.168.0.0/24"). + NetworkName("default/net-1"). + Build(), + nad: &cniv1.NetworkAttachmentDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + clusterNetworkLabelKey: "provider", + }, + Namespace: "default", + Name: "net-1", + }, + }, + }, + expected: output{ + ipPoolStatus: networkv1.IPPoolStatus{ + AgentPodRef: &networkv1.PodReference{ + Namespace: "default", + Name: "default-ippool-1-agent", + }, + }, + pod: prepareAgentPod( + newIPPoolBuilder("default", "ippool-1"). + ServerIP("192.168.0.2"). + CIDR("192.168.0.0/24"). + NetworkName("default/net-1"). 
+ Build(), + false, + "default", + "provider", + "vdca", + &config.Image{ + Repository: "rancher/harvester-vm-dhcp-controller", + Tag: "main", + }, + ), + }, + }, + } + + nadGVR := schema.GroupVersionResource{ + Group: "k8s.cni.cncf.io", + Version: "v1", + Resource: "network-attachment-definitions", + } + + for _, tc := range testCases { + clientset := fake.NewSimpleClientset(tc.given.ipPool) + if tc.given.nad != nil { + err := clientset.Tracker().Create(nadGVR, tc.given.nad, tc.given.nad.Namespace) + assert.Nil(t, err, "mock resource should add into fake controller tracker") + } + + k8sclientset := k8sfake.NewSimpleClientset() + + handler := Handler{ + agentNamespace: "default", + agentImage: &config.Image{ + Repository: "rancher/harvester-vm-dhcp-controller", + Tag: "main", + }, + agentServiceAccountName: "vdca", + cacheAllocator: cache.New(), + ipAllocator: ipam.New(), + metricsAllocator: metrics.New(), + ippoolClient: fakeclient.IPPoolClient(clientset.NetworkV1alpha1().IPPools), + nadCache: fakeclient.NetworkAttachmentDefinitionCache(clientset.K8sCniCncfIoV1().NetworkAttachmentDefinitions), + podClient: fakeclient.PodClient(k8sclientset.CoreV1().Pods), + } + + var actual output + + actual.ipPoolStatus, actual.err = handler.DeployAgent(tc.given.ipPool, tc.given.ipPool.Status) + assert.Nil(t, actual.err) + + emptyConditionsTimestamp(tc.expected.ipPoolStatus.Conditions) + emptyConditionsTimestamp(actual.ipPoolStatus.Conditions) + assert.Equal(t, tc.expected.ipPoolStatus, actual.ipPoolStatus, tc.name) + + actual.pod, actual.err = handler.podClient.Get("default", "default-ippool-1-agent", metav1.GetOptions{}) + assert.Nil(t, actual.err) + assert.Equal(t, tc.expected.pod, actual.pod) + } +} + +func emptyConditionsTimestamp(conditions []genericcondition.GenericCondition) { + for i := range conditions { + conditions[i].LastTransitionTime = "" + conditions[i].LastUpdateTime = "" + } +} diff --git a/pkg/util/fakeclient/ippool.go b/pkg/util/fakeclient/ippool.go new file mode 100644 index 0000000..8e2a7e5 --- /dev/null +++ b/pkg/util/fakeclient/ippool.go @@ -0,0 +1,64 @@ +package fakeclient + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + + networkv1 "github.com/harvester/vm-dhcp-controller/pkg/apis/network.harvesterhci.io/v1alpha1" + typenetworkv1 "github.com/harvester/vm-dhcp-controller/pkg/generated/clientset/versioned/typed/network.harvesterhci.io/v1alpha1" + ctlnetworkv1 "github.com/harvester/vm-dhcp-controller/pkg/generated/controllers/network.harvesterhci.io/v1alpha1" +) + +type IPPoolClient func(string) typenetworkv1.IPPoolInterface + +func (c IPPoolClient) Update(ipPool *networkv1.IPPool) (*networkv1.IPPool, error) { + return c(ipPool.Namespace).Update(context.TODO(), ipPool, metav1.UpdateOptions{}) +} +func (c IPPoolClient) Get(namespace, name string, options metav1.GetOptions) (*networkv1.IPPool, error) { + panic("implement me") +} +func (c IPPoolClient) Create(*networkv1.IPPool) (*networkv1.IPPool, error) { + panic("implement me") +} +func (c IPPoolClient) Delete(namespace, name string, options *metav1.DeleteOptions) error { + panic("implement me") +} +func (c IPPoolClient) List(namespace string, opts metav1.ListOptions) (*networkv1.IPPoolList, error) { + panic("implement me") +} +func (c IPPoolClient) UpdateStatus(ipPool *networkv1.IPPool) (*networkv1.IPPool, error) { + return c(ipPool.Namespace).UpdateStatus(context.TODO(), ipPool, metav1.UpdateOptions{}) 
+}
+func (c IPPoolClient) Watch(namespace string, opts metav1.ListOptions) (watch.Interface, error) {
+	panic("implement me")
+}
+func (c IPPoolClient) Patch(namespace, name string, pt types.PatchType, data []byte, subresources ...string) (result *networkv1.IPPool, err error) {
+	panic("implement me")
+}
+
+type IPPoolCache func(string) typenetworkv1.IPPoolInterface
+
+func (c IPPoolCache) Get(namespace, name string) (*networkv1.IPPool, error) {
+	return c(namespace).Get(context.TODO(), name, metav1.GetOptions{})
+}
+func (c IPPoolCache) List(namespace string, selector labels.Selector) ([]*networkv1.IPPool, error) {
+	list, err := c(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
+	if err != nil {
+		return nil, err
+	}
+	result := make([]*networkv1.IPPool, 0, len(list.Items))
+	// index into the slice so every pointer refers to a distinct element;
+	// taking the address of the loop variable would alias a single value
+	for i := range list.Items {
+		result = append(result, &list.Items[i])
+	}
+	return result, nil
+}
+func (c IPPoolCache) AddIndexer(indexName string, indexer ctlnetworkv1.IPPoolIndexer) {
+	panic("implement me")
+}
+func (c IPPoolCache) GetByIndex(indexName, key string) ([]*networkv1.IPPool, error) {
+	panic("implement me")
+}
diff --git a/pkg/util/fakeclient/networkattachmentdefinition.go b/pkg/util/fakeclient/networkattachmentdefinition.go
new file mode 100644
index 0000000..be7bab4
--- /dev/null
+++ b/pkg/util/fakeclient/networkattachmentdefinition.go
@@ -0,0 +1,61 @@
+package fakeclient
+
+import (
+	"context"
+
+	cniv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/watch"
+
+	typecniv1 "github.com/harvester/vm-dhcp-controller/pkg/generated/clientset/versioned/typed/k8s.cni.cncf.io/v1"
+	ctlcniv1 "github.com/harvester/vm-dhcp-controller/pkg/generated/controllers/k8s.cni.cncf.io/v1"
+)
+
+type NetworkAttachmentDefinitionClient func(string) typecniv1.NetworkAttachmentDefinitionInterface
+
+func (c NetworkAttachmentDefinitionClient) Update(nad *cniv1.NetworkAttachmentDefinition) (*cniv1.NetworkAttachmentDefinition, error) {
+	return c(nad.Namespace).Update(context.TODO(), nad, metav1.UpdateOptions{})
+}
+func (c NetworkAttachmentDefinitionClient) Get(namespace, name string, options metav1.GetOptions) (*cniv1.NetworkAttachmentDefinition, error) {
+	panic("implement me")
+}
+func (c NetworkAttachmentDefinitionClient) Create(nad *cniv1.NetworkAttachmentDefinition) (*cniv1.NetworkAttachmentDefinition, error) {
+	return c(nad.Namespace).Create(context.TODO(), nad, metav1.CreateOptions{})
+}
+func (c NetworkAttachmentDefinitionClient) Delete(namespace, name string, options *metav1.DeleteOptions) error {
+	return c(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
+}
+func (c NetworkAttachmentDefinitionClient) List(namespace string, opts metav1.ListOptions) (*cniv1.NetworkAttachmentDefinitionList, error) {
+	panic("implement me")
+}
+func (c NetworkAttachmentDefinitionClient) Watch(namespace string, opts metav1.ListOptions) (watch.Interface, error) {
+	panic("implement me")
+}
+func (c NetworkAttachmentDefinitionClient) Patch(namespace, name string, pt types.PatchType, data []byte, subresources ...string) (result *cniv1.NetworkAttachmentDefinition, err error) {
+	panic("implement me")
+}
+
+type NetworkAttachmentDefinitionCache func(string) typecniv1.NetworkAttachmentDefinitionInterface
+
+func (c NetworkAttachmentDefinitionCache) Get(namespace, name string) (*cniv1.NetworkAttachmentDefinition, error) {
+	return c(namespace).Get(context.TODO(), name, metav1.GetOptions{})
+}
+func (c NetworkAttachmentDefinitionCache) List(namespace string, selector labels.Selector) ([]*cniv1.NetworkAttachmentDefinition, error) {
+	list, err := c(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
+	if err != nil {
+		return nil, err
+	}
+	result := make([]*cniv1.NetworkAttachmentDefinition, 0, len(list.Items))
+	for i := range list.Items {
+		result = append(result, &list.Items[i])
+	}
+	return result, nil
+}
+func (c NetworkAttachmentDefinitionCache) AddIndexer(indexName string, indexer ctlcniv1.NetworkAttachmentDefinitionIndexer) {
+	panic("implement me")
+}
+func (c NetworkAttachmentDefinitionCache) GetByIndex(indexName, key string) ([]*cniv1.NetworkAttachmentDefinition, error) {
+	panic("implement me")
+}
diff --git a/pkg/util/fakeclient/pod.go b/pkg/util/fakeclient/pod.go
new file mode 100644
index 0000000..ed4e445
--- /dev/null
+++ b/pkg/util/fakeclient/pod.go
@@ -0,0 +1,64 @@
+package fakeclient
+
+import (
+	"context"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/watch"
+	typecorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+
+	ctlcorev1 "github.com/harvester/vm-dhcp-controller/pkg/generated/controllers/core/v1"
+)
+
+type PodClient func(string) typecorev1.PodInterface
+
+func (c PodClient) Update(pod *corev1.Pod) (*corev1.Pod, error) {
+	return c(pod.Namespace).Update(context.TODO(), pod, metav1.UpdateOptions{})
+}
+func (c PodClient) Get(namespace, name string, options metav1.GetOptions) (*corev1.Pod, error) {
+	return c(namespace).Get(context.TODO(), name, metav1.GetOptions{})
+}
+func (c PodClient) Create(pod *corev1.Pod) (*corev1.Pod, error) {
+	return c(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
+}
+func (c PodClient) Delete(namespace, name string, options *metav1.DeleteOptions) error {
+	return c(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
+}
+func (c PodClient) List(namespace string, opts metav1.ListOptions) (*corev1.PodList, error) {
+	panic("implement me")
+}
+func (c PodClient) UpdateStatus(pod *corev1.Pod) (*corev1.Pod, error) {
+	return c(pod.Namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{})
+}
+func (c PodClient) Watch(namespace string, opts metav1.ListOptions) (watch.Interface, error) {
+	panic("implement me")
+}
+func (c PodClient) Patch(namespace, name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Pod, err error) {
+	panic("implement me")
+}
+
+type PodCache func(string) typecorev1.PodInterface
+
+func (c PodCache) Get(namespace, name string) (*corev1.Pod, error) {
+	return c(namespace).Get(context.TODO(), name, metav1.GetOptions{})
+}
+func (c PodCache) List(namespace string, selector labels.Selector) ([]*corev1.Pod, error) {
+	list, err := c(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
+	if err != nil {
+		return nil, err
+	}
+	result := make([]*corev1.Pod, 0, len(list.Items))
+	for i := range list.Items {
+		result = append(result, &list.Items[i])
+	}
+	return result, nil
+}
+func (c PodCache) AddIndexer(indexName string, indexer ctlcorev1.PodIndexer) {
+	panic("implement me")
+}
+func (c PodCache) GetByIndex(indexName, key string) ([]*corev1.Pod, error) {
+	panic("implement me")
+}

From a7d110b041690e0117a446ba13f198670ec1a27e Mon Sep 17 00:00:00
2001 From: Zespre Chang Date: Mon, 22 Jan 2024 12:41:31 +0800 Subject: [PATCH 2/3] test: add helper functions for ippool tests Signed-off-by: Zespre Chang --- pkg/controller/ippool/common.go | 53 ++++++++++++++++++++++-- pkg/controller/ippool/controller_test.go | 48 +++++++++++++-------- 2 files changed, 80 insertions(+), 21 deletions(-) diff --git a/pkg/controller/ippool/common.go b/pkg/controller/ippool/common.go index bb1df99..cb26b37 100644 --- a/pkg/controller/ippool/common.go +++ b/pkg/controller/ippool/common.go @@ -5,6 +5,7 @@ import ( "fmt" "net" + cniv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" "github.com/rancher/wrangler/pkg/kv" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -174,8 +175,8 @@ func newIPPoolBuilder(namespace, name string) *ipPoolBuilder { return &ipPoolBuilder{ ipPool: &networkv1.IPPool{ ObjectMeta: metav1.ObjectMeta{ - Name: name, Namespace: namespace, + Name: name, }, }, } @@ -248,6 +249,52 @@ func (b *ipPoolBuilder) DisabledCondition(status corev1.ConditionStatus, reason, return b } -func (p *ipPoolBuilder) Build() *networkv1.IPPool { - return p.ipPool +func (b *ipPoolBuilder) Build() *networkv1.IPPool { + return b.ipPool +} + +type podBuilder struct { + pod *corev1.Pod +} + +func newPodBuilder(namespace, name string) *podBuilder { + return &podBuilder{ + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + }, + } +} + +func (b *podBuilder) Build() *corev1.Pod { + return b.pod +} + +type networkAttachmentDefinitionBuilder struct { + nad *cniv1.NetworkAttachmentDefinition +} + +func newNetworkAttachmentDefinitionBuilder(namespace, name string) *networkAttachmentDefinitionBuilder { + return &networkAttachmentDefinitionBuilder{ + nad: &cniv1.NetworkAttachmentDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + }, + } +} + +func (b *networkAttachmentDefinitionBuilder) Label(key, value string) *networkAttachmentDefinitionBuilder { + if b.nad.Labels == nil { + b.nad.Labels = make(map[string]string) + } + b.nad.Labels[key] = value + return b +} + +func (b *networkAttachmentDefinitionBuilder) Build() *cniv1.NetworkAttachmentDefinition { + return b.nad } diff --git a/pkg/controller/ippool/controller_test.go b/pkg/controller/ippool/controller_test.go index 75dc934..53f96b5 100644 --- a/pkg/controller/ippool/controller_test.go +++ b/pkg/controller/ippool/controller_test.go @@ -21,6 +21,28 @@ import ( "github.com/harvester/vm-dhcp-controller/pkg/util/fakeclient" ) +const ( + testIPPoolNamespace = "default" + testIPPoolName = "ippool-1" + testPodNamespace = "default" + testPodName = "default-ippool-1-agent" + testNADNamespace = "default" + testNADName = "net-1" + testClusterNetworkName = "provider" +) + +func newTestIPPoolBuilder() *ipPoolBuilder { + return newIPPoolBuilder(testIPPoolNamespace, testIPPoolName) +} + +func newTestPodBuilder() *podBuilder { + return newPodBuilder(testPodNamespace, testPodName) +} + +func newTestNetworkAttachmentDefinitionBuilder() *networkAttachmentDefinitionBuilder { + return newNetworkAttachmentDefinitionBuilder(testNADNamespace, testNADName) +} + func TestHandler_OnChange(t *testing.T) { type input struct { key string @@ -43,21 +65,17 @@ func TestHandler_OnChange(t *testing.T) { name: "pause ippool", given: input{ key: "default/ippool-1", - ipPool: newIPPoolBuilder("default", "ippool-1"). + ipPool: newTestIPPoolBuilder(). Paused(). 
AgentPodRef("default", "default-ippool-1-agent"). Build(), pods: []*corev1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "default-ippool-1-agent", - }, - }, + newTestPodBuilder(). + Build(), }, }, expected: output{ - ipPool: newIPPoolBuilder("default", "ippool-1"). + ipPool: newTestIPPoolBuilder(). Paused(). DisabledCondition(corev1.ConditionTrue, "", ""). Build(), @@ -125,20 +143,14 @@ func TestHandler_DeployAgent(t *testing.T) { name: "resume ippool", given: input{ key: "default/ippool-1", - ipPool: newIPPoolBuilder("default", "ippool-1"). + ipPool: newTestIPPoolBuilder(). ServerIP("192.168.0.2"). CIDR("192.168.0.0/24"). NetworkName("default/net-1"). Build(), - nad: &cniv1.NetworkAttachmentDefinition{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - clusterNetworkLabelKey: "provider", - }, - Namespace: "default", - Name: "net-1", - }, - }, + nad: newTestNetworkAttachmentDefinitionBuilder(). + Label(clusterNetworkLabelKey, testClusterNetworkName). + Build(), }, expected: output{ ipPoolStatus: networkv1.IPPoolStatus{ From dc5dc83bf86afe58d0fd11cce3990a141e6c3973 Mon Sep 17 00:00:00 2001 From: Zespre Chang Date: Tue, 23 Jan 2024 11:54:44 +0800 Subject: [PATCH 3/3] test(ippool): add tests for other reconcile loops Signed-off-by: Zespre Chang --- pkg/controller/ippool/common.go | 161 ++++++- pkg/controller/ippool/controller_test.go | 567 +++++++++++++++++++---- 2 files changed, 632 insertions(+), 96 deletions(-) diff --git a/pkg/controller/ippool/common.go b/pkg/controller/ippool/common.go index cb26b37..da90201 100644 --- a/pkg/controller/ippool/common.go +++ b/pkg/controller/ippool/common.go @@ -13,7 +13,9 @@ import ( "github.com/harvester/vm-dhcp-controller/pkg/apis/network.harvesterhci.io" networkv1 "github.com/harvester/vm-dhcp-controller/pkg/apis/network.harvesterhci.io/v1alpha1" + "github.com/harvester/vm-dhcp-controller/pkg/cache" "github.com/harvester/vm-dhcp-controller/pkg/config" + "github.com/harvester/vm-dhcp-controller/pkg/ipam" ) func prepareAgentPod( @@ -215,8 +217,8 @@ func (b *ipPoolBuilder) PoolRange(start, end string) *ipPoolBuilder { return b } -func (b *ipPoolBuilder) Exclude(exclude []string) *ipPoolBuilder { - b.ipPool.Spec.IPv4Config.Pool.Exclude = exclude +func (b *ipPoolBuilder) Exclude(ipAddressList ...string) *ipPoolBuilder { + b.ipPool.Spec.IPv4Config.Pool.Exclude = append(b.ipPool.Spec.IPv4Config.Pool.Exclude, ipAddressList...) 
 	return b
 }
 
@@ -229,6 +231,33 @@ func (b *ipPoolBuilder) AgentPodRef(namespace, name string) *ipPoolBuilder {
 	return b
 }
 
+func (b *ipPoolBuilder) Allocated(ipAddress, macAddress string) *ipPoolBuilder {
+	if b.ipPool.Status.IPv4 == nil {
+		b.ipPool.Status.IPv4 = new(networkv1.IPv4Status)
+	}
+	if b.ipPool.Status.IPv4.Allocated == nil {
+		b.ipPool.Status.IPv4.Allocated = make(map[string]string, 2)
+	}
+	b.ipPool.Status.IPv4.Allocated[ipAddress] = macAddress
+	return b
+}
+
+func (b *ipPoolBuilder) Available(count int) *ipPoolBuilder {
+	if b.ipPool.Status.IPv4 == nil {
+		b.ipPool.Status.IPv4 = new(networkv1.IPv4Status)
+	}
+	b.ipPool.Status.IPv4.Available = count
+	return b
+}
+
+func (b *ipPoolBuilder) Used(count int) *ipPoolBuilder {
+	if b.ipPool.Status.IPv4 == nil {
+		b.ipPool.Status.IPv4 = new(networkv1.IPv4Status)
+	}
+	b.ipPool.Status.IPv4.Used = count
+	return b
+}
+
 func (b *ipPoolBuilder) RegisteredCondition(status corev1.ConditionStatus, reason, message string) *ipPoolBuilder {
 	setRegisteredCondition(b.ipPool, status, reason, message)
 	return b
@@ -253,6 +282,57 @@ func (b *ipPoolBuilder) Build() *networkv1.IPPool {
 	return b.ipPool
 }
 
+type ipPoolStatusBuilder struct {
+	ipPoolStatus networkv1.IPPoolStatus
+}
+
+func newIPPoolStatusBuilder() *ipPoolStatusBuilder {
+	return &ipPoolStatusBuilder{
+		ipPoolStatus: networkv1.IPPoolStatus{},
+	}
+}
+
+func (b *ipPoolStatusBuilder) AgentPodRef(namespace, name string) *ipPoolStatusBuilder {
+	if b.ipPoolStatus.AgentPodRef == nil {
+		b.ipPoolStatus.AgentPodRef = new(networkv1.PodReference)
+	}
+	b.ipPoolStatus.AgentPodRef.Namespace = namespace
+	b.ipPoolStatus.AgentPodRef.Name = name
+	return b
+}
+
+func (b *ipPoolStatusBuilder) RegisteredCondition(status corev1.ConditionStatus, reason, message string) *ipPoolStatusBuilder {
+	networkv1.Registered.SetStatus(&b.ipPoolStatus, string(status))
+	networkv1.Registered.Reason(&b.ipPoolStatus, reason)
+	networkv1.Registered.Message(&b.ipPoolStatus, message)
+	return b
+}
+
+func (b *ipPoolStatusBuilder) CacheReadyCondition(status corev1.ConditionStatus, reason, message string) *ipPoolStatusBuilder {
+	networkv1.CacheReady.SetStatus(&b.ipPoolStatus, string(status))
+	networkv1.CacheReady.Reason(&b.ipPoolStatus, reason)
+	networkv1.CacheReady.Message(&b.ipPoolStatus, message)
+	return b
+}
+
+func (b *ipPoolStatusBuilder) AgentReadyCondition(status corev1.ConditionStatus, reason, message string) *ipPoolStatusBuilder {
+	networkv1.AgentReady.SetStatus(&b.ipPoolStatus, string(status))
+	networkv1.AgentReady.Reason(&b.ipPoolStatus, reason)
+	networkv1.AgentReady.Message(&b.ipPoolStatus, message)
+	return b
+}
+
+func (b *ipPoolStatusBuilder) DisabledCondition(status corev1.ConditionStatus, reason, message string) *ipPoolStatusBuilder {
+	networkv1.Disabled.SetStatus(&b.ipPoolStatus, string(status))
+	networkv1.Disabled.Reason(&b.ipPoolStatus, reason)
+	networkv1.Disabled.Message(&b.ipPoolStatus, message)
+	return b
+}
+
+func (b *ipPoolStatusBuilder) Build() networkv1.IPPoolStatus {
+	return b.ipPoolStatus
+}
+
 type podBuilder struct {
 	pod *corev1.Pod
 }
@@ -268,6 +348,26 @@ func newPodBuilder(namespace, name string) *podBuilder {
 	}
 }
 
+func (b *podBuilder) PodReady(ready corev1.ConditionStatus) *podBuilder {
+	var found bool
+	if b.pod.Status.Conditions == nil {
+		b.pod.Status.Conditions = make([]corev1.PodCondition, 0, 1)
+	}
+	for i := range b.pod.Status.Conditions {
+		if b.pod.Status.Conditions[i].Type == corev1.PodReady {
+			b.pod.Status.Conditions[i].Status = ready
+			found = true
+			break
+		}
+	}
+	if !found {
+		b.pod.Status.Conditions = append(b.pod.Status.Conditions, corev1.PodCondition{
+			Type:   corev1.PodReady,
+			Status: ready,
+		})
+	}
+	return b
+}
+
 func (b *podBuilder) Build() *corev1.Pod {
 	return b.pod
 }
@@ -298,3 +398,60 @@ func (b *networkAttachmentDefinitionBuilder) Label(key, value string) *networkAt
 func (b *networkAttachmentDefinitionBuilder) Build() *cniv1.NetworkAttachmentDefinition {
 	return b.nad
 }
+
+type cacheAllocatorBuilder struct {
+	cacheAllocator *cache.CacheAllocator
+}
+
+func newCacheAllocatorBuilder() *cacheAllocatorBuilder {
+	return &cacheAllocatorBuilder{
+		cacheAllocator: cache.New(),
+	}
+}
+
+func (b *cacheAllocatorBuilder) MACSet(name string) *cacheAllocatorBuilder {
+	_ = b.cacheAllocator.NewMACSet(name)
+	return b
+}
+
+func (b *cacheAllocatorBuilder) Add(name, macAddress, ipAddress string) *cacheAllocatorBuilder {
+	_ = b.cacheAllocator.AddMAC(name, macAddress, ipAddress)
+	return b
+}
+
+func (b *cacheAllocatorBuilder) Build() *cache.CacheAllocator {
+	return b.cacheAllocator
+}
+
+type ipAllocatorBuilder struct {
+	ipAllocator *ipam.IPAllocator
+}
+
+func newIPAllocatorBuilder() *ipAllocatorBuilder {
+	return &ipAllocatorBuilder{
+		ipAllocator: ipam.New(),
+	}
+}
+
+func (b *ipAllocatorBuilder) IPSubnet(name, cidr, start, end string) *ipAllocatorBuilder {
+	_ = b.ipAllocator.NewIPSubnet(name, cidr, start, end)
+	return b
+}
+
+func (b *ipAllocatorBuilder) Revoke(name string, ipAddressList ...string) *ipAllocatorBuilder {
+	for _, ip := range ipAddressList {
+		_ = b.ipAllocator.RevokeIP(name, ip)
+	}
+	return b
+}
+
+func (b *ipAllocatorBuilder) Allocate(name string, ipAddressList ...string) *ipAllocatorBuilder {
+	for _, ip := range ipAddressList {
+		_, _ = b.ipAllocator.AllocateIP(name, ip)
+	}
+	return b
+}
+
+func (b *ipAllocatorBuilder) Build() *ipam.IPAllocator {
+	return b.ipAllocator
+}
diff --git a/pkg/controller/ippool/controller_test.go b/pkg/controller/ippool/controller_test.go
index 53f96b5..b9c5fc7 100644
--- a/pkg/controller/ippool/controller_test.go
+++ b/pkg/controller/ippool/controller_test.go
@@ -1,10 +1,10 @@
 package ippool
 
 import (
+	"fmt"
 	"testing"
+	"time"
 
-	cniv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
-	"github.com/rancher/wrangler/pkg/genericcondition"
 	"github.com/stretchr/testify/assert"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -24,13 +24,40 @@ import (
 const (
 	testIPPoolNamespace = "default"
 	testIPPoolName      = "ippool-1"
-	testPodNamespace    = "default"
+	testKey                = testIPPoolNamespace + "/" + testIPPoolName
+	testPodNamespace       = "harvester-system"
 	testPodName         = "default-ippool-1-agent"
 	testNADNamespace    = "default"
 	testNADName         = "net-1"
-	testClusterNetworkName = "provider"
+	testClusterNetwork     = "provider"
+	testServerIP           = "192.168.0.2"
+	testNetworkName        = testNADNamespace + "/" + testNADName
+	testCIDR               = "192.168.0.0/24"
+	testStartIP            = "192.168.0.101"
+	testEndIP              = "192.168.0.200"
+	testServiceAccountName = "vdca"
+	testImageRepository    = "rancher/harvester-vm-dhcp-controller"
+	testImageTag           = "main"
+
+	testExcludedIP1 = "192.168.0.150"
+	testExcludedIP2 = "192.168.0.187"
+	testExcludedIP3 = "192.168.0.10"
+	testExcludedIP4 = "192.168.0.235"
+
+	testAllocatedIP1 = "192.168.0.111"
+	testAllocatedIP2 = "192.168.0.177"
+	testMAC1         = "11:22:33:44:55:66"
+	testMAC2         = "22:33:44:55:66:77"
 )
 
+func newTestCacheAllocatorBuilder() *cacheAllocatorBuilder {
+	return newCacheAllocatorBuilder()
+}
+
+func 
newTestIPAllocatorBuilder() *ipAllocatorBuilder { + return newIPAllocatorBuilder() +} + func newTestIPPoolBuilder() *ipPoolBuilder { return newIPPoolBuilder(testIPPoolNamespace, testIPPoolName) } @@ -39,15 +66,20 @@ func newTestPodBuilder() *podBuilder { return newPodBuilder(testPodNamespace, testPodName) } +func newTestIPPoolStatusBuilder() *ipPoolStatusBuilder { + return newIPPoolStatusBuilder() +} + func newTestNetworkAttachmentDefinitionBuilder() *networkAttachmentDefinitionBuilder { return newNetworkAttachmentDefinitionBuilder(testNADNamespace, testNADName) } func TestHandler_OnChange(t *testing.T) { type input struct { - key string - ipPool *networkv1.IPPool - pods []*corev1.Pod + key string + ipAllocator *ipam.IPAllocator + ipPool *networkv1.IPPool + pods []*corev1.Pod } type output struct { @@ -61,10 +93,54 @@ func TestHandler_OnChange(t *testing.T) { given input expected output }{ + { + name: "new ippool", + given: input{ + key: testIPPoolNamespace + "/" + testIPPoolName, + ipAllocator: newTestIPAllocatorBuilder(). + Build(), + ipPool: newTestIPPoolBuilder(). + Build(), + }, + expected: output{ + ipPool: newTestIPPoolBuilder(). + DisabledCondition(corev1.ConditionFalse, "", ""). + CacheReadyCondition(corev1.ConditionFalse, "NotInitialized", ""). + Build(), + }, + }, + { + name: "ippool with ipam initialized", + given: input{ + key: testIPPoolNamespace + "/" + testIPPoolName, + ipAllocator: newTestIPAllocatorBuilder(). + IPSubnet(testNetworkName, testCIDR, testStartIP, testEndIP). + Build(), + ipPool: newTestIPPoolBuilder(). + ServerIP(testServerIP). + CIDR(testCIDR). + PoolRange(testStartIP, testEndIP). + NetworkName(testNetworkName). + Build(), + }, + expected: output{ + ipPool: newTestIPPoolBuilder(). + ServerIP(testServerIP). + CIDR(testCIDR). + PoolRange(testStartIP, testEndIP). + NetworkName(testNetworkName). + Available(100). + Used(0). + DisabledCondition(corev1.ConditionFalse, "", ""). + Build(), + }, + }, { name: "pause ippool", given: input{ - key: "default/ippool-1", + key: testIPPoolNamespace + "/" + testIPPoolName, + ipAllocator: newTestIPAllocatorBuilder(). + Build(), ipPool: newTestIPPoolBuilder(). Paused(). AgentPodRef("default", "default-ippool-1-agent"). @@ -81,6 +157,24 @@ func TestHandler_OnChange(t *testing.T) { Build(), }, }, + { + name: "resume ippool", + given: input{ + key: testIPPoolNamespace + "/" + testIPPoolName, + ipAllocator: newTestIPAllocatorBuilder(). + Build(), + ipPool: newTestIPPoolBuilder(). + UnPaused(). + Build(), + }, + expected: output{ + ipPool: newTestIPPoolBuilder(). + UnPaused(). + DisabledCondition(corev1.ConditionFalse, "", ""). + CacheReadyCondition(corev1.ConditionFalse, "NotInitialized", ""). 
+ Build(), + }, + }, } for _, tc := range testCases { @@ -102,7 +196,7 @@ func TestHandler_OnChange(t *testing.T) { Tag: "main", }, cacheAllocator: cache.New(), - ipAllocator: ipam.New(), + ipAllocator: tc.given.ipAllocator, metricsAllocator: metrics.New(), ippoolClient: fakeclient.IPPoolClient(clientset.NetworkV1alpha1().IPPools), podClient: fakeclient.PodClient(k8sclientset.CoreV1().Pods), @@ -113,8 +207,9 @@ func TestHandler_OnChange(t *testing.T) { actual.ipPool, actual.err = handler.OnChange(tc.given.key, tc.given.ipPool) assert.Nil(t, actual.err) - emptyConditionsTimestamp(tc.expected.ipPool.Status.Conditions) - emptyConditionsTimestamp(actual.ipPool.Status.Conditions) + sanitizeStatus(&tc.expected.ipPool.Status) + sanitizeStatus(&actual.ipPool.Status) + assert.Equal(t, tc.expected.ipPool, actual.ipPool, tc.name) assert.Equal(t, tc.expected.pods, actual.pods) @@ -122,110 +217,394 @@ func TestHandler_OnChange(t *testing.T) { } func TestHandler_DeployAgent(t *testing.T) { - type input struct { - key string - ipPool *networkv1.IPPool - nad *cniv1.NetworkAttachmentDefinition - } + t.Run("ippool created", func(t *testing.T) { + givenIPPool := newTestIPPoolBuilder(). + ServerIP(testServerIP). + CIDR(testCIDR). + NetworkName(testNetworkName).Build() + givenNAD := newTestNetworkAttachmentDefinitionBuilder(). + Label(clusterNetworkLabelKey, testClusterNetwork).Build() - type output struct { - ipPoolStatus networkv1.IPPoolStatus - pod *corev1.Pod - err error - } + expectedStatus := newTestIPPoolStatusBuilder(). + AgentPodRef(testPodNamespace, testPodName).Build() + expectedPod := prepareAgentPod( + newIPPoolBuilder(testIPPoolNamespace, testIPPoolName). + ServerIP(testServerIP). + CIDR(testCIDR). + NetworkName(testNetworkName).Build(), + false, + testPodNamespace, + testClusterNetwork, + testServiceAccountName, + &config.Image{ + Repository: testImageRepository, + Tag: testImageTag, + }, + ) - testCases := []struct { - name string - given input - expected output - }{ - { - name: "resume ippool", - given: input{ - key: "default/ippool-1", - ipPool: newTestIPPoolBuilder(). - ServerIP("192.168.0.2"). - CIDR("192.168.0.0/24"). - NetworkName("default/net-1"). - Build(), - nad: newTestNetworkAttachmentDefinitionBuilder(). - Label(clusterNetworkLabelKey, testClusterNetworkName). - Build(), + nadGVR := schema.GroupVersionResource{ + Group: "k8s.cni.cncf.io", + Version: "v1", + Resource: "network-attachment-definitions", + } + + clientset := fake.NewSimpleClientset() + err := clientset.Tracker().Create(nadGVR, givenNAD, givenNAD.Namespace) + assert.Nil(t, err, "mock resource should add into fake controller tracker") + + k8sclientset := k8sfake.NewSimpleClientset() + + handler := Handler{ + agentNamespace: testPodNamespace, + agentImage: &config.Image{ + Repository: testImageRepository, + Tag: testImageTag, }, - expected: output{ - ipPoolStatus: networkv1.IPPoolStatus{ - AgentPodRef: &networkv1.PodReference{ - Namespace: "default", - Name: "default-ippool-1-agent", - }, - }, - pod: prepareAgentPod( - newIPPoolBuilder("default", "ippool-1"). - ServerIP("192.168.0.2"). - CIDR("192.168.0.0/24"). - NetworkName("default/net-1"). 
- Build(), - false, - "default", - "provider", - "vdca", - &config.Image{ - Repository: "rancher/harvester-vm-dhcp-controller", - Tag: "main", - }, - ), + agentServiceAccountName: testServiceAccountName, + nadCache: fakeclient.NetworkAttachmentDefinitionCache(clientset.K8sCniCncfIoV1().NetworkAttachmentDefinitions), + podClient: fakeclient.PodClient(k8sclientset.CoreV1().Pods), + } + + status, err := handler.DeployAgent(givenIPPool, givenIPPool.Status) + assert.Nil(t, err) + assert.Equal(t, expectedStatus, status) + + pod, err := handler.podClient.Get(testPodNamespace, testPodName, metav1.GetOptions{}) + assert.Nil(t, err) + assert.Equal(t, expectedPod, pod) + }) + + t.Run("ippool paused", func(t *testing.T) { + givenIPPool := newTestIPPoolBuilder(). + Paused().Build() + + handler := Handler{ + agentNamespace: testPodNamespace, + agentImage: &config.Image{ + Repository: testImageRepository, + Tag: testImageTag, }, - }, - } + agentServiceAccountName: testServiceAccountName, + } - nadGVR := schema.GroupVersionResource{ - Group: "k8s.cni.cncf.io", - Version: "v1", - Resource: "network-attachment-definitions", - } + _, err := handler.DeployAgent(givenIPPool, givenIPPool.Status) + assert.Equal(t, fmt.Errorf("ippool %s was administratively disabled", testIPPoolNamespace+"/"+testIPPoolName), err) + }) - for _, tc := range testCases { - clientset := fake.NewSimpleClientset(tc.given.ipPool) - if tc.given.nad != nil { - err := clientset.Tracker().Create(nadGVR, tc.given.nad, tc.given.nad.Namespace) - assert.Nil(t, err, "mock resource should add into fake controller tracker") + t.Run("nad not found", func(t *testing.T) { + givenIPPool := newTestIPPoolBuilder(). + NetworkName("you-cant-find-me").Build() + givenNAD := newTestNetworkAttachmentDefinitionBuilder(). + Label(clusterNetworkLabelKey, testClusterNetwork).Build() + + nadGVR := schema.GroupVersionResource{ + Group: "k8s.cni.cncf.io", + Version: "v1", + Resource: "network-attachment-definitions", + } + + clientset := fake.NewSimpleClientset() + err := clientset.Tracker().Create(nadGVR, givenNAD, givenNAD.Namespace) + assert.Nil(t, err, "mock resource should add into fake controller tracker") + + handler := Handler{ + nadCache: fakeclient.NetworkAttachmentDefinitionCache(clientset.K8sCniCncfIoV1().NetworkAttachmentDefinitions), + } + + _, err = handler.DeployAgent(givenIPPool, givenIPPool.Status) + assert.Equal(t, fmt.Sprintf("network-attachment-definitions.k8s.cni.cncf.io \"%s\" not found", "you-cant-find-me"), err.Error()) + }) + + t.Run("agent pod already exists", func(t *testing.T) { + givenIPPool := newTestIPPoolBuilder(). + ServerIP(testServerIP). + CIDR(testCIDR). + NetworkName(testNetworkName). + AgentPodRef(testPodNamespace, testPodName).Build() + givenNAD := newTestNetworkAttachmentDefinitionBuilder(). + Label(clusterNetworkLabelKey, testClusterNetwork).Build() + givenPod := prepareAgentPod( + newIPPoolBuilder(testIPPoolNamespace, testIPPoolName). + ServerIP(testServerIP). + CIDR(testCIDR). + NetworkName(testNetworkName).Build(), + false, + testPodNamespace, + testClusterNetwork, + testServiceAccountName, + &config.Image{ + Repository: testImageRepository, + Tag: testImageTag, + }, + ) + + expectedStatus := newTestIPPoolStatusBuilder(). + AgentPodRef(testPodNamespace, testPodName).Build() + expectedPod := prepareAgentPod( + newIPPoolBuilder(testIPPoolNamespace, testIPPoolName). + ServerIP(testServerIP). + CIDR(testCIDR). 
+ NetworkName(testNetworkName).Build(), + false, + testPodNamespace, + testClusterNetwork, + testServiceAccountName, + &config.Image{ + Repository: testImageRepository, + Tag: testImageTag, + }, + ) + + nadGVR := schema.GroupVersionResource{ + Group: "k8s.cni.cncf.io", + Version: "v1", + Resource: "network-attachment-definitions", } + clientset := fake.NewSimpleClientset() + err := clientset.Tracker().Create(nadGVR, givenNAD, givenNAD.Namespace) + assert.Nil(t, err, "mock resource should add into fake controller tracker") + k8sclientset := k8sfake.NewSimpleClientset() + err = k8sclientset.Tracker().Add(givenPod) + assert.Nil(t, err, "mock resource should add into fake controller tracker") handler := Handler{ - agentNamespace: "default", + agentNamespace: testPodNamespace, agentImage: &config.Image{ - Repository: "rancher/harvester-vm-dhcp-controller", - Tag: "main", + Repository: testImageRepository, + Tag: testImageTag, }, - agentServiceAccountName: "vdca", - cacheAllocator: cache.New(), - ipAllocator: ipam.New(), - metricsAllocator: metrics.New(), - ippoolClient: fakeclient.IPPoolClient(clientset.NetworkV1alpha1().IPPools), + agentServiceAccountName: testServiceAccountName, nadCache: fakeclient.NetworkAttachmentDefinitionCache(clientset.K8sCniCncfIoV1().NetworkAttachmentDefinitions), podClient: fakeclient.PodClient(k8sclientset.CoreV1().Pods), } - var actual output + status, err := handler.DeployAgent(givenIPPool, givenIPPool.Status) + assert.Nil(t, err) + assert.Equal(t, expectedStatus, status) - actual.ipPoolStatus, actual.err = handler.DeployAgent(tc.given.ipPool, tc.given.ipPool.Status) - assert.Nil(t, actual.err) + pod, err := handler.podClient.Get(testPodNamespace, testPodName, metav1.GetOptions{}) + assert.Nil(t, err) + assert.Equal(t, expectedPod, pod) + }) +} - emptyConditionsTimestamp(tc.expected.ipPoolStatus.Conditions) - emptyConditionsTimestamp(actual.ipPoolStatus.Conditions) - assert.Equal(t, tc.expected.ipPoolStatus, actual.ipPoolStatus, tc.name) +func TestHandler_BuildCache(t *testing.T) { + t.Run("new ippool", func(t *testing.T) { + givenIPAllocator := newTestIPAllocatorBuilder(). + Build() + givenCacheAllocator := newTestCacheAllocatorBuilder(). + Build() + givenIPPool := newTestIPPoolBuilder(). + CIDR(testCIDR). + PoolRange(testStartIP, testEndIP). + NetworkName(testNetworkName). + Build() - actual.pod, actual.err = handler.podClient.Get("default", "default-ippool-1-agent", metav1.GetOptions{}) - assert.Nil(t, actual.err) - assert.Equal(t, tc.expected.pod, actual.pod) - } + expectedIPAllocator := newTestIPAllocatorBuilder(). + IPSubnet(testNetworkName, testCIDR, testStartIP, testEndIP). + Build() + expectedCacheAllocator := newTestCacheAllocatorBuilder(). + MACSet(testNetworkName). + Build() + + handler := Handler{ + cacheAllocator: givenCacheAllocator, + ipAllocator: givenIPAllocator, + } + + _, err := handler.BuildCache(givenIPPool, givenIPPool.Status) + assert.Nil(t, err) + + assert.Equal(t, expectedIPAllocator, handler.ipAllocator) + assert.Equal(t, expectedCacheAllocator, handler.cacheAllocator) + }) + + t.Run("ippool paused", func(t *testing.T) { + givenIPPool := newTestIPPoolBuilder(). + Paused(). + Build() + + handler := Handler{} + + _, err := handler.BuildCache(givenIPPool, givenIPPool.Status) + assert.Equal(t, fmt.Sprintf("ippool %s was administratively disabled", testIPPoolNamespace+"/"+testIPPoolName), err.Error()) + }) + + t.Run("cache is already ready", func(t *testing.T) { + givenIPPool := newTestIPPoolBuilder(). 
+ CacheReadyCondition(corev1.ConditionTrue, "", ""). + Build() + + expectedStatus := newTestIPPoolStatusBuilder(). + CacheReadyCondition(corev1.ConditionTrue, "", ""). + Build() + + handler := Handler{} + + status, err := handler.BuildCache(givenIPPool, givenIPPool.Status) + assert.Nil(t, err) + assert.Equal(t, expectedStatus, status) + }) + + t.Run("ippool with excluded ips", func(t *testing.T) { + givenIPAllocator := newTestIPAllocatorBuilder(). + Build() + givenCacheAllocator := newTestCacheAllocatorBuilder(). + Build() + givenIPPool := newTestIPPoolBuilder(). + CIDR(testCIDR). + PoolRange(testStartIP, testEndIP). + Exclude(testExcludedIP1, testExcludedIP2). + NetworkName(testNetworkName). + Build() + + expectedIPAllocator := newTestIPAllocatorBuilder(). + IPSubnet(testNetworkName, testCIDR, testStartIP, testEndIP). + Revoke(testNetworkName, testExcludedIP1, testExcludedIP2). + Build() + expectedCacheAllocator := newTestCacheAllocatorBuilder(). + MACSet(testNetworkName). + Build() + + handler := Handler{ + cacheAllocator: givenCacheAllocator, + ipAllocator: givenIPAllocator, + } + + _, err := handler.BuildCache(givenIPPool, givenIPPool.Status) + assert.Nil(t, err) + + assert.Equal(t, expectedIPAllocator, handler.ipAllocator) + assert.Equal(t, expectedCacheAllocator, handler.cacheAllocator) + }) + + t.Run("rebuild caches", func(t *testing.T) { + givenIPAllocator := newTestIPAllocatorBuilder(). + Build() + givenCacheAllocator := newTestCacheAllocatorBuilder(). + Build() + givenIPPool := newTestIPPoolBuilder(). + CIDR(testCIDR). + PoolRange(testStartIP, testEndIP). + Exclude(testExcludedIP1, testExcludedIP2). + NetworkName(testNetworkName). + Allocated(testAllocatedIP1, testMAC1). + Allocated(testAllocatedIP2, testMAC2). + Build() + + expectedIPAllocator := newTestIPAllocatorBuilder(). + IPSubnet(testNetworkName, testCIDR, testStartIP, testEndIP). + Revoke(testNetworkName, testExcludedIP1, testExcludedIP2). + Allocate(testNetworkName, testAllocatedIP1, testAllocatedIP2). + Build() + expectedCacheAllocator := newTestCacheAllocatorBuilder(). + MACSet(testNetworkName). + Add(testNetworkName, testMAC1, testAllocatedIP1). + Add(testNetworkName, testMAC2, testAllocatedIP2). 
+ Build() + + handler := Handler{ + cacheAllocator: givenCacheAllocator, + ipAllocator: givenIPAllocator, + } + + _, err := handler.BuildCache(givenIPPool, givenIPPool.Status) + assert.Nil(t, err) + + assert.Equal(t, expectedIPAllocator, handler.ipAllocator) + assert.Equal(t, expectedCacheAllocator, handler.cacheAllocator) + }) +} + +func TestHandler_MonitorAgent(t *testing.T) { + t.Run("agent pod not found", func(t *testing.T) { + givenIPPool := newTestIPPoolBuilder().AgentPodRef(testPodNamespace, testPodName).Build() + givenPod := newPodBuilder("default", "nginx").Build() + + k8sclientset := k8sfake.NewSimpleClientset() + + err := k8sclientset.Tracker().Add(givenPod) + assert.Nil(t, err, "mock resource should add into fake controller tracker") + + handler := Handler{ + podCache: fakeclient.PodCache(k8sclientset.CoreV1().Pods), + } + + _, err = handler.MonitorAgent(givenIPPool, givenIPPool.Status) + assert.Equal(t, fmt.Sprintf("pods \"%s\" not found", testPodName), err.Error()) + }) + + t.Run("agent pod unready", func(t *testing.T) { + givenIPPool := newTestIPPoolBuilder().AgentPodRef(testPodNamespace, testPodName).Build() + givenPod := newTestPodBuilder().Build() + + k8sclientset := k8sfake.NewSimpleClientset() + + err := k8sclientset.Tracker().Add(givenPod) + assert.Nil(t, err, "mock resource should add into fake controller tracker") + + handler := Handler{ + podCache: fakeclient.PodCache(k8sclientset.CoreV1().Pods), + } + + _, err = handler.MonitorAgent(givenIPPool, givenIPPool.Status) + assert.Equal(t, fmt.Sprintf("agent for ippool %s is not ready", testPodNamespace+"/"+testPodName), err.Error()) + }) + + t.Run("agent pod ready", func(t *testing.T) { + givenIPPool := newTestIPPoolBuilder().AgentPodRef(testPodNamespace, testPodName).Build() + givenPod := newTestPodBuilder().PodReady(corev1.ConditionTrue).Build() + + k8sclientset := k8sfake.NewSimpleClientset() + + err := k8sclientset.Tracker().Add(givenPod) + assert.Nil(t, err, "mock resource should add into fake controller tracker") + + handler := Handler{ + podCache: fakeclient.PodCache(k8sclientset.CoreV1().Pods), + } + + _, err = handler.MonitorAgent(givenIPPool, givenIPPool.Status) + assert.Nil(t, err) + }) + + t.Run("ippool paused", func(t *testing.T) { + givenIPPool := newTestIPPoolBuilder().Paused().Build() + + handler := Handler{} + + _, err := handler.MonitorAgent(givenIPPool, givenIPPool.Status) + assert.Equal(t, fmt.Sprintf("ippool %s was administratively disabled", testIPPoolNamespace+"/"+testIPPoolName), err.Error()) + }) + + t.Run("ippool in no-agent mode", func(t *testing.T) { + givenIPPool := newTestIPPoolBuilder().Build() + + handler := Handler{ + noAgent: true, + } + + _, err := handler.MonitorAgent(givenIPPool, givenIPPool.Status) + assert.Nil(t, err) + }) + + t.Run("agentpodref not set", func(t *testing.T) { + givenIPPool := newTestIPPoolBuilder().Build() + + handler := Handler{} + + _, err := handler.MonitorAgent(givenIPPool, givenIPPool.Status) + assert.Equal(t, fmt.Sprintf("agent for ippool %s is not deployed", testIPPoolNamespace+"/"+testIPPoolName), err.Error()) + }) } -func emptyConditionsTimestamp(conditions []genericcondition.GenericCondition) { - for i := range conditions { - conditions[i].LastTransitionTime = "" - conditions[i].LastUpdateTime = "" +func sanitizeStatus(status *networkv1.IPPoolStatus) { + now := time.Time{} + status.LastUpdate = metav1.NewTime(now) + for i := range status.Conditions { + status.Conditions[i].LastTransitionTime = "" + status.Conditions[i].LastUpdateTime = "" } }
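
A closing note on the helpers, from review rather than from the patches themselves: because podBuilder.PodReady takes the desired condition status as an argument, its contract can be pinned against the controller's isPodReady helper. A minimal sketch of such a test, assuming it sits in package ippool alongside controller_test.go and that isPodReady reports whether the PodReady condition is ConditionTrue:

func TestPodBuilder_PodReady(t *testing.T) {
	// A pod whose PodReady condition is True should be reported ready.
	pod := newTestPodBuilder().PodReady(corev1.ConditionTrue).Build()
	assert.True(t, isPodReady(pod))

	// An explicit False condition must not be reported ready.
	pod = newTestPodBuilder().PodReady(corev1.ConditionFalse).Build()
	assert.False(t, isPodReady(pod))

	// A pod with no conditions at all is not ready either.
	assert.False(t, isPodReady(newTestPodBuilder().Build()))
}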