From dc5dc83bf86afe58d0fd11cce3990a141e6c3973 Mon Sep 17 00:00:00 2001 From: Zespre Chang Date: Tue, 23 Jan 2024 11:54:44 +0800 Subject: [PATCH] test(ippool): add tests for other reconcile loops Signed-off-by: Zespre Chang --- pkg/controller/ippool/common.go | 161 ++++++- pkg/controller/ippool/controller_test.go | 567 +++++++++++++++++++---- 2 files changed, 632 insertions(+), 96 deletions(-) diff --git a/pkg/controller/ippool/common.go b/pkg/controller/ippool/common.go index cb26b37..da90201 100644 --- a/pkg/controller/ippool/common.go +++ b/pkg/controller/ippool/common.go @@ -13,7 +13,9 @@ import ( "github.com/harvester/vm-dhcp-controller/pkg/apis/network.harvesterhci.io" networkv1 "github.com/harvester/vm-dhcp-controller/pkg/apis/network.harvesterhci.io/v1alpha1" + "github.com/harvester/vm-dhcp-controller/pkg/cache" "github.com/harvester/vm-dhcp-controller/pkg/config" + "github.com/harvester/vm-dhcp-controller/pkg/ipam" ) func prepareAgentPod( @@ -215,8 +217,8 @@ func (b *ipPoolBuilder) PoolRange(start, end string) *ipPoolBuilder { return b } -func (b *ipPoolBuilder) Exclude(exclude []string) *ipPoolBuilder { - b.ipPool.Spec.IPv4Config.Pool.Exclude = exclude +func (b *ipPoolBuilder) Exclude(ipAddressList ...string) *ipPoolBuilder { + b.ipPool.Spec.IPv4Config.Pool.Exclude = append(b.ipPool.Spec.IPv4Config.Pool.Exclude, ipAddressList...) return b } @@ -229,6 +231,33 @@ func (b *ipPoolBuilder) AgentPodRef(namespace, name string) *ipPoolBuilder { return b } +func (b *ipPoolBuilder) Allocated(ipAddress, macAddress string) *ipPoolBuilder { + if b.ipPool.Status.IPv4 == nil { + b.ipPool.Status.IPv4 = new(networkv1.IPv4Status) + } + if b.ipPool.Status.IPv4.Allocated == nil { + b.ipPool.Status.IPv4.Allocated = make(map[string]string, 2) + } + b.ipPool.Status.IPv4.Allocated[ipAddress] = macAddress + return b +} + +func (b *ipPoolBuilder) Available(count int) *ipPoolBuilder { + if b.ipPool.Status.IPv4 == nil { + b.ipPool.Status.IPv4 = new(networkv1.IPv4Status) + } + b.ipPool.Status.IPv4.Available = count + return b +} + +func (b *ipPoolBuilder) Used(count int) *ipPoolBuilder { + if b.ipPool.Status.IPv4 == nil { + b.ipPool.Status.IPv4 = new(networkv1.IPv4Status) + } + b.ipPool.Status.IPv4.Used = count + return b +} + func (b *ipPoolBuilder) RegisteredCondition(status corev1.ConditionStatus, reason, message string) *ipPoolBuilder { setRegisteredCondition(b.ipPool, status, reason, message) return b @@ -253,6 +282,57 @@ func (b *ipPoolBuilder) Build() *networkv1.IPPool { return b.ipPool } +type ipPoolStatusBuilder struct { + ipPoolStatus networkv1.IPPoolStatus +} + +func newIPPoolStatusBuilder() *ipPoolStatusBuilder { + return &ipPoolStatusBuilder{ + ipPoolStatus: networkv1.IPPoolStatus{}, + } +} + +func (b *ipPoolStatusBuilder) AgentPodRef(namespace, name string) *ipPoolStatusBuilder { + if b.ipPoolStatus.AgentPodRef == nil { + b.ipPoolStatus.AgentPodRef = new(networkv1.PodReference) + } + b.ipPoolStatus.AgentPodRef.Namespace = namespace + b.ipPoolStatus.AgentPodRef.Name = name + return b +} + +func (b *ipPoolStatusBuilder) RegisteredCondition(status corev1.ConditionStatus, reason, message string) *ipPoolStatusBuilder { + networkv1.Registered.SetStatus(&b.ipPoolStatus, string(status)) + networkv1.Registered.Reason(&b.ipPoolStatus, reason) + networkv1.Registered.Message(&b.ipPoolStatus, message) + return b +} + +func (b *ipPoolStatusBuilder) CacheReadyCondition(status corev1.ConditionStatus, reason, message string) *ipPoolStatusBuilder { + networkv1.CacheReady.SetStatus(&b.ipPoolStatus, string(status)) 
+	networkv1.CacheReady.Reason(&b.ipPoolStatus, reason)
+	networkv1.CacheReady.Message(&b.ipPoolStatus, message)
+	return b
+}
+
+func (b *ipPoolStatusBuilder) AgentReadyCondition(status corev1.ConditionStatus, reason, message string) *ipPoolStatusBuilder {
+	networkv1.AgentReady.SetStatus(&b.ipPoolStatus, string(status))
+	networkv1.AgentReady.Reason(&b.ipPoolStatus, reason)
+	networkv1.AgentReady.Message(&b.ipPoolStatus, message)
+	return b
+}
+
+func (b *ipPoolStatusBuilder) DisabledCondition(status corev1.ConditionStatus, reason, message string) *ipPoolStatusBuilder {
+	networkv1.Disabled.SetStatus(&b.ipPoolStatus, string(status))
+	networkv1.Disabled.Reason(&b.ipPoolStatus, reason)
+	networkv1.Disabled.Message(&b.ipPoolStatus, message)
+	return b
+}
+
+func (b *ipPoolStatusBuilder) Build() networkv1.IPPoolStatus {
+	return b.ipPoolStatus
+}
+
 type podBuilder struct {
 	pod *corev1.Pod
 }
@@ -268,6 +348,26 @@ func newPodBuilder(namespace, name string) *podBuilder {
 	}
 }
 
+func (b *podBuilder) PodReady(ready corev1.ConditionStatus) *podBuilder {
+	var found bool
+	if b.pod.Status.Conditions == nil {
+		b.pod.Status.Conditions = make([]corev1.PodCondition, 0, 1)
+	}
+	for i := range b.pod.Status.Conditions {
+		if b.pod.Status.Conditions[i].Type == corev1.PodReady {
+			b.pod.Status.Conditions[i].Status = ready
+			found = true
+		}
+	}
+	if !found {
+		b.pod.Status.Conditions = append(b.pod.Status.Conditions, corev1.PodCondition{
+			Type:   corev1.PodReady,
+			Status: ready,
+		})
+	}
+	return b
+}
+
 func (b *podBuilder) Build() *corev1.Pod {
 	return b.pod
 }
@@ -298,3 +398,60 @@ func (b *networkAttachmentDefinitionBuilder) Label(key, value string) *networkAt
 func (b *networkAttachmentDefinitionBuilder) Build() *cniv1.NetworkAttachmentDefinition {
 	return b.nad
 }
+
+type cacheAllocatorBuilder struct {
+	cacheAllocator *cache.CacheAllocator
+}
+
+func newCacheAllocatorBuilder() *cacheAllocatorBuilder {
+	return &cacheAllocatorBuilder{
+		cacheAllocator: cache.New(),
+	}
+}
+
+func (b *cacheAllocatorBuilder) MACSet(name string) *cacheAllocatorBuilder {
+	_ = b.cacheAllocator.NewMACSet(name)
+	return b
+}
+
+func (b *cacheAllocatorBuilder) Add(name, macAddress, ipAddress string) *cacheAllocatorBuilder {
+	_ = b.cacheAllocator.AddMAC(name, macAddress, ipAddress)
+	return b
+}
+
+func (b *cacheAllocatorBuilder) Build() *cache.CacheAllocator {
+	return b.cacheAllocator
+}
+
+type ipAllocatorBuilder struct {
+	ipAllocator *ipam.IPAllocator
+}
+
+func newIPAllocatorBuilder() *ipAllocatorBuilder {
+	return &ipAllocatorBuilder{
+		ipAllocator: ipam.New(),
+	}
+}
+
+func (b *ipAllocatorBuilder) IPSubnet(name, cidr, start, end string) *ipAllocatorBuilder {
+	_ = b.ipAllocator.NewIPSubnet(name, cidr, start, end)
+	return b
+}
+
+func (b *ipAllocatorBuilder) Revoke(name string, ipAddressList ...string) *ipAllocatorBuilder {
+	for _, ip := range ipAddressList {
+		_ = b.ipAllocator.RevokeIP(name, ip)
+	}
+	return b
+}
+
+func (b *ipAllocatorBuilder) Allocate(name string, ipAddressList ...string) *ipAllocatorBuilder {
+	for _, ip := range ipAddressList {
+		_, _ = b.ipAllocator.AllocateIP(name, ip)
+	}
+	return b
+}
+
+func (b *ipAllocatorBuilder) Build() *ipam.IPAllocator {
+	return b.ipAllocator
+}
diff --git a/pkg/controller/ippool/controller_test.go b/pkg/controller/ippool/controller_test.go
index 53f96b5..b9c5fc7 100644
--- a/pkg/controller/ippool/controller_test.go
+++ b/pkg/controller/ippool/controller_test.go
@@ -1,10 +1,10 @@
 package ippool
 
 import (
+	"fmt"
 	"testing"
+	"time"
 
-	cniv1 
"github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" - "github.com/rancher/wrangler/pkg/genericcondition" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -24,13 +24,40 @@ import ( const ( testIPPoolNamespace = "default" testIPPoolName = "ippool-1" - testPodNamespace = "default" + testKey = testIPPoolNamespace + "/" + testIPPoolName + testPodNamespace = "harvester-system" testPodName = "default-ippool-1-agent" testNADNamespace = "default" testNADName = "net-1" - testClusterNetworkName = "provider" + testClusterNetwork = "provider" + testServerIP = "192.168.0.2" + testNetworkName = testNADNamespace + "/" + testNADName + testCIDR = "192.168.0.0/24" + testStartIP = "192.168.0.101" + testEndIP = "192.168.0.200" + testServiceAccountName = "vdca" + testImageRepository = "rancher/harvester-vm-dhcp-controller" + testImageTag = "main" + + testExcludedIP1 = "192.168.0.150" + testExcludedIP2 = "192.168.0.187" + testExcludedIP3 = "192.168.0.10" + testExcludedIP4 = "192.168.0.235" + + testAllocatedIP1 = "192.168.0.111" + testAllocatedIP2 = "192.168.0.177" + testMAC1 = "11:22:33:44:55:66" + testMAC2 = "22:33:44:55:66:77" ) +func newTestCacheAllocatorBuilder() *cacheAllocatorBuilder { + return newCacheAllocatorBuilder() +} + +func newTestIPAllocatorBuilder() *ipAllocatorBuilder { + return newIPAllocatorBuilder() +} + func newTestIPPoolBuilder() *ipPoolBuilder { return newIPPoolBuilder(testIPPoolNamespace, testIPPoolName) } @@ -39,15 +66,20 @@ func newTestPodBuilder() *podBuilder { return newPodBuilder(testPodNamespace, testPodName) } +func newTestIPPoolStatusBuilder() *ipPoolStatusBuilder { + return newIPPoolStatusBuilder() +} + func newTestNetworkAttachmentDefinitionBuilder() *networkAttachmentDefinitionBuilder { return newNetworkAttachmentDefinitionBuilder(testNADNamespace, testNADName) } func TestHandler_OnChange(t *testing.T) { type input struct { - key string - ipPool *networkv1.IPPool - pods []*corev1.Pod + key string + ipAllocator *ipam.IPAllocator + ipPool *networkv1.IPPool + pods []*corev1.Pod } type output struct { @@ -61,10 +93,54 @@ func TestHandler_OnChange(t *testing.T) { given input expected output }{ + { + name: "new ippool", + given: input{ + key: testIPPoolNamespace + "/" + testIPPoolName, + ipAllocator: newTestIPAllocatorBuilder(). + Build(), + ipPool: newTestIPPoolBuilder(). + Build(), + }, + expected: output{ + ipPool: newTestIPPoolBuilder(). + DisabledCondition(corev1.ConditionFalse, "", ""). + CacheReadyCondition(corev1.ConditionFalse, "NotInitialized", ""). + Build(), + }, + }, + { + name: "ippool with ipam initialized", + given: input{ + key: testIPPoolNamespace + "/" + testIPPoolName, + ipAllocator: newTestIPAllocatorBuilder(). + IPSubnet(testNetworkName, testCIDR, testStartIP, testEndIP). + Build(), + ipPool: newTestIPPoolBuilder(). + ServerIP(testServerIP). + CIDR(testCIDR). + PoolRange(testStartIP, testEndIP). + NetworkName(testNetworkName). + Build(), + }, + expected: output{ + ipPool: newTestIPPoolBuilder(). + ServerIP(testServerIP). + CIDR(testCIDR). + PoolRange(testStartIP, testEndIP). + NetworkName(testNetworkName). + Available(100). + Used(0). + DisabledCondition(corev1.ConditionFalse, "", ""). + Build(), + }, + }, { name: "pause ippool", given: input{ - key: "default/ippool-1", + key: testIPPoolNamespace + "/" + testIPPoolName, + ipAllocator: newTestIPAllocatorBuilder(). + Build(), ipPool: newTestIPPoolBuilder(). Paused(). 
AgentPodRef("default", "default-ippool-1-agent"). @@ -81,6 +157,24 @@ func TestHandler_OnChange(t *testing.T) { Build(), }, }, + { + name: "resume ippool", + given: input{ + key: testIPPoolNamespace + "/" + testIPPoolName, + ipAllocator: newTestIPAllocatorBuilder(). + Build(), + ipPool: newTestIPPoolBuilder(). + UnPaused(). + Build(), + }, + expected: output{ + ipPool: newTestIPPoolBuilder(). + UnPaused(). + DisabledCondition(corev1.ConditionFalse, "", ""). + CacheReadyCondition(corev1.ConditionFalse, "NotInitialized", ""). + Build(), + }, + }, } for _, tc := range testCases { @@ -102,7 +196,7 @@ func TestHandler_OnChange(t *testing.T) { Tag: "main", }, cacheAllocator: cache.New(), - ipAllocator: ipam.New(), + ipAllocator: tc.given.ipAllocator, metricsAllocator: metrics.New(), ippoolClient: fakeclient.IPPoolClient(clientset.NetworkV1alpha1().IPPools), podClient: fakeclient.PodClient(k8sclientset.CoreV1().Pods), @@ -113,8 +207,9 @@ func TestHandler_OnChange(t *testing.T) { actual.ipPool, actual.err = handler.OnChange(tc.given.key, tc.given.ipPool) assert.Nil(t, actual.err) - emptyConditionsTimestamp(tc.expected.ipPool.Status.Conditions) - emptyConditionsTimestamp(actual.ipPool.Status.Conditions) + sanitizeStatus(&tc.expected.ipPool.Status) + sanitizeStatus(&actual.ipPool.Status) + assert.Equal(t, tc.expected.ipPool, actual.ipPool, tc.name) assert.Equal(t, tc.expected.pods, actual.pods) @@ -122,110 +217,394 @@ func TestHandler_OnChange(t *testing.T) { } func TestHandler_DeployAgent(t *testing.T) { - type input struct { - key string - ipPool *networkv1.IPPool - nad *cniv1.NetworkAttachmentDefinition - } + t.Run("ippool created", func(t *testing.T) { + givenIPPool := newTestIPPoolBuilder(). + ServerIP(testServerIP). + CIDR(testCIDR). + NetworkName(testNetworkName).Build() + givenNAD := newTestNetworkAttachmentDefinitionBuilder(). + Label(clusterNetworkLabelKey, testClusterNetwork).Build() - type output struct { - ipPoolStatus networkv1.IPPoolStatus - pod *corev1.Pod - err error - } + expectedStatus := newTestIPPoolStatusBuilder(). + AgentPodRef(testPodNamespace, testPodName).Build() + expectedPod := prepareAgentPod( + newIPPoolBuilder(testIPPoolNamespace, testIPPoolName). + ServerIP(testServerIP). + CIDR(testCIDR). + NetworkName(testNetworkName).Build(), + false, + testPodNamespace, + testClusterNetwork, + testServiceAccountName, + &config.Image{ + Repository: testImageRepository, + Tag: testImageTag, + }, + ) - testCases := []struct { - name string - given input - expected output - }{ - { - name: "resume ippool", - given: input{ - key: "default/ippool-1", - ipPool: newTestIPPoolBuilder(). - ServerIP("192.168.0.2"). - CIDR("192.168.0.0/24"). - NetworkName("default/net-1"). - Build(), - nad: newTestNetworkAttachmentDefinitionBuilder(). - Label(clusterNetworkLabelKey, testClusterNetworkName). 
- Build(), + nadGVR := schema.GroupVersionResource{ + Group: "k8s.cni.cncf.io", + Version: "v1", + Resource: "network-attachment-definitions", + } + + clientset := fake.NewSimpleClientset() + err := clientset.Tracker().Create(nadGVR, givenNAD, givenNAD.Namespace) + assert.Nil(t, err, "mock resource should add into fake controller tracker") + + k8sclientset := k8sfake.NewSimpleClientset() + + handler := Handler{ + agentNamespace: testPodNamespace, + agentImage: &config.Image{ + Repository: testImageRepository, + Tag: testImageTag, }, - expected: output{ - ipPoolStatus: networkv1.IPPoolStatus{ - AgentPodRef: &networkv1.PodReference{ - Namespace: "default", - Name: "default-ippool-1-agent", - }, - }, - pod: prepareAgentPod( - newIPPoolBuilder("default", "ippool-1"). - ServerIP("192.168.0.2"). - CIDR("192.168.0.0/24"). - NetworkName("default/net-1"). - Build(), - false, - "default", - "provider", - "vdca", - &config.Image{ - Repository: "rancher/harvester-vm-dhcp-controller", - Tag: "main", - }, - ), + agentServiceAccountName: testServiceAccountName, + nadCache: fakeclient.NetworkAttachmentDefinitionCache(clientset.K8sCniCncfIoV1().NetworkAttachmentDefinitions), + podClient: fakeclient.PodClient(k8sclientset.CoreV1().Pods), + } + + status, err := handler.DeployAgent(givenIPPool, givenIPPool.Status) + assert.Nil(t, err) + assert.Equal(t, expectedStatus, status) + + pod, err := handler.podClient.Get(testPodNamespace, testPodName, metav1.GetOptions{}) + assert.Nil(t, err) + assert.Equal(t, expectedPod, pod) + }) + + t.Run("ippool paused", func(t *testing.T) { + givenIPPool := newTestIPPoolBuilder(). + Paused().Build() + + handler := Handler{ + agentNamespace: testPodNamespace, + agentImage: &config.Image{ + Repository: testImageRepository, + Tag: testImageTag, }, - }, - } + agentServiceAccountName: testServiceAccountName, + } - nadGVR := schema.GroupVersionResource{ - Group: "k8s.cni.cncf.io", - Version: "v1", - Resource: "network-attachment-definitions", - } + _, err := handler.DeployAgent(givenIPPool, givenIPPool.Status) + assert.Equal(t, fmt.Errorf("ippool %s was administratively disabled", testIPPoolNamespace+"/"+testIPPoolName), err) + }) - for _, tc := range testCases { - clientset := fake.NewSimpleClientset(tc.given.ipPool) - if tc.given.nad != nil { - err := clientset.Tracker().Create(nadGVR, tc.given.nad, tc.given.nad.Namespace) - assert.Nil(t, err, "mock resource should add into fake controller tracker") + t.Run("nad not found", func(t *testing.T) { + givenIPPool := newTestIPPoolBuilder(). + NetworkName("you-cant-find-me").Build() + givenNAD := newTestNetworkAttachmentDefinitionBuilder(). + Label(clusterNetworkLabelKey, testClusterNetwork).Build() + + nadGVR := schema.GroupVersionResource{ + Group: "k8s.cni.cncf.io", + Version: "v1", + Resource: "network-attachment-definitions", + } + + clientset := fake.NewSimpleClientset() + err := clientset.Tracker().Create(nadGVR, givenNAD, givenNAD.Namespace) + assert.Nil(t, err, "mock resource should add into fake controller tracker") + + handler := Handler{ + nadCache: fakeclient.NetworkAttachmentDefinitionCache(clientset.K8sCniCncfIoV1().NetworkAttachmentDefinitions), + } + + _, err = handler.DeployAgent(givenIPPool, givenIPPool.Status) + assert.Equal(t, fmt.Sprintf("network-attachment-definitions.k8s.cni.cncf.io \"%s\" not found", "you-cant-find-me"), err.Error()) + }) + + t.Run("agent pod already exists", func(t *testing.T) { + givenIPPool := newTestIPPoolBuilder(). + ServerIP(testServerIP). + CIDR(testCIDR). + NetworkName(testNetworkName). 
+ AgentPodRef(testPodNamespace, testPodName).Build() + givenNAD := newTestNetworkAttachmentDefinitionBuilder(). + Label(clusterNetworkLabelKey, testClusterNetwork).Build() + givenPod := prepareAgentPod( + newIPPoolBuilder(testIPPoolNamespace, testIPPoolName). + ServerIP(testServerIP). + CIDR(testCIDR). + NetworkName(testNetworkName).Build(), + false, + testPodNamespace, + testClusterNetwork, + testServiceAccountName, + &config.Image{ + Repository: testImageRepository, + Tag: testImageTag, + }, + ) + + expectedStatus := newTestIPPoolStatusBuilder(). + AgentPodRef(testPodNamespace, testPodName).Build() + expectedPod := prepareAgentPod( + newIPPoolBuilder(testIPPoolNamespace, testIPPoolName). + ServerIP(testServerIP). + CIDR(testCIDR). + NetworkName(testNetworkName).Build(), + false, + testPodNamespace, + testClusterNetwork, + testServiceAccountName, + &config.Image{ + Repository: testImageRepository, + Tag: testImageTag, + }, + ) + + nadGVR := schema.GroupVersionResource{ + Group: "k8s.cni.cncf.io", + Version: "v1", + Resource: "network-attachment-definitions", } + clientset := fake.NewSimpleClientset() + err := clientset.Tracker().Create(nadGVR, givenNAD, givenNAD.Namespace) + assert.Nil(t, err, "mock resource should add into fake controller tracker") + k8sclientset := k8sfake.NewSimpleClientset() + err = k8sclientset.Tracker().Add(givenPod) + assert.Nil(t, err, "mock resource should add into fake controller tracker") handler := Handler{ - agentNamespace: "default", + agentNamespace: testPodNamespace, agentImage: &config.Image{ - Repository: "rancher/harvester-vm-dhcp-controller", - Tag: "main", + Repository: testImageRepository, + Tag: testImageTag, }, - agentServiceAccountName: "vdca", - cacheAllocator: cache.New(), - ipAllocator: ipam.New(), - metricsAllocator: metrics.New(), - ippoolClient: fakeclient.IPPoolClient(clientset.NetworkV1alpha1().IPPools), + agentServiceAccountName: testServiceAccountName, nadCache: fakeclient.NetworkAttachmentDefinitionCache(clientset.K8sCniCncfIoV1().NetworkAttachmentDefinitions), podClient: fakeclient.PodClient(k8sclientset.CoreV1().Pods), } - var actual output + status, err := handler.DeployAgent(givenIPPool, givenIPPool.Status) + assert.Nil(t, err) + assert.Equal(t, expectedStatus, status) - actual.ipPoolStatus, actual.err = handler.DeployAgent(tc.given.ipPool, tc.given.ipPool.Status) - assert.Nil(t, actual.err) + pod, err := handler.podClient.Get(testPodNamespace, testPodName, metav1.GetOptions{}) + assert.Nil(t, err) + assert.Equal(t, expectedPod, pod) + }) +} - emptyConditionsTimestamp(tc.expected.ipPoolStatus.Conditions) - emptyConditionsTimestamp(actual.ipPoolStatus.Conditions) - assert.Equal(t, tc.expected.ipPoolStatus, actual.ipPoolStatus, tc.name) +func TestHandler_BuildCache(t *testing.T) { + t.Run("new ippool", func(t *testing.T) { + givenIPAllocator := newTestIPAllocatorBuilder(). + Build() + givenCacheAllocator := newTestCacheAllocatorBuilder(). + Build() + givenIPPool := newTestIPPoolBuilder(). + CIDR(testCIDR). + PoolRange(testStartIP, testEndIP). + NetworkName(testNetworkName). + Build() - actual.pod, actual.err = handler.podClient.Get("default", "default-ippool-1-agent", metav1.GetOptions{}) - assert.Nil(t, actual.err) - assert.Equal(t, tc.expected.pod, actual.pod) - } + expectedIPAllocator := newTestIPAllocatorBuilder(). + IPSubnet(testNetworkName, testCIDR, testStartIP, testEndIP). + Build() + expectedCacheAllocator := newTestCacheAllocatorBuilder(). + MACSet(testNetworkName). 
+ Build() + + handler := Handler{ + cacheAllocator: givenCacheAllocator, + ipAllocator: givenIPAllocator, + } + + _, err := handler.BuildCache(givenIPPool, givenIPPool.Status) + assert.Nil(t, err) + + assert.Equal(t, expectedIPAllocator, handler.ipAllocator) + assert.Equal(t, expectedCacheAllocator, handler.cacheAllocator) + }) + + t.Run("ippool paused", func(t *testing.T) { + givenIPPool := newTestIPPoolBuilder(). + Paused(). + Build() + + handler := Handler{} + + _, err := handler.BuildCache(givenIPPool, givenIPPool.Status) + assert.Equal(t, fmt.Sprintf("ippool %s was administratively disabled", testIPPoolNamespace+"/"+testIPPoolName), err.Error()) + }) + + t.Run("cache is already ready", func(t *testing.T) { + givenIPPool := newTestIPPoolBuilder(). + CacheReadyCondition(corev1.ConditionTrue, "", ""). + Build() + + expectedStatus := newTestIPPoolStatusBuilder(). + CacheReadyCondition(corev1.ConditionTrue, "", ""). + Build() + + handler := Handler{} + + status, err := handler.BuildCache(givenIPPool, givenIPPool.Status) + assert.Nil(t, err) + assert.Equal(t, expectedStatus, status) + }) + + t.Run("ippool with excluded ips", func(t *testing.T) { + givenIPAllocator := newTestIPAllocatorBuilder(). + Build() + givenCacheAllocator := newTestCacheAllocatorBuilder(). + Build() + givenIPPool := newTestIPPoolBuilder(). + CIDR(testCIDR). + PoolRange(testStartIP, testEndIP). + Exclude(testExcludedIP1, testExcludedIP2). + NetworkName(testNetworkName). + Build() + + expectedIPAllocator := newTestIPAllocatorBuilder(). + IPSubnet(testNetworkName, testCIDR, testStartIP, testEndIP). + Revoke(testNetworkName, testExcludedIP1, testExcludedIP2). + Build() + expectedCacheAllocator := newTestCacheAllocatorBuilder(). + MACSet(testNetworkName). + Build() + + handler := Handler{ + cacheAllocator: givenCacheAllocator, + ipAllocator: givenIPAllocator, + } + + _, err := handler.BuildCache(givenIPPool, givenIPPool.Status) + assert.Nil(t, err) + + assert.Equal(t, expectedIPAllocator, handler.ipAllocator) + assert.Equal(t, expectedCacheAllocator, handler.cacheAllocator) + }) + + t.Run("rebuild caches", func(t *testing.T) { + givenIPAllocator := newTestIPAllocatorBuilder(). + Build() + givenCacheAllocator := newTestCacheAllocatorBuilder(). + Build() + givenIPPool := newTestIPPoolBuilder(). + CIDR(testCIDR). + PoolRange(testStartIP, testEndIP). + Exclude(testExcludedIP1, testExcludedIP2). + NetworkName(testNetworkName). + Allocated(testAllocatedIP1, testMAC1). + Allocated(testAllocatedIP2, testMAC2). + Build() + + expectedIPAllocator := newTestIPAllocatorBuilder(). + IPSubnet(testNetworkName, testCIDR, testStartIP, testEndIP). + Revoke(testNetworkName, testExcludedIP1, testExcludedIP2). + Allocate(testNetworkName, testAllocatedIP1, testAllocatedIP2). + Build() + expectedCacheAllocator := newTestCacheAllocatorBuilder(). + MACSet(testNetworkName). + Add(testNetworkName, testMAC1, testAllocatedIP1). + Add(testNetworkName, testMAC2, testAllocatedIP2). 
+ Build() + + handler := Handler{ + cacheAllocator: givenCacheAllocator, + ipAllocator: givenIPAllocator, + } + + _, err := handler.BuildCache(givenIPPool, givenIPPool.Status) + assert.Nil(t, err) + + assert.Equal(t, expectedIPAllocator, handler.ipAllocator) + assert.Equal(t, expectedCacheAllocator, handler.cacheAllocator) + }) +} + +func TestHandler_MonitorAgent(t *testing.T) { + t.Run("agent pod not found", func(t *testing.T) { + givenIPPool := newTestIPPoolBuilder().AgentPodRef(testPodNamespace, testPodName).Build() + givenPod := newPodBuilder("default", "nginx").Build() + + k8sclientset := k8sfake.NewSimpleClientset() + + err := k8sclientset.Tracker().Add(givenPod) + assert.Nil(t, err, "mock resource should add into fake controller tracker") + + handler := Handler{ + podCache: fakeclient.PodCache(k8sclientset.CoreV1().Pods), + } + + _, err = handler.MonitorAgent(givenIPPool, givenIPPool.Status) + assert.Equal(t, fmt.Sprintf("pods \"%s\" not found", testPodName), err.Error()) + }) + + t.Run("agent pod unready", func(t *testing.T) { + givenIPPool := newTestIPPoolBuilder().AgentPodRef(testPodNamespace, testPodName).Build() + givenPod := newTestPodBuilder().Build() + + k8sclientset := k8sfake.NewSimpleClientset() + + err := k8sclientset.Tracker().Add(givenPod) + assert.Nil(t, err, "mock resource should add into fake controller tracker") + + handler := Handler{ + podCache: fakeclient.PodCache(k8sclientset.CoreV1().Pods), + } + + _, err = handler.MonitorAgent(givenIPPool, givenIPPool.Status) + assert.Equal(t, fmt.Sprintf("agent for ippool %s is not ready", testPodNamespace+"/"+testPodName), err.Error()) + }) + + t.Run("agent pod ready", func(t *testing.T) { + givenIPPool := newTestIPPoolBuilder().AgentPodRef(testPodNamespace, testPodName).Build() + givenPod := newTestPodBuilder().PodReady(corev1.ConditionTrue).Build() + + k8sclientset := k8sfake.NewSimpleClientset() + + err := k8sclientset.Tracker().Add(givenPod) + assert.Nil(t, err, "mock resource should add into fake controller tracker") + + handler := Handler{ + podCache: fakeclient.PodCache(k8sclientset.CoreV1().Pods), + } + + _, err = handler.MonitorAgent(givenIPPool, givenIPPool.Status) + assert.Nil(t, err) + }) + + t.Run("ippool paused", func(t *testing.T) { + givenIPPool := newTestIPPoolBuilder().Paused().Build() + + handler := Handler{} + + _, err := handler.MonitorAgent(givenIPPool, givenIPPool.Status) + assert.Equal(t, fmt.Sprintf("ippool %s was administratively disabled", testIPPoolNamespace+"/"+testIPPoolName), err.Error()) + }) + + t.Run("ippool in no-agent mode", func(t *testing.T) { + givenIPPool := newTestIPPoolBuilder().Build() + + handler := Handler{ + noAgent: true, + } + + _, err := handler.MonitorAgent(givenIPPool, givenIPPool.Status) + assert.Nil(t, err) + }) + + t.Run("agentpodref not set", func(t *testing.T) { + givenIPPool := newTestIPPoolBuilder().Build() + + handler := Handler{} + + _, err := handler.MonitorAgent(givenIPPool, givenIPPool.Status) + assert.Equal(t, fmt.Sprintf("agent for ippool %s is not deployed", testIPPoolNamespace+"/"+testIPPoolName), err.Error()) + }) } -func emptyConditionsTimestamp(conditions []genericcondition.GenericCondition) { - for i := range conditions { - conditions[i].LastTransitionTime = "" - conditions[i].LastUpdateTime = "" +func sanitizeStatus(status *networkv1.IPPoolStatus) { + now := time.Time{} + status.LastUpdate = metav1.NewTime(now) + for i := range status.Conditions { + status.Conditions[i].LastTransitionTime = "" + status.Conditions[i].LastUpdateTime = "" } }