[fix] getExistingSharedIPs should get Public IPs from the holder
dthorsen committed Sep 20, 2024
1 parent 708ee98 commit 0568585
Showing 2 changed files with 76 additions and 5 deletions.
4 changes: 2 additions & 2 deletions cloud/linode/cilium_loadbalancers.go
@@ -96,8 +96,8 @@ func (l *loadbalancers) getExistingSharedIPs(ctx context.Context, ipHolder *lino
     if err != nil {
         return nil, err
     }
-    addrs := make([]string, 0, len(ipHolderAddrs.IPv4.Shared))
-    for _, addr := range ipHolderAddrs.IPv4.Shared {
+    addrs := make([]string, 0, len(ipHolderAddrs.IPv4.Public))
+    for _, addr := range ipHolderAddrs.IPv4.Public {
         addrs = append(addrs, addr.Address)
     }
     return addrs, nil
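
For orientation, a minimal sketch of how the helper reads after this change. Only the Shared-to-Public switch is taken from the hunk above; the full signature, the nil-holder guard, and the l.client.GetInstanceIPAddresses call are assumptions inferred from the surrounding context, not code copied from the repository.

// Sketch only: everything outside the hunk above (parameter type, nil guard,
// client lookup) is assumed for illustration.
func (l *loadbalancers) getExistingSharedIPs(ctx context.Context, ipHolder *linodego.Instance) ([]string, error) {
    if ipHolder == nil { // assumed guard
        return nil, nil
    }
    // Assumed lookup; the hunk only shows its error check.
    ipHolderAddrs, err := l.client.GetInstanceIPAddresses(ctx, ipHolder.ID)
    if err != nil {
        return nil, err
    }
    // The fix: read the holder's Public IPv4 list, which the updated test mocks
    // show containing the shared address, instead of the Shared list.
    addrs := make([]string, 0, len(ipHolderAddrs.IPv4.Public))
    for _, addr := range ipHolderAddrs.IPv4.Public {
        addrs = append(addrs, addr.Address)
    }
    return addrs, nil
}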
77 changes: 74 additions & 3 deletions cloud/linode/cilium_loadbalancers_test.go
@@ -58,6 +58,17 @@ var (
             },
         },
     }
+    additionalNodes = []*v1.Node{
+        {
+            ObjectMeta: metav1.ObjectMeta{
+                Name:   "node-5",
+                Labels: map[string]string{"cilium-bgp-peering": "true"},
+            },
+            Spec: v1.NodeSpec{
+                ProviderID: fmt.Sprintf("%s%d", providerIDPrefix, 55555),
+            },
+        },
+    }
     publicIPv4 = net.ParseIP("45.76.101.25")
     ipHolderInstance = linodego.Instance{
         ID: 12345,
@@ -93,6 +104,10 @@ func TestCiliumCCMLoadBalancers(t *testing.T) {
             name: "Delete Cilium Load Balancer",
             f:    testEnsureCiliumLoadBalancerDeleted,
         },
+        {
+            name: "Add node to existing Cilium Load Balancer",
+            f:    testCiliumUpdateLoadBalancerAddNode,
+        },
     }
     for _, tc := range testCases {
         ctrl := gomock.NewController(t)
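
The new entry is picked up by the existing table-driven runner whose first two lines appear in the context above; a rough sketch of that loop follows, with the mock construction and the t.Run wrapper assumed rather than copied from the file.

// Assumed runner shape: only the for-loop header and the gomock controller
// line are visible in the hunk; the rest is illustrative.
for _, tc := range testCases {
    ctrl := gomock.NewController(t)
    mc := mocks.NewMockClient(ctrl) // assumed generated-mock constructor
    t.Run(tc.name, func(t *testing.T) {
        defer ctrl.Finish()
        tc.f(t, mc) // each case, including the new one, runs against a fresh mock client
    })
}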
@@ -165,7 +180,7 @@ func testNoBGPNodeLabel(t *testing.T, mc *mocks.MockClient) {
     mc.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Times(1).Return(&ipHolderInstance, nil)
     mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), ipHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{
         IPv4: &linodego.InstanceIPv4Response{
-            Shared: []*linodego.InstanceIP{{Address: dummySharedIP}},
+            Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}},
         },
     }, nil)
     mc.EXPECT().AddInstanceIPAddress(gomock.Any(), ipHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil)
@@ -226,7 +241,7 @@ func testCreateWithExistingIPHolder(t *testing.T, mc *mocks.MockClient) {
     mc.EXPECT().AddInstanceIPAddress(gomock.Any(), ipHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil)
     mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), ipHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{
         IPv4: &linodego.InstanceIPv4Response{
-            Shared: []*linodego.InstanceIP{{Address: dummySharedIP}},
+            Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}},
         },
     }, nil)
     mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{
@@ -264,7 +279,7 @@ func testCreateWithNoExistingIPHolder(t *testing.T, mc *mocks.MockClient) {
     mc.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Times(1).Return(&ipHolderInstance, nil)
     mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), ipHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{
         IPv4: &linodego.InstanceIPv4Response{
-            Shared: []*linodego.InstanceIP{{Address: dummySharedIP}},
+            Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}},
         },
     }, nil)
     mc.EXPECT().AddInstanceIPAddress(gomock.Any(), ipHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil)
@@ -311,3 +326,59 @@ func testEnsureCiliumLoadBalancerDeleted(t *testing.T, mc *mocks.MockClient) {
         t.Fatalf("expected a nil error, got %v", err)
     }
 }
+
+func testCiliumUpdateLoadBalancerAddNode(t *testing.T, mc *mocks.MockClient) {
+    Options.BGPNodeSelector = "cilium-bgp-peering=true"
+    svc := createTestService()
+
+    kubeClient, _ := k8sClient.NewFakeClientset()
+    ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake}
+    addService(t, kubeClient, svc)
+    addNodes(t, kubeClient, nodes)
+    lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType}
+
+    filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)}
+    rawFilter, _ := json.Marshal(filter)
+    mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{ipHolderInstance}, nil)
+    dummySharedIP := "45.76.101.26"
+    mc.EXPECT().AddInstanceIPAddress(gomock.Any(), ipHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil)
+    mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), ipHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{
+        IPv4: &linodego.InstanceIPv4Response{
+            Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}},
+        },
+    }, nil)
+    mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{
+        IPs:      []string{dummySharedIP},
+        LinodeID: 11111,
+    }).Times(1)
+    mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{
+        IPs:      []string{dummySharedIP},
+        LinodeID: 22222,
+    }).Times(1)
+
+    lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes)
+    if err != nil {
+        t.Fatalf("expected a nil error, got %v", err)
+    }
+    if lbStatus == nil {
+        t.Fatal("expected non-nil lbStatus")
+    }
+
+    // Now add another node to the cluster and assert that it gets the shared IP
+    mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{ipHolderInstance}, nil)
+    mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), ipHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{
+        IPv4: &linodego.InstanceIPv4Response{
+            Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}},
+        },
+    }, nil)
+    mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{
+        IPs:      []string{dummySharedIP},
+        LinodeID: 55555,
+    }).Times(1)
+    addNodes(t, kubeClient, additionalNodes)
+
+    err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, additionalNodes)
+    if err != nil {
+        t.Fatalf("expected a nil error, got %v", err)
+    }
+}
