diff --git a/cmd/lbManager.go b/cmd/lbManager.go new file mode 100644 index 0000000..17810f6 --- /dev/null +++ b/cmd/lbManager.go @@ -0,0 +1,418 @@ +/* +Copyright © 2023 NAME HERE +*/ +package cmd + +import ( + "context" + "fmt" + "os" + "os/signal" + "syscall" + "time" + + "github.com/spf13/cobra" + corev1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/leaderelection/resourcelock" + "k8s.io/klog/v2" + + "github.com/meln5674/kink/pkg/kubectl" +) + +var ( + guestKubeConfig kubectl.KubeFlags + lbSvcLeaderElectionIdentity string + lbSvcLeaderElectionLease time.Duration + lbSvcLeaderElectionRenew time.Duration + lbSvcLeaderElectionRetry time.Duration + + lbServiceCreated bool +) + +// lbManagerCmd represents the lbManager command +var lbManagerCmd = &cobra.Command{ + Use: "lb-manager", + Short: "Watch a guest cluster for NodePort and LoadBalancer services", + Long: `While running, NodePort and LoadBalancer services in the guest cluster will +manifest as extra ports on a dynamically managed service within the host cluster. LoadBalancer +type services will also have their ingress IPs set to this service IP. + `, + Run: func(cmd *cobra.Command, args []string) { + err := func() error { + var err error + ctx, stop := context.WithCancel(context.TODO()) + defer stop() + + hostConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + &clientcmd.ClientConfigLoadingRules{ + ExplicitPath: config.Kubernetes.Kubeconfig, + }, + &config.Kubernetes.ConfigOverrides, + ).ClientConfig() + if err != nil { + return err + } + + guestConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + &clientcmd.ClientConfigLoadingRules{ + ExplicitPath: guestKubeConfig.Kubeconfig, + }, + &guestKubeConfig.ConfigOverrides, + ).ClientConfig() + if err != nil { + return err + } + + hostClient, err := kubernetes.NewForConfig(hostConfig) + if err != nil { + return err + } + + guestClient, err := kubernetes.NewForConfig(guestConfig) + if err != nil { + return err + } + + guestInformer := informers.NewSharedInformerFactory(guestClient, 5*time.Minute) + + handler := ServiceEventHandler{ + Host: hostClient.CoreV1(), + Guest: guestClient.CoreV1(), + Ctx: ctx, + Target: corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: releaseConfig.LoadBalancerFullname, + Namespace: releaseNamespace, + Labels: releaseConfig.LoadBalancerLabels, + Annotations: releaseConfig.LoadBalancerAnnotations, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, // TODO: Should this be configurable? 
+ Ports: make([]corev1.ServicePort, 0), + Selector: releaseConfig.WorkerSelectorLabels, + }, + }, + NodePorts: make(map[int32]corev1.ServicePort), + } + + leaderChan := make(chan struct{}) + leaderLock, err := resourcelock.NewFromKubeconfig( + resourcelock.LeasesResourceLock, + releaseNamespace, + releaseConfig.LBManagerFullname, + resourcelock.ResourceLockConfig{Identity: lbSvcLeaderElectionIdentity}, + hostConfig, + lbSvcLeaderElectionRenew, + ) + if err != nil { + return err + } + elector, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{ + Lock: leaderLock, + Name: handler.Target.Name, + LeaseDuration: lbSvcLeaderElectionLease, + RenewDeadline: lbSvcLeaderElectionRenew, + RetryPeriod: lbSvcLeaderElectionRetry, + Callbacks: leaderelection.LeaderCallbacks{ + OnStartedLeading: func(context.Context) { + klog.Info("Became Leader") + leaderChan <- struct{}{} + }, + OnStoppedLeading: func() { + klog.Info("No longer the leader") + stop() + }, + }, + ReleaseOnCancel: true, + }) + if err != nil { + return err + } + go elector.Run(ctx) + + _ = <-leaderChan + + err = handler.InitNodePorts() + if err != nil { + return err + } + err = handler.CreateOrUpdateHostLB() + if err != nil { + return err + } + + guestInformer.Core().V1().Services().Informer().AddEventHandler(&handler) + klog.Info("Starting guest cluster service watch") + guestInformer.Start(ctx.Done()) + + sigChan := make(chan os.Signal, 2) + + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) + + _ = <-sigChan + + klog.Info("Exiting") + return nil + }() + + if err != nil { + klog.Fatal(err) + } + }, +} + +func init() { + rootCmd.AddCommand(lbManagerCmd) + lbManagerCmd.PersistentFlags().StringVar(&guestKubeConfig.Kubeconfig, "guest-kubeconfig", "", "Path to the kubeconfig file to use for accessing the guest cluster") + lbManagerCmd.PersistentFlags().StringVar(&lbSvcLeaderElectionIdentity, "leader-election-id", "", "Identity for leader election") + lbManagerCmd.PersistentFlags().DurationVar(&lbSvcLeaderElectionLease, "leader-election-lease", 15*time.Second, "Lease duration for leader election") + lbManagerCmd.PersistentFlags().DurationVar(&lbSvcLeaderElectionRenew, "leader-election-renew", 10*time.Second, "Renewal deadline for leader election") + lbManagerCmd.PersistentFlags().DurationVar(&lbSvcLeaderElectionRetry, "leader-election-retry", 2*time.Second, "Retry period for leader election") + // If we don't do this, the short names overlap with the host k8s flags + guestFlags := clientcmd.RecommendedConfigOverrideFlags("guest-") + guestFlagPtrs := []*clientcmd.FlagInfo{ + &guestFlags.AuthOverrideFlags.ClientCertificate, + &guestFlags.AuthOverrideFlags.ClientKey, + &guestFlags.AuthOverrideFlags.Token, + &guestFlags.AuthOverrideFlags.Impersonate, + &guestFlags.AuthOverrideFlags.ImpersonateUID, + &guestFlags.AuthOverrideFlags.ImpersonateGroups, + &guestFlags.AuthOverrideFlags.Username, + &guestFlags.AuthOverrideFlags.Password, + &guestFlags.ClusterOverrideFlags.APIServer, + &guestFlags.ClusterOverrideFlags.APIVersion, + &guestFlags.ClusterOverrideFlags.CertificateAuthority, + &guestFlags.ClusterOverrideFlags.InsecureSkipTLSVerify, + &guestFlags.ClusterOverrideFlags.TLSServerName, + &guestFlags.ClusterOverrideFlags.ProxyURL, + //&guestFlags.ClusterOverrideFlags.DisableCompression, + &guestFlags.ContextOverrideFlags.ClusterName, + &guestFlags.ContextOverrideFlags.AuthInfoName, + &guestFlags.ContextOverrideFlags.Namespace, + &guestFlags.CurrentContext, + &guestFlags.Timeout, + } + + for _, ptr := range guestFlagPtrs { 
+		ptr.ShortName = ""
+	}
+
+	clientcmd.BindOverrideFlags(&guestKubeConfig.ConfigOverrides, lbManagerCmd.PersistentFlags(), guestFlags)
+}
+
+func objString(i interface{}) string {
+	obj, ok := i.(metav1.Object)
+	if !ok {
+		return fmt.Sprintf("%#v", i)
+	}
+	typ, ok := i.(metav1.Type)
+	if !ok {
+		return fmt.Sprintf("%#v", i)
+	}
+	return fmt.Sprintf("%s/%s/%s/%s", typ.GetAPIVersion(), typ.GetKind(), obj.GetNamespace(), obj.GetName())
+}
+
+type ServiceEventHandler struct {
+	Host      corev1client.ServicesGetter
+	Guest     corev1client.ServicesGetter
+	Ctx       context.Context
+	Target    corev1.Service
+	NodePorts map[int32]corev1.ServicePort
+}
+
+func (s *ServiceEventHandler) SetPorts() {
+	ports := make([]corev1.ServicePort, 0, len(s.NodePorts))
+	for _, port := range s.NodePorts {
+		ports = append(ports, port)
+	}
+	if len(ports) == 0 {
+		ports = []corev1.ServicePort{
+			{
+				Name:       "tmp",
+				Port:       1,
+				TargetPort: intstr.FromInt(1),
+			},
+		}
+	}
+	s.Target.Spec.Ports = ports
+}
+
+func (s *ServiceEventHandler) AddPortsFor(svc *corev1.Service) {
+	for _, port := range svc.Spec.Ports {
+		s.NodePorts[port.NodePort] = ConvertPort(&port)
+	}
+}
+
+func (s *ServiceEventHandler) RemovePortsFor(svc *corev1.Service) {
+	if svc.Spec.Type == corev1.ServiceTypeNodePort || svc.Spec.Type == corev1.ServiceTypeLoadBalancer {
+		for _, port := range svc.Spec.Ports {
+			delete(s.NodePorts, port.NodePort)
+		}
+	}
+}
+
+func (s *ServiceEventHandler) InitNodePorts() error {
+	klog.Info("Fetching initial state of LB service")
+	svc, err := s.Host.Services(s.Target.Namespace).Get(s.Ctx, s.Target.Name, metav1.GetOptions{})
+	if kerrors.IsNotFound(err) {
+		return nil
+	}
+	if err != nil {
+		return err
+	}
+	s.Target = *svc
+	for _, port := range s.Target.Spec.Ports {
+		s.NodePorts[int32(port.TargetPort.IntValue())] = port
+	}
+	return nil
+}
+
+func (s *ServiceEventHandler) CreateOrUpdateHostLB() error {
+	klog.Infof("Generated Service: %#v", s.Target)
+
+	svc, err := s.Host.Services(s.Target.Namespace).Get(s.Ctx, s.Target.Name, metav1.GetOptions{})
+	if kerrors.IsNotFound(err) {
+		s.SetPorts()
+		klog.Infof("Generated Service: %#v", s.Target)
+		svc, err := s.Host.Services(s.Target.Namespace).Create(s.Ctx, &s.Target, metav1.CreateOptions{})
+		if err != nil {
+			return err
+		}
+		s.Target = *svc
+		return nil
+	}
+	if err != nil {
+		return err
+	}
+
+	s.SetPorts()
+	klog.Infof("Generated Service: %#v", s.Target)
+	svc, err = s.Host.Services(s.Target.Namespace).Update(s.Ctx, &s.Target, metav1.UpdateOptions{})
+	if err != nil {
+		return err
+	}
+	s.Target = *svc
+	return nil
+}
+
+func (s *ServiceEventHandler) SetLBIngress(svc *corev1.Service) error {
+	klog.Infof("Setting LoadBalancer IP for %s/%s", svc.Namespace, svc.Name)
+	svc.Status.LoadBalancer.Ingress = []corev1.LoadBalancerIngress{
+		{
+			IP: s.Target.Spec.ClusterIP,
+		},
+	}
+	newSvc, err := s.Guest.Services(svc.Namespace).UpdateStatus(s.Ctx, svc, metav1.UpdateOptions{})
+	if err != nil {
+		return err
+	}
+	*svc = *newSvc
+	return nil
+}
+
+func (s *ServiceEventHandler) OnAdd(obj interface{}) {
+	svc, ok := obj.(*corev1.Service)
+	if !ok {
+		klog.Warningf("Got unexpected guest resource %s", objString(obj))
+		return
+	}
+
+	if !(svc.Spec.Type == corev1.ServiceTypeNodePort || svc.Spec.Type == corev1.ServiceTypeLoadBalancer) {
+		return
+	}
+	klog.Infof("Got new guest service %s", objString(obj))
+
+	s.AddPortsFor(svc)
+
+	err := s.CreateOrUpdateHostLB()
+	if err != nil {
+		klog.Error(err)
+		return
+	}
+
+	if svc.Spec.Type != corev1.ServiceTypeLoadBalancer {
+		klog.Infof("Ignoring %s guest service", svc.Spec.Type)
+		return
+	}
+
+	err = s.SetLBIngress(svc)
+	if err != nil {
+		klog.Error(err)
+		return
+	}
+}
+
+func ConvertPort(port *corev1.ServicePort) corev1.ServicePort {
+	return corev1.ServicePort{
+		Name:        fmt.Sprintf("%d", port.NodePort),
+		Protocol:    port.Protocol,
+		AppProtocol: port.AppProtocol,
+		Port:        port.NodePort,
+		TargetPort:  intstr.FromInt(int(port.NodePort)),
+	}
+}
+
+func (s *ServiceEventHandler) OnUpdate(oldObj, newObj interface{}) {
+	oldSvc, ok := oldObj.(*corev1.Service)
+	if !ok {
+		klog.Warningf("Got unexpected guest resource %s", objString(oldObj))
+		return
+	}
+	newSvc, ok := newObj.(*corev1.Service)
+	if !ok {
+		klog.Warningf("Got unexpected guest resource %s", objString(newObj))
+		return
+	}
+
+	klog.Infof("Got updated guest service %s", objString(newObj))
+
+	if oldSvc.Spec.Type == corev1.ServiceTypeNodePort || oldSvc.Spec.Type == corev1.ServiceTypeLoadBalancer {
+		s.RemovePortsFor(oldSvc)
+	}
+
+	if !(newSvc.Spec.Type == corev1.ServiceTypeNodePort || newSvc.Spec.Type == corev1.ServiceTypeLoadBalancer) {
+		return
+	}
+
+	s.AddPortsFor(newSvc)
+
+	err := s.CreateOrUpdateHostLB()
+	if err != nil {
+		klog.Error(err)
+		return
+	}
+
+	if newSvc.Spec.Type != corev1.ServiceTypeLoadBalancer {
+		klog.Infof("Ignoring/removing ports for %s service", newSvc.Spec.Type)
+		return
+	}
+
+	err = s.SetLBIngress(newSvc)
+	if err != nil {
+		klog.Error(err)
+	}
+}
+
+func (s *ServiceEventHandler) OnDelete(obj interface{}) {
+	svc, ok := obj.(*corev1.Service)
+	if !ok {
+		klog.Warningf("Got unexpected guest resource %s", objString(obj))
+		return
+	}
+
+	s.RemovePortsFor(svc)
+
+	err := s.CreateOrUpdateHostLB()
+	if err != nil {
+		klog.Error(err)
+		return
+	}
+}
diff --git a/e2e/e2e_suite_test.go b/e2e/e2e_suite_test.go
index 0a7255b..a5c48f4 100644
--- a/e2e/e2e_suite_test.go
+++ b/e2e/e2e_suite_test.go
@@ -1,6 +1,7 @@
 package e2e_test
 
 import (
+	"crypto/tls"
 	"flag"
 	"fmt"
 	"io/ioutil"
@@ -9,11 +10,11 @@ import (
 	"path/filepath"
 	"strings"
 	"testing"
-	"time"
 
 	. "github.com/onsi/ginkgo/v2"
 	gtypes "github.com/onsi/ginkgo/v2/types"
 	. "github.com/onsi/gomega"
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/klog/v2"
 
 	"github.com/google/go-containerregistry/pkg/crane"
@@ -64,6 +65,10 @@ var (
 	sharedLocalPathProvisionerStorage = filepath.Join(localPathProvisionerStorageRoot, sharedLocalPathProvisionerStorageRel)
 	sharedLocalPathProvisionerMount   = "/var/shared-local-path-provisioner"
 
+	ingressNginxChartRepo    = "https://kubernetes.github.io/ingress-nginx"
+	ingressNginxChartName    = "ingress-nginx"
+	ingressNginxChartVersion = "4.4.2"
+
 	wordpressChartRepo    = "https://charts.bitnami.com/bitnami"
 	wordpressChartName    = "wordpress"
 	wordpressChartVersion = "15.2.7"
@@ -124,14 +129,24 @@ func DeferExpectStop(cmd gosh.Commander) {
 func BuildImage() {
 	imageRepo = "local.host/meln5674/kink"
-	imageTag = fmt.Sprintf("%d", time.Now().Unix())
+	imageTag = "it" //fmt.Sprintf("%d", time.Now().Unix())
 	builtImage = fmt.Sprintf("%s:%s", imageRepo, imageTag)
 	ExpectRun(
-		gosh.
-			Command(docker.Build(&dockerOpts, builtImage, "..")...).
-			WithParentEnvAnd(map[string]string{"DOCKER_BUILDKIT": "1"}).
-			WithStreams(GinkgoOutErr),
+		/*
+			gosh.
+				Command(docker.Build(&dockerOpts, builtImage, "..")...).
+				WithParentEnvAnd(map[string]string{"DOCKER_BUILDKIT": "1"}).
+				WithStreams(GinkgoOutErr),
+		*/
+		gosh.And(
+			gosh.
+				Command("../build-env.sh", "make", "-C", "..", "bin/kink").
+				WithParentEnvAnd(map[string]string{"IMAGE_TAG": imageTag}),
+			gosh.
+ Command(docker.Build(&dockerOpts, builtImage, "..", "-f", "../standalone.Dockerfile")...). + WithParentEnvAnd(map[string]string{"DOCKER_BUILDKIT": "1"}), + ).WithStreams(GinkgoOutErr), ) } @@ -183,7 +198,7 @@ func InitKindCluster() { pwd, err := os.Getwd() Expect(err).ToNot(HaveOccurred()) kindConfig, err := ioutil.ReadFile(kindConfigPath + ".tpl") - kindConfig = []byte(strings.ReplaceAll(string(kindConfig), "${PWD}", pwd)) + kindConfig = []byte(strings.ReplaceAll(string(kindConfig), "${PWD}", filepath.Join(pwd, ".."))) ioutil.WriteFile(kindConfigPath, kindConfig, 0700) ExpectRun(kindOpts.CreateCluster(kindConfigPath, kindKubeconfigPath)) @@ -239,7 +254,7 @@ type KinkFlags struct { ClusterName string } -func (k *KinkFlags) Kink(ku *kubectl.KubeFlags, args ...string) *gosh.Cmd { +func (k *KinkFlags) Kink(ku *kubectl.KubeFlags, chart *helm.ChartFlags, release *helm.ReleaseFlags, args ...string) *gosh.Cmd { cmd := make([]string, 0, len(k.Command)+len(args)) cmd = append(cmd, k.Command...) cmd = append(cmd, flags.AsFlags(ku.Flags())...) @@ -249,91 +264,119 @@ func (k *KinkFlags) Kink(ku *kubectl.KubeFlags, args ...string) *gosh.Cmd { if k.ClusterName != "" { cmd = append(cmd, "--name", k.ClusterName) } - cmd = append(cmd, args...) - return gosh.Command(cmd...).UsingProcessGroup() -} - -func (k *KinkFlags) CreateCluster(ku *kubectl.KubeFlags, targetKubeconfigPath string, chart *helm.ChartFlags, release *helm.ReleaseFlags) *gosh.Cmd { - args := []string{"create", "cluster"} if chart.ChartName != "" { - args = append(args, "--chart", chart.ChartName) + cmd = append(cmd, "--chart", chart.ChartName) } if chart.RepositoryURL != "" { - args = append(args, "--repository-url", chart.RepositoryURL) + cmd = append(cmd, "--repository-url", chart.RepositoryURL) } if chart.Version != "" { - args = append(args, "--chart-version", chart.Version) + cmd = append(cmd, "--chart-version", chart.Version) } + cmd = append(cmd, release.ValuesFlags()...) + cmd = append(cmd, args...) + return gosh.Command(cmd...).UsingProcessGroup() +} + +func (k *KinkFlags) CreateCluster(ku *kubectl.KubeFlags, targetKubeconfigPath string, chart *helm.ChartFlags, release *helm.ReleaseFlags) *gosh.Cmd { + args := []string{"create", "cluster"} if targetKubeconfigPath != "" { args = append(args, "--out-kubeconfig", targetKubeconfigPath) } - args = append(args, release.ValuesFlags()...) - return k.Kink(ku, args...) + return k.Kink(ku, chart, release, args...) } -func (k *KinkFlags) DeleteCluster(ku *kubectl.KubeFlags) *gosh.Cmd { - return k.Kink(ku, "delete", "cluster") +func (k *KinkFlags) DeleteCluster(ku *kubectl.KubeFlags, chart *helm.ChartFlags, release *helm.ReleaseFlags) *gosh.Cmd { + return k.Kink(ku, chart, release, "delete", "cluster") } -func (k *KinkFlags) Shell(ku *kubectl.KubeFlags, script string) *gosh.Cmd { - return k.Kink(ku, "sh", "--", script) +func (k *KinkFlags) Shell(ku *kubectl.KubeFlags, chart *helm.ChartFlags, release *helm.ReleaseFlags, script string) *gosh.Cmd { + return k.Kink(ku, chart, release, "sh", "--", script) } -func (k *KinkFlags) Load(ku *kubectl.KubeFlags, typ string, flags []string, flag string, items ...string) *gosh.Cmd { +func (k *KinkFlags) Load(ku *kubectl.KubeFlags, chart *helm.ChartFlags, release *helm.ReleaseFlags, typ string, flags []string, flag string, items ...string) *gosh.Cmd { args := []string{"load", typ} args = append(args, flags...) for _, item := range items { args = append(args, "--"+flag, item) } - return k.Kink(ku, args...) + return k.Kink(ku, chart, release, args...) 
+} + +func (k *KinkFlags) LoadDockerImage(ku *kubectl.KubeFlags, chart *helm.ChartFlags, release *helm.ReleaseFlags, flags []string, images ...string) *gosh.Cmd { + return k.Load(ku, chart, release, "docker-image", flags, "image", images...) +} + +func (k *KinkFlags) LoadDockerArchive(ku *kubectl.KubeFlags, chart *helm.ChartFlags, release *helm.ReleaseFlags, flags []string, archives ...string) *gosh.Cmd { + return k.Load(ku, chart, release, "docker-archive", flags, "archive", archives...) +} + +func (k *KinkFlags) LoadOCIArchive(ku *kubectl.KubeFlags, chart *helm.ChartFlags, release *helm.ReleaseFlags, flags []string, archives ...string) *gosh.Cmd { + return k.Load(ku, chart, release, "oci-archive", flags, "archive", archives...) } -func (k *KinkFlags) LoadDockerImage(ku *kubectl.KubeFlags, flags []string, images ...string) *gosh.Cmd { - return k.Load(ku, "docker-image", flags, "image", images...) +func (k *KinkFlags) PortForward(ku *kubectl.KubeFlags, chart *helm.ChartFlags, release *helm.ReleaseFlags) *gosh.Cmd { + return k.Kink(ku, chart, release, "port-forward") } -func (k *KinkFlags) LoadDockerArchive(ku *kubectl.KubeFlags, flags []string, archives ...string) *gosh.Cmd { - return k.Load(ku, "docker-archive", flags, "archive", archives...) +type ExtraChart struct { + Chart helm.ChartFlags + Release helm.ReleaseFlags } -func (k *KinkFlags) LoadOCIArchive(ku *kubectl.KubeFlags, flags []string, archives ...string) *gosh.Cmd { - return k.Load(ku, "oci-archive", flags, "archive", archives...) +type CaseIngressService struct { + Namespace string + Name string + HTTPPortName string + HTTPSPortName string + WordpressHostname string } -func (k *KinkFlags) PortForward(ku *kubectl.KubeFlags) *gosh.Cmd { - return k.Kink(ku, "port-forward") +type Case struct { + Name string + LoadFlags []string + WordpressSet map[string]string + ExtraCharts []ExtraChart + Ingress CaseIngressService } -func Case(name string, loadFlags []string, set map[string]string) bool { - return Describe(name, func() { +func (c Case) Run() bool { + return Describe(c.Name, func() { It("should work", func() { kinkOpts := KinkFlags{ Command: []string{"go", "run", "../main.go"}, - ConfigPath: filepath.Join("../integration-test", "kink."+name+".config.yaml"), - ClusterName: name, + ConfigPath: filepath.Join("../integration-test", "kink."+c.Name+".config.yaml"), + ClusterName: c.Name, } if _, gconfig := GinkgoConfiguration(); gconfig.Verbosity().GTE(gtypes.VerbosityLevelVerbose) { kinkOpts.Command = append(kinkOpts.Command, "-v11") } - kinkKubeconfigPath := filepath.Join("../integration-test", "kink."+name+".kubeconfig") + kinkKubeconfigPath := filepath.Join("../integration-test", "kink."+c.Name+".kubeconfig") + + ExpectRun(gosh.Command(kubectl.Kubectl(&kubectlOpts, &kindKubeOpts, "create", "namespace", c.Name)...).WithStreams(GinkgoOutErr)) + + chart := helm.ChartFlags{ + ChartName: "../helm/kink", + } + release := helm.ReleaseFlags{ + Set: map[string]string{ + "image.repository": imageRepo, + "image.tag": imageTag, + }, + } By("Creating a cluster") ExpectRun(kinkOpts.CreateCluster( &kindKubeOpts, kinkKubeconfigPath, - &helm.ChartFlags{ - ChartName: "../helm/kink", - }, - &helm.ReleaseFlags{ - Set: map[string]string{ - "image.repository": imageRepo, - "image.tag": imageTag, - }, - }, + &chart, + &release, ).WithStreams(GinkgoOutErr)) DeferCleanup(func() { ExpectRun(kinkOpts.DeleteCluster( &kindKubeOpts, + &chart, + &release, ).WithStreams(GinkgoOutErr)) }) @@ -344,6 +387,8 @@ func Case(name string, loadFlags []string, set 
map[string]string) bool {
 			By("Connecting to the controlplane w/ kubectl within a shell script")
 			ExpectRun(kinkOpts.Shell(
 				&kindKubeOpts,
+				&chart,
+				&release,
 				`
set -xe
while ! kubectl version ; do
	sleep 10;
done
@@ -361,23 +406,44 @@ func Case(name string, loadFlags []string, set map[string]string) bool {
`,
 			).WithStreams(GinkgoOutErr))
 
-			wordpressLoadFlags := make([]string, 0, len(loadFlags)+2)
-			wordpressLoadFlags = append(wordpressLoadFlags, loadFlags...)
+			wordpressLoadFlags := make([]string, 0, len(c.LoadFlags)+2)
+			wordpressLoadFlags = append(wordpressLoadFlags, c.LoadFlags...)
 			wordpressLoadFlags = append(wordpressLoadFlags, "--parallel-loads", "1")
 			By("Loading an image from the docker daemon")
-			ExpectRun(kinkOpts.LoadDockerImage(&kindKubeOpts, wordpressLoadFlags, wordpressImage).WithStreams(GinkgoOutErr))
+			ExpectRun(kinkOpts.LoadDockerImage(
+				&kindKubeOpts,
+				&chart,
+				&release,
+				wordpressLoadFlags,
+				wordpressImage,
+			).WithStreams(GinkgoOutErr))
 
 			By("Loading an image from a docker archive")
 			ExpectRun(gosh.Command(docker.Save(&dockerOpts, mariadbImage)...).WithStreams(gosh.FileOut(mariadbTarballPath), GinkgoErr))
-			ExpectRun(kinkOpts.LoadDockerArchive(&kindKubeOpts, loadFlags, mariadbTarballPath).WithStreams(GinkgoOutErr))
+			ExpectRun(kinkOpts.LoadDockerArchive(
+				&kindKubeOpts,
+				&chart,
+				&release,
+				c.LoadFlags,
+				mariadbTarballPath,
+			).WithStreams(GinkgoOutErr))
 
 			By("Loading an image from an OCI archive")
-			ExpectRun(kinkOpts.LoadOCIArchive(&kindKubeOpts, loadFlags, memcachedTarballPath).WithStreams(GinkgoOutErr))
+			ExpectRun(kinkOpts.LoadOCIArchive(
+				&kindKubeOpts,
+				&chart,
+				&release,
+				c.LoadFlags, memcachedTarballPath,
+			).WithStreams(GinkgoOutErr))
 
 			By("Forwarding the controlplane port")
-			controlplanePortForward := kinkOpts.PortForward(&kindKubeOpts).WithStreams(GinkgoOutErr)
+			controlplanePortForward := kinkOpts.PortForward(
+				&kindKubeOpts,
+				&chart,
+				&release,
+			).WithStreams(GinkgoOutErr)
 			ExpectStart(controlplanePortForward)
 			DeferCleanup(func() {
 				ExpectStop(controlplanePortForward)
@@ -395,7 +461,15 @@ func Case(name string, loadFlags []string, set map[string]string) bool {
 				ExpectStop(podWatch)
 			})
 
-			By("Releasing a helm chart")
+			for _, chart := range c.ExtraCharts {
+				By(fmt.Sprintf("Releasing the %s helm chart", chart.Chart.ChartName))
+
+				ExpectRun(gosh.Command(helm.RepoAdd(&helmOpts, &chart.Chart)...).WithStreams(GinkgoOutErr))
+				ExpectRun(gosh.Command(helm.RepoUpdate(&helmOpts, chart.Chart.RepoName())...).WithStreams(GinkgoOutErr))
+				ExpectRun(gosh.Command(helm.Upgrade(&helmOpts, &chart.Chart, &chart.Release, &kinkKubeOpts)...).WithStreams(GinkgoOutErr))
+			}
+
+			By("Releasing the main helm chart")
 
 			wordpressChart := helm.ChartFlags{
 				RepositoryURL: wordpressChartRepo,
@@ -405,7 +479,7 @@ func Case(name string, loadFlags []string, set map[string]string) bool {
 			wordpressRelease := helm.ReleaseFlags{
 				Name:         "wordpress",
 				UpgradeFlags: []string{"--debug"},
-				Set:          set,
+				Set:          c.WordpressSet,
 			}
 			ExpectRun(gosh.Command(helm.RepoAdd(&helmOpts, &wordpressChart)...).WithStreams(GinkgoOutErr))
 			ExpectRun(gosh.Command(helm.RepoUpdate(&helmOpts, wordpressChart.RepoName())...).WithStreams(GinkgoOutErr))
@@ -413,13 +487,68 @@ func Case(name string, loadFlags []string, set map[string]string) bool {
 
 			By("Interacting with the released service")
 
-			wordpressPortForward := gosh.Command(kubectl.PortForward(&kubectlOpts, &kinkKubeOpts, "svc/wordpress", map[string]string{"8080": "80"})...).WithStreams(GinkgoOutErr)
-			ExpectStart(wordpressPortForward)
-			DeferCleanup(func() {
-				ExpectStop(wordpressPortForward)
-			})
+			func() {
+				wordpressPortForward := gosh.Command(kubectl.PortForward(&kubectlOpts, &kinkKubeOpts, "svc/wordpress", map[string]string{"8080": "80"})...).WithStreams(GinkgoOutErr)
+				ExpectStart(wordpressPortForward)
+				defer ExpectStop(wordpressPortForward)
 
-			Eventually(func() error { _, err := http.Get("http://localhost:8080"); return err }, "10s", "1s").Should(Succeed())
+				Eventually(func() error { _, err := http.Get("http://localhost:8080"); return err }, "10s", "1s").Should(Succeed())
+			}()
+
+			if c.Ingress.Name == "" {
+				return
+			}
+
+			svc := corev1.Service{}
+			ExpectRun(gosh.
+				Command(kubectl.Kubectl(&kubectlOpts, &kinkKubeOpts, "get", "service", "--namespace", c.Ingress.Namespace, c.Ingress.Name, "-o", "json")...).
+				WithStreams(
+					gosh.ForwardErr,
+					gosh.FuncOut(gosh.SaveJSON(&svc)),
+				),
+			)
+
+			httpPort := int32(0)
+			httpsPort := int32(0)
+			for _, port := range svc.Spec.Ports {
+				if port.Name == c.Ingress.HTTPPortName {
+					httpPort = port.NodePort
+				}
+				if port.Name == c.Ingress.HTTPSPortName {
+					httpsPort = port.NodePort
+				}
+			}
+			func() {
+				kubeOpts := kindKubeOpts
+				kubeOpts.ConfigOverrides.Context.Namespace = c.Name
+				portForward := gosh.
+					Command(kubectl.PortForward(&kubectlOpts, &kubeOpts, fmt.Sprintf("svc/kink-%s-lb", c.Name), map[string]string{"8080": fmt.Sprintf("%d", httpPort)})...).
+					WithStreams(GinkgoOutErr)
+				ExpectStart(portForward)
+				defer ExpectStop(portForward)
+				req, err := http.NewRequest("GET", "http://localhost:8080", nil)
+				Expect(err).ToNot(HaveOccurred())
+				req.Host = c.Ingress.WordpressHostname
+
+				Eventually(func() error { _, err := http.DefaultClient.Do(req); return err }, "10s", "1s").Should(Succeed())
+			}()
+			func() {
+				kubeOpts := kindKubeOpts
+				kubeOpts.ConfigOverrides.Context.Namespace = c.Name
+				portForward := gosh.
+					Command(kubectl.PortForward(&kubectlOpts, &kubeOpts, fmt.Sprintf("svc/kink-%s-lb", c.Name), map[string]string{"8080": fmt.Sprintf("%d", httpsPort)})...).
+					WithStreams(GinkgoOutErr)
+				ExpectStart(portForward)
+				defer ExpectStop(portForward)
+
+				req, err := http.NewRequest("GET", "https://localhost:8080", nil)
+				Expect(err).ToNot(HaveOccurred())
+				req.Host = c.Ingress.WordpressHostname
+				// TODO: Actually set up a cert for this
+				http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
+				Eventually(func() error { _, err := http.DefaultClient.Do(req); return err }, "10s", "1s").Should(Succeed())
+
+			}()
 		})
 	})
@@ -439,61 +568,94 @@ func CleanupPVCDirs() {
 	)
 }
 
-var _ = Case("k3s", []string{}, map[string]string{
-	"persistence.enabled":        "true",
-	"persistence.storageClass":   "shared-local-path",
-	"persistence.accessModes":    "{ReadWriteMany}",
-	"replicaCount":               "2",
-	"podAntiAffinityPreset":      "hard",
-	"mariadb.enabled":            "true",
-	"memcached.enabled":          "true",
-	"service.type":               "ClusterIP",
-	"ingress.enabled":            "true",
-	"image.pullPolicy":           "Never",
-	"mariadb.image.pullPolicy":   "Never",
-	"memcached.image.pullPolicy": "Never",
-})
-
-var _ = Case("k3s-single", []string{"--only-load-workers=false"}, map[string]string{
-	"persistence.enabled":        "true",
-	"persistence.storageClass":   "shared-local-path",
-	"persistence.accessModes":    "{ReadWriteMany}",
-	"replicaCount":               "1",
-	"mariadb.enabled":            "true",
-	"memcached.enabled":          "true",
-	"service.type":               "ClusterIP",
-	"ingress.enabled":            "true",
-	"image.pullPolicy":           "Never",
-	"mariadb.image.pullPolicy":   "Never",
-	"memcached.image.pullPolicy": "Never",
-})
-
-var _ = Case("k3s-ha", []string{}, map[string]string{
-	"persistence.enabled":        "true",
-	"persistence.storageClass":   "shared-local-path",
-	"persistence.accessModes":    "{ReadWriteMany}",
-	"replicaCount":               "2",
-	"podAntiAffinityPreset":      "hard",
-	"mariadb.enabled":            "true",
-	"memcached.enabled":          "true",
-	"service.type":               "ClusterIP",
-	"ingress.enabled":            "true",
-	"image.pullPolicy":           "Never",
-	"mariadb.image.pullPolicy":   "Never",
-	"memcached.image.pullPolicy": "Never",
-})
-
-var _ = Case("rke2", []string{}, map[string]string{
-	"persistence.enabled":        "true",
-	"persistence.storageClass":   "shared-local-path",
-	"persistence.accessModes":    "{ReadWriteMany}",
-	"replicaCount":               "2",
-	"podAntiAffinityPreset":      "hard",
-	"mariadb.enabled":            "true",
-	"memcached.enabled":          "true",
-	"service.type":               "ClusterIP",
-	"ingress.enabled":            "true",
-	"image.pullPolicy":           "Never",
-	"mariadb.image.pullPolicy":   "Never",
-	"memcached.image.pullPolicy": "Never",
-})
+var _ = Case{
+	Name: "k3s",
+	WordpressSet: map[string]string{
+		"persistence.enabled":        "true",
+		"persistence.storageClass":   "shared-local-path",
+		"persistence.accessModes":    "{ReadWriteMany}",
+		"replicaCount":               "2",
+		"podAntiAffinityPreset":      "hard",
+		"service.type":               "ClusterIP",
+		"image.pullPolicy":           "Never",
+		"ingress.enabled":            "true",
+		"ingress.hostname":           "wordpress.ingress.local",
+		"mariadb.enabled":            "true",
+		"mariadb.image.pullPolicy":   "Never",
+		"memcached.enabled":          "true",
+		"memcached.image.pullPolicy": "Never",
+	},
+	ExtraCharts: []ExtraChart{
+		{
+			Chart: helm.ChartFlags{
+				RepositoryURL: ingressNginxChartRepo,
+				ChartName:     ingressNginxChartName,
+				Version:       ingressNginxChartVersion,
+			},
+			Release: helm.ReleaseFlags{
+				Name: "ingress-nginx",
+			},
+		},
+	},
+	Ingress: CaseIngressService{
+		Namespace:         "default",
+		Name:              "ingress-nginx-controller",
+		HTTPPortName:      "http",
+		HTTPSPortName:     "https",
+		WordpressHostname: "wordpress.ingress.local",
+	},
+}.Run()
+
+var _ = Case{
+	Name:      "k3s-single",
+	LoadFlags:
[]string{"--only-load-workers=false"}, + WordpressSet: map[string]string{ + "persistence.enabled": "true", + "persistence.storageClass": "shared-local-path", + "persistence.accessModes": "{ReadWriteMany}", + "replicaCount": "1", + "mariadb.enabled": "true", + "memcached.enabled": "true", + "service.type": "ClusterIP", + "ingress.enabled": "true", + "image.pullPolicy": "Never", + "mariadb.image.pullPolicy": "Never", + "memcached.image.pullPolicy": "Never", + }, +}.Run() + +var _ = Case{ + Name: "k3s-ha", + WordpressSet: map[string]string{ + "persistence.enabled": "true", + "persistence.storageClass": "shared-local-path", + "persistence.accessModes": "{ReadWriteMany}", + "replicaCount": "2", + "podAntiAffinityPreset": "hard", + "mariadb.enabled": "true", + "memcached.enabled": "true", + "service.type": "ClusterIP", + "ingress.enabled": "true", + "image.pullPolicy": "Never", + "mariadb.image.pullPolicy": "Never", + "memcached.image.pullPolicy": "Never", + }, +}.Run() + +var _ = Case{ + Name: "rke2", + WordpressSet: map[string]string{ + "persistence.enabled": "true", + "persistence.storageClass": "shared-local-path", + "persistence.accessModes": "{ReadWriteMany}", + "replicaCount": "2", + "podAntiAffinityPreset": "hard", + "mariadb.enabled": "true", + "memcached.enabled": "true", + "service.type": "ClusterIP", + "ingress.enabled": "true", + "image.pullPolicy": "Never", + "mariadb.image.pullPolicy": "Never", + "memcached.image.pullPolicy": "Never", + }, +}.Run() diff --git a/helm/kink/templates/_helpers.tpl b/helm/kink/templates/_helpers.tpl index 1773382..5a442bf 100644 --- a/helm/kink/templates/_helpers.tpl +++ b/helm/kink/templates/_helpers.tpl @@ -31,6 +31,15 @@ If release name contains chart name it will be used as a full name. {{- include "kink.fullname" . }}-worker {{- end }} +{{- define "kink.lb-manager.fullname" -}} +{{- include "kink.fullname" . }}-lb-manager +{{- end }} + +{{- define "kink.load-balancer.fullname" -}} +{{- include "kink.fullname" . }}-lb +{{- end }} + + {{- define "kink.kubeconfig.fullname" -}} {{- include "kink.fullname" . }}-kubeconfig {{- end }} @@ -71,6 +80,22 @@ app.kubernetes.io/component: worker {{- end }} {{- end -}} +{{- define "kink.lb-manager.labels" -}} +{{ include "kink.labels" . }} +app.kubernetes.io/component: lb-manager +{{- with .Values.loadBalancer.extraLabels }} +{{ . | toYaml }} +{{- end }} +{{- end -}} + +{{- define "kink.load-balancer.labels" -}} +{{ include "kink.labels" . }} +app.kubernetes.io/component: load-balancer +{{- with .Values.loadBalancer.service.labels }} +{{ . | toYaml }} +{{- end }} +{{- end -}} + {{- define "kink.kubeconfig.labels" -}} {{ include "kink.labels" . }} app.kubernetes.io/component: kubeconfig @@ -105,6 +130,15 @@ app.kubernetes.io/component: worker {{- end }} {{- end -}} +{{- define "kink.lb-manager.selectorLabels" -}} +{{ include "kink.selectorLabels" . }} +app.kubernetes.io/component: lb-manager +{{- with .Values.loadBalancer.extraLabels }} +{{ . | toYaml }} +{{- end }} +{{- end -}} + + {{/* Create the name of the service account to use */}} @@ -125,6 +159,14 @@ Create the name of the service account to use {{- end }} {{- end }} +{{- define "kink.lb-manager.serviceAccountName" -}} +{{- if .Values.loadBalancer.manager.serviceAccount.create }} +{{- default (include "kink.lb-manager.fullname" .) 
.Values.loadBalancer.manager.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.loadBalancer.manager.serviceAccount.name }} +{{- end }} +{{- end }} + {{- define "kink.kubeconfig.serviceAccountName" -}} {{- if .Values.kubeconfig.job.serviceAccount.create }} {{- default (include "kink.kubeconfig.fullname" .) .Values.kubeconfig.job.serviceAccount.name }} diff --git a/helm/kink/templates/configmap.yaml b/helm/kink/templates/configmap.yaml index 064f1d8..034a116 100644 --- a/helm/kink/templates/configmap.yaml +++ b/helm/kink/templates/configmap.yaml @@ -2,12 +2,16 @@ fullname: {{ include "kink.fullname" . }} controlplane.fullname: {{ include "kink.controlplane.fullname" . }} controlplane.port: '{{ .Values.controlplane.service.api.port }}' +load-balancer.fullname: {{ include "kink.load-balancer.fullname" . }} +lb-manager.fullname: {{ include "kink.lb-manager.fullname" . }} labels: '{{ include "kink.labels" . | fromYaml | toJson }}' selectorLabels: '{{ include "kink.selectorLabels" . | fromYaml | toJson }}' controlplane.labels: '{{ include "kink.controlplane.labels" . | fromYaml | toJson }}' controlplane.selectorLabels: '{{ include "kink.controlplane.selectorLabels" . | fromYaml | toJson }}' worker.labels: '{{ include "kink.worker.labels" . | fromYaml | toJson }}' worker.selectorLabels: '{{ include "kink.worker.selectorLabels" . | fromYaml | toJson }}' +load-balancer.labels: '{{ include "kink.load-balancer.labels" . | fromYaml | toJson }}' +load-balancer.annotations: '{{ .Values.loadBalancer.service.annotations | toJson }}' rke2.enabled: '{{ .Values.rke2.enabled }}' {{- end -}} diff --git a/helm/kink/templates/load-balancer/manager-deploy.yaml b/helm/kink/templates/load-balancer/manager-deploy.yaml new file mode 100644 index 0000000..e0f3f24 --- /dev/null +++ b/helm/kink/templates/load-balancer/manager-deploy.yaml @@ -0,0 +1,94 @@ +{{- if .Values.loadBalancer.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "kink.lb-manager.fullname" . }} + labels: + {{- include "kink.load-balancer.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.loadBalancer.manager.replicaCount }} + selector: + matchLabels: + {{- include "kink.lb-manager.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.loadBalancer.manager.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "kink.lb-manager.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "kink.lb-manager.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.loadBalancer.manager.podSecurityContext | nindent 8 }} + containers: + - name: init + securityContext: + {{- toYaml .Values.loadBalancer.manager.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- with .Values.extraEnv }} + {{- . | toYaml | nindent 10 }} + {{- end }} + {{- with .Values.loadBalancer.manager.extraEnv }} + {{- . 
| toYaml | nindent 10 }}
+          {{- end }}
+          command:
+          - kink
+          - lb-manager
+          args:
+          - --release-config-mount=/etc/kink/release
+          - --namespace={{ .Release.Namespace }}
+          - --leader-election-id=$(POD_NAME)
+          - --guest-kubeconfig=/etc/kink/kubeconfig
+          resources:
+            {{- toYaml .Values.loadBalancer.manager.resources | nindent 12 }}
+          volumeMounts:
+          - name: release
+            mountPath: /etc/kink/release
+          - name: kubeconfig
+            mountPath: /etc/kink/kubeconfig
+            subPath: config
+          {{- with .Values.extraVolumeMounts }}
+          {{- . | toYaml | nindent 10 }}
+          {{- end }}
+          {{- with .Values.loadBalancer.manager.extraVolumeMounts }}
+          {{- . | toYaml | nindent 10 }}
+          {{- end }}
+
+      {{- with .Values.loadBalancer.manager.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.loadBalancer.manager.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.loadBalancer.manager.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      volumes:
+      - name: release
+        configMap:
+          name: {{ include "kink.fullname" . }}
+      - name: kubeconfig
+        secret:
+          secretName: {{ include "kink.kubeconfig.fullname" . }}
+      {{- with .Values.extraVolumes }}
+      {{- . | toYaml | nindent 6 }}
+      {{- end }}
+      {{- with .Values.loadBalancer.manager.extraVolumes }}
+      {{- . | toYaml | nindent 6 }}
+      {{- end }}
+{{- end }}
diff --git a/helm/kink/templates/load-balancer/rbac.yaml b/helm/kink/templates/load-balancer/rbac.yaml
new file mode 100644
index 0000000..c413c8a
--- /dev/null
+++ b/helm/kink/templates/load-balancer/rbac.yaml
@@ -0,0 +1,39 @@
+{{- if and .Values.loadBalancer.enabled .Values.loadBalancer.manager.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: {{ include "kink.lb-manager.fullname" . }}
+  labels:
+    {{ include "kink.lb-manager.labels" . | nindent 4 }}
+rules:
+- apiGroups: ['']
+  resources: ['services']
+  verbs: ['get', 'watch', 'update', 'patch', 'delete']
+  resourceNames: ['{{ include "kink.load-balancer.fullname" . }}']
+- apiGroups: ['']
+  resources: ['services']
+  verbs: ['create']
+- apiGroups: ['coordination.k8s.io']
+  resources: ['leases']
+  verbs: ['*']
+  resourceNames: ['{{ include "kink.lb-manager.fullname" . }}']
+- apiGroups: ['coordination.k8s.io']
+  resources: ['leases']
+  verbs: ['create']
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: {{ include "kink.lb-manager.fullname" . }}
+  labels:
+    {{ include "kink.lb-manager.labels" . | nindent 4 }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ include "kink.lb-manager.fullname" . }}
+subjects:
+- apiGroup: ""
+  kind: ServiceAccount
+  name: {{ include "kink.lb-manager.serviceAccountName" . }}
+  namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/helm/kink/templates/load-balancer/serviceaccount.yaml b/helm/kink/templates/load-balancer/serviceaccount.yaml
new file mode 100644
index 0000000..4344cba
--- /dev/null
+++ b/helm/kink/templates/load-balancer/serviceaccount.yaml
@@ -0,0 +1,12 @@
+{{- if and .Values.loadBalancer.enabled .Values.loadBalancer.manager.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "kink.lb-manager.serviceAccountName" . }}
+  labels:
+    {{- include "kink.load-balancer.labels" . | nindent 4 }}
+  {{- with .Values.loadBalancer.manager.serviceAccount.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+{{- end }}
diff --git a/helm/kink/values.yaml b/helm/kink/values.yaml
index 10937b3..c9939ba 100644
--- a/helm/kink/values.yaml
+++ b/helm/kink/values.yaml
@@ -202,6 +202,64 @@ worker:
   extraVolumes: []
   extraVolumeMounts: []
 
+# If enabled, an additional deployment will be created which watches the guest cluster
+# for NodePort and LoadBalancer type services, and dynamically manages a service on
+# the host cluster named {{ fullname }}-lb with the same ports
+loadBalancer:
+  enabled: false
+  extraLabels: {}
+  service:
+    labels: {}
+    annotations: {}
+  manager:
+    replicaCount: 1
+    serviceAccount:
+      # Specifies whether a service account should be created
+      create: true
+      # Annotations to add to the service account
+      annotations: {}
+      # The name of the service account to use.
+      # If not set and create is true, a name is generated using the fullname template
+      name: ""
+    rbac:
+      # If true, create a role and rolebinding to provide access to the dynamic service
+      create: true
+
+    podAnnotations: {}
+
+    podSecurityContext: {}
+      # fsGroup: 2000
+
+    securityContext: {}
+      # capabilities:
+      #   drop:
+      #   - ALL
+      # readOnlyRootFilesystem: true
+      # runAsNonRoot: true
+      # runAsUser: 1000
+
+    resources: {}
+      # We usually recommend not to specify default resources and to leave this as a conscious
+      # choice for the user. This also increases chances charts run on environments with little
+      # resources, such as Minikube. If you do want to specify resources, uncomment the following
+      # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+      # limits:
+      #   cpu: 100m
+      #   memory: 128Mi
+      # requests:
+      #   cpu: 100m
+      #   memory: 128Mi
+
+    nodeSelector: {}
+
+    tolerations: []
+
+    affinity: {}
+
+    extraEnv: []
+    extraArgs: []
+    extraVolumes: []
+    extraVolumeMounts: []
 
 # If enabled, run a hook job to create a secret containing a kubeconfig usable by mounting within another pod
 kubeconfig:
diff --git a/integration-test/kink.k3s-ha.config.yaml b/integration-test/kink.k3s-ha.config.yaml
index 887f6da..6554bd0 100644
--- a/integration-test/kink.k3s-ha.config.yaml
+++ b/integration-test/kink.k3s-ha.config.yaml
@@ -1,5 +1,8 @@
 apiVersion: kink.meln5674.github.com/v0
-Kind: Config
+kind: Config
+kubernetes:
+  context:
+    namespace: k3s-ha
 release:
   set:
     image.pullPolicy: Never
@@ -11,3 +14,4 @@ release:
     worker.persistence.enabled: "true"
     sharedPersistence.enabled: "true"
    sharedPersistence.storageClassName: "shared-local-path"
+    controlplane.extraArgs[0]: '--disable=traefik'
diff --git a/integration-test/kink.k3s-single.config.yaml b/integration-test/kink.k3s-single.config.yaml
index 86f9976..09996bd 100644
--- a/integration-test/kink.k3s-single.config.yaml
+++ b/integration-test/kink.k3s-single.config.yaml
@@ -1,5 +1,8 @@
 apiVersion: kink.meln5674.github.com/v0
-Kind: Config
+kind: Config
+kubernetes:
+  context:
+    namespace: k3s-single
 release:
   set:
     image.pullPolicy: Never
@@ -10,4 +13,4 @@ release:
     worker.replicaCount: "0"
     sharedPersistence.enabled: "true"
     sharedPersistence.storageClassName: "shared-local-path"
-
+    controlplane.extraArgs[0]: '--disable=traefik'
diff --git a/integration-test/kink.k3s.config.yaml b/integration-test/kink.k3s.config.yaml
index 74a952a..512b10a 100644
--- a/integration-test/kink.k3s.config.yaml
+++ b/integration-test/kink.k3s.config.yaml
@@ -1,5 +1,8 @@
 apiVersion: kink.meln5674.github.com/v0
-Kind: Config
+kind: Config
+kubernetes:
+  context:
+    namespace: k3s
 release:
   set:
     image.pullPolicy: Never
@@ -31,6 +34,11 @@ release:
workers.extraVolumeMounts[0].mountPath: /tmp/baz extraArgs[0]: -v controlplane.extraArgs[0]: '--debug' + controlplane.extraArgs[1]: '--disable=traefik' + controlplane.extraArgs[2]: '--disable=servicelb' + controlplane.extraArgs[3]: '--service-node-port-range=30000-30001' workers.extraArgs[0]: '--node-label=foo=bar' + loadBalancer.enabled: 'true' + kubeconfig.enabled: 'true' setString: extraArgs[1]: '1' diff --git a/integration-test/kink.rke2.config.yaml b/integration-test/kink.rke2.config.yaml index 56580a7..6d9a0bc 100644 --- a/integration-test/kink.rke2.config.yaml +++ b/integration-test/kink.rke2.config.yaml @@ -1,5 +1,8 @@ apiVersion: kink.meln5674.github.com/v0 -Kind: Config +kind: Config +kubernetes: + context: + namespace: rke2 release: upgradeFlags: ['--timeout=15m'] set: @@ -13,3 +16,5 @@ release: rke2.enabled: "true" sharedPersistence.enabled: "true" sharedPersistence.storageClassName: "shared-local-path" + controlplane.extraArgs[0]: '--disable=rke2-ingress-nginx' + controlplane.extraArgs[1]: '--disable=rke2-metrics-server' diff --git a/integration-test/run-case.sh b/integration-test/run-case.sh index 572c7a1..1df8e8a 100755 --- a/integration-test/run-case.sh +++ b/integration-test/run-case.sh @@ -22,6 +22,7 @@ fi if ! ("${KINK_COMMAND[@]}" get cluster | tee /dev/stderr | grep -w "${KINK_CLUSTER_NAME}") || [ -z "${KINK_IT_NO_KINK_CREATE}" ]; then "${KINK_COMMAND[@]}" create cluster \ + --chart ./helm/kink \ --set image.repository="${IMAGE_REPO}" \ --set image.tag="${IMAGE_TAG}" \ --out-kubeconfig="${KINK_KUBECONFIG}" diff --git a/pkg/config/config.go b/pkg/config/config.go index 93a90ee..96f715a 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -169,12 +169,16 @@ type ReleaseConfig struct { Fullname string `json:"fullname"` ControlplaneFullname string `json:"controlplane.fullname"` ControlplanePort Int `json:"controlplane.port"` + LoadBalancerFullname string `json:"load-balancer.fullname"` + LBManagerFullname string `json:"lb-manager.fullname"` Labels StringMap `json:"labels"` SelectorLabels StringMap `json:"selectorLabels"` ControlplaneLabels StringMap `json:"controlplane.labels"` ControlplaneSelectorLabels StringMap `json:"controlplane.selectorLabels"` WorkerLabels StringMap `json:"worker.labels"` WorkerSelectorLabels StringMap `json:"worker.selectorLabels"` + LoadBalancerLabels StringMap `json:"load-balancer.labels"` + LoadBalancerAnnotations StringMap `json:"load-balancer.annotations"` RKE2Enabled Bool `json:"rke2.enabled"` }
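
The port-mirroring contract in `cmd/lbManager.go` reduces to `ConvertPort`: the manager keys its `NodePorts` map by the guest port's `NodePort`, and every guest `NodePort`/`LoadBalancer` port surfaces on the host `{{ fullname }}-lb` service under that same number, targeting the same node port on the worker pods selected by `WorkerSelectorLabels`. A minimal, self-contained sketch of just that mapping, using the same `k8s.io/api` types as the diff (the `main` wrapper is illustration only, not part of the change):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// convertPort mirrors ConvertPort from cmd/lbManager.go: a guest NodePort
// becomes a host ServicePort whose name, port, and targetPort are all the
// NodePort number, so the host LB service forwards straight through to the
// same node port on the worker pods.
func convertPort(port corev1.ServicePort) corev1.ServicePort {
	return corev1.ServicePort{
		Name:        fmt.Sprintf("%d", port.NodePort),
		Protocol:    port.Protocol,
		AppProtocol: port.AppProtocol,
		Port:        port.NodePort,
		TargetPort:  intstr.FromInt(int(port.NodePort)),
	}
}

func main() {
	// A guest LoadBalancer service port as the informer would see it.
	guest := corev1.ServicePort{Name: "http", Protocol: corev1.ProtocolTCP, Port: 80, NodePort: 30080}
	host := convertPort(guest)
	fmt.Printf("host port %q: %d -> %s\n", host.Name, host.Port, host.TargetPort.String())
	// Output: host port "30080": 30080 -> 30080
}
```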
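On the chart side, enabling the manager is a single values key; everything else added to `helm/kink/values.yaml` is optional tuning. A hypothetical minimal override (the non-`enabled` keys are shown at their defaults):

```yaml
loadBalancer:
  enabled: true
  manager:
    replicaCount: 1
    serviceAccount:
      create: true
    rbac:
      create: true
```

This is the same switch the k3s integration test flips with `loadBalancer.enabled: 'true'` in `integration-test/kink.k3s.config.yaml`, alongside disabling k3s's bundled traefik and servicelb so the new manager is the only thing serving those ports.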
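For debugging outside the chart, the same subcommand can be pointed at a host kubeconfig directly. The flags below are the ones registered in `cmd/lbManager.go`'s `init`; the cluster name, kubeconfig path, and identity are assumptions, and the three durations just make the defaults explicit. In-cluster, the manager Deployment additionally passes `--release-config-mount=/etc/kink/release` to pick up the generated release ConfigMap.

```sh
# Hypothetical invocation against an existing kink cluster named "my-cluster"
kink lb-manager \
  --name my-cluster \
  --guest-kubeconfig ./integration-test/kink.my-cluster.kubeconfig \
  --leader-election-id "$(hostname)" \
  --leader-election-lease 15s \
  --leader-election-renew 10s \
  --leader-election-retry 2s
```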