From 68369c861086b3010c9a0a952c547e86a2e476bb Mon Sep 17 00:00:00 2001
From: Jian Qiu
Date: Tue, 10 Dec 2024 16:59:38 +0800
Subject: [PATCH] Add unit tests

Signed-off-by: Jian Qiu
---
 pkg/registration/hub/importer/importer.go     |  66 +++---
 .../hub/importer/importer_test.go             | 152 +++++++++++++
 .../hub/importer/providers/capi/provider.go   |  14 +-
 .../importer/providers/capi/provider_test.go  | 201 ++++++++++++++++++
 .../hub/importer/providers/interface.go       |  41 +++-
 pkg/registration/hub/importer/renderers.go    |   4 +-
 .../hub/importer/renderers_test.go            |  99 +++++++++
 7 files changed, 529 insertions(+), 48 deletions(-)
 create mode 100644 pkg/registration/hub/importer/importer_test.go
 create mode 100644 pkg/registration/hub/importer/providers/capi/provider_test.go
 create mode 100644 pkg/registration/hub/importer/renderers_test.go

diff --git a/pkg/registration/hub/importer/importer.go b/pkg/registration/hub/importer/importer.go
index 946eb6810..9088cece2 100644
--- a/pkg/registration/hub/importer/importer.go
+++ b/pkg/registration/hub/importer/importer.go
@@ -8,10 +8,10 @@ import (
 	"github.com/openshift/library-go/pkg/controller/factory"
 	"github.com/openshift/library-go/pkg/operator/events"
 	"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
+	"github.com/openshift/library-go/pkg/operator/resource/resourcehelper"
 	"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
 	appsv1 "k8s.io/api/apps/v1"
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
-	apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
 	"k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
@@ -20,7 +20,6 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/serializer"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-	"k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"
 	"k8s.io/utils/pointer"
 
@@ -37,6 +36,12 @@ import (
 	cloudproviders "open-cluster-management.io/ocm/pkg/registration/hub/importer/providers"
 )
 
+const (
+	operatorNamesapce     = "open-cluster-management"
+	bootstrapSA           = "cluster-bootstrap"
+	conditionTypeImported = "Imported"
+)
+
 var (
 	genericScheme = runtime.NewScheme()
 	genericCodecs = serializer.NewCodecFactory(genericScheme)
@@ -49,8 +54,6 @@ func init() {
 	utilruntime.Must(operatorv1.Install(genericScheme))
 }
 
-const operatorNamesapce = "open-cluster-management"
-
 // KlusterletConfigRenderer renders the config for klusterlet chart.
 type KlusterletConfigRenderer func(
 	ctx context.Context, config *chart.KlusterletChartConfig) (*chart.KlusterletChartConfig, error)
@@ -104,7 +107,7 @@ func (i *Importer) sync(ctx context.Context, syncCtx factory.SyncContext) error
 	}
 
 	// If the cluster is imported, skip the reconcile
-	if meta.IsStatusConditionTrue(cluster.Status.Conditions, "Imported") {
+	if meta.IsStatusConditionTrue(cluster.Status.Conditions, conditionTypeImported) {
 		return nil
 	}
 
@@ -143,10 +146,10 @@ func (i *Importer) reconcile(
 	recorder events.Recorder,
 	provider cloudproviders.Interface,
 	cluster *v1.ManagedCluster) (*v1.ManagedCluster, error) {
-	config, err := provider.KubeConfig(cluster)
+	clients, err := provider.Clients(cluster)
 	if err != nil {
 		meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
-			Type:   "Imported",
+			Type:   conditionTypeImported,
 			Status: metav1.ConditionFalse,
 			Reason: "KubeConfigGetFailed",
 			Message: fmt.Sprintf("failed to get kubeconfig. See errors:\n%s",
 				err.Error()),
 		})
 		return cluster, err
 	}
 
-	if config == nil {
+	if clients == nil {
 		meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
-			Type:   "Imported",
+			Type:   conditionTypeImported,
 			Status: metav1.ConditionFalse,
 			Reason: "KubeConfigNotFound",
 			Message: "Secret for kubeconfig is not found.",
@@ -176,7 +179,7 @@
 		klusterletChartConfig, err = renderer(ctx, klusterletChartConfig)
 		if err != nil {
 			meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
-				Type:   "Imported",
+				Type:   conditionTypeImported,
 				Status: metav1.ConditionFalse,
 				Reason: "ConfigRendererFailed",
 				Message: fmt.Sprintf("failed to render config. See errors:\n%s",
@@ -190,29 +193,8 @@
 		return cluster, err
 	}
 
-	// build related client
-	kubeClient, err := kubernetes.NewForConfig(config)
-	if err != nil {
-		meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
-			Type:   "Imported",
-			Status: metav1.ConditionFalse,
-			Reason: "ClientBuildErr",
-			Message: fmt.Sprintf("failed to import the klusterlet. See errors: %s",
-				err.Error()),
-		})
-		return cluster, err
-	}
-	hubApiExtensionClient, err := apiextensionsclient.NewForConfig(config)
-	if err != nil {
-		return cluster, err
-	}
-	operatorClient, err := operatorclient.NewForConfig(config)
-	if err != nil {
-		return cluster, err
-	}
-
-	clientHolder := resourceapply.NewKubeClientHolder(kubeClient).
-		WithAPIExtensionsClient(hubApiExtensionClient)
+	clientHolder := resourceapply.NewKubeClientHolder(clients.KubeClient).
+		WithAPIExtensionsClient(clients.APIExtClient).WithDynamicClient(clients.DynamicClient)
 	cache := resourceapply.NewResourceCache()
 	var results []resourceapply.ApplyResult
 	for _, manifest := range rawManifests {
@@ -225,11 +207,11 @@
 		switch t := requiredObj.(type) {
 		case *appsv1.Deployment:
 			result.Result, result.Changed, result.Error = resourceapply.ApplyDeployment(
-				ctx, kubeClient.AppsV1(), recorder, t, 0)
+				ctx, clients.KubeClient.AppsV1(), recorder, t, 0)
 			results = append(results, result)
 		case *operatorv1.Klusterlet:
 			result.Result, result.Changed, result.Error = ApplyKlusterlet(
-				ctx, operatorClient, t)
+				ctx, clients.OperatorClient, recorder, t)
 			results = append(results, result)
 		default:
 			tempResults := resourceapply.ApplyDirectly(ctx, clientHolder, recorder, cache,
@@ -249,15 +231,15 @@
 	}
 	if len(errs) > 0 {
 		meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
-			Type:   "Imported",
+			Type:   conditionTypeImported,
 			Status: metav1.ConditionFalse,
 			Reason: "ImportFailed",
 			Message: fmt.Sprintf("failed to import the klusterlet. See errors:\n%s",
-				err.Error()),
+				utilerrors.NewAggregate(errs).Error()),
 		})
 	} else {
 		meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
-			Type:   "Imported",
+			Type:   conditionTypeImported,
 			Status: metav1.ConditionTrue,
 			Reason: "ImportSucceed",
 		})
@@ -266,11 +248,16 @@
 	return cluster, utilerrors.NewAggregate(errs)
 }
 
-func ApplyKlusterlet(ctx context.Context, client operatorclient.Interface, required *operatorv1.Klusterlet) (*operatorv1.Klusterlet, bool, error) {
+func ApplyKlusterlet(
+	ctx context.Context,
+	client operatorclient.Interface,
+	recorder events.Recorder,
+	required *operatorv1.Klusterlet) (*operatorv1.Klusterlet, bool, error) {
 	existing, err := client.OperatorV1().Klusterlets().Get(ctx, required.Name, metav1.GetOptions{})
 	if errors.IsNotFound(err) {
 		requiredCopy := required.DeepCopy()
 		actual, err := client.OperatorV1().Klusterlets().Create(ctx, requiredCopy, metav1.CreateOptions{})
+		resourcehelper.ReportCreateEvent(recorder, required, err)
 		return actual, true, err
 	}
 	if err != nil {
@@ -287,5 +274,6 @@
 	existingCopy.Spec = required.Spec
 
 	actual, err := client.OperatorV1().Klusterlets().Update(ctx, existingCopy, metav1.UpdateOptions{})
+	resourcehelper.ReportUpdateEvent(recorder, required, err)
 	return actual, true, err
 }
diff --git a/pkg/registration/hub/importer/importer_test.go b/pkg/registration/hub/importer/importer_test.go
new file mode 100644
index 000000000..402f2e520
--- /dev/null
+++ b/pkg/registration/hub/importer/importer_test.go
@@ -0,0 +1,152 @@
+package importer
+
+import (
+	"context"
+	"encoding/json"
+	"testing"
+	"time"
+
+	"github.com/openshift/library-go/pkg/controller/factory"
+	fakeapiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	fakedynamic "k8s.io/client-go/dynamic/fake"
+	kubefake "k8s.io/client-go/kubernetes/fake"
+	clienttesting "k8s.io/client-go/testing"
+
+	fakeclusterclient "open-cluster-management.io/api/client/cluster/clientset/versioned/fake"
+	clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions"
+	fakeoperatorclient "open-cluster-management.io/api/client/operator/clientset/versioned/fake"
+	clusterv1 "open-cluster-management.io/api/cluster/v1"
+	"open-cluster-management.io/sdk-go/pkg/patcher"
+
+	testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
+	"open-cluster-management.io/ocm/pkg/registration/hub/importer/providers"
+	cloudproviders "open-cluster-management.io/ocm/pkg/registration/hub/importer/providers"
+)
+
+func TestSync(t *testing.T) {
+	cases := []struct {
+		name     string
+		provider *fakeProvider
+		key      string
+		cluster  *clusterv1.ManagedCluster
+		validate func(t *testing.T, actions []clienttesting.Action)
+	}{
+		{
+			name:     "import succeed",
+			provider: &fakeProvider{isOwned: true},
+			key:      "cluster1",
+			cluster:  &clusterv1.ManagedCluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}},
+			validate: func(t *testing.T, actions []clienttesting.Action) {
+				testingcommon.AssertActions(t, actions, "patch")
+				patch := actions[0].(clienttesting.PatchAction).GetPatch()
+				managedCluster := &clusterv1.ManagedCluster{}
+				err := json.Unmarshal(patch, managedCluster)
+				if err != nil {
+					t.Fatal(err)
+				}
+				if !meta.IsStatusConditionTrue(managedCluster.Status.Conditions, conditionTypeImported) {
+					t.Errorf("expected managed cluster to be imported")
+				}
+			},
+		},
+		{
+			name:     "no cluster",
+			provider: &fakeProvider{isOwned: true},
+			key:      "cluster1",
+			cluster:  &clusterv1.ManagedCluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}},
+			validate: func(t *testing.T, actions []clienttesting.Action) {
+				testingcommon.AssertNoActions(t, actions)
+			},
+		},
+		{
+			name:     "not owned by the provider",
+			provider: &fakeProvider{isOwned: false},
+			key:      "cluster1",
+			cluster:  &clusterv1.ManagedCluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}},
+			validate: func(t *testing.T, actions []clienttesting.Action) {
+				testingcommon.AssertNoActions(t, actions)
+			},
+		},
+		{
+			name:     "clients for remote cluster are not generated",
+			provider: &fakeProvider{isOwned: true, noClients: true},
+			key:      "cluster1",
+			cluster:  &clusterv1.ManagedCluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}},
+			validate: func(t *testing.T, actions []clienttesting.Action) {
+				testingcommon.AssertActions(t, actions, "patch")
+				patch := actions[0].(clienttesting.PatchAction).GetPatch()
+				managedCluster := &clusterv1.ManagedCluster{}
+				err := json.Unmarshal(patch, managedCluster)
+				if err != nil {
+					t.Fatal(err)
+				}
+				if !meta.IsStatusConditionFalse(managedCluster.Status.Conditions, conditionTypeImported) {
+					t.Errorf("expected managed cluster not to be imported")
+				}
+			},
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			clusterClient := fakeclusterclient.NewSimpleClientset(c.cluster)
+			clusterInformer := clusterinformers.NewSharedInformerFactory(
+				clusterClient, 10*time.Minute).Cluster().V1().ManagedClusters()
+			clusterStore := clusterInformer.Informer().GetStore()
+			if err := clusterStore.Add(c.cluster); err != nil {
+				t.Fatal(err)
+			}
+			importer := &Importer{
+				providers:     []cloudproviders.Interface{c.provider},
+				clusterClient: clusterClient,
+				clusterLister: clusterInformer.Lister(),
+				patcher: patcher.NewPatcher[
+					*clusterv1.ManagedCluster, clusterv1.ManagedClusterSpec, clusterv1.ManagedClusterStatus](
+					clusterClient.ClusterV1().ManagedClusters()),
+			}
+			err := importer.sync(context.TODO(), testingcommon.NewFakeSyncContext(t, c.key))
+			if err != nil {
+				t.Fatal(err)
+			}
+			c.validate(t, clusterClient.Actions())
+		})
+	}
+}
+
+type fakeProvider struct {
+	isOwned       bool
+	noClients     bool
+	kubeConfigErr error
+}
+
+// Clients is to return the clients to connect to the target cluster.
+func (f *fakeProvider) Clients(_ *clusterv1.ManagedCluster) (*providers.Clients, error) {
+	if f.kubeConfigErr != nil {
+		return nil, f.kubeConfigErr
+	}
+	if f.noClients {
+		return nil, nil
+	}
+	return &providers.Clients{
+		KubeClient: kubefake.NewClientset(),
+		// due to https://github.com/kubernetes/kubernetes/issues/126850, still need to use NewSimpleClientset
+		APIExtClient:   fakeapiextensions.NewSimpleClientset(),
+		OperatorClient: fakeoperatorclient.NewSimpleClientset(),
+		DynamicClient:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),
+	}, nil
+}
+
+// IsManagedClusterOwner check if the provider is used to manage this cluster
+func (f *fakeProvider) IsManagedClusterOwner(_ *clusterv1.ManagedCluster) bool {
+	return f.isOwned
+}
+
+// Register registers the provider to the importer. The provider should enqueue the resource
+// into the queue with the name of the managed cluster
+func (f *fakeProvider) Register(_ factory.SyncContext) {}
+
+// Run starts the provider
+func (f *fakeProvider) Run(_ context.Context) {}
diff --git a/pkg/registration/hub/importer/providers/capi/provider.go b/pkg/registration/hub/importer/providers/capi/provider.go
index 3cdf60e20..50220df2f 100644
--- a/pkg/registration/hub/importer/providers/capi/provider.go
+++ b/pkg/registration/hub/importer/providers/capi/provider.go
@@ -62,7 +62,7 @@ func NewCAPIProvider(
 	}
 }
 
-func (c *CAPIProvider) KubeConfig(cluster *clusterv1.ManagedCluster) (*rest.Config, error) {
+func (c *CAPIProvider) Clients(cluster *clusterv1.ManagedCluster) (*providers.Clients, error) {
 	clusterKey := capiNameFromManagedCluster(cluster)
 	namespace, name, err := cache.SplitMetaNamespaceKey(clusterKey)
 	if err != nil {
@@ -93,7 +93,12 @@
 	if err != nil {
 		return nil, err
 	}
-	return configOverride.ClientConfig()
+
+	config, err := configOverride.ClientConfig()
+	if err != nil {
+		return nil, err
+	}
+	return providers.NewClient(config)
 }
 
 func (c *CAPIProvider) Register(syncCtx factory.SyncContext) {
@@ -128,7 +133,7 @@ func (c *CAPIProvider) enqueueManagedClusterByCAPI(obj interface{}, syncCtx fact
 	}
 	for _, obj := range objs {
 		accessor, _ := meta.Accessor(obj)
-		syncCtx.Queue().Add(fmt.Sprintf("%s/%s", accessor.GetNamespace(), accessor.GetName()))
+		syncCtx.Queue().Add(accessor.GetName())
 	}
 }
 
@@ -141,11 +146,10 @@ func indexByCAPIResource(obj interface{}) ([]string, error) {
 }
 
 func capiNameFromManagedCluster(cluster *clusterv1.ManagedCluster) string {
-	name := fmt.Sprintf("%s/%s", cluster.Name, cluster.Namespace)
 	if len(cluster.Annotations) > 0 {
 		if key, ok := cluster.Annotations[CAPIAnnotationKey]; ok {
 			return key
 		}
 	}
-	return name
+	return fmt.Sprintf("%s/%s", cluster.Name, cluster.Name)
 }
diff --git a/pkg/registration/hub/importer/providers/capi/provider_test.go b/pkg/registration/hub/importer/providers/capi/provider_test.go
new file mode 100644
index 000000000..f09fd55d6
--- /dev/null
+++ b/pkg/registration/hub/importer/providers/capi/provider_test.go
@@ -0,0 +1,201 @@
+package capi
+
+import (
+	"testing"
+
+	"github.com/ghodss/yaml"
+	"github.com/openshift/library-go/pkg/controller/factory"
+	"github.com/openshift/library-go/pkg/operator/events/eventstesting"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/dynamic/dynamicinformer"
+	fakedynamic "k8s.io/client-go/dynamic/fake"
+	fakekube "k8s.io/client-go/kubernetes/fake"
+	"k8s.io/client-go/tools/cache"
+	clientcmdapiv1 "k8s.io/client-go/tools/clientcmd/api/v1"
+
+	fakecluster "open-cluster-management.io/api/client/cluster/clientset/versioned/fake"
+	clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions"
+	clusterv1 "open-cluster-management.io/api/cluster/v1"
+
+	testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
+)
+
+func TestEnqueue(t *testing.T) {
+	cases := []struct {
+		name          string
+		cluster       *clusterv1.ManagedCluster
+		capiName      string
+		capiNamespace string
+		expectedKey   string
+	}{
+		{
+			name:          "enqueue by name",
+			cluster:       &clusterv1.ManagedCluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}},
+			capiName:      "cluster1",
+			capiNamespace: "cluster1",
+			expectedKey:   "cluster1",
+		},
+		{
+			name: "enqueue by annotation",
+			cluster: &clusterv1.ManagedCluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "cluster2",
+					Annotations: map[string]string{
+						CAPIAnnotationKey: "capi/cluster1",
+					},
+				},
+			},
+			capiName:      "cluster1",
+			capiNamespace: "capi",
+			expectedKey:   "cluster2",
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			client := fakecluster.NewSimpleClientset(c.cluster)
+			informerFactory := clusterinformers.NewSharedInformerFactory(client, 0)
+			clusterInformer := informerFactory.Cluster().V1().ManagedClusters()
+			if err := clusterInformer.Informer().AddIndexers(cache.Indexers{
+				ByCAPIResource: indexByCAPIResource,
+			}); err != nil {
+				t.Fatal(err)
+			}
+			if err := clusterInformer.Informer().GetStore().Add(c.cluster); err != nil {
+				t.Fatal(err)
+			}
+
+			provider := &CAPIProvider{
+				managedClusterIndexer: clusterInformer.Informer().GetIndexer(),
+			}
+			syncCtx := factory.NewSyncContext("test", eventstesting.NewTestingEventRecorder(t))
+			provider.enqueueManagedClusterByCAPI(&metav1.PartialObjectMetadata{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      c.capiName,
+					Namespace: c.capiNamespace,
+				},
+			}, syncCtx)
+			if i, _ := syncCtx.Queue().Get(); i.(string) != c.expectedKey {
+				t.Errorf("expected key %s but got %s", c.expectedKey, syncCtx.QueueKey())
+			}
+		})
+	}
+}
+
+func TestClients(t *testing.T) {
+	cases := []struct {
+		name        string
+		capiObjects []runtime.Object
+		kubeObjects []runtime.Object
+		cluster     *clusterv1.ManagedCluster
+		expectErr   bool
+	}{
+		{
+			name:    "capi cluster not found",
+			cluster: &clusterv1.ManagedCluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}},
+		},
+		{
+			name:    "secret not found",
+			cluster: &clusterv1.ManagedCluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}},
+			capiObjects: []runtime.Object{
+				testingcommon.NewUnstructured(
+					"cluster.x-k8s.io/v1beta1", "Cluster", "cluster1", "cluster1")},
+		},
+		{
+			name:    "secret found with invalid key",
+			cluster: &clusterv1.ManagedCluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}},
+			capiObjects: []runtime.Object{
+				testingcommon.NewUnstructured(
+					"cluster.x-k8s.io/v1beta1", "Cluster", "cluster1", "cluster1")},
+			kubeObjects: []runtime.Object{
+				&corev1.Secret{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "cluster1-kubeconfig",
+						Namespace: "cluster1",
+					},
+				},
+			},
+			expectErr: true,
+		},
+		{
+			name:    "build client successfully",
+			cluster: &clusterv1.ManagedCluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}},
+			capiObjects: []runtime.Object{
+				testingcommon.NewUnstructured(
+					"cluster.x-k8s.io/v1beta1", "Cluster", "cluster1", "cluster1")},
+			kubeObjects: []runtime.Object{
+				func() *corev1.Secret {
+					clientConfig := clientcmdapiv1.Config{
+						// Define a cluster stanza based on the bootstrap kubeconfig.
+						Clusters: []clientcmdapiv1.NamedCluster{
+							{
+								Name: "hub",
+								Cluster: clientcmdapiv1.Cluster{
+									Server: "https://test",
+								},
+							},
+						},
+						// Define auth based on the obtained client cert.
+ AuthInfos: []clientcmdapiv1.NamedAuthInfo{ + { + Name: "bootstrap", + AuthInfo: clientcmdapiv1.AuthInfo{ + Token: "test", + }, + }, + }, + // Define a context that connects the auth info and cluster, and set it as the default + Contexts: []clientcmdapiv1.NamedContext{ + { + Name: "bootstrap", + Context: clientcmdapiv1.Context{ + Cluster: "hub", + AuthInfo: "bootstrap", + Namespace: "default", + }, + }, + }, + CurrentContext: "bootstrap", + } + bootstrapConfigBytes, _ := yaml.Marshal(clientConfig) + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1-kubeconfig", + Namespace: "cluster1", + }, + Data: map[string][]byte{ + "value": bootstrapConfigBytes, + }, + } + }(), + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + dynamicClient := fakedynamic.NewSimpleDynamicClient(runtime.NewScheme(), c.capiObjects...) + kubeClient := fakekube.NewClientset(c.kubeObjects...) + dynamicInformers := dynamicinformer.NewDynamicSharedInformerFactory(dynamicClient, 0) + for _, capiObj := range c.capiObjects { + if err := dynamicInformers.ForResource(gvr).Informer().GetStore().Add(capiObj); err != nil { + t.Fatal(err) + } + } + provider := &CAPIProvider{ + kubeClient: kubeClient, + informer: dynamicInformers, + lister: dynamicInformers.ForResource(gvr).Lister(), + } + _, err := provider.Clients(c.cluster) + if c.expectErr && err == nil { + t.Errorf("expected error but got nil") + } + if !c.expectErr && err != nil { + t.Errorf("expected no error but got %v", err) + } + }) + } +} diff --git a/pkg/registration/hub/importer/providers/interface.go b/pkg/registration/hub/importer/providers/interface.go index 915beb1d8..41cf5dc2f 100644 --- a/pkg/registration/hub/importer/providers/interface.go +++ b/pkg/registration/hub/importer/providers/interface.go @@ -4,15 +4,19 @@ import ( "context" "github.com/openshift/library-go/pkg/controller/factory" + apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + operatorclient "open-cluster-management.io/api/client/operator/clientset/versioned" clusterv1 "open-cluster-management.io/api/cluster/v1" ) // Interface is the interface that a cluster provider should implement type Interface interface { - // KubeConfig is to return the config to connect to the target cluster. - KubeConfig(cluster *clusterv1.ManagedCluster) (*rest.Config, error) + // Clients is to return the client to connect to the target cluster. 
+ Clients(cluster *clusterv1.ManagedCluster) (*Clients, error) // IsManagedClusterOwner check if the provider is used to manage this cluster IsManagedClusterOwner(cluster *clusterv1.ManagedCluster) bool @@ -24,3 +28,36 @@ type Interface interface { // Run starts the provider Run(ctx context.Context) } + +type Clients struct { + KubeClient kubernetes.Interface + APIExtClient apiextensionsclient.Interface + OperatorClient operatorclient.Interface + DynamicClient dynamic.Interface +} + +func NewClient(config *rest.Config) (*Clients, error) { + kubeClient, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, err + } + hubApiExtensionClient, err := apiextensionsclient.NewForConfig(config) + if err != nil { + return nil, err + } + operatorClient, err := operatorclient.NewForConfig(config) + if err != nil { + return nil, err + } + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + return nil, err + } + + return &Clients{ + APIExtClient: hubApiExtensionClient, + KubeClient: kubeClient, + OperatorClient: operatorClient, + DynamicClient: dynamicClient, + }, nil +} diff --git a/pkg/registration/hub/importer/renderers.go b/pkg/registration/hub/importer/renderers.go index 9ea37e9ee..dbbbedb88 100644 --- a/pkg/registration/hub/importer/renderers.go +++ b/pkg/registration/hub/importer/renderers.go @@ -22,7 +22,7 @@ func RenderBootstrapHubKubeConfig( // get bootstrap token tr, err := kubeClient.CoreV1(). ServiceAccounts(operatorNamesapce). - CreateToken(ctx, "cluster-bootstrap", &authv1.TokenRequest{ + CreateToken(ctx, bootstrapSA, &authv1.TokenRequest{ Spec: authv1.TokenRequestSpec{ // token expired in 1 hour ExpirationSeconds: ptr.To[int64](3600), @@ -30,7 +30,7 @@ func RenderBootstrapHubKubeConfig( }, metav1.CreateOptions{}) if err != nil { return config, fmt.Errorf( - "failed to get token from sa %s/cluster-bootstrap: %v", operatorNamesapce, err) + "failed to get token from sa %s/%s: %v", operatorNamesapce, bootstrapSA, err) } // get apisever url diff --git a/pkg/registration/hub/importer/renderers_test.go b/pkg/registration/hub/importer/renderers_test.go new file mode 100644 index 000000000..23ae084a5 --- /dev/null +++ b/pkg/registration/hub/importer/renderers_test.go @@ -0,0 +1,99 @@ +package importer + +import ( + "context" + "testing" + + "github.com/ghodss/yaml" + authenticationv1 "k8s.io/api/authentication/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + kubefake "k8s.io/client-go/kubernetes/fake" + clienttesting "k8s.io/client-go/testing" + "k8s.io/client-go/tools/clientcmd" + clientcmdapiv1 "k8s.io/client-go/tools/clientcmd/api/v1" + + "open-cluster-management.io/ocm/pkg/operator/helpers/chart" +) + +func TestRenderBootstrapHubKubeConfig(t *testing.T) { + cases := []struct { + name string + objects []runtime.Object + apiserverURL string + expectedURL string + }{ + { + name: "render apiserver from input", + apiserverURL: "https://127.0.0.1:6443", + expectedURL: "https://127.0.0.1:6443", + }, + { + name: "render apiserver from cluster-info", + objects: []runtime.Object{ + func() *corev1.ConfigMap { + config := clientcmdapiv1.Config{ + // Define a cluster stanza based on the bootstrap kubeconfig. 
+ Clusters: []clientcmdapiv1.NamedCluster{ + { + Name: "hub", + Cluster: clientcmdapiv1.Cluster{ + Server: "https://test", + }, + }, + }, + } + bootstrapConfigBytes, _ := yaml.Marshal(config) + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-info", + Namespace: "kube-public", + }, + Data: map[string]string{ + "kubeconfig": string(bootstrapConfigBytes), + }, + } + }(), + }, + expectedURL: "https://test", + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + client := kubefake.NewClientset(c.objects...) + client.PrependReactor("create", "serviceaccounts/token", + func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) { + act, ok := action.(clienttesting.CreateActionImpl) + if !ok { + return false, nil, nil + } + tokenReq, ok := act.Object.(*authenticationv1.TokenRequest) + if !ok { + return false, nil, nil + } + tokenReq.Status.Token = "token" + return true, tokenReq, nil + }, + ) + config := &chart.KlusterletChartConfig{} + config, err := RenderBootstrapHubKubeConfig(client, c.apiserverURL)(context.TODO(), config) + if err != nil { + t.Fatalf("failed to render bootstrap hub kubeconfig: %v", err) + } + kConfig, err := clientcmd.NewClientConfigFromBytes([]byte(config.BootstrapHubKubeConfig)) + if err != nil { + t.Fatalf("failed to load bootstrap hub kubeconfig: %v", err) + } + rawConfig, err := kConfig.RawConfig() + if err != nil { + t.Fatalf("failed to load bootstrap hub kubeconfig: %v", err) + } + cluster := rawConfig.Contexts[rawConfig.CurrentContext].Cluster + if rawConfig.Clusters[cluster].Server != c.expectedURL { + t.Errorf("apiserver is not rendered correctly") + } + }) + } +}
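
Reviewer note: with this patch a provider no longer returns a bare *rest.Config; it returns the providers.Clients bundle, and providers.NewClient(config) centralizes construction of the kube, apiextensions, operator, and dynamic clients. Below is a minimal sketch of how another provider implementation might adopt the new interface; the myProvider type and restConfigForCluster helper are hypothetical and only illustrate the call pattern that CAPIProvider.Clients follows in this change.

```go
package example

import (
	"k8s.io/client-go/rest"

	clusterv1 "open-cluster-management.io/api/cluster/v1"

	"open-cluster-management.io/ocm/pkg/registration/hub/importer/providers"
)

// myProvider is a hypothetical provider implementation used only for illustration.
type myProvider struct{}

// restConfigForCluster is a hypothetical helper that resolves a rest.Config for
// the managed cluster, e.g. from a kubeconfig secret. Returning nil means the
// kubeconfig is not available yet.
func restConfigForCluster(cluster *clusterv1.ManagedCluster) (*rest.Config, error) {
	return &rest.Config{Host: "https://" + cluster.Name + ":6443"}, nil
}

// Clients follows the pattern CAPIProvider.Clients uses in this patch: resolve a
// rest.Config, then let providers.NewClient build all four client interfaces.
func (p *myProvider) Clients(cluster *clusterv1.ManagedCluster) (*providers.Clients, error) {
	config, err := restConfigForCluster(cluster)
	if err != nil {
		return nil, err
	}
	if config == nil {
		// nil, nil signals the importer to set the Imported condition to false
		// with reason KubeConfigNotFound and retry later.
		return nil, nil
	}
	return providers.NewClient(config)
}
```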