Skip to content

Commit

Permalink
Fix multiple issues with LKE Enterprise (#1788)
Browse files Browse the repository at this point in the history
* workaround dashboard

* avoid context cancel

* fmt

* address comment
  • Loading branch information
yec-akamai authored Feb 24, 2025
1 parent 26afef0 commit 50f4f46
Show file tree
Hide file tree
Showing 9 changed files with 135 additions and 19 deletions.
2 changes: 1 addition & 1 deletion docs/data-sources/lke_cluster.md
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ In addition to all arguments above, the following attributes are exported:

* `kubeconfig` - The base64 encoded kubeconfig for the Kubernetes cluster.

* `dashboard_url` - The Kubernetes Dashboard access URL for this cluster.
* `dashboard_url` - The Kubernetes Dashboard access URL for this cluster. LKE Enterprise does not have a dashboard URL.

* `pools` - Node pools associated with this cluster.

Expand Down
2 changes: 1 addition & 1 deletion docs/resources/lke_cluster.md
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,7 @@ In addition to all arguments above, the following attributes are exported:

* `kubeconfig` - The base64 encoded kubeconfig for the Kubernetes cluster.

* `dashboard_url` - The Kubernetes Dashboard access URL for this cluster.
* `dashboard_url` - The Kubernetes Dashboard access URL for this cluster. LKE Enterprise does not have a dashboard URL.

* `pool` - Additional nested attributes:

Expand Down
24 changes: 24 additions & 0 deletions linode/lke/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import (
"context"
"fmt"
"reflect"
"strings"
"time"

"github.com/hashicorp/terraform-plugin-log/tflog"
Expand Down Expand Up @@ -613,3 +614,26 @@ func expandNodePoolTaints(poolTaints []map[string]any) []linodego.LKENodePoolTai
}
return taints
}

// waitForLKEKubeConfig polls the LKE API every intervalMS milliseconds until
// the kubeconfig for the given cluster becomes available.
//
// Enterprise-tier clusters can take a long time to generate their kubeconfig,
// so callers should pass a context with an appropriate deadline. Returns nil
// once the kubeconfig is retrievable, or an error if the API reports an
// unexpected failure or the context is cancelled/expires first.
func waitForLKEKubeConfig(ctx context.Context, client linodego.Client, intervalMS int, clusterID int) error {
	ticker := time.NewTicker(time.Duration(intervalMS) * time.Millisecond)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			_, err := client.GetLKEClusterKubeconfig(ctx, clusterID)
			if err == nil {
				return nil
			}
			// The API returns this specific message while the kubeconfig is
			// still being provisioned; keep polling in that case.
			// NOTE(review): matching on the error message is brittle — prefer
			// a typed error or status-code check if linodego exposes one.
			if strings.Contains(err.Error(), "Cluster kubeconfig is not yet available") {
				continue
			}
			return fmt.Errorf("failed to get kubeconfig for LKE cluster %d: %w", clusterID, err)
		case <-ctx.Done():
			return fmt.Errorf("waiting for cluster %d kubeconfig: %w", clusterID, ctx.Err())
		}
	}
}
23 changes: 14 additions & 9 deletions linode/lke/framework_datasource.go
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,20 @@ func (r *DataSource) Read(
return
}

var dashboard *linodego.LKEClusterDashboard

// Only standard LKE has a dashboard URL
if cluster.Tier == TierStandard {
dashboard, err = client.GetLKEClusterDashboard(ctx, clusterId)
if err != nil {
resp.Diagnostics.AddError(
fmt.Sprintf("Failed to get dashboard URL for LKE cluster %d", clusterId),
err.Error(),
)
return
}
}

kubeconfig, err := client.GetLKEClusterKubeconfig(ctx, clusterId)
if err != nil {
resp.Diagnostics.AddError(
Expand All @@ -89,15 +103,6 @@ func (r *DataSource) Read(
return
}

dashboard, err := client.GetLKEClusterDashboard(ctx, clusterId)
if err != nil {
resp.Diagnostics.AddError(
fmt.Sprintf("Failed to get dashboard URL for LKE cluster %d", clusterId),
err.Error(),
)
return
}

acl, err := client.GetLKEClusterControlPlaneACL(ctx, clusterId)
if err != nil {
if lerr, ok := err.(*linodego.Error); ok &&
Expand Down
12 changes: 10 additions & 2 deletions linode/lke/framework_models.go
Original file line number Diff line number Diff line change
Expand Up @@ -179,7 +179,11 @@ func (data *LKEDataModel) parseLKEAttributes(
}
data.Pools = lkePools

data.Kubeconfig = types.StringValue(kubeconfig.KubeConfig)
if kubeconfig != nil {
data.Kubeconfig = types.StringValue(kubeconfig.KubeConfig)
} else {
data.Kubeconfig = types.StringNull()
}

var urls []string
for _, e := range endpoints {
Expand All @@ -192,7 +196,11 @@ func (data *LKEDataModel) parseLKEAttributes(
}
data.APIEndpoints = apiEndpoints

data.DashboardURL = types.StringValue(dashboard.URL)
if dashboard != nil {
data.DashboardURL = types.StringValue(dashboard.URL)
} else {
data.DashboardURL = types.StringNull()
}

return nil
}
Expand Down
36 changes: 36 additions & 0 deletions linode/lke/framework_resource_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -696,3 +696,39 @@ func TestAccResourceLKEClusterNodePoolTaintsLabels(t *testing.T) {
})
})
}

// TestAccResourceLKECluster_enterprise provisions an enterprise-tier LKE
// cluster end-to-end and verifies its core attributes, including that a
// kubeconfig is eventually exposed.
func TestAccResourceLKECluster_enterprise(t *testing.T) {
	t.Parallel()

	k8sVersionEnterprise := "v1.31.1+lke1"

	// Enterprise clusters are only offered in regions with the
	// "Kubernetes Enterprise" capability.
	enterpriseRegion, err := acceptance.GetRandomRegionWithCaps([]string{"Kubernetes Enterprise"}, "core")
	if err != nil {
		// Fail only this test: log.Fatal would abort the entire test binary,
		// killing parallel tests and skipping framework cleanup.
		t.Fatal(err)
	}

	acceptance.RunTestWithRetries(t, 2, func(t *acceptance.WrappedT) {
		clusterName := acctest.RandomWithPrefix("tf_test")
		resource.Test(t, resource.TestCase{
			PreCheck:                 func() { acceptance.PreCheck(t) },
			ProtoV5ProviderFactories: acceptance.ProtoV5ProviderFactories,
			CheckDestroy:             acceptance.CheckLKEClusterDestroy,
			Steps: []resource.TestStep{
				{
					Config: tmpl.Enterprise(t, clusterName, k8sVersionEnterprise, enterpriseRegion),
					Check: resource.ComposeTestCheckFunc(
						resource.TestCheckResourceAttr(resourceClusterName, "label", clusterName),
						resource.TestCheckResourceAttr(resourceClusterName, "region", enterpriseRegion),
						resource.TestCheckResourceAttr(resourceClusterName, "k8s_version", k8sVersionEnterprise),
						resource.TestCheckResourceAttr(resourceClusterName, "status", "ready"),
						resource.TestCheckResourceAttr(resourceClusterName, "tier", "enterprise"),
						resource.TestCheckResourceAttr(resourceClusterName, "tags.#", "1"),
						resource.TestCheckResourceAttr(resourceClusterName, "pool.#", "1"),
						resource.TestCheckResourceAttr(resourceClusterName, "pool.0.type", "g6-standard-1"),
						resource.TestCheckResourceAttr(resourceClusterName, "pool.0.count", "3"),
						resource.TestCheckResourceAttrSet(resourceClusterName, "kubeconfig"),
					),
				},
			},
		})
	})
}
33 changes: 27 additions & 6 deletions linode/lke/resource.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,8 @@ const (
createLKETimeout = 35 * time.Minute
updateLKETimeout = 40 * time.Minute
deleteLKETimeout = 15 * time.Minute
TierEnterprise = "enterprise"
TierStandard = "standard"
)

func Resource() *schema.Resource {
Expand Down Expand Up @@ -109,9 +111,14 @@ func readResource(ctx context.Context, d *schema.ResourceData, meta interface{})

flattenedControlPlane := flattenLKEClusterControlPlane(cluster.ControlPlane, acl)

dashboard, err := client.GetLKEClusterDashboard(ctx, id)
if err != nil {
return diag.Errorf("failed to get dashboard URL for LKE cluster %d: %s", id, err)
// Only standard LKE has a dashboard URL
if cluster.Tier == TierStandard {
dashboard, err := client.GetLKEClusterDashboard(ctx, id)
if err != nil {
return diag.Errorf("failed to get dashboard URL for LKE cluster %d: %s", id, err)
}

d.Set("dashboard_url", dashboard.URL)
}

d.Set("label", cluster.Label)
Expand All @@ -121,7 +128,6 @@ func readResource(ctx context.Context, d *schema.ResourceData, meta interface{})
d.Set("status", cluster.Status)
d.Set("tier", cluster.Tier)
d.Set("kubeconfig", kubeconfig.KubeConfig)
d.Set("dashboard_url", dashboard.URL)
d.Set("api_endpoints", flattenLKEClusterAPIEndpoints(endpoints))

matchedPools, err := matchPoolsWithSchema(ctx, pools, declaredPools)
Expand Down Expand Up @@ -204,22 +210,37 @@ func createResource(ctx context.Context, d *schema.ResourceData, meta interface{
}
d.SetId(strconv.Itoa(cluster.ID))

// Currently the enterprise cluster kube config takes long time to generate.
// Wait for it to be ready before start waiting for nodes and allow a longer timeout for retrying
// to avoid context exceeded or canceled before getting a meaningful result.
var retryContextTimeout time.Duration
if cluster.Tier == TierEnterprise {
retryContextTimeout = time.Second * 120
err = waitForLKEKubeConfig(ctx, client, meta.(*helper.ProviderMeta).Config.EventPollMilliseconds, cluster.ID)
if err != nil {
return diag.Errorf("failed to get LKE cluster kubeconfig: %s", err)
}
} else {
retryContextTimeout = time.Second * 25
}

ctx = tflog.SetField(ctx, "cluster_id", cluster.ID)
tflog.Debug(ctx, "Waiting for a single LKE cluster node to be ready")

// Sometimes the K8S API will raise an EOF error if polling immediately after
// a cluster is created. We should retry accordingly.
// NOTE: This routine has a short retry period because we want to raise
// and meaningful errors quickly.
diag.FromErr(retry.RetryContext(ctx, time.Second*25, func() *retry.RetryError {
tflog.Trace(ctx, "client.WaitForLKEClusterCondition(...)", map[string]any{
diag.FromErr(retry.RetryContext(ctx, retryContextTimeout, func() *retry.RetryError {
tflog.Debug(ctx, "client.WaitForLKEClusterCondition(...)", map[string]any{
"condition": "ClusterHasReadyNode",
})

err := client.WaitForLKEClusterConditions(ctx, cluster.ID, linodego.LKEClusterPollOptions{
TimeoutSeconds: 15 * 60,
}, k8scondition.ClusterHasReadyNode)
if err != nil {
tflog.Debug(ctx, err.Error())
return retry.RetryableError(err)
}

Expand Down
17 changes: 17 additions & 0 deletions linode/lke/tmpl/enterprise.gotf
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
{{ define "lke_cluster_enterprise" }}
{{/*
  Renders an enterprise-tier LKE cluster resource for acceptance tests.
  Inputs come from TemplateData: .Label, .Region, .K8sVersion
  (populated by tmpl.Enterprise in template.go).
*/}}
resource "linode_lke_cluster" "test" {
    label       = "{{.Label}}"
    region      = "{{ .Region }}"
    k8s_version = "{{.K8sVersion}}"
    tags        = ["test"]
    tier        = "enterprise"

    pool {
        type  = "g6-standard-1"
        count = 3
        tags  = ["test"]
    }
}

{{ end }}
5 changes: 5 additions & 0 deletions linode/lke/tmpl/template.go
Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,11 @@ func AutoscalerNoCount(t testing.TB, name, version, region string) string {
})
}

func Enterprise(t testing.TB, name, version, region string) string {
return acceptance.ExecuteTemplate(t,
"lke_cluster_enterprise", TemplateData{Label: name, K8sVersion: version, Region: region})
}

func DataBasic(t testing.TB, name, version, region string) string {
return acceptance.ExecuteTemplate(t,
"lke_cluster_data_basic", TemplateData{Label: name, K8sVersion: version, Region: region})
Expand Down

0 comments on commit 50f4f46

Please sign in to comment.