diff --git a/.changelog/2345.txt b/.changelog/2345.txt new file mode 100644 index 0000000000..95d32d4105 --- /dev/null +++ b/.changelog/2345.txt @@ -0,0 +1,3 @@ +```release-note:note +We have updated the data source logic: the provider now returns all annotations and labels attached to the object, regardless of the `ignore_annotations` and `ignore_labels` provider settings. In addition, the list of labels ignored on `kubernetes_job(_v1)` and `kubernetes_cron_job(_v1)` resources has been extended with `batch.kubernetes.io/controller-uid` and `batch.kubernetes.io/job-name`, since these are intended to replace `controller-uid` and `job-name` in future Kubernetes releases. +``` diff --git a/kubernetes/data_source_kubernetes_config_map_v1.go b/kubernetes/data_source_kubernetes_config_map_v1.go index 696fc80416..c65cf9fb5e 100644 --- a/kubernetes/data_source_kubernetes_config_map_v1.go +++ b/kubernetes/data_source_kubernetes_config_map_v1.go @@ -5,10 +5,11 @@ package kubernetes import ( "context" + "log" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func dataSourceKubernetesConfigMapV1() *schema.Resource { @@ -37,11 +38,46 @@ func dataSourceKubernetesConfigMapV1() *schema.Resource { } func dataSourceKubernetesConfigMapV1Read(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - om := meta_v1.ObjectMeta{ - Namespace: d.Get("metadata.0.namespace").(string), - Name: d.Get("metadata.0.name").(string), + conn, err := meta.(KubeClientsets).MainClientset() + if err != nil { + return diag.FromErr(err) + } + + metadata := expandMetadata(d.Get("metadata").([]interface{})) + + om := metav1.ObjectMeta{ + Namespace: metadata.Namespace, + Name: metadata.Name, } d.SetId(buildId(om)) - return resourceKubernetesConfigMapV1Read(ctx, d, meta) + log.Printf("[INFO] Reading config map %s", metadata.Name) + cfgMap, err := conn.CoreV1().ConfigMaps(metadata.Namespace).Get(ctx, metadata.Name, metav1.GetOptions{}) + if err != nil { + log.Printf("[DEBUG] Received error: %#v", err) + return diag.FromErr(err) + } + log.Printf("[INFO] Received config map: %#v", cfgMap) + + err = d.Set("metadata", flattenMetadataFields(cfgMap.ObjectMeta)) + if err != nil { + return diag.FromErr(err) + } + + err = d.Set("binary_data", flattenByteMapToBase64Map(cfgMap.BinaryData)) + if err != nil { + return diag.FromErr(err) + } + + err = d.Set("data", cfgMap.Data) + if err != nil { + return diag.FromErr(err) + } + + err = d.Set("immutable", cfgMap.Immutable) + if err != nil { + return diag.FromErr(err) + } + + return nil } diff --git a/kubernetes/data_source_kubernetes_endpoints_v1.go b/kubernetes/data_source_kubernetes_endpoints_v1.go index 3ba407af6c..05c4833f6f 100644 --- a/kubernetes/data_source_kubernetes_endpoints_v1.go +++ b/kubernetes/data_source_kubernetes_endpoints_v1.go @@ -5,6 +5,7 @@ package kubernetes import ( "context" + "log" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -28,11 +29,36 @@ func dataSourceKubernetesEndpointsV1() *schema.Resource { } func dataSourceKubernetesEndpointsV1Read(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn, err := meta.(KubeClientsets).MainClientset() + if err != nil { + return diag.FromErr(err) + } + + metadata := expandMetadata(d.Get("metadata").([]interface{}))
+ om := metav1.ObjectMeta{ - Namespace: d.Get("metadata.0.namespace").(string), - Name: d.Get("metadata.0.name").(string), + Namespace: metadata.Namespace, + Name: metadata.Name, } d.SetId(buildId(om)) - return resourceKubernetesEndpointsV1Read(ctx, d, meta) + log.Printf("[INFO] Reading endpoints %s", metadata.Name) + ep, err := conn.CoreV1().Endpoints(metadata.Namespace).Get(ctx, metadata.Name, metav1.GetOptions{}) + if err != nil { + log.Printf("[DEBUG] Received error: %#v", err) + return diag.Errorf("Failed to read endpoint because: %s", err) + } + log.Printf("[INFO] Received endpoints: %#v", ep) + + err = d.Set("metadata", flattenMetadataFields(ep.ObjectMeta)) + if err != nil { + return diag.FromErr(err) + } + + err = d.Set("subset", flattenEndpointsSubsets(ep.Subsets)) + if err != nil { + return diag.FromErr(err) + } + + return nil } diff --git a/kubernetes/data_source_kubernetes_ingress.go b/kubernetes/data_source_kubernetes_ingress.go index f23cdfbdb8..dd5f5dcd60 100644 --- a/kubernetes/data_source_kubernetes_ingress.go +++ b/kubernetes/data_source_kubernetes_ingress.go @@ -5,11 +5,12 @@ package kubernetes import ( "context" + "log" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" networking "k8s.io/api/networking/v1beta1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func dataSourceKubernetesIngress() *schema.Resource { @@ -134,13 +135,45 @@ func dataSourceKubernetesIngress() *schema.Resource { } func dataSourceKubernetesIngressRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn, err := meta.(KubeClientsets).MainClientset() + if err != nil { + return diag.FromErr(err) + } + metadata := expandMetadata(d.Get("metadata").([]interface{})) - om := meta_v1.ObjectMeta{ + om := metav1.ObjectMeta{ Namespace: metadata.Namespace, Name: metadata.Name, } d.SetId(buildId(om)) - return resourceKubernetesIngressV1Beta1Read(ctx, d, meta) + log.Printf("[INFO] Reading ingress %s", metadata.Name) + ing, err := conn.ExtensionsV1beta1().Ingresses(metadata.Namespace).Get(ctx, metadata.Name, metav1.GetOptions{}) + if err != nil { + log.Printf("[DEBUG] Received error: %#v", err) + return diag.FromErr(err) + } + log.Printf("[INFO] Received ingress: %#v", ing) + + err = d.Set("metadata", flattenMetadataFields(ing.ObjectMeta)) + if err != nil { + return diag.FromErr(err) + } + + err = d.Set("spec", flattenIngressSpec(ing.Spec)) + if err != nil { + return diag.FromErr(err) + } + + err = d.Set("status", []interface{}{ + map[string][]interface{}{ + "load_balancer": flattenIngressStatus(ing.Status.LoadBalancer), + }, + }) + if err != nil { + return diag.FromErr(err) + } + + return nil } diff --git a/kubernetes/data_source_kubernetes_ingress_v1.go b/kubernetes/data_source_kubernetes_ingress_v1.go index 70310abf94..1aa2865b9e 100644 --- a/kubernetes/data_source_kubernetes_ingress_v1.go +++ b/kubernetes/data_source_kubernetes_ingress_v1.go @@ -5,12 +5,13 @@ package kubernetes import ( "context" + "log" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" networking "k8s.io/api/networking/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func dataSourceKubernetesIngressV1() *schema.Resource { @@ -146,13 +147,44 @@ func dataSourceKubernetesIngressV1() *schema.Resource { } func 
dataSourceKubernetesIngressV1Read(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn, err := meta.(KubeClientsets).MainClientset() + if err != nil { + return diag.FromErr(err) + } metadata := expandMetadata(d.Get("metadata").([]interface{})) - om := meta_v1.ObjectMeta{ + om := metav1.ObjectMeta{ Namespace: metadata.Namespace, Name: metadata.Name, } d.SetId(buildId(om)) - return resourceKubernetesIngressV1Read(ctx, d, meta) + log.Printf("[INFO] Reading ingress %s", metadata.Name) + ing, err := conn.NetworkingV1().Ingresses(metadata.Namespace).Get(ctx, metadata.Name, metav1.GetOptions{}) + if err != nil { + log.Printf("[DEBUG] Received error: %#v", err) + return diag.FromErr(err) + } + log.Printf("[INFO] Received ingress: %#v", ing) + + err = d.Set("metadata", flattenMetadataFields(ing.ObjectMeta)) + if err != nil { + return diag.FromErr(err) + } + + err = d.Set("spec", flattenIngressV1Spec(ing.Spec)) + if err != nil { + return diag.FromErr(err) + } + + err = d.Set("status", []interface{}{ + map[string][]interface{}{ + "load_balancer": flattenIngressV1Status(ing.Status.LoadBalancer), + }, + }) + if err != nil { + return diag.FromErr(err) + } + + return nil } diff --git a/kubernetes/data_source_kubernetes_mutating_webhook_configuration_v1.go b/kubernetes/data_source_kubernetes_mutating_webhook_configuration_v1.go index d1bdffd4e1..9e29bcbdc5 100644 --- a/kubernetes/data_source_kubernetes_mutating_webhook_configuration_v1.go +++ b/kubernetes/data_source_kubernetes_mutating_webhook_configuration_v1.go @@ -5,11 +5,13 @@ package kubernetes import ( "context" + "log" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func dataSourceKubernetesMutatingWebhookConfigurationV1() *schema.Resource { @@ -111,8 +113,33 @@ func dataSourceKubernetesMutatingWebhookConfigurationV1() *schema.Resource { } func dataSourceKubernetesMutatingWebhookConfigurationV1Read(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - name := d.Get("metadata.0.name").(string) - d.SetId(name) + conn, err := meta.(KubeClientsets).MainClientset() + if err != nil { + return diag.FromErr(err) + } + + metadata := expandMetadata(d.Get("metadata").([]interface{})) + d.SetId(metadata.Name) + + log.Printf("[INFO] Reading mutating webhook configuration %s", metadata.Name) + cfg, err := conn.AdmissionregistrationV1().MutatingWebhookConfigurations().Get(ctx, metadata.Name, metav1.GetOptions{}) + if err != nil { + log.Printf("[DEBUG] Received error: %#v", err) + return diag.FromErr(err) + } + log.Printf("[INFO] Received mutating webhook configuration: %#v", cfg) + + err = d.Set("metadata", flattenMetadataFields(cfg.ObjectMeta)) + if err != nil { + return diag.FromErr(err) + } + + log.Printf("[DEBUG] Setting mutating webhook configuration to: %#v", cfg.Webhooks) + + err = d.Set("webhook", flattenMutatingWebhooks(cfg.Webhooks)) + if err != nil { + return diag.FromErr(err) + } - return resourceKubernetesMutatingWebhookConfigurationV1Read(ctx, d, meta) + return nil } diff --git a/kubernetes/data_source_kubernetes_namespace_v1.go b/kubernetes/data_source_kubernetes_namespace_v1.go index dac06f36a8..06b7de5e53 100644 --- a/kubernetes/data_source_kubernetes_namespace_v1.go +++ b/kubernetes/data_source_kubernetes_namespace_v1.go @@ -9,8 +9,8 @@ 
import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - v1 "k8s.io/api/core/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func dataSourceKubernetesNamespaceV1() *schema.Resource { @@ -49,24 +49,27 @@ func dataSourceKubernetesNamespaceV1Read(ctx context.Context, d *schema.Resource metadata := expandMetadata(d.Get("metadata").([]interface{})) d.SetId(metadata.Name) - namespace, err := conn.CoreV1().Namespaces().Get(ctx, metadata.Name, meta_v1.GetOptions{}) + namespace, err := conn.CoreV1().Namespaces().Get(ctx, metadata.Name, metav1.GetOptions{}) if err != nil { log.Printf("[DEBUG] Received error: %#v", err) return diag.FromErr(err) } log.Printf("[INFO] Received namespace: %#v", namespace) - err = d.Set("metadata", flattenMetadata(namespace.ObjectMeta, d, meta)) + + err = d.Set("metadata", flattenMetadataFields(namespace.ObjectMeta)) if err != nil { return diag.FromErr(err) } - err = d.Set("spec", flattenNamespaceSpec(&namespace.Spec)) + + err = d.Set("spec", flattenNamespaceV1Spec(&namespace.Spec)) if err != nil { return diag.FromErr(err) } + return nil } -func flattenNamespaceSpec(in *v1.NamespaceSpec) []interface{} { +func flattenNamespaceV1Spec(in *corev1.NamespaceSpec) []interface{} { if in == nil || len(in.Finalizers) == 0 { return []interface{}{} } @@ -76,5 +79,6 @@ func flattenNamespaceSpec(in *v1.NamespaceSpec) []interface{} { fin[i] = string(f) } spec["finalizers"] = fin + return []interface{}{spec} } diff --git a/kubernetes/data_source_kubernetes_nodes.go b/kubernetes/data_source_kubernetes_nodes.go index d944f31eea..9231f95a72 100644 --- a/kubernetes/data_source_kubernetes_nodes.go +++ b/kubernetes/data_source_kubernetes_nodes.go @@ -74,9 +74,9 @@ func dataSourceKubernetesNodesRead(ctx context.Context, d *schema.ResourceData, listOptions := metav1.ListOptions{} - m := d.Get("metadata").([]interface{}) - if len(m) > 0 { - metadata := expandMetadata(m) + metadata := d.Get("metadata").([]interface{}) + if len(metadata) > 0 { + metadata := expandMetadata(metadata) labelMap, err := metav1.LabelSelectorAsMap(&metav1.LabelSelector{MatchLabels: metadata.Labels}) if err != nil { return diag.FromErr(err) @@ -94,13 +94,11 @@ func dataSourceKubernetesNodesRead(ctx context.Context, d *schema.ResourceData, nodes := make([]interface{}, len(nodesRaw.Items)) for i, v := range nodesRaw.Items { log.Printf("[INFO] Received node: %s", v.Name) - prefix := fmt.Sprintf("nodes.%d.", i) - n := map[string]interface{}{ - "metadata": flattenMetadata(v.ObjectMeta, d, meta, prefix), + nodes[i] = map[string]interface{}{ + "metadata": flattenMetadataFields(v.ObjectMeta), "spec": flattenNodeSpec(v.Spec), "status": flattenNodeStatus(v.Status), } - nodes[i] = n } if err := d.Set("nodes", nodes); err != nil { return diag.FromErr(err) @@ -113,5 +111,6 @@ func dataSourceKubernetesNodesRead(ctx context.Context, d *schema.ResourceData, } id := fmt.Sprintf("%x", idsum.Sum(nil)) d.SetId(id) + return nil } diff --git a/kubernetes/data_source_kubernetes_nodes_test.go b/kubernetes/data_source_kubernetes_nodes_test.go index 7f342b5a1c..36b5832abd 100644 --- a/kubernetes/data_source_kubernetes_nodes_test.go +++ b/kubernetes/data_source_kubernetes_nodes_test.go @@ -21,11 +21,11 @@ func checkParsableQuantity(value string) error { func TestAccKubernetesDataSourceNodes_basic(t *testing.T) { dataSourceName := "data.kubernetes_nodes.test" nodeName := 
regexp.MustCompile(`^[a-z0-9]+(?:[-.]{1}[a-z0-9]+)*$`) - zeroOrMore := regexp.MustCompile(`^[0-9]+$`) oneOrMore := regexp.MustCompile(`^[1-9][0-9]*$`) checkFuncs := resource.ComposeAggregateTestCheckFunc( resource.TestMatchResourceAttr(dataSourceName, "nodes.#", oneOrMore), - resource.TestMatchResourceAttr(dataSourceName, "nodes.0.metadata.0.labels.%", zeroOrMore), + resource.TestMatchResourceAttr(dataSourceName, "nodes.0.metadata.0.annotations.%", oneOrMore), + resource.TestMatchResourceAttr(dataSourceName, "nodes.0.metadata.0.labels.%", oneOrMore), resource.TestCheckResourceAttrSet(dataSourceName, "nodes.0.metadata.0.resource_version"), resource.TestMatchResourceAttr(dataSourceName, "nodes.0.metadata.0.name", nodeName), resource.TestMatchResourceAttr(dataSourceName, "nodes.0.spec.0.%", oneOrMore), diff --git a/kubernetes/data_source_kubernetes_persistent_volume_claim_v1.go b/kubernetes/data_source_kubernetes_persistent_volume_claim_v1.go index 170552afbd..92771fd595 100644 --- a/kubernetes/data_source_kubernetes_persistent_volume_claim_v1.go +++ b/kubernetes/data_source_kubernetes_persistent_volume_claim_v1.go @@ -5,10 +5,11 @@ package kubernetes import ( "context" + "log" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func dataSourceKubernetesPersistentVolumeClaimV1() *schema.Resource { @@ -88,13 +89,36 @@ func dataSourceKubernetesPersistentVolumeClaimV1() *schema.Resource { } func dataSourceKubernetesPersistentVolumeClaimV1Read(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn, err := meta.(KubeClientsets).MainClientset() + if err != nil { + return diag.FromErr(err) + } + metadata := expandMetadata(d.Get("metadata").([]interface{})) - om := meta_v1.ObjectMeta{ + om := metav1.ObjectMeta{ Namespace: metadata.Namespace, Name: metadata.Name, } d.SetId(buildId(om)) - return resourceKubernetesPersistentVolumeClaimV1Read(ctx, d, meta) + log.Printf("[INFO] Reading persistent volume claim %s", metadata.Name) + claim, err := conn.CoreV1().PersistentVolumeClaims(metadata.Namespace).Get(ctx, metadata.Name, metav1.GetOptions{}) + if err != nil { + log.Printf("[DEBUG] Received error: %#v", err) + return diag.FromErr(err) + } + log.Printf("[INFO] Received persistent volume claim: %#v", claim) + + err = d.Set("metadata", flattenMetadataFields(claim.ObjectMeta)) + if err != nil { + return diag.FromErr(err) + } + + err = d.Set("spec", flattenPersistentVolumeClaimSpec(claim.Spec)) + if err != nil { + return diag.FromErr(err) + } + + return nil } diff --git a/kubernetes/data_source_kubernetes_persistent_volume_v1.go b/kubernetes/data_source_kubernetes_persistent_volume_v1.go index 3c4be11fe9..163fe60a09 100644 --- a/kubernetes/data_source_kubernetes_persistent_volume_v1.go +++ b/kubernetes/data_source_kubernetes_persistent_volume_v1.go @@ -5,10 +5,12 @@ package kubernetes import ( "context" + "log" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func dataSourceKubernetesPersistentVolumeV1() *schema.Resource { @@ -145,7 +147,31 @@ func dataSourceKubernetesPersistentVolumeV1() *schema.Resource { } func dataSourceKubernetesPersistentVolumeV1Read(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - 
name := d.Get("metadata.0.name").(string) - d.SetId(name) - return resourceKubernetesPersistentVolumeV1Read(ctx, d, meta) + conn, err := meta.(KubeClientsets).MainClientset() + if err != nil { + return diag.FromErr(err) + } + + metadata := expandMetadata(d.Get("metadata").([]interface{})) + d.SetId(metadata.Name) + + log.Printf("[INFO] Reading persistent volume %s", metadata.Name) + volume, err := conn.CoreV1().PersistentVolumes().Get(ctx, metadata.Name, metav1.GetOptions{}) + if err != nil { + log.Printf("[DEBUG] Received error: %#v", err) + return diag.FromErr(err) + } + log.Printf("[INFO] Received persistent volume: %#v", volume) + + err = d.Set("metadata", flattenMetadataFields(volume.ObjectMeta)) + if err != nil { + return diag.FromErr(err) + } + + err = d.Set("spec", flattenPersistentVolumeSpec(volume.Spec)) + if err != nil { + return diag.FromErr(err) + } + + return nil } diff --git a/kubernetes/data_source_kubernetes_pod_v1.go b/kubernetes/data_source_kubernetes_pod_v1.go index 14212c776a..c944d0aa33 100644 --- a/kubernetes/data_source_kubernetes_pod_v1.go +++ b/kubernetes/data_source_kubernetes_pod_v1.go @@ -5,7 +5,6 @@ package kubernetes import ( "context" - "fmt" "log" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -61,7 +60,7 @@ func dataSourceKubernetesPodV1Read(ctx context.Context, d *schema.ResourceData, } log.Printf("[INFO] Received pod: %#v", pod) - err = d.Set("metadata", flattenMetadata(pod.ObjectMeta, d, meta)) + err = d.Set("metadata", flattenMetadataFields(pod.ObjectMeta)) if err != nil { return diag.FromErr(err) } @@ -75,8 +74,10 @@ func dataSourceKubernetesPodV1Read(ctx context.Context, d *schema.ResourceData, if err != nil { return diag.FromErr(err) } - statusPhase := fmt.Sprintf("%v", pod.Status.Phase) - d.Set("status", statusPhase) + err = d.Set("status", pod.Status.Phase) + if err != nil { + return diag.FromErr(err) + } return nil diff --git a/kubernetes/data_source_kubernetes_secret_v1.go b/kubernetes/data_source_kubernetes_secret_v1.go index 83a47d8b37..49e6261e1f 100644 --- a/kubernetes/data_source_kubernetes_secret_v1.go +++ b/kubernetes/data_source_kubernetes_secret_v1.go @@ -5,10 +5,11 @@ package kubernetes import ( "context" + "log" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func dataSourceKubernetesSecretV1() *schema.Resource { @@ -44,11 +45,47 @@ func dataSourceKubernetesSecretV1() *schema.Resource { } func dataSourceKubernetesSecretV1Read(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - om := meta_v1.ObjectMeta{ - Namespace: d.Get("metadata.0.namespace").(string), - Name: d.Get("metadata.0.name").(string), + conn, err := meta.(KubeClientsets).MainClientset() + if err != nil { + return diag.FromErr(err) + } + + metadata := expandMetadata(d.Get("metadata").([]interface{})) + + om := metav1.ObjectMeta{ + Namespace: metadata.Namespace, + Name: metadata.Name, } d.SetId(buildId(om)) - return resourceKubernetesSecretV1Read(ctx, d, meta) + log.Printf("[INFO] Reading secret %s", metadata.Name) + secret, err := conn.CoreV1().Secrets(metadata.Namespace).Get(ctx, metadata.Name, metav1.GetOptions{}) + if err != nil { + return diag.FromErr(err) + } + log.Printf("[INFO] Received secret: %#v", secret.ObjectMeta) + + err = d.Set("metadata", flattenMetadataFields(secret.ObjectMeta)) + if err != nil { + return diag.FromErr(err) + } + + binaryDataKeys := 
[]string{} + if v, ok := d.GetOk("binary_data"); ok { + binaryData := map[string][]byte{} + for k := range v.(map[string]interface{}) { + binaryData[k] = secret.Data[k] + binaryDataKeys = append(binaryDataKeys, k) + } + d.Set("binary_data", base64EncodeByteMap(binaryData)) + } + + for _, k := range binaryDataKeys { + delete(secret.Data, k) + } + d.Set("data", flattenByteMapToStringMap(secret.Data)) + d.Set("type", secret.Type) + d.Set("immutable", secret.Immutable) + + return nil } diff --git a/kubernetes/data_source_kubernetes_service_account_v1.go b/kubernetes/data_source_kubernetes_service_account_v1.go index 1630688116..8dadbb2fbc 100644 --- a/kubernetes/data_source_kubernetes_service_account_v1.go +++ b/kubernetes/data_source_kubernetes_service_account_v1.go @@ -5,6 +5,7 @@ package kubernetes import ( "context" + "log" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -80,7 +81,50 @@ func dataSourceKubernetesServiceAccountV1Read(ctx context.Context, d *schema.Res d.SetId(buildId(sa.ObjectMeta)) - diagMsg = append(diagMsg, resourceKubernetesServiceAccountV1Read(ctx, d, meta)...) + log.Printf("[INFO] Reading service account %s", metadata.Name) + svcAcc, err := conn.CoreV1().ServiceAccounts(metadata.Namespace).Get(ctx, metadata.Name, metav1.GetOptions{}) + if err != nil { + log.Printf("[DEBUG] Received error: %#v", err) + diagMsg = append(diagMsg, diag.FromErr(err)...) + return diagMsg + } + log.Printf("[INFO] Received service account: %#v", svcAcc) + + err = d.Set("metadata", flattenMetadataFields(svcAcc.ObjectMeta)) + if err != nil { + diagMsg = append(diagMsg, diag.FromErr(err)...) + return diagMsg + } + + if svcAcc.AutomountServiceAccountToken == nil { + err = d.Set("automount_service_account_token", false) + if err != nil { + diagMsg = append(diagMsg, diag.FromErr(err)...) + return diagMsg + } + } else { + err = d.Set("automount_service_account_token", *svcAcc.AutomountServiceAccountToken) + if err != nil { + diagMsg = append(diagMsg, diag.FromErr(err)...) + return diagMsg + } + } + + err = d.Set("image_pull_secret", flattenLocalObjectReferenceArray(svcAcc.ImagePullSecrets)) + if err != nil { + diagMsg = append(diagMsg, diag.FromErr(err)...) + return diagMsg + } + + defaultSecretName := d.Get("default_secret_name").(string) + log.Printf("[DEBUG] Default secret name is %q", defaultSecretName) + secrets := flattenServiceAccountSecrets(svcAcc.Secrets, defaultSecretName) + log.Printf("[DEBUG] Flattened secrets: %#v", secrets) + err = d.Set("secret", secrets) + if err != nil { + diagMsg = append(diagMsg, diag.FromErr(err)...) 
+ return diagMsg + } - return diagMsg + return nil } diff --git a/kubernetes/data_source_kubernetes_service_v1.go b/kubernetes/data_source_kubernetes_service_v1.go index f1cb39a59e..8999de8476 100644 --- a/kubernetes/data_source_kubernetes_service_v1.go +++ b/kubernetes/data_source_kubernetes_service_v1.go @@ -5,12 +5,13 @@ package kubernetes import ( "context" + "log" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" corev1 "k8s.io/api/core/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func dataSourceKubernetesServiceV1() *schema.Resource { @@ -240,11 +241,45 @@ func dataSourceKubernetesServiceV1() *schema.Resource { } func dataSourceKubernetesServiceV1Read(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - om := meta_v1.ObjectMeta{ - Namespace: d.Get("metadata.0.namespace").(string), - Name: d.Get("metadata.0.name").(string), + conn, err := meta.(KubeClientsets).MainClientset() + if err != nil { + return diag.FromErr(err) + } + + metadata := expandMetadata(d.Get("metadata").([]interface{})) + + om := metav1.ObjectMeta{ + Namespace: metadata.Namespace, + Name: metadata.Name, } d.SetId(buildId(om)) - return resourceKubernetesServiceV1Read(ctx, d, meta) + log.Printf("[INFO] Reading service %s", metadata.Name) + svc, err := conn.CoreV1().Services(metadata.Namespace).Get(ctx, metadata.Name, metav1.GetOptions{}) + if err != nil { + log.Printf("[DEBUG] Received error: %#v", err) + return diag.FromErr(err) + } + log.Printf("[INFO] Received service: %#v", svc) + + err = d.Set("metadata", flattenMetadataFields(svc.ObjectMeta)) + if err != nil { + return diag.FromErr(err) + } + + err = d.Set("status", []interface{}{ + map[string][]interface{}{ + "load_balancer": flattenLoadBalancerStatus(svc.Status.LoadBalancer), + }, + }) + if err != nil { + return diag.FromErr(err) + } + + err = d.Set("spec", flattenServiceSpec(svc.Spec)) + if err != nil { + return diag.FromErr(err) + } + + return nil } diff --git a/kubernetes/data_source_kubernetes_storage_class_v1.go b/kubernetes/data_source_kubernetes_storage_class_v1.go index 84f453f40c..19d1bcf6cf 100644 --- a/kubernetes/data_source_kubernetes_storage_class_v1.go +++ b/kubernetes/data_source_kubernetes_storage_class_v1.go @@ -5,9 +5,11 @@ package kubernetes import ( "context" + "log" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func dataSourceKubernetesStorageClassV1() *schema.Resource { @@ -87,7 +89,36 @@ func dataSourceKubernetesStorageClassV1() *schema.Resource { } func dataSourceKubernetesStorageClassV1Read(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - name := d.Get("metadata.0.name").(string) - d.SetId(name) - return resourceKubernetesStorageClassV1Read(ctx, d, meta) + conn, err := meta.(KubeClientsets).MainClientset() + if err != nil { + return diag.FromErr(err) + } + + metadata := expandMetadata(d.Get("metadata").([]interface{})) + d.SetId(metadata.Name) + + log.Printf("[INFO] Reading storage class %s", metadata.Name) + storageClass, err := conn.StorageV1().StorageClasses().Get(ctx, metadata.Name, metav1.GetOptions{}) + if err != nil { + log.Printf("[DEBUG] Received error: %#v", err) + return diag.FromErr(err) + } + log.Printf("[INFO] Received storage class: %#v", storageClass) + + 
diags := diag.Diagnostics{} + + err = d.Set("metadata", flattenMetadataFields(storageClass.ObjectMeta)) + if err != nil { + diags = append(diags, diag.FromErr(err)[0]) + } + + sc := flattenStorageClass(*storageClass) + for k, v := range sc { + err = d.Set(k, v) + if err != nil { + diags = append(diags, diag.FromErr(err)[0]) + } + } + + return diags } diff --git a/kubernetes/provider_ignore_metadata_test.go b/kubernetes/provider_ignore_metadata_test.go index ecb99cae08..643694ab0e 100644 --- a/kubernetes/provider_ignore_metadata_test.go +++ b/kubernetes/provider_ignore_metadata_test.go @@ -6,6 +6,7 @@ package kubernetes import ( "context" "fmt" + "regexp" "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" @@ -19,6 +20,7 @@ func TestAccKubernetesIgnoreKubernetesMetadata_basic(t *testing.T) { namespaceName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) ignoreKubernetesMetadata := "terraform.io/provider" dataSourceName := "data.kubernetes_namespace_v1.this" + oneOrMore := regexp.MustCompile(`^[1-9][0-9]*$`) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { @@ -33,8 +35,8 @@ func TestAccKubernetesIgnoreKubernetesMetadata_basic(t *testing.T) { { Config: testAccKubernetesIgnoreKubernetesMetadataProviderConfig(namespaceName, ignoreKubernetesMetadata), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(dataSourceName, "metadata.annotations.#", "0"), - resource.TestCheckResourceAttr(dataSourceName, "metadata.labels.#", "0"), + resource.TestMatchResourceAttr(dataSourceName, "metadata.0.annotations.%", oneOrMore), + resource.TestMatchResourceAttr(dataSourceName, "metadata.0.labels.%", oneOrMore), ), }, }, @@ -66,15 +68,12 @@ func createNamespaceIgnoreKubernetesMetadata(namespaceName string, ignoreKuberne return err } ns := corev1.Namespace{} - m := map[string]string{ignoreKubernetesMetadata: "kubernetes"} ns.SetName(namespaceName) + m := map[string]string{ignoreKubernetesMetadata: "kubernetes"} ns.SetAnnotations(m) ns.SetLabels(m) - namespace, err := conn.CoreV1().Namespaces().Create(context.Background(), &ns, metav1.CreateOptions{}) - switch namespace.Status.Phase { - case corev1.NamespaceActive: - return err - } + _, err = conn.CoreV1().Namespaces().Create(context.Background(), &ns, metav1.CreateOptions{}) + return err } diff --git a/kubernetes/resource_kubernetes_cron_job_v1.go b/kubernetes/resource_kubernetes_cron_job_v1.go index 753b2d132f..1b1572cf26 100644 --- a/kubernetes/resource_kubernetes_cron_job_v1.go +++ b/kubernetes/resource_kubernetes_cron_job_v1.go @@ -58,11 +58,6 @@ func resourceKubernetesCronJobV1Create(ctx context.Context, d *schema.ResourceDa return diag.FromErr(err) } - configAnnotations := d.Get("metadata.0.annotations").(map[string]interface{}) - ignoreAnnotations := meta.(kubeClientsets).IgnoreAnnotations - annotations := removeInternalKeys(metadata.Annotations, make(map[string]interface{})) - metadata.Annotations = removeKeys(annotations, configAnnotations, ignoreAnnotations) - job := batch.CronJob{ ObjectMeta: metadata, Spec: spec, @@ -96,11 +91,6 @@ func resourceKubernetesCronJobV1Update(ctx context.Context, d *schema.ResourceDa if err != nil { return diag.FromErr(err) } - configAnnotations := d.Get("metadata.0.annotations").(map[string]interface{}) - ignoreAnnotations := meta.(kubeClientsets).IgnoreAnnotations - annotations := removeInternalKeys(metadata.Annotations, make(map[string]interface{})) - metadata.Annotations = removeKeys(annotations, configAnnotations, 
ignoreAnnotations) - spec.JobTemplate.ObjectMeta.Annotations = metadata.Annotations cronjob := &batch.CronJob{ ObjectMeta: metadata, @@ -147,26 +137,13 @@ func resourceKubernetesCronJobV1Read(ctx context.Context, d *schema.ResourceData log.Printf("[INFO] Received cron job: %#v", job) // Remove server-generated labels unless using manual selector - if _, ok := d.GetOk("spec.0.manual_selector"); !ok { - labels := job.ObjectMeta.Labels - - if _, ok := labels["controller-uid"]; ok { - delete(labels, "controller-uid") - } - - if _, ok := labels["cron-job-name"]; ok { - delete(labels, "cron-job-name") - } - - if job.Spec.JobTemplate.Spec.Selector != nil && - job.Spec.JobTemplate.Spec.Selector.MatchLabels != nil { - labels = job.Spec.JobTemplate.Spec.Selector.MatchLabels - } - - if _, ok := labels["controller-uid"]; ok { - delete(labels, "controller-uid") + if _, ok := d.GetOk("spec.0.job_template.spec.0.manual_selector"); !ok { + removeGeneratedLabels(job.ObjectMeta.Labels) + if job.Spec.JobTemplate.Spec.Selector != nil { + removeGeneratedLabels(job.Spec.JobTemplate.Spec.Selector.MatchLabels) } } + err = d.Set("metadata", flattenMetadata(job.ObjectMeta, d, meta)) if err != nil { return diag.FromErr(err) diff --git a/kubernetes/resource_kubernetes_cron_job_v1beta1.go b/kubernetes/resource_kubernetes_cron_job_v1beta1.go index 038a068811..8924d6e337 100644 --- a/kubernetes/resource_kubernetes_cron_job_v1beta1.go +++ b/kubernetes/resource_kubernetes_cron_job_v1beta1.go @@ -150,24 +150,10 @@ func resourceKubernetesCronJobV1Beta1Read(ctx context.Context, d *schema.Resourc log.Printf("[INFO] Received cron job: %#v", job) // Remove server-generated labels unless using manual selector - if _, ok := d.GetOk("spec.0.manual_selector"); !ok { - labels := job.ObjectMeta.Labels - - if _, ok := labels["controller-uid"]; ok { - delete(labels, "controller-uid") - } - - if _, ok := labels["cron-job-name"]; ok { - delete(labels, "cron-job-name") - } - - if job.Spec.JobTemplate.Spec.Selector != nil && - job.Spec.JobTemplate.Spec.Selector.MatchLabels != nil { - labels = job.Spec.JobTemplate.Spec.Selector.MatchLabels - } - - if _, ok := labels["controller-uid"]; ok { - delete(labels, "controller-uid") + if _, ok := d.GetOk("spec.0.job_template.spec.0.manual_selector"); !ok { + removeGeneratedLabels(job.ObjectMeta.Labels) + if job.Spec.JobTemplate.Spec.Selector != nil { + removeGeneratedLabels(job.Spec.JobTemplate.Spec.Selector.MatchLabels) } } diff --git a/kubernetes/resource_kubernetes_job_v1.go b/kubernetes/resource_kubernetes_job_v1.go index 577b854190..25e6687b2c 100644 --- a/kubernetes/resource_kubernetes_job_v1.go +++ b/kubernetes/resource_kubernetes_job_v1.go @@ -140,21 +140,8 @@ func resourceKubernetesJobV1Read(ctx context.Context, d *schema.ResourceData, me // Remove server-generated labels unless using manual selector if _, ok := d.GetOk("spec.0.manual_selector"); !ok { - labels := job.ObjectMeta.Labels - - if _, ok := labels["controller-uid"]; ok { - delete(labels, "controller-uid") - } - - if _, ok := labels["job-name"]; ok { - delete(labels, "job-name") - } - - labels = job.Spec.Selector.MatchLabels - - if _, ok := labels["controller-uid"]; ok { - delete(labels, "controller-uid") - } + removeGeneratedLabels(job.ObjectMeta.Labels) + removeGeneratedLabels(job.Spec.Selector.MatchLabels) } err = d.Set("metadata", flattenMetadata(job.ObjectMeta, d, meta)) diff --git a/kubernetes/resource_kubernetes_mutating_webhook_configuration_v1.go b/kubernetes/resource_kubernetes_mutating_webhook_configuration_v1.go index 
1a17f923e6..0cf65c0417 100644 --- a/kubernetes/resource_kubernetes_mutating_webhook_configuration_v1.go +++ b/kubernetes/resource_kubernetes_mutating_webhook_configuration_v1.go @@ -187,7 +187,7 @@ func resourceKubernetesMutatingWebhookConfigurationV1Read(ctx context.Context, d err = d.Set("metadata", flattenMetadata(cfg.ObjectMeta, d, meta)) if err != nil { - return nil + return diag.FromErr(err) } log.Printf("[DEBUG] Setting webhook to: %#v", cfg.Webhooks) diff --git a/kubernetes/structure_cron_job_v1.go b/kubernetes/structure_cron_job_v1.go index 9421af13f8..1a2128fd7a 100644 --- a/kubernetes/structure_cron_job_v1.go +++ b/kubernetes/structure_cron_job_v1.go @@ -44,7 +44,7 @@ func flattenCronJobSpecV1(in batch.CronJobSpec, d *schema.ResourceData, meta int func flattenJobTemplateV1(in batch.JobTemplateSpec, d *schema.ResourceData, meta interface{}) ([]interface{}, error) { att := make(map[string]interface{}) - att["metadata"] = flattenMetadata(in.ObjectMeta, d, meta, "spec.0.job_template.0.") + att["metadata"] = flattenMetadataFields(in.ObjectMeta) jobSpec, err := flattenJobV1Spec(in.Spec, d, meta, "spec.0.job_template.0.spec.0.template.0.") if err != nil { diff --git a/kubernetes/structure_job.go b/kubernetes/structure_job.go index ffaf259f2c..0aaedfb4f5 100644 --- a/kubernetes/structure_job.go +++ b/kubernetes/structure_job.go @@ -40,18 +40,10 @@ func flattenJobV1Spec(in batchv1.JobSpec, d *schema.ResourceData, meta interface if in.Selector != nil { att["selector"] = flattenLabelSelector(in.Selector) } - // Remove server-generated labels - labels := in.Template.ObjectMeta.Labels - if _, ok := labels["controller-uid"]; ok { - delete(labels, "controller-uid") - } - - if _, ok := labels["job-name"]; ok { - delete(labels, "job-name") - } + removeGeneratedLabels(in.Template.ObjectMeta.Labels) - podSpec, err := flattenPodTemplateSpec(in.Template, d, meta, prefix...) + podSpec, err := flattenPodTemplateSpec(in.Template) if err != nil { return nil, err } @@ -156,3 +148,27 @@ func patchJobV1Spec(pathPrefix, prefix string, d *schema.ResourceData) (PatchOpe return ops, nil } + +// removeGeneratedLabels removes server-generated labels +func removeGeneratedLabels(labels map[string]string) map[string]string { + // The Jobs controller adds the following labels to the template block dynamically + // and thus we have to ignore them to avoid perpetual diff: + // - 'batch.kubernetes.io/controller-uid' + // - 'batch.kubernetes.io/job-name' + // - 'controller-uid' // deprecated starting from Kubernetes 1.27 + // - 'job-name' // deprecated starting from Kubernetes 1.27 + // + // More information: https://kubernetes.io/docs/reference/labels-annotations-taints/ + generatedLabels := []string{ + "batch.kubernetes.io/controller-uid", + "batch.kubernetes.io/job-name", + // Starting from Kubernetes 1.27, the following labels are deprecated. + "controller-uid", + "job-name", + } + for _, l := range generatedLabels { + delete(labels, l) + } + + return labels +} diff --git a/kubernetes/structures.go b/kubernetes/structures.go index db7867f0e1..14df5c2c1e 100644 --- a/kubernetes/structures.go +++ b/kubernetes/structures.go @@ -115,72 +115,60 @@ func expandStringSlice(s []interface{}) []string { return result } -func flattenMetadata(meta metav1.ObjectMeta, d *schema.ResourceData, providerMetadata interface{}, metaPrefix ...string) []interface{} { +// flattenMetadataFields flattens all metadata fields. 
+func flattenMetadataFields(meta metav1.ObjectMeta) []interface{} { m := make(map[string]interface{}) - prefix := "" - if len(metaPrefix) > 0 { - prefix = metaPrefix[0] - } - - if prefix == "" { - configAnnotations := d.Get(prefix + "metadata.0.annotations").(map[string]interface{}) - ignoreAnnotations := providerMetadata.(kubeClientsets).IgnoreAnnotations - annotations := removeInternalKeys(meta.Annotations, configAnnotations) - m["annotations"] = removeKeys(annotations, configAnnotations, ignoreAnnotations) - } else { - m["annotations"] = d.Get(prefix + "metadata.0.annotations").(map[string]interface{}) - } - + m["annotations"] = meta.Annotations if meta.GenerateName != "" { m["generate_name"] = meta.GenerateName } - - configLabels := d.Get(prefix + "metadata.0.labels").(map[string]interface{}) - ignoreLabels := providerMetadata.(kubeClientsets).IgnoreLabels - labels := removeInternalKeys(meta.Labels, configLabels) - m["labels"] = removeKeys(labels, configLabels, ignoreLabels) - m["name"] = meta.Name - m["resource_version"] = meta.ResourceVersion - m["uid"] = fmt.Sprintf("%v", meta.UID) m["generation"] = meta.Generation - + m["labels"] = meta.Labels + m["name"] = meta.Name if meta.Namespace != "" { m["namespace"] = meta.Namespace } + m["resource_version"] = meta.ResourceVersion + m["uid"] = string(meta.UID) return []interface{}{m} } -func removeInternalKeys(m map[string]string, d map[string]interface{}) map[string]string { +func flattenMetadata(meta metav1.ObjectMeta, d *schema.ResourceData, providerMetadata interface{}) []interface{} { + metadata := expandMetadata(d.Get("metadata").([]interface{})) + + ignoreAnnotations := providerMetadata.(kubeClientsets).IgnoreAnnotations + removeInternalKeys(meta.Annotations, metadata.Annotations) + removeKeys(meta.Annotations, metadata.Annotations, ignoreAnnotations) + + ignoreLabels := providerMetadata.(kubeClientsets).IgnoreLabels + removeInternalKeys(meta.Labels, metadata.Labels) + removeKeys(meta.Labels, metadata.Labels, ignoreLabels) + + return flattenMetadataFields(meta) +} + +func removeInternalKeys(m map[string]string, d map[string]string) { for k := range m { if isInternalKey(k) && !isKeyInMap(k, d) { delete(m, k) } } - return m } // removeKeys removes given Kubernetes metadata(annotations and labels) keys. // In that case, they won't be available in the TF state file and will be ignored during apply/plan operations. 
-func removeKeys(m map[string]string, d map[string]interface{}, ignoreKubernetesMetadataKeys []string) map[string]string { +func removeKeys(m map[string]string, d map[string]string, ignoreKubernetesMetadataKeys []string) { for k := range m { if ignoreKey(k, ignoreKubernetesMetadataKeys) && !isKeyInMap(k, d) { delete(m, k) } } - return m } -func isKeyInMap(key string, d map[string]interface{}) bool { - if d == nil { - return false - } - for k := range d { - if k == key { - return true - } - } - return false +func isKeyInMap(key string, d map[string]string) bool { + _, ok := d[key] + return ok } func isInternalKey(annotationKey string) bool { diff --git a/kubernetes/structures_daemonset.go b/kubernetes/structures_daemonset.go index f7f0001019..a65d6e42c7 100644 --- a/kubernetes/structures_daemonset.go +++ b/kubernetes/structures_daemonset.go @@ -29,7 +29,7 @@ func flattenDaemonSetSpec(in appsv1.DaemonSetSpec, d *schema.ResourceData, meta } template := make(map[string]interface{}) template["spec"] = podSpec - template["metadata"] = flattenMetadata(in.Template.ObjectMeta, d, meta, "spec.0.template.0.") + template["metadata"] = flattenMetadataFields(in.Template.ObjectMeta) att["template"] = []interface{}{template} return []interface{}{att}, nil diff --git a/kubernetes/structures_deployment.go b/kubernetes/structures_deployment.go index dca4ba3a20..648229e649 100644 --- a/kubernetes/structures_deployment.go +++ b/kubernetes/structures_deployment.go @@ -40,7 +40,7 @@ func flattenDeploymentSpec(in appsv1.DeploymentSpec, d *schema.ResourceData, met } template := make(map[string]interface{}) template["spec"] = podSpec - template["metadata"] = flattenMetadata(in.Template.ObjectMeta, d, meta, "spec.0.template.0.") + template["metadata"] = flattenMetadataFields(in.Template.ObjectMeta) att["template"] = []interface{}{template} return []interface{}{att}, nil diff --git a/kubernetes/structures_pod_test.go b/kubernetes/structures_pod_test.go index b627ba32c4..2336dc038a 100644 --- a/kubernetes/structures_pod_test.go +++ b/kubernetes/structures_pod_test.go @@ -8,17 +8,17 @@ import ( "testing" "github.com/google/go-cmp/cmp" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" ) func TestFlattenTolerations(t *testing.T) { cases := []struct { - Input []v1.Toleration + Input []corev1.Toleration ExpectedOutput []interface{} }{ { - []v1.Toleration{ + []corev1.Toleration{ { Key: "node-role.kubernetes.io/spot-worker", Value: "true", @@ -32,7 +32,7 @@ func TestFlattenTolerations(t *testing.T) { }, }, { - []v1.Toleration{ + []corev1.Toleration{ { Key: "node-role.kubernetes.io/other-worker", Operator: "Exists", @@ -54,10 +54,10 @@ func TestFlattenTolerations(t *testing.T) { }, }, { - []v1.Toleration{ + []corev1.Toleration{ { Effect: "NoExecute", - TolerationSeconds: ptrToInt64(120), + TolerationSeconds: ptrToInt64(int64(120)), }, }, []interface{}{ @@ -68,7 +68,7 @@ func TestFlattenTolerations(t *testing.T) { }, }, { - []v1.Toleration{}, + []corev1.Toleration{}, []interface{}{}, }, } @@ -85,7 +85,7 @@ func TestFlattenTolerations(t *testing.T) { func TestExpandTolerations(t *testing.T) { cases := []struct { Input []interface{} - ExpectedOutput []*v1.Toleration + ExpectedOutput []*corev1.Toleration }{ { []interface{}{ @@ -94,7 +94,7 @@ func TestExpandTolerations(t *testing.T) { "value": "true", }, }, - []*v1.Toleration{ + []*corev1.Toleration{ { Key: "node-role.kubernetes.io/spot-worker", Value: "true", @@ -112,7 +112,7 @@ func TestExpandTolerations(t *testing.T) { "operator": 
"Exists", }, }, - []*v1.Toleration{ + []*corev1.Toleration{ { Key: "node-role.kubernetes.io/spot-worker", Value: "true", @@ -130,16 +130,16 @@ func TestExpandTolerations(t *testing.T) { "toleration_seconds": "120", }, }, - []*v1.Toleration{ + []*corev1.Toleration{ { Effect: "NoExecute", - TolerationSeconds: ptrToInt64(120), + TolerationSeconds: ptrToInt64(int64(120)), }, }, }, { []interface{}{}, - []*v1.Toleration{}, + []*corev1.Toleration{}, }, } @@ -157,18 +157,18 @@ func TestExpandTolerations(t *testing.T) { func TestFlattenSecretVolumeSource(t *testing.T) { cases := []struct { - Input *v1.SecretVolumeSource + Input *corev1.SecretVolumeSource ExpectedOutput []interface{} }{ { - &v1.SecretVolumeSource{ - DefaultMode: ptrToInt32(0644), + &corev1.SecretVolumeSource{ + DefaultMode: ptrToInt32(int32(0644)), SecretName: "secret1", Optional: ptrToBool(true), - Items: []v1.KeyToPath{ + Items: []corev1.KeyToPath{ { Key: "foo.txt", - Mode: ptrToInt32(0600), + Mode: ptrToInt32(int32(0600)), Path: "etc/foo.txt", }, }, @@ -189,10 +189,10 @@ func TestFlattenSecretVolumeSource(t *testing.T) { }, }, { - &v1.SecretVolumeSource{ - DefaultMode: ptrToInt32(0755), + &corev1.SecretVolumeSource{ + DefaultMode: ptrToInt32(int32(0755)), SecretName: "secret2", - Items: []v1.KeyToPath{ + Items: []corev1.KeyToPath{ { Key: "bar.txt", Path: "etc/bar.txt", @@ -213,7 +213,7 @@ func TestFlattenSecretVolumeSource(t *testing.T) { }, }, { - &v1.SecretVolumeSource{}, + &corev1.SecretVolumeSource{}, []interface{}{map[string]interface{}{}}, }, } @@ -230,7 +230,7 @@ func TestFlattenSecretVolumeSource(t *testing.T) { func TestExpandSecretVolumeSource(t *testing.T) { cases := []struct { Input []interface{} - ExpectedOutput *v1.SecretVolumeSource + ExpectedOutput *corev1.SecretVolumeSource }{ { []interface{}{ @@ -247,14 +247,14 @@ func TestExpandSecretVolumeSource(t *testing.T) { }, }, }, - &v1.SecretVolumeSource{ - DefaultMode: ptrToInt32(0644), + &corev1.SecretVolumeSource{ + DefaultMode: ptrToInt32(int32(0644)), SecretName: "secret1", Optional: ptrToBool(true), - Items: []v1.KeyToPath{ + Items: []corev1.KeyToPath{ { Key: "foo.txt", - Mode: ptrToInt32(0600), + Mode: ptrToInt32(int32(0600)), Path: "etc/foo.txt", }, }, @@ -273,10 +273,10 @@ func TestExpandSecretVolumeSource(t *testing.T) { }, }, }, - &v1.SecretVolumeSource{ - DefaultMode: ptrToInt32(0755), + &corev1.SecretVolumeSource{ + DefaultMode: ptrToInt32(int32(0755)), SecretName: "secret2", - Items: []v1.KeyToPath{ + Items: []corev1.KeyToPath{ { Key: "bar.txt", Path: "etc/bar.txt", @@ -286,7 +286,7 @@ func TestExpandSecretVolumeSource(t *testing.T) { }, { []interface{}{}, - &v1.SecretVolumeSource{}, + &corev1.SecretVolumeSource{}, }, } @@ -306,12 +306,12 @@ func TestFlattenEmptyDirVolumeSource(t *testing.T) { size, _ := resource.ParseQuantity("64Mi") cases := []struct { - Input *v1.EmptyDirVolumeSource + Input *corev1.EmptyDirVolumeSource ExpectedOutput []interface{} }{ { - &v1.EmptyDirVolumeSource{ - Medium: v1.StorageMediumMemory, + &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, }, []interface{}{ map[string]interface{}{ @@ -320,8 +320,8 @@ func TestFlattenEmptyDirVolumeSource(t *testing.T) { }, }, { - &v1.EmptyDirVolumeSource{ - Medium: v1.StorageMediumMemory, + &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, SizeLimit: &size, }, []interface{}{ @@ -332,7 +332,7 @@ func TestFlattenEmptyDirVolumeSource(t *testing.T) { }, }, { - &v1.EmptyDirVolumeSource{}, + &corev1.EmptyDirVolumeSource{}, []interface{}{ map[string]interface{}{ 
"medium": "", @@ -352,20 +352,20 @@ func TestFlattenEmptyDirVolumeSource(t *testing.T) { func TestFlattenConfigMapVolumeSource(t *testing.T) { cases := []struct { - Input *v1.ConfigMapVolumeSource + Input *corev1.ConfigMapVolumeSource ExpectedOutput []interface{} }{ { - &v1.ConfigMapVolumeSource{ - LocalObjectReference: v1.LocalObjectReference{ + &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ Name: "configmap1", }, - DefaultMode: ptrToInt32(0644), + DefaultMode: ptrToInt32(int32(0644)), Optional: ptrToBool(true), - Items: []v1.KeyToPath{ + Items: []corev1.KeyToPath{ { Key: "foo.txt", - Mode: ptrToInt32(0600), + Mode: ptrToInt32(int32(0600)), Path: "etc/foo.txt", }, }, @@ -386,12 +386,12 @@ func TestFlattenConfigMapVolumeSource(t *testing.T) { }, }, { - &v1.ConfigMapVolumeSource{ - LocalObjectReference: v1.LocalObjectReference{ + &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ Name: "configmap2", }, - DefaultMode: ptrToInt32(0755), - Items: []v1.KeyToPath{ + DefaultMode: ptrToInt32(int32(0755)), + Items: []corev1.KeyToPath{ { Key: "bar.txt", Path: "etc/bar.txt", @@ -412,7 +412,7 @@ func TestFlattenConfigMapVolumeSource(t *testing.T) { }, }, { - &v1.ConfigMapVolumeSource{}, + &corev1.ConfigMapVolumeSource{}, []interface{}{map[string]interface{}{"name": ""}}, }, } @@ -429,7 +429,7 @@ func TestFlattenConfigMapVolumeSource(t *testing.T) { func TestExpandConfigMapVolumeSource(t *testing.T) { cases := []struct { Input []interface{} - ExpectedOutput *v1.ConfigMapVolumeSource + ExpectedOutput *corev1.ConfigMapVolumeSource }{ { []interface{}{ @@ -446,16 +446,16 @@ func TestExpandConfigMapVolumeSource(t *testing.T) { }, }, }, - &v1.ConfigMapVolumeSource{ - LocalObjectReference: v1.LocalObjectReference{ + &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ Name: "configmap1", }, - DefaultMode: ptrToInt32(0644), + DefaultMode: ptrToInt32(int32(0644)), Optional: ptrToBool(true), - Items: []v1.KeyToPath{ + Items: []corev1.KeyToPath{ { Key: "foo.txt", - Mode: ptrToInt32(0600), + Mode: ptrToInt32(int32(0600)), Path: "etc/foo.txt", }, }, @@ -474,12 +474,12 @@ func TestExpandConfigMapVolumeSource(t *testing.T) { }, }, }, - &v1.ConfigMapVolumeSource{ - LocalObjectReference: v1.LocalObjectReference{ + &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ Name: "configmap2", }, - DefaultMode: ptrToInt32(0755), - Items: []v1.KeyToPath{ + DefaultMode: ptrToInt32(int32(0755)), + Items: []corev1.KeyToPath{ { Key: "bar.txt", Path: "etc/bar.txt", @@ -489,7 +489,7 @@ func TestExpandConfigMapVolumeSource(t *testing.T) { }, { []interface{}{}, - &v1.ConfigMapVolumeSource{}, + &corev1.ConfigMapVolumeSource{}, }, } @@ -507,35 +507,35 @@ func TestExpandConfigMapVolumeSource(t *testing.T) { func TestExpandThenFlatten_projected_volume(t *testing.T) { cases := []struct { - Input *v1.ProjectedVolumeSource + Input *corev1.ProjectedVolumeSource }{ { - Input: &v1.ProjectedVolumeSource{ - Sources: []v1.VolumeProjection{ + Input: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{ { - Secret: &v1.SecretProjection{ - LocalObjectReference: v1.LocalObjectReference{Name: "secret-1"}, + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: "secret-1"}, }, }, { - ConfigMap: &v1.ConfigMapProjection{ - LocalObjectReference: v1.LocalObjectReference{Name: "config-1"}, + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: 
corev1.LocalObjectReference{Name: "config-1"}, }, }, { - ConfigMap: &v1.ConfigMapProjection{ - LocalObjectReference: v1.LocalObjectReference{Name: "config-2"}, + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: "config-2"}, }, }, { - DownwardAPI: &v1.DownwardAPIProjection{ - Items: []v1.DownwardAPIVolumeFile{ + DownwardAPI: &corev1.DownwardAPIProjection{ + Items: []corev1.DownwardAPIVolumeFile{ {Path: "path-1"}, }, }, }, { - ServiceAccountToken: &v1.ServiceAccountTokenProjection{ + ServiceAccountToken: &corev1.ServiceAccountTokenProjection{ Audience: "audience-1", }, }, @@ -565,7 +565,7 @@ func TestExpandThenFlatten_projected_volume(t *testing.T) { func TestExpandCSIVolumeSource(t *testing.T) { cases := []struct { Input []interface{} - ExpectedOutput *v1.CSIVolumeSource + ExpectedOutput *corev1.CSIVolumeSource }{ { Input: []interface{}{ @@ -583,14 +583,14 @@ func TestExpandCSIVolumeSource(t *testing.T) { }, }, }, - ExpectedOutput: &v1.CSIVolumeSource{ + ExpectedOutput: &corev1.CSIVolumeSource{ Driver: "secrets-store.csi.k8s.io", ReadOnly: ptrToBool(true), FSType: ptrToString("nfs"), VolumeAttributes: map[string]string{ "secretProviderClass": "azure-keyvault", }, - NodePublishSecretRef: &v1.LocalObjectReference{ + NodePublishSecretRef: &corev1.LocalObjectReference{ Name: "secrets-store", }, }, @@ -607,7 +607,7 @@ func TestExpandCSIVolumeSource(t *testing.T) { }, }, }, - ExpectedOutput: &v1.CSIVolumeSource{ + ExpectedOutput: &corev1.CSIVolumeSource{ Driver: "other-csi-driver.k8s.io", ReadOnly: nil, FSType: nil, @@ -632,18 +632,18 @@ func TestExpandCSIVolumeSource(t *testing.T) { func TestFlattenCSIVolumeSource(t *testing.T) { cases := []struct { - Input *v1.CSIVolumeSource + Input *corev1.CSIVolumeSource ExpectedOutput []interface{} }{ { - Input: &v1.CSIVolumeSource{ + Input: &corev1.CSIVolumeSource{ Driver: "secrets-store.csi.k8s.io", ReadOnly: ptrToBool(true), FSType: ptrToString("nfs"), VolumeAttributes: map[string]string{ "secretProviderClass": "azure-keyvault", }, - NodePublishSecretRef: &v1.LocalObjectReference{ + NodePublishSecretRef: &corev1.LocalObjectReference{ Name: "secrets-store", }, }, @@ -664,7 +664,7 @@ func TestFlattenCSIVolumeSource(t *testing.T) { }, }, { - Input: &v1.CSIVolumeSource{ + Input: &corev1.CSIVolumeSource{ Driver: "other-csi-driver.k8s.io", ReadOnly: nil, FSType: nil, diff --git a/kubernetes/structures_stateful_set.go b/kubernetes/structures_stateful_set.go index 7461e47840..af72e7dafc 100644 --- a/kubernetes/structures_stateful_set.go +++ b/kubernetes/structures_stateful_set.go @@ -129,7 +129,7 @@ func flattenStatefulSetSpec(spec v1.StatefulSetSpec, d *schema.ResourceData, met if spec.ServiceName != "" { att["service_name"] = spec.ServiceName } - template, err := flattenPodTemplateSpec(spec.Template, d, meta) + template, err := flattenPodTemplateSpec(spec.Template) if err != nil { return []interface{}{att}, err } @@ -146,14 +146,10 @@ func flattenStatefulSetSpec(spec v1.StatefulSetSpec, d *schema.ResourceData, met return []interface{}{att}, nil } -func flattenPodTemplateSpec(t corev1.PodTemplateSpec, d *schema.ResourceData, meta interface{}, prefix ...string) ([]interface{}, error) { +func flattenPodTemplateSpec(t corev1.PodTemplateSpec) ([]interface{}, error) { template := make(map[string]interface{}) - metaPrefix := "spec.0.template.0." 
- if len(prefix) > 0 { - metaPrefix = prefix[0] - } - template["metadata"] = flattenMetadata(t.ObjectMeta, d, meta, metaPrefix) + template["metadata"] = flattenMetadataFields(t.ObjectMeta) spec, err := flattenPodSpec(t.Spec) if err != nil { return []interface{}{template}, err @@ -164,13 +160,13 @@ func flattenPodTemplateSpec(t corev1.PodTemplateSpec, d *schema.ResourceData, me } func flattenPersistentVolumeClaim(in []corev1.PersistentVolumeClaim, d *schema.ResourceData, meta interface{}) []interface{} { - pvcs := make([]interface{}, 0, len(in)) + pvcs := make([]interface{}, len(in)) for i, pvc := range in { - p := make(map[string]interface{}) - p["metadata"] = flattenMetadata(pvc.ObjectMeta, d, meta, fmt.Sprintf("spec.0.volume_claim_template.%d.", i)) - p["spec"] = flattenPersistentVolumeClaimSpec(pvc.Spec) - pvcs = append(pvcs, p) + pvcs[i] = map[string]interface{}{ + "metadata": flattenMetadataFields(pvc.ObjectMeta), + "spec": flattenPersistentVolumeClaimSpec(pvc.Spec), + } } return pvcs } diff --git a/kubernetes/structures_test.go b/kubernetes/structures_test.go index 43b3b55493..7fad6563c7 100644 --- a/kubernetes/structures_test.go +++ b/kubernetes/structures_test.go @@ -5,10 +5,17 @@ package kubernetes import ( "fmt" + "reflect" "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" ) func TestIsInternalKey(t *testing.T) { + t.Parallel() + testCases := []struct { Key string Expected bool @@ -36,28 +43,251 @@ func TestIsInternalKey(t *testing.T) { } } -func TestPointerOf(t *testing.T) { - b := false - bp := pointerOf(b) - if b != *bp { - t.Error("Failed to get bool pointer") - } +func TestFlattenMetadataFields(t *testing.T) { + t.Parallel() - s := "this" - sp := pointerOf(s) - if s != *sp { - t.Error("Failed to get string pointer") + annotations := map[string]string{ + "fake.kubernetes.io": "fake", } - - i := int(1984) - ip := pointerOf(i) - if i != *ip { - t.Error("Failed to get int pointer") + labels := map[string]string{ + "foo": "bar", + } + uid := "7e9439cb-2584-4b50-81bc-441127e11b26" + cases := map[string]struct { + meta metav1.ObjectMeta + expected []interface{} + }{ + "DefaultNamespaceStaticName": { + metav1.ObjectMeta{ + Annotations: annotations, + GenerateName: "", + Generation: 1, + Labels: labels, + Name: "foo", + Namespace: "", + ResourceVersion: "1", + UID: types.UID(uid), + }, + []interface{}{map[string]interface{}{ + "annotations": annotations, + "generation": int64(1), + "labels": labels, + "name": "foo", + "resource_version": "1", + "uid": uid, + }}, + }, + "NonDefaultNamespaceStaticName": { + metav1.ObjectMeta{ + Annotations: annotations, + GenerateName: "", + Generation: 1, + Labels: labels, + Name: "foo", + Namespace: "Test", + ResourceVersion: "1", + UID: types.UID(uid), + }, + []interface{}{map[string]interface{}{ + "annotations": annotations, + "generation": int64(1), + "labels": labels, + "name": "foo", + "namespace": "Test", + "resource_version": "1", + "uid": uid, + }}, + }, + "DefaultNamespaceGeneratedName": { + metav1.ObjectMeta{ + Annotations: annotations, + GenerateName: "gen-foo", + Generation: 1, + Labels: labels, + Name: "", + Namespace: "", + ResourceVersion: "1", + UID: types.UID(uid), + }, + []interface{}{map[string]interface{}{ + "annotations": annotations, + "generate_name": "gen-foo", + "generation": int64(1), + "labels": labels, + "name": "", + "resource_version": "1", + "uid": uid, + }}, + }, + "NonDefaultNamespaceGeneratedName": { + 
metav1.ObjectMeta{ + Annotations: annotations, + GenerateName: "gen-foo", + Generation: 1, + Labels: labels, + Name: "", + Namespace: "Test", + ResourceVersion: "1", + UID: types.UID(uid), + }, + []interface{}{map[string]interface{}{ + "annotations": annotations, + "generate_name": "gen-foo", + "generation": int64(1), + "labels": labels, + "name": "", + "namespace": "Test", + "resource_version": "1", + "uid": uid, + }}, + }, + } + for n, c := range cases { + t.Run(n, func(t *testing.T) { + out := flattenMetadataFields(c.meta) + if !reflect.DeepEqual(out, c.expected) { + t.Fatalf("Error matching output and expected: %#v vs %#v", out, c.expected) + } + }) } +} - i64 := int64(1984) - i64p := pointerOf(i64) - if i64 != *i64p { - t.Error("Failed to get int64 pointer") +// TestFlattenMetadata aims to validate whether or not 'ignore_annotations' and 'ignore_labels' +// are cut out along with well-known Kubernetes annotations and labels. +func TestFlattenMetadata(t *testing.T) { + t.Parallel() + + uid := "7e9439cb-2584-4b50-81bc-441127e11b26" + cases := map[string]struct { + meta metav1.ObjectMeta + providerMeta kubeClientsets + expected []interface{} + }{ + "IgnoreAnnotations": { + metav1.ObjectMeta{ + Annotations: map[string]string{ + "fake.kubernetes.io": "fake", + "foo.example.com": "bar", + "bar.example.com": "foo", + }, + GenerateName: "", + Generation: 1, + Labels: map[string]string{ + "foo": "bar", + "bar": "foo", + }, + Name: "foo", + Namespace: "", + ResourceVersion: "1", + UID: types.UID(uid), + }, + kubeClientsets{ + IgnoreAnnotations: []string{"foo.example.com"}, + IgnoreLabels: []string{}, + }, + []interface{}{map[string]interface{}{ + "annotations": map[string]string{ + "bar.example.com": "foo", + }, + "generation": int64(1), + "labels": map[string]string{ + "foo": "bar", + "bar": "foo", + }, + "name": "foo", + "resource_version": "1", + "uid": uid, + }}, + }, + "IgnoreLabels": { + metav1.ObjectMeta{ + Annotations: map[string]string{ + "fake.kubernetes.io": "fake", + "foo.example.com": "bar", + "bar.example.com": "foo", + }, + GenerateName: "", + Generation: 1, + Labels: map[string]string{ + "foo": "bar", + "bar": "foo", + }, + Name: "foo", + Namespace: "", + ResourceVersion: "1", + UID: types.UID(uid), + }, + kubeClientsets{ + IgnoreAnnotations: []string{}, + IgnoreLabels: []string{"foo"}, + }, + []interface{}{map[string]interface{}{ + "annotations": map[string]string{ + "foo.example.com": "bar", + "bar.example.com": "foo", + }, + "generation": int64(1), + "labels": map[string]string{ + "bar": "foo", + }, + "name": "foo", + "resource_version": "1", + "uid": uid, + }}, + }, + "IgnoreAnnotationsAndLabels": { + metav1.ObjectMeta{ + Annotations: map[string]string{ + "fake.kubernetes.io": "fake", + "foo.example.com": "bar", + "bar.example.com": "foo", + }, + GenerateName: "", + Generation: 1, + Labels: map[string]string{ + "foo": "bar", + "bar": "foo", + }, + Name: "foo", + Namespace: "", + ResourceVersion: "1", + UID: types.UID(uid), + }, + kubeClientsets{ + IgnoreAnnotations: []string{"foo.example.com"}, + IgnoreLabels: []string{"foo"}, + }, + []interface{}{map[string]interface{}{ + "annotations": map[string]string{ + "bar.example.com": "foo", + }, + "generation": int64(1), + "labels": map[string]string{ + "bar": "foo", + }, + "name": "foo", + "resource_version": "1", + "uid": uid, + }}, + }, + } + rawData := map[string]interface{}{ + "metadata": []interface{}{map[string]interface{}{ + "annotations": map[string]interface{}{}, + "generation": 0, + "labels": map[string]interface{}{}, + 
"name": "", + "resource_version": "", + "uid": "", + }}, + } + d := schema.TestResourceDataRaw(t, map[string]*schema.Schema{"metadata": namespacedMetadataSchema("fake", true)}, rawData) + for n, c := range cases { + t.Run(n, func(t *testing.T) { + out := flattenMetadata(c.meta, d, c.providerMeta) + if !reflect.DeepEqual(out, c.expected) { + t.Fatalf("Error matching output and expected: %#v vs %#v", out, c.expected) + } + }) } } diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index 4b23d248f5..1080f529cb 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -136,7 +136,7 @@ For further reading, see these examples which demonstrate different approaches t ## Ignore Kubernetes annotations and labels -In certain cases, external systems can add and modify resources annotations and labels for their own purposes. However, Terraform will remove them since they are not presented in the code. It also might be hard to update code accordingly to stay tuned with the changes that come outside. In order to address this `ignore_annotations` and `ignore_labels` attributes were introduced on the provider level. They allow Terraform to ignore certain annotations and labels across all resources. +In certain cases, external systems can add and modify resources annotations and labels for their own purposes. However, Terraform will remove them since they are not presented in the code. It also might be hard to update code accordingly to stay tuned with the changes that come outside. In order to address this `ignore_annotations` and `ignore_labels` attributes were introduced on the provider level. They allow Terraform to ignore certain annotations and labels across all resources. Please bear in mind, that all data sources remain unaffected and the provider always returns all labels and annotations, in spite of the `ignore_annotations` and `ignore_labels` settings. The same is applicable for the pod and job definitions that fall under templates. Both attributes support RegExp to match metadata objects more effectively.