From 82f81c52d3a98b65ec965784f85ccda23c91bbc8 Mon Sep 17 00:00:00 2001 From: Venkata Krishna Rohit Sakala Date: Wed, 17 Jul 2024 13:34:13 -0700 Subject: [PATCH] Remove monitoring v1 from terraform Co-authored-by: Diogo --- docs/data-sources/cluster.md | 2 - docs/data-sources/project.md | 1 - docs/resources/cluster.md | 69 +------- docs/resources/cluster_sync.md | 1 - docs/resources/cluster_template.md | 1 - docs/resources/project.md | 34 +--- rancher2/config.go | 4 - rancher2/data_source_rancher2_cluster.go | 14 -- rancher2/data_source_rancher2_project.go | 5 - rancher2/resource_rancher2_cluster.go | 163 +----------------- rancher2/resource_rancher2_cluster_sync.go | 18 -- ...resource_rancher2_cluster_template_test.go | 2 - rancher2/resource_rancher2_project.go | 117 +------------ rancher2/schema_cluster.go | 40 +---- rancher2/schema_cluster_sync.go | 6 - rancher2/schema_cluster_template.go | 18 -- rancher2/schema_monitoring_input.go | 24 --- rancher2/schema_project.go | 15 -- rancher2/structure_cluster.go | 12 +- rancher2/structure_cluster_template.go | 5 - rancher2/structure_cluster_template_test.go | 2 - rancher2/structure_cluster_test.go | 56 ++---- rancher2/structure_monitoring_input.go | 53 ------ rancher2/structure_monitoring_input_test.go | 64 ------- rancher2/structure_project.go | 14 +- rancher2/structure_project_test.go | 2 - 26 files changed, 32 insertions(+), 710 deletions(-) delete mode 100644 rancher2/schema_monitoring_input.go delete mode 100644 rancher2/structure_monitoring_input.go delete mode 100644 rancher2/structure_monitoring_input_test.go diff --git a/docs/data-sources/cluster.md b/docs/data-sources/cluster.md index 1f339df0..2cd25c80 100644 --- a/docs/data-sources/cluster.md +++ b/docs/data-sources/cluster.md @@ -44,13 +44,11 @@ The following attributes are exported: * `oke_config` - (Computed) The Oracle OKE configuration for `oke` Clusters. Conflicts with `aks_config`, `aks_config_v2`, `eks_config`, `eks_config_v2`, `gke_config`, `gke_config_v2`, `k3s_config` and `rke_config` (list maxitems:1) * `description` - (Computed) The description for Cluster (string) * `cluster_auth_endpoint` - (Computed) Enabling the [local cluster authorized endpoint](https://rancher.com/docs/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#local-cluster-auth-endpoint) allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1) -* `cluster_monitoring_input` - (Computed) Cluster monitoring config (list maxitems:1) * `cluster_template_answers` - (Computed) Cluster template answers (list maxitems:1) * `cluster_template_id` - (Computed) Cluster template ID (string) * `cluster_template_questions` - (Computed) Cluster template questions (list) * `cluster_template_revision_id` - (Computed) Cluster template revision ID (string) * `default_pod_security_policy_template_id` - (Optional/Computed) [Default pod security policy template id](https://rancher.com/docs/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#pod-security-policy-support) (string) -* `enable_cluster_monitoring` - (Computed) Enable built-in cluster monitoring. Default `false` (bool) * `enable_network_policy` - (Computed) Enable project network isolation. Default `false` (bool) * `enable_cluster_istio` - (Computed) Enable built-in cluster istio. 
Default `false` (bool) * `fleet_workspace_name` - (Computed) Fleet workspace name (string) diff --git a/docs/data-sources/project.md b/docs/data-sources/project.md index 7fadab4c..85fe972d 100644 --- a/docs/data-sources/project.md +++ b/docs/data-sources/project.md @@ -36,7 +36,6 @@ resource "kubernetes_namespace" "my_namespace" { * `id` - (Computed) Cluster-wide unique ID of the Rancher 2 project (string) * `container_resource_limit` - (Computed) Default containers resource limits on project (List maxitem:1) - * `enable_project_monitoring` - (Computed) Enable built-in project monitoring. Default `false` (bool) * `pod_security_policy_template_id` - (Computed) Default Pod Security Policy ID for the project (string) * `resource_quota` - (Computed) Resource quota for project. Rancher v2.1.x or higher (list maxitems:1) * `uuid` - (Computed) UUID of the project as stored by Rancher 2 (string) diff --git a/docs/resources/cluster.md b/docs/resources/cluster.md index 406ab85c..fba08cbd 100644 --- a/docs/resources/cluster.md +++ b/docs/resources/cluster.md @@ -8,7 +8,7 @@ Provides a Rancher v2 Cluster resource. This can be used to create Clusters for ## Example Usage -**Note optional/computed arguments** If any `optional/computed` argument of this resource is defined by the user, removing it from tf file will NOT reset its value. To reset it, let its definition at tf file as empty/false object. Ex: `enable_cluster_monitoring = false`, `cloud_provider {}`, `name = ""` +**Note optional/computed arguments** If any `optional/computed` argument of this resource is defined by the user, removing it from tf file will NOT reset its value. To reset it, let its definition at tf file as empty/false object. Ex: `cloud_provider {}`, `name = ""` ### Creating Rancher v2 imported cluster @@ -60,9 +60,7 @@ resource "rancher2_cluster" "foo-custom" { } ``` -### Creating Rancher v2 RKE cluster enabling and customizing monitoring - -**Note** Cluster monitoring version `0.2.0` and above, can't be enabled until cluster is fully deployed as [`kubeVersion`](https://github.com/rancher/system-charts/blob/52be656700468904b9bf15c3f39cd7112e1f8c9b/charts/rancher-monitoring/v0.2.0/Chart.yaml#L12) requirement has been introduced to helm chart +### Creating Rancher v2 RKE cluster enabling ```hcl # Create a new rancher2 RKE Cluster @@ -74,34 +72,10 @@ resource "rancher2_cluster" "foo-custom" { plugin = "canal" } } - enable_cluster_monitoring = true - cluster_monitoring_input { - answers = { - "exporter-kubelets.https" = true - "exporter-node.enabled" = true - "exporter-node.ports.metrics.port" = 9796 - "exporter-node.resources.limits.cpu" = "200m" - "exporter-node.resources.limits.memory" = "200Mi" - "grafana.persistence.enabled" = false - "grafana.persistence.size" = "10Gi" - "grafana.persistence.storageClass" = "default" - "operator.resources.limits.memory" = "500Mi" - "prometheus.persistence.enabled" = "false" - "prometheus.persistence.size" = "50Gi" - "prometheus.persistence.storageClass" = "default" - "prometheus.persistent.useReleaseName" = "true" - "prometheus.resources.core.limits.cpu" = "1000m", - "prometheus.resources.core.limits.memory" = "1500Mi" - "prometheus.resources.core.requests.cpu" = "750m" - "prometheus.resources.core.requests.memory" = "750Mi" - "prometheus.retention" = "12h" - } - version = "0.1.0" - } } ``` -### Creating Rancher v2 RKE cluster enabling/customizing monitoring and istio +### Creating Rancher v2 RKE cluster enabling/customizing istio ```hcl # Create a new rancher2 RKE Cluster @@ -113,35 +87,10 @@ 
resource "rancher2_cluster" "foo-custom" { plugin = "canal" } } - enable_cluster_monitoring = true - cluster_monitoring_input { - answers = { - "exporter-kubelets.https" = true - "exporter-node.enabled" = true - "exporter-node.ports.metrics.port" = 9796 - "exporter-node.resources.limits.cpu" = "200m" - "exporter-node.resources.limits.memory" = "200Mi" - "grafana.persistence.enabled" = false - "grafana.persistence.size" = "10Gi" - "grafana.persistence.storageClass" = "default" - "operator.resources.limits.memory" = "500Mi" - "prometheus.persistence.enabled" = "false" - "prometheus.persistence.size" = "50Gi" - "prometheus.persistence.storageClass" = "default" - "prometheus.persistent.useReleaseName" = "true" - "prometheus.resources.core.limits.cpu" = "1000m", - "prometheus.resources.core.limits.memory" = "1500Mi" - "prometheus.resources.core.requests.cpu" = "750m" - "prometheus.resources.core.requests.memory" = "750Mi" - "prometheus.retention" = "12h" - } - version = "0.1.0" - } } # Create a new rancher2 Cluster Sync for foo-custom cluster resource "rancher2_cluster_sync" "foo-custom" { cluster_id = rancher2_cluster.foo-custom.id - wait_monitoring = rancher2_cluster.foo-custom.enable_cluster_monitoring } # Create a new rancher2 Namespace resource "rancher2_namespace" "foo-istio" { @@ -149,7 +98,7 @@ resource "rancher2_namespace" "foo-istio" { project_id = rancher2_cluster_sync.foo-custom.system_project_id description = "istio namespace" } -# Create a new rancher2 App deploying istio (should wait until monitoring is up and running) +# Create a new rancher2 App deploying istio resource "rancher2_app" "istio" { catalog_name = "system-library" name = "cluster-istio" @@ -168,7 +117,6 @@ resource "rancher2_app" "istio" { "gateways.istio-ingressgateway.resources.requests.cpu" = "100m" "gateways.istio-ingressgateway.resources.requests.memory" = "128Mi" "gateways.istio-ingressgateway.type" = "NodePort" - "global.monitoring.type" = "cluster-monitoring" "global.rancher.clusterId" = rancher2_cluster_sync.foo-custom.cluster_id "istio_cni.enabled" = "false" "istiocoredns.enabled" = "false" @@ -653,7 +601,6 @@ The following arguments are supported: * `oke_config` - (Optional) The Oracle OKE configuration for `oke` Clusters. Conflicts with `aks_config`, `aks_config_v2`, `eks_config`, `eks_config_v2`, `gke_config`, `gke_config_v2`, `k3s_config` and `rke_config` (list maxitems:1) * `description` - (Optional) The description for Cluster (string) * `cluster_auth_endpoint` - (Optional/Computed) Enabling the [local cluster authorized endpoint](https://rancher.com/docs/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#local-cluster-auth-endpoint) allows direct communication with the cluster, bypassing the Rancher API proxy. (list maxitems:1) -* `cluster_monitoring_input` - (Optional) Cluster monitoring config. Any parameter defined in [rancher-monitoring charts](https://github.com/rancher/system-charts/tree/dev/charts/rancher-monitoring) could be configured (list maxitems:1) * `cluster_template_answers` - (Optional/Computed) Cluster template answers. For Rancher v2.3.x and above (list maxitems:1) * `cluster_template_id` - (Optional) Cluster template ID. For Rancher v2.3.x and above (string) * `cluster_template_questions` - (Optional/Computed) Cluster template questions. For Rancher v2.3.x and above (list) @@ -663,7 +610,6 @@ The following arguments are supported: * `desired_agent_image` - (Optional/Computed) Desired agent image. 
For Rancher v2.3.x and above (string) * `desired_auth_image` - (Optional/Computed) Desired auth image. For Rancher v2.3.x and above (string) * `docker_root_dir` - (Optional/Computed) Desired auth image. For Rancher v2.3.x and above (string) -* `enable_cluster_monitoring` - (Optional/Computed) Enable built-in cluster monitoring (bool) * `enable_cluster_istio` - (Deprecated) Deploy istio on `system` project and `istio-system` namespace, using rancher2_app resource instead. See above example. * `enable_network_policy` - (Optional/Computed) Enable project network isolation (bool) * `fleet_workspace_name` - (Optional/Computed) Fleet workspace name (string) @@ -1944,13 +1890,6 @@ The following arguments are supported: * `enabled` - (Optional) Enable the authorized cluster endpoint. Default `true` (bool) * `fqdn` - (Optional) FQDN for the authorized cluster endpoint (string) -### `cluster_monitoring_input` - -#### Arguments - -* `answers` - (Optional/Computed) Key/value answers for monitor input (map) -* `version` - (Optional) rancher-monitoring chart version (string) - ### `cluster_template_answers` #### Arguments diff --git a/docs/resources/cluster_sync.md b/docs/resources/cluster_sync.md index 63b51c09..1c9ec678 100644 --- a/docs/resources/cluster_sync.md +++ b/docs/resources/cluster_sync.md @@ -87,7 +87,6 @@ The following arguments are supported: * `cluster_id` - (Required/ForceNew) The cluster ID that is syncing (string) * `node_pool_ids` - (Optional) The node pool IDs used by the cluster id (list) * `wait_catalogs` - (Optional) Wait until all catalogs are downloaded and active. Default: `false` (bool) -* `wait_monitoring` - (Optional) Wait until monitoring is up and running. Default: `false` (bool) * `state_confirm` - (Optional) Wait until active status is confirmed a number of times (wait interval of 5s). Default: `1` means no confirmation (int) **Note:** `state_confirm` would be useful, if you have troubles for creating/updating custom clusters that eventually are reaching `active` state before they are fully installed. For example: setting `state_confirm = 2` will assure that the cluster has been in `active` state for at least 5 seconds, `state_confirm = 3` assure at least 10 seconds, etc diff --git a/docs/resources/cluster_template.md b/docs/resources/cluster_template.md index 4b5fd29b..1233263f 100644 --- a/docs/resources/cluster_template.md +++ b/docs/resources/cluster_template.md @@ -126,7 +126,6 @@ resource "rancher2_cluster_template" "foo" { * `desired_agent_image` - (Optional/Computed) Desired agent image (string) * `desired_auth_image` - (Optional/Computed) Desired auth image (string) * `docker_root_dir` - (Optional/Computed) Desired auth image (string) -* `enable_cluster_monitoring` - (Optional) Enable built-in cluster monitoring. Default: `false` (bool) * `enable_network_policy` - (Optional) Enable project network isolation. Default: `false` (bool) * `rke_config` - (Required) Rancher Kubernetes Engine Config (list maxitems: 1) * `windows_prefered_cluster` - (Optional) Windows prefered cluster. 
Default: `false` (bool) diff --git a/docs/resources/project.md b/docs/resources/project.md index 9250b76f..70bc41a0 100644 --- a/docs/resources/project.md +++ b/docs/resources/project.md @@ -35,7 +35,7 @@ resource "rancher2_project" "foo" { ``` ```hcl -# Create a new rancher2 Project enabling and customizing monitoring +# Create a new rancher2 Project resource "rancher2_project" "foo" { name = "foo" cluster_id = "" @@ -57,29 +57,6 @@ resource "rancher2_project" "foo" { requests_cpu = "1m" requests_memory = "1Mi" } - enable_project_monitoring = true - project_monitoring_input { - answers = { - "exporter-kubelets.https" = true - "exporter-node.enabled" = true - "exporter-node.ports.metrics.port" = 9796 - "exporter-node.resources.limits.cpu" = "200m" - "exporter-node.resources.limits.memory" = "200Mi" - "grafana.persistence.enabled" = false - "grafana.persistence.size" = "10Gi" - "grafana.persistence.storageClass" = "default" - "operator.resources.limits.memory" = "500Mi" - "prometheus.persistence.enabled" = "false" - "prometheus.persistence.size" = "50Gi" - "prometheus.persistence.storageClass" = "default" - "prometheus.persistent.useReleaseName" = "true" - "prometheus.resources.core.limits.cpu" = "1000m", - "prometheus.resources.core.limits.memory" = "1500Mi" - "prometheus.resources.core.requests.cpu" = "750m" - "prometheus.resources.core.requests.memory" = "750Mi" - "prometheus.retention" = "12h" - } - } } ``` @@ -91,9 +68,7 @@ The following arguments are supported: * `cluster_id` - (Required) The cluster id where create project (string) * `container_resource_limit` - (Optional) Default containers resource limits on project (List maxitem:1) * `description` - (Optional) A project description (string) -* `enable_project_monitoring` - (Optional) Enable built-in project monitoring. Default `false` (bool) * `pod_security_policy_template_id` - (Optional) Default Pod Security Policy ID for the project (string) -* `project_monitoring_input` - (Optional) Project monitoring config. Any parameter defined in [rancher-monitoring charts](https://github.com/rancher/system-charts/tree/dev/charts/rancher-monitoring) could be configured (list maxitems:1) * `resource_quota` - (Optional) Resource quota for project. Rancher v2.1.x or higher (list maxitems:1) * `wait_for_cluster` - (Optional) Wait for cluster becomes active. 
Default `false` (bool) * `annotations` - (Optional/Computed) Annotations for Node Pool object (map) @@ -116,13 +91,6 @@ The following attributes are exported: * `requests_cpu` - (Optional) CPU reservation for containers (string) * `requests_memory` - (Optional) Memory reservation for containers (string) -### `project_monitoring_input` - -#### Arguments - -* `answers` - (Optional/Computed) Key/value answers for monitor input (map) -* `version` - (Optional) rancher-monitoring chart version (string) - ### `resource_quota` #### Arguments diff --git a/rancher2/config.go b/rancher2/config.go index 247ae8a6..cf815757 100644 --- a/rancher2/config.go +++ b/rancher2/config.go @@ -909,10 +909,6 @@ func (c *Config) isClusterConnected(id string) (bool, *managementClient.Cluster, return c.checkClusterCondition(id, clusterConnectedCondition) } -func (c *Config) isClusterMonitoringEnabledCondition(id string) (bool, *managementClient.Cluster, error) { - return c.checkClusterCondition(id, clusterMonitoringEnabledCondition) -} - func (c *Config) ClusterExist(id string) error { _, err := c.GetClusterByID(id) if err != nil { diff --git a/rancher2/data_source_rancher2_cluster.go b/rancher2/data_source_rancher2_cluster.go index 6b25e248..28f8619c 100644 --- a/rancher2/data_source_rancher2_cluster.go +++ b/rancher2/data_source_rancher2_cluster.go @@ -136,15 +136,6 @@ func dataSourceRancher2Cluster() *schema.Resource { Schema: clusterAuthEndpoint(), }, }, - "cluster_monitoring_input": { - Type: schema.TypeList, - MaxItems: 1, - Computed: true, - Description: "Cluster monitoring configuration", - Elem: &schema.Resource{ - Schema: monitoringInputFields(), - }, - }, "cluster_registration_token": { Type: schema.TypeList, MaxItems: 1, @@ -191,11 +182,6 @@ func dataSourceRancher2Cluster() *schema.Resource { Computed: true, Description: "Default pod security admission configuration template name", }, - "enable_cluster_monitoring": { - Type: schema.TypeBool, - Computed: true, - Description: "Enable built-in cluster monitoring", - }, "enable_network_policy": { Type: schema.TypeBool, Computed: true, diff --git a/rancher2/data_source_rancher2_project.go b/rancher2/data_source_rancher2_project.go index d2029b37..b8828869 100644 --- a/rancher2/data_source_rancher2_project.go +++ b/rancher2/data_source_rancher2_project.go @@ -37,11 +37,6 @@ func dataSourceRancher2Project() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "enable_project_monitoring": { - Type: schema.TypeBool, - Computed: true, - Description: "Enable built-in project monitoring", - }, "pod_security_policy_template_id": { Type: schema.TypeString, Computed: true, diff --git a/rancher2/resource_rancher2_cluster.go b/rancher2/resource_rancher2_cluster.go index 69afb742..b4fd4fc6 100644 --- a/rancher2/resource_rancher2_cluster.go +++ b/rancher2/resource_rancher2_cluster.go @@ -11,7 +11,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/helper/schema" norman "github.com/rancher/norman/types" managementClient "github.com/rancher/rancher/pkg/client/generated/management/v3" - projectClient "github.com/rancher/rancher/pkg/client/generated/project/v3" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" ) @@ -145,7 +144,6 @@ func resourceRancher2ClusterCreate(d *schema.ResourceData, meta interface{}) err } // Creating cluster with monitoring disabled - cluster.EnableClusterMonitoring = false newCluster := &Cluster{} if cluster.EKSConfig != nil && !cluster.EKSConfig.Imported { if !checkClusterEKSConfigV2NodeGroupsDesiredSize(cluster) { @@ 
-167,7 +165,6 @@ func resourceRancher2ClusterCreate(d *schema.ResourceData, meta interface{}) err return err } - newCluster.EnableClusterMonitoring = d.Get("enable_cluster_monitoring").(bool) d.SetId(newCluster.ID) stateConf := &resource.StateChangeConf{ @@ -183,39 +180,6 @@ func resourceRancher2ClusterCreate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("[ERROR] waiting for cluster (%s) to be created: %s", newCluster.ID, waitErr) } - monitoringInput := expandMonitoringInput(d.Get("cluster_monitoring_input").([]interface{})) - if newCluster.EnableClusterMonitoring { - if len(newCluster.Actions[monitoringActionEnable]) == 0 { - err = client.APIBaseClient.ByID(managementClient.ClusterType, newCluster.ID, newCluster) - if err != nil { - return err - } - } - clusterResource := &norman.Resource{ - ID: newCluster.ID, - Type: newCluster.Type, - Links: newCluster.Links, - Actions: newCluster.Actions, - } - // Retry enable monitoring until timeout if got api error 500 - ctx, cancel := context.WithTimeout(context.Background(), meta.(*Config).Timeout) - defer cancel() - for { - err = client.APIBaseClient.Action(managementClient.ClusterType, monitoringActionEnable, clusterResource, monitoringInput, nil) - if err == nil { - return resourceRancher2ClusterRead(d, meta) - } - if !IsServerError(err) { - return err - } - select { - case <-time.After(rancher2RetriesWait * time.Second): - case <-ctx.Done(): - break - } - } - } - return resourceRancher2ClusterRead(d, meta) } @@ -254,24 +218,13 @@ func resourceRancher2ClusterRead(d *schema.ResourceData, meta interface{}) error return resource.NonRetryableError(err) } - var monitoringInput *managementClient.MonitoringInput - if len(cluster.Annotations[monitoringInputAnnotation]) > 0 { - monitoringInput = &managementClient.MonitoringInput{} - err = jsonToInterface(cluster.Annotations[monitoringInputAnnotation], monitoringInput) - if err != nil { - return resource.NonRetryableError(err) - } - - } - if err = flattenCluster( d, cluster, clusterRegistrationToken, kubeConfig, defaultProjectID, - systemProjectID, - monitoringInput); err != nil { + systemProjectID); err != nil { return resource.NonRetryableError(err) } @@ -316,7 +269,6 @@ func resourceRancher2ClusterUpdate(d *schema.ResourceData, meta interface{}) err "desiredAuthImage": d.Get("desired_auth_image").(string), "dockerRootDir": d.Get("docker_root_dir").(string), "fleetWorkspaceName": d.Get("fleet_workspace_name").(string), - "enableClusterMonitoring": d.Get("enable_cluster_monitoring").(bool), "enableNetworkPolicy": &enableNetworkPolicy, "istioEnabled": d.Get("enable_cluster_istio").(bool), "localClusterAuthEndpoint": expandClusterAuthEndpoint(d.Get("cluster_auth_endpoint").([]interface{})), @@ -324,11 +276,6 @@ func resourceRancher2ClusterUpdate(d *schema.ResourceData, meta interface{}) err "labels": toMapString(d.Get("labels").(map[string]interface{})), } - // cluster_monitoring is not updated here. 
Setting old `enable_cluster_monitoring` value if it was updated - if d.HasChange("enable_cluster_monitoring") { - update["enableClusterMonitoring"] = !d.Get("enable_cluster_monitoring").(bool) - } - if clusterTemplateID, ok := d.Get("cluster_template_id").(string); ok && len(clusterTemplateID) > 0 { update["clusterTemplateId"] = clusterTemplateID if clusterTemplateRevisionID, ok := d.Get("cluster_template_revision_id").(string); ok && len(clusterTemplateRevisionID) > 0 { @@ -419,17 +366,6 @@ func resourceRancher2ClusterUpdate(d *schema.ResourceData, meta interface{}) err return resource.NonRetryableError(fmt.Errorf("[ERROR] waiting for cluster (%s) to be updated: %s", newCluster.ID, waitErr)) } - // update cluster monitoring if it has changed - if d.HasChange("enable_cluster_monitoring") || d.HasChange("cluster_monitoring_input") { - err = updateClusterMonitoring(client, d, meta, *newCluster) - if err != nil { - if IsServerError(err) { - return resource.RetryableError(err) - } - return resource.NonRetryableError(err) - } - } - d.SetId(newCluster.ID) // read cluster after update. If an error is returned then the read failed and is non retryable, else @@ -731,100 +667,3 @@ func getClusterKubeconfig(c *Config, id, origconfig string) (*managementClient.G } } } - -func updateClusterMonitoring(client *managementClient.Client, d *schema.ResourceData, meta interface{}, newCluster Cluster) error { - clusterResource := &norman.Resource{ - ID: newCluster.ID, - Type: newCluster.Type, - Links: newCluster.Links, - Actions: newCluster.Actions, - } - enableMonitoring := d.Get("enable_cluster_monitoring").(bool) - - if enableMonitoring { - monitoringInput := expandMonitoringInput(d.Get("cluster_monitoring_input").([]interface{})) - if len(newCluster.Actions[monitoringActionEnable]) > 0 { - err := client.APIBaseClient.Action(managementClient.ClusterType, monitoringActionEnable, clusterResource, monitoringInput, nil) - if err != nil { - return err - } - } else { - monitorVersionChanged := false - if d.HasChange("cluster_monitoring_input") { - old, new := d.GetChange("cluster_monitoring_input") - oldInput := old.([]interface{}) - oldInputLen := len(oldInput) - oldVersion := "" - if oldInputLen > 0 { - oldRow, oldOK := oldInput[0].(map[string]interface{}) - if oldOK { - oldVersion = oldRow["version"].(string) - } - } - newInput := new.([]interface{}) - newInputLen := len(newInput) - newVersion := "" - if newInputLen > 0 { - newRow, newOK := newInput[0].(map[string]interface{}) - if newOK { - newVersion = newRow["version"].(string) - } - } - if oldVersion != newVersion { - monitorVersionChanged = true - } - } - if monitorVersionChanged && monitoringInput != nil { - err := updateClusterMonitoringApps(meta, d.Get("system_project_id").(string), monitoringInput.Version) - if err != nil { - return err - } - } - err := client.APIBaseClient.Action(managementClient.ClusterType, monitoringActionEdit, clusterResource, monitoringInput, nil) - if err != nil { - return err - } - } - } else if len(newCluster.Actions[monitoringActionDisable]) > 0 { - err := client.APIBaseClient.Action(managementClient.ClusterType, monitoringActionDisable, clusterResource, nil, nil) - if err != nil { - return err - } - } - return nil -} - -func updateClusterMonitoringApps(meta interface{}, systemProjectID, version string) error { - cliProject, err := meta.(*Config).ProjectClient(systemProjectID) - if err != nil { - return err - } - - filters := map[string]interface{}{ - "targetNamespace": clusterMonitoringV1Namespace, - } - - listOpts := 
NewListOpts(filters) - - apps, err := cliProject.App.List(listOpts) - if err != nil { - return err - } - - for _, a := range apps.Data { - if a.Name == "cluster-monitoring" || a.Name == "monitoring-operator" { - externalID := updateVersionExternalID(a.ExternalID, version) - upgrade := &projectClient.AppUpgradeConfig{ - Answers: a.Answers, - ExternalID: externalID, - ForceUpgrade: true, - } - - err = cliProject.App.ActionUpgrade(&a, upgrade) - if err != nil { - return err - } - } - } - return nil -} diff --git a/rancher2/resource_rancher2_cluster_sync.go b/rancher2/resource_rancher2_cluster_sync.go index 5f8f22d4..bc6d7d4c 100644 --- a/rancher2/resource_rancher2_cluster_sync.go +++ b/rancher2/resource_rancher2_cluster_sync.go @@ -55,13 +55,6 @@ func resourceRancher2ClusterSyncCreate(d *schema.ResourceData, meta interface{}) } } - if cluster.EnableClusterMonitoring && d.Get("wait_monitoring").(bool) { - _, err := meta.(*Config).WaitForClusterState(clusterID, clusterMonitoringEnabledCondition, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("[ERROR] waiting for cluster ID (%s) monitoring to be running: %v", clusterID, err) - } - } - if d.Get("wait_catalogs").(bool) { _, err := waitAllCatalogV2Downloaded(meta.(*Config), clusterID) if err != nil { @@ -121,17 +114,6 @@ func resourceRancher2ClusterSyncRead(d *schema.ResourceData, meta interface{}) e } d.Set("nodes", flattenClusterNodes(nodes)) - if clus.EnableClusterMonitoring && d.Get("wait_monitoring").(bool) { - monitor, _, err := meta.(*Config).isClusterMonitoringEnabledCondition(clusterID) - if err != nil { - return resource.NonRetryableError(err) - } - if !monitor { - d.Set("synced", false) - return nil - } - } - if d.Get("wait_catalogs").(bool) { _, err := waitAllCatalogV2Downloaded(meta.(*Config), clusterID) if err != nil { diff --git a/rancher2/resource_rancher2_cluster_template_test.go b/rancher2/resource_rancher2_cluster_template_test.go index 471b528b..a9191b0b 100644 --- a/rancher2/resource_rancher2_cluster_template_test.go +++ b/rancher2/resource_rancher2_cluster_template_test.go @@ -26,7 +26,6 @@ resource "` + testAccRancher2ClusterTemplateType + `" "foo" { cluster_auth_endpoint { enabled = true } - enable_cluster_monitoring = true enable_network_policy = false rke_config { ignore_docker_version = true @@ -103,7 +102,6 @@ resource "` + testAccRancher2ClusterTemplateType + `" "foo" { cluster_auth_endpoint { enabled = true } - enable_cluster_monitoring = true enable_network_policy = false rke_config { ignore_docker_version = true diff --git a/rancher2/resource_rancher2_project.go b/rancher2/resource_rancher2_project.go index 326ae4bb..536f5d79 100644 --- a/rancher2/resource_rancher2_project.go +++ b/rancher2/resource_rancher2_project.go @@ -8,7 +8,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" managementClient "github.com/rancher/rancher/pkg/client/generated/management/v3" - projectClient "github.com/rancher/rancher/pkg/client/generated/project/v3" ) func resourceRancher2Project() *schema.Resource { @@ -58,12 +57,10 @@ func resourceRancher2ProjectCreate(d *schema.ResourceData, meta interface{}) err log.Printf("[INFO] Creating Project %s on Cluster ID %s", project.Name, project.ClusterID) // Creating cluster with monitoring disabled - project.EnableProjectMonitoring = false newProject, err := client.Project.Create(project) if err != nil { return err } - newProject.EnableProjectMonitoring = 
d.Get("enable_project_monitoring").(bool) d.SetId(newProject.ID) stateConf := &resource.StateChangeConf{ @@ -80,20 +77,6 @@ func resourceRancher2ProjectCreate(d *schema.ResourceData, meta interface{}) err "[ERROR] waiting for project (%s) to be created: %s", newProject.ID, waitErr) } - monitoringInput := expandMonitoringInput(d.Get("project_monitoring_input").([]interface{})) - if newProject.EnableProjectMonitoring { - if len(newProject.Actions[monitoringActionEnable]) == 0 { - newProject, err = client.Project.ByID(newProject.ID) - if err != nil { - return err - } - } - err = client.Project.ActionEnableMonitoring(newProject, monitoringInput) - if err != nil { - return err - } - } - if pspID, ok := d.Get("pod_security_policy_template_id").(string); ok && len(pspID) > 0 { pspInput := &managementClient.SetPodSecurityPolicyTemplateInput{ PodSecurityPolicyTemplateName: pspID, @@ -141,17 +124,7 @@ func resourceRancher2ProjectRead(d *schema.ResourceData, meta interface{}) error return resource.NonRetryableError(err) } - var monitoringInput *managementClient.MonitoringInput - if len(project.Annotations[monitoringInputAnnotation]) > 0 { - monitoringInput = &managementClient.MonitoringInput{} - err = jsonToInterface(project.Annotations[monitoringInputAnnotation], monitoringInput) - if err != nil { - return resource.NonRetryableError(err) - } - - } - - if err = flattenProject(d, project, monitoringInput); err != nil { + if err = flattenProject(d, project); err != nil { return resource.NonRetryableError(err) } @@ -205,61 +178,6 @@ func resourceRancher2ProjectUpdate(d *schema.ResourceData, meta interface{}) err } } - if d.HasChange("enable_project_monitoring") || d.HasChange("project_monitoring_input") { - enableMonitoring := d.Get("enable_project_monitoring").(bool) - if !enableMonitoring && len(newProject.Actions[monitoringActionDisable]) > 0 { - err = client.Project.ActionDisableMonitoring(newProject) - if err != nil { - return err - } - } - if enableMonitoring { - monitoringInput := expandMonitoringInput(d.Get("project_monitoring_input").([]interface{})) - if len(newProject.Actions[monitoringActionEnable]) > 0 { - err = client.Project.ActionEnableMonitoring(newProject, monitoringInput) - if err != nil { - return err - } - } else { - monitorVersionChanged := false - if d.HasChange("project_monitoring_input") { - old, new := d.GetChange("project_monitoring_input") - oldInput := old.([]interface{}) - oldInputLen := len(oldInput) - oldVersion := "" - if oldInputLen > 0 { - oldRow, oldOK := oldInput[0].(map[string]interface{}) - if oldOK { - oldVersion = oldRow["version"].(string) - } - } - newInput := new.([]interface{}) - newInputLen := len(newInput) - newVersion := "" - if newInputLen > 0 { - newRow, newOK := newInput[0].(map[string]interface{}) - if newOK { - newVersion = newRow["version"].(string) - } - } - if oldVersion != newVersion { - monitorVersionChanged = true - } - } - if monitorVersionChanged && monitoringInput != nil { - err = updateProjectMonitoringApps(meta, newProject.ID, monitoringInput.Version) - if err != nil { - return err - } - } - err = client.Project.ActionEditMonitoring(newProject, monitoringInput) - if err != nil { - return err - } - } - } - } - return resourceRancher2ProjectRead(d, meta) } @@ -321,36 +239,3 @@ func projectStateRefreshFunc(client *managementClient.Client, projectID string) return obj, obj.State, nil } } - -func updateProjectMonitoringApps(meta interface{}, projectID, version string) error { - cliProject, err := meta.(*Config).ProjectClient(projectID) - if err 
!= nil { - return err - } - - filters := map[string]interface{}{ - "name": "project-monitoring", - } - - listOpts := NewListOpts(filters) - - apps, err := cliProject.App.List(listOpts) - if err != nil { - return err - } - - for _, a := range apps.Data { - externalID := updateVersionExternalID(a.ExternalID, version) - upgrade := &projectClient.AppUpgradeConfig{ - Answers: a.Answers, - ExternalID: externalID, - ForceUpgrade: true, - } - - err = cliProject.App.ActionUpgrade(&a, upgrade) - if err != nil { - return err - } - } - return nil -} diff --git a/rancher2/schema_cluster.go b/rancher2/schema_cluster.go index a3ccf5b1..e70f7711 100644 --- a/rancher2/schema_cluster.go +++ b/rancher2/schema_cluster.go @@ -7,12 +7,10 @@ import ( ) const ( - clusterDriverImported = "imported" - clusterRegistrationTokenName = "default-token" - clusterMonitoringV1Namespace = "cattle-prometheus" - clusterActiveCondition = "Updated" - clusterConnectedCondition = "Connected" - clusterMonitoringEnabledCondition = "MonitoringEnabled" + clusterDriverImported = "imported" + clusterRegistrationTokenName = "default-token" + clusterActiveCondition = "Updated" + clusterConnectedCondition = "Connected" ) var ( @@ -193,15 +191,6 @@ func clusterFieldsV0() map[string]*schema.Schema { Schema: clusterAuthEndpoint(), }, }, - "cluster_monitoring_input": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Description: "Cluster monitoring configuration", - Elem: &schema.Resource{ - Schema: monitoringInputFields(), - }, - }, "cluster_registration_token": { Type: schema.TypeList, MaxItems: 1, @@ -265,12 +254,6 @@ func clusterFieldsV0() map[string]*schema.Schema { Optional: true, Computed: true, }, - "enable_cluster_monitoring": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Description: "Enable built-in cluster monitoring", - }, "enable_cluster_istio": { Type: schema.TypeBool, Computed: true, @@ -465,15 +448,6 @@ func clusterFields() map[string]*schema.Schema { Schema: clusterAuthEndpoint(), }, }, - "cluster_monitoring_input": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Description: "Cluster monitoring configuration", - Elem: &schema.Resource{ - Schema: monitoringInputFields(), - }, - }, "cluster_registration_token": { Type: schema.TypeList, MaxItems: 1, @@ -538,12 +512,6 @@ func clusterFields() map[string]*schema.Schema { Optional: true, Computed: true, }, - "enable_cluster_monitoring": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Description: "Enable built-in cluster monitoring", - }, "enable_cluster_istio": { Type: schema.TypeBool, Computed: true, diff --git a/rancher2/schema_cluster_sync.go b/rancher2/schema_cluster_sync.go index 85399919..a117c1ca 100644 --- a/rancher2/schema_cluster_sync.go +++ b/rancher2/schema_cluster_sync.go @@ -28,12 +28,6 @@ func clusterSyncFields() map[string]*schema.Schema { Default: false, Description: "Wait until all catalogs are downloaded and active", }, - "wait_monitoring": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Wait until monitoring is up and running", - }, "node_pool_ids": { Type: schema.TypeList, Optional: true, diff --git a/rancher2/schema_cluster_template.go b/rancher2/schema_cluster_template.go index ac382b4b..8c7956b8 100644 --- a/rancher2/schema_cluster_template.go +++ b/rancher2/schema_cluster_template.go @@ -96,12 +96,6 @@ func clusterSpecBaseFieldsV0() map[string]*schema.Schema { Computed: true, Description: "Docker Root Dir", }, - "enable_cluster_monitoring": { - Type: 
schema.TypeBool, - Optional: true, - Default: false, - Description: "Enable built-in cluster monitoring", - }, "enable_network_policy": { Type: schema.TypeBool, Optional: true, @@ -176,12 +170,6 @@ func clusterSpecBaseFields() map[string]*schema.Schema { Computed: true, Description: "Docker Root Dir", }, - "enable_cluster_monitoring": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Enable built-in cluster monitoring", - }, "enable_network_policy": { Type: schema.TypeBool, Optional: true, @@ -257,12 +245,6 @@ func clusterSpecBaseFieldsData() map[string]*schema.Schema { Computed: true, Description: "Docker Root Dir", }, - "enable_cluster_monitoring": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Enable built-in cluster monitoring", - }, "enable_network_policy": { Type: schema.TypeBool, Optional: true, diff --git a/rancher2/schema_monitoring_input.go b/rancher2/schema_monitoring_input.go deleted file mode 100644 index 19f2628d..00000000 --- a/rancher2/schema_monitoring_input.go +++ /dev/null @@ -1,24 +0,0 @@ -package rancher2 - -import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" -) - -//Schemas - -func monitoringInputFields() map[string]*schema.Schema { - s := map[string]*schema.Schema{ - "answers": { - Type: schema.TypeMap, - Optional: true, - Description: "Answers for monitor input", - }, - "version": { - Type: schema.TypeString, - Optional: true, - Description: "Monitoring version", - }, - } - - return s -} diff --git a/rancher2/schema_project.go b/rancher2/schema_project.go index 85f81f2d..5c0de29d 100644 --- a/rancher2/schema_project.go +++ b/rancher2/schema_project.go @@ -116,25 +116,10 @@ func projectFields() map[string]*schema.Schema { Type: schema.TypeString, Optional: true, }, - "enable_project_monitoring": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Enable built-in project monitoring", - }, "pod_security_policy_template_id": { Type: schema.TypeString, Optional: true, }, - "project_monitoring_input": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Description: "Cluster monitoring configuration", - Elem: &schema.Resource{ - Schema: monitoringInputFields(), - }, - }, "resource_quota": { Type: schema.TypeList, MaxItems: 1, diff --git a/rancher2/structure_cluster.go b/rancher2/structure_cluster.go index fd0ce248..9bb53fb7 100644 --- a/rancher2/structure_cluster.go +++ b/rancher2/structure_cluster.go @@ -45,7 +45,7 @@ func flattenClusterAuthEndpoint(in *managementClient.LocalClusterAuthEndpoint) [ return []interface{}{obj} } -func flattenCluster(d *schema.ResourceData, in *Cluster, clusterRegToken *managementClient.ClusterRegistrationToken, kubeConfig *managementClient.GenerateKubeConfigOutput, defaultProjectID, systemProjectID string, monitoringInput *managementClient.MonitoringInput) error { +func flattenCluster(d *schema.ResourceData, in *Cluster, clusterRegToken *managementClient.ClusterRegistrationToken, kubeConfig *managementClient.GenerateKubeConfigOutput, defaultProjectID, systemProjectID string) error { if in == nil { return fmt.Errorf("[ERROR] flattening cluster: Input cluster is nil") } @@ -124,7 +124,6 @@ func flattenCluster(d *schema.ResourceData, in *Cluster, clusterRegToken *manage d.Set("fleet_workspace_name", in.FleetWorkspaceName) } - d.Set("enable_cluster_monitoring", in.EnableClusterMonitoring) d.Set("istio_enabled", in.IstioEnabled) if in.EnableNetworkPolicy != nil { @@ -148,11 +147,6 @@ func flattenCluster(d *schema.ResourceData, in 
*Cluster, clusterRegToken *manage return err } - err = d.Set("cluster_monitoring_input", flattenMonitoringInput(monitoringInput)) - if err != nil { - return err - } - if len(in.CACert) > 0 { d.Set("ca_cert", in.CACert) } @@ -481,10 +475,6 @@ func expandCluster(in *schema.ResourceData) (*Cluster, error) { obj.DockerRootDir = v } - if v, ok := in.Get("enable_cluster_monitoring").(bool); ok { - obj.EnableClusterMonitoring = v - } - if v, ok := in.Get("enable_network_policy").(bool); ok { obj.EnableNetworkPolicy = &v } diff --git a/rancher2/structure_cluster_template.go b/rancher2/structure_cluster_template.go index 2d090f4b..0762ec94 100644 --- a/rancher2/structure_cluster_template.go +++ b/rancher2/structure_cluster_template.go @@ -80,7 +80,6 @@ func flattenClusterSpecBase(in *managementClient.ClusterSpecBase, p []interface{ obj["docker_root_dir"] = in.DockerRootDir } - obj["enable_cluster_monitoring"] = in.EnableClusterMonitoring obj["enable_network_policy"] = *in.EnableNetworkPolicy if in.RancherKubernetesEngineConfig != nil { @@ -315,10 +314,6 @@ func expandClusterSpecBase(p []interface{}) (*managementClient.ClusterSpecBase, obj.DockerRootDir = v } - if v, ok := in["enable_cluster_monitoring"].(bool); ok { - obj.EnableClusterMonitoring = v - } - if v, ok := in["enable_network_policy"].(bool); ok { obj.EnableNetworkPolicy = &v } diff --git a/rancher2/structure_cluster_template_test.go b/rancher2/structure_cluster_template_test.go index 880de987..88b1ab36 100644 --- a/rancher2/structure_cluster_template_test.go +++ b/rancher2/structure_cluster_template_test.go @@ -108,7 +108,6 @@ func testClusterTemplate() { DesiredAgentImage: "desired_agent_image", DesiredAuthImage: "desired_auth_image", DockerRootDir: "docker_root_dir", - EnableClusterMonitoring: true, EnableNetworkPolicy: newTrue(), LocalClusterAuthEndpoint: testClusterTemplateRevisionsConfigAuthEndpointConf, RancherKubernetesEngineConfig: testClusterTemplateRevisionsConfigRKEConf, @@ -122,7 +121,6 @@ func testClusterTemplate() { "desired_agent_image": "desired_agent_image", "desired_auth_image": "desired_auth_image", "docker_root_dir": "docker_root_dir", - "enable_cluster_monitoring": true, "enable_network_policy": true, "rke_config": testClusterTemplateRevisionsConfigRKEInterface, "windows_prefered_cluster": true, diff --git a/rancher2/structure_cluster_test.go b/rancher2/structure_cluster_test.go index 9965887e..b6d3dd08 100644 --- a/rancher2/structure_cluster_test.go +++ b/rancher2/structure_cluster_test.go @@ -267,7 +267,6 @@ func testCluster() { testClusterConfAKS.AgentEnvVars = testClusterEnvVarsConf testClusterConfAKS.DefaultPodSecurityPolicyTemplateID = "default_pod_security_policy_template_id" testClusterConfAKS.DefaultPodSecurityAdmissionConfigurationTemplateName = "default_pod_security_admission_configuration_template_name" - testClusterConfAKS.EnableClusterMonitoring = true testClusterConfAKS.EnableNetworkPolicy = newTrue() testClusterConfAKS.LocalClusterAuthEndpoint = testLocalClusterAuthEndpointConf testClusterInterfaceAKS = map[string]interface{}{ @@ -280,7 +279,6 @@ func testCluster() { "cluster_registration_token": testClusterRegistrationTokenInterface, "default_pod_security_policy_template_id": "default_pod_security_policy_template_id", "default_pod_security_admission_configuration_template_name": "default_pod_security_admission_configuration_template_name", - "enable_cluster_monitoring": true, "enable_network_policy": true, "kube_config": "kube_config", "driver": clusterDriverAKS, @@ -296,7 +294,6 @@ func 
testCluster() { testClusterConfEKS.AgentEnvVars = testClusterEnvVarsConf testClusterConfEKS.DefaultPodSecurityPolicyTemplateID = "default_pod_security_policy_template_id" testClusterConfEKS.DefaultPodSecurityAdmissionConfigurationTemplateName = "default_pod_security_admission_configuration_template_name" - testClusterConfEKS.EnableClusterMonitoring = true testClusterConfEKS.EnableNetworkPolicy = newTrue() testClusterConfEKS.LocalClusterAuthEndpoint = testLocalClusterAuthEndpointConf testClusterInterfaceEKS = map[string]interface{}{ @@ -309,7 +306,6 @@ func testCluster() { "cluster_registration_token": testClusterRegistrationTokenInterface, "default_pod_security_policy_template_id": "default_pod_security_policy_template_id", "default_pod_security_admission_configuration_template_name": "default_pod_security_admission_configuration_template_name", - "enable_cluster_monitoring": true, "enable_network_policy": true, "kube_config": "kube_config", "driver": clusterDriverEKS, @@ -326,7 +322,6 @@ func testCluster() { testClusterConfEKSV2.FleetAgentDeploymentCustomization = testClusterAgentDeploymentCustomizationConf testClusterConfEKSV2.DefaultPodSecurityPolicyTemplateID = "default_pod_security_policy_template_id" testClusterConfEKSV2.DefaultPodSecurityAdmissionConfigurationTemplateName = "default_pod_security_admission_configuration_template_name" - testClusterConfEKSV2.EnableClusterMonitoring = true testClusterConfEKSV2.EnableNetworkPolicy = newTrue() testClusterConfEKSV2.LocalClusterAuthEndpoint = testLocalClusterAuthEndpointConf testClusterInterfaceEKSV2 = map[string]interface{}{ @@ -341,12 +336,11 @@ func testCluster() { "cluster_registration_token": testClusterRegistrationTokenInterface, "default_pod_security_policy_template_id": "default_pod_security_policy_template_id", "default_pod_security_admission_configuration_template_name": "default_pod_security_admission_configuration_template_name", - "enable_cluster_monitoring": true, - "enable_network_policy": true, - "kube_config": "kube_config", - "driver": clusterDriverEKSV2, - "eks_config_v2": testClusterEKSConfigV2Interface, - "system_project_id": "system_project_id", + "enable_network_policy": true, + "kube_config": "kube_config", + "driver": clusterDriverEKSV2, + "eks_config_v2": testClusterEKSConfigV2Interface, + "system_project_id": "system_project_id", } testClusterConfGKE = &Cluster{ GoogleKubernetesEngineConfig: testClusterGKEConfigConf, @@ -357,7 +351,6 @@ func testCluster() { testClusterConfGKE.AgentEnvVars = testClusterEnvVarsConf testClusterConfGKE.DefaultPodSecurityPolicyTemplateID = "default_pod_security_policy_template_id" testClusterConfGKE.DefaultPodSecurityAdmissionConfigurationTemplateName = "default_pod_security_admission_configuration_template_name" - testClusterConfGKE.EnableClusterMonitoring = true testClusterConfGKE.EnableNetworkPolicy = newTrue() testClusterConfGKE.LocalClusterAuthEndpoint = testLocalClusterAuthEndpointConf testClusterInterfaceGKE = map[string]interface{}{ @@ -370,7 +363,6 @@ func testCluster() { "cluster_registration_token": testClusterRegistrationTokenInterface, "default_pod_security_policy_template_id": "default_pod_security_policy_template_id", "default_pod_security_admission_configuration_template_name": "default_pod_security_admission_configuration_template_name", - "enable_cluster_monitoring": true, "enable_network_policy": true, "kube_config": "kube_config", "driver": clusterDriverGKE, @@ -385,7 +377,6 @@ func testCluster() { testClusterConfK3S.AgentEnvVars = testClusterEnvVarsConf 
testClusterConfK3S.DefaultPodSecurityPolicyTemplateID = "default_pod_security_policy_template_id" testClusterConfK3S.DefaultPodSecurityAdmissionConfigurationTemplateName = "default_pod_security_admission_configuration_template_name" - testClusterConfK3S.EnableClusterMonitoring = true testClusterConfK3S.EnableNetworkPolicy = newTrue() testClusterConfK3S.LocalClusterAuthEndpoint = testLocalClusterAuthEndpointConf testClusterInterfaceK3S = map[string]interface{}{ @@ -398,7 +389,6 @@ func testCluster() { "cluster_registration_token": testClusterRegistrationTokenInterface, "default_pod_security_policy_template_id": "default_pod_security_policy_template_id", "default_pod_security_admission_configuration_template_name": "default_pod_security_admission_configuration_template_name", - "enable_cluster_monitoring": true, "enable_network_policy": true, "kube_config": "kube_config", "driver": clusterDriverK3S, @@ -414,7 +404,6 @@ func testCluster() { testClusterConfGKEV2.AgentEnvVars = testClusterEnvVarsConf testClusterConfGKEV2.DefaultPodSecurityPolicyTemplateID = "default_pod_security_policy_template_id" testClusterConfGKEV2.DefaultPodSecurityAdmissionConfigurationTemplateName = "default_pod_security_admission_configuration_template_name" - testClusterConfGKEV2.EnableClusterMonitoring = true testClusterConfGKEV2.EnableNetworkPolicy = newTrue() testClusterConfGKEV2.LocalClusterAuthEndpoint = testLocalClusterAuthEndpointConf testClusterInterfaceGKEV2 = map[string]interface{}{ @@ -427,7 +416,6 @@ func testCluster() { "cluster_registration_token": testClusterRegistrationTokenInterface, "default_pod_security_policy_template_id": "default_pod_security_policy_template_id", "default_pod_security_admission_configuration_template_name": "default_pod_security_admission_configuration_template_name", - "enable_cluster_monitoring": true, "enable_network_policy": true, "kube_config": "kube_config", "driver": clusterDriverGKEV2, @@ -443,7 +431,6 @@ func testCluster() { testClusterConfOKE.AgentEnvVars = testClusterEnvVarsConf testClusterConfOKE.DefaultPodSecurityPolicyTemplateID = "default_pod_security_policy_template_id" testClusterConfOKE.DefaultPodSecurityAdmissionConfigurationTemplateName = "default_pod_security_admission_configuration_template_name" - testClusterConfOKE.EnableClusterMonitoring = true testClusterConfOKE.EnableNetworkPolicy = newTrue() testClusterConfOKE.LocalClusterAuthEndpoint = testLocalClusterAuthEndpointConf testClusterInterfaceOKE = map[string]interface{}{ @@ -456,7 +443,6 @@ func testCluster() { "cluster_registration_token": testClusterRegistrationTokenInterface, "default_pod_security_policy_template_id": "default_pod_security_policy_template_id", "default_pod_security_admission_configuration_template_name": "default_pod_security_admission_configuration_template_name", - "enable_cluster_monitoring": true, "enable_network_policy": true, "kube_config": "kube_config", "driver": clusterOKEKind, @@ -474,7 +460,6 @@ func testCluster() { testClusterConfRKE.DefaultPodSecurityPolicyTemplateID = "default_pod_security_policy_template_id" testClusterConfRKE.DefaultPodSecurityAdmissionConfigurationTemplateName = "default_pod_security_admission_configuration_template_name" testClusterConfRKE.FleetWorkspaceName = "fleet-test" - testClusterConfRKE.EnableClusterMonitoring = true testClusterConfRKE.EnableNetworkPolicy = newTrue() testClusterConfRKE.LocalClusterAuthEndpoint = testLocalClusterAuthEndpointConf testClusterInterfaceRKE = map[string]interface{}{ @@ -489,14 +474,13 @@ func testCluster() { 
"cluster_registration_token": testClusterRegistrationTokenInterface, "default_pod_security_policy_template_id": "default_pod_security_policy_template_id", "default_pod_security_admission_configuration_template_name": "default_pod_security_admission_configuration_template_name", - "enable_cluster_monitoring": true, - "enable_network_policy": true, - "fleet_workspace_name": "fleet-test", - "kube_config": "kube_config", - "driver": clusterDriverRKE, - "rke_config": testClusterRKEConfigInterface, - "system_project_id": "system_project_id", - "windows_prefered_cluster": false, + "enable_network_policy": true, + "fleet_workspace_name": "fleet-test", + "kube_config": "kube_config", + "driver": clusterDriverRKE, + "rke_config": testClusterRKEConfigInterface, + "system_project_id": "system_project_id", + "windows_prefered_cluster": false, } testClusterConfRKE2 = &Cluster{} testClusterConfRKE2.Name = "test" @@ -508,7 +492,6 @@ func testCluster() { testClusterConfRKE2.FleetAgentDeploymentCustomization = testClusterAgentDeploymentCustomizationConf testClusterConfRKE2.DefaultPodSecurityPolicyTemplateID = "default_pod_security_policy_template_id" testClusterConfRKE2.DefaultPodSecurityAdmissionConfigurationTemplateName = "default_pod_security_admission_configuration_template_name" - testClusterConfRKE2.EnableClusterMonitoring = true testClusterConfRKE2.EnableNetworkPolicy = newTrue() testClusterConfRKE2.LocalClusterAuthEndpoint = testLocalClusterAuthEndpointConf testClusterInterfaceRKE2 = map[string]interface{}{ @@ -523,13 +506,12 @@ func testCluster() { "cluster_registration_token": testClusterRegistrationTokenInterface, "default_pod_security_policy_template_id": "default_pod_security_policy_template_id", "default_pod_security_admission_configuration_template_name": "default_pod_security_admission_configuration_template_name", - "enable_cluster_monitoring": true, - "enable_network_policy": true, - "kube_config": "kube_config", - "driver": clusterDriverRKE2, - "rke2_config": testClusterRKE2ConfigInterface, - "system_project_id": "system_project_id", - "windows_prefered_cluster": false, + "enable_network_policy": true, + "kube_config": "kube_config", + "driver": clusterDriverRKE2, + "rke2_config": testClusterRKE2ConfigInterface, + "system_project_id": "system_project_id", + "windows_prefered_cluster": false, } testClusterConfTemplate = &Cluster{} testClusterConfTemplate.Name = "test" @@ -542,7 +524,6 @@ func testCluster() { testClusterConfTemplate.AgentEnvVars = testClusterEnvVarsConf testClusterConfTemplate.DefaultPodSecurityPolicyTemplateID = "default_pod_security_policy_template_id" testClusterConfTemplate.DefaultPodSecurityAdmissionConfigurationTemplateName = "default_pod_security_admission_configuration_template_name" - testClusterConfTemplate.EnableClusterMonitoring = true testClusterConfTemplate.EnableNetworkPolicy = newTrue() testClusterConfTemplate.LocalClusterAuthEndpoint = testLocalClusterAuthEndpointConf testClusterInterfaceTemplate = map[string]interface{}{ @@ -555,7 +536,6 @@ func testCluster() { "cluster_registration_token": testClusterRegistrationTokenInterface, "default_pod_security_policy_template_id": "default_pod_security_policy_template_id", "default_pod_security_admission_configuration_template_name": "default_pod_security_admission_configuration_template_name", - "enable_cluster_monitoring": true, "enable_network_policy": true, "kube_config": "kube_config", "driver": clusterDriverRKE, diff --git a/rancher2/structure_monitoring_input.go b/rancher2/structure_monitoring_input.go 
deleted file mode 100644 index 04866d66..00000000 --- a/rancher2/structure_monitoring_input.go +++ /dev/null @@ -1,53 +0,0 @@ -package rancher2 - -import ( - "reflect" - - managementClient "github.com/rancher/rancher/pkg/client/generated/management/v3" -) - -const ( - monitoringInputAnnotation = "field.cattle.io/overwriteAppAnswers" - monitoringActionDisable = "disableMonitoring" - monitoringActionEdit = "editMonitoring" - monitoringActionEnable = "enableMonitoring" -) - -// Flatteners - -func flattenMonitoringInput(in *managementClient.MonitoringInput) []interface{} { - if in == nil || reflect.DeepEqual(in, &managementClient.MonitoringInput{}) { - return []interface{}{} - } - obj := map[string]interface{}{} - - if len(in.Answers) > 0 { - obj["answers"] = toMapInterface(in.Answers) - } - - if len(in.Version) > 0 { - obj["version"] = in.Version - } - - return []interface{}{obj} -} - -// Expanders - -func expandMonitoringInput(p []interface{}) *managementClient.MonitoringInput { - if len(p) == 0 || p[0] == nil { - return nil - } - obj := &managementClient.MonitoringInput{} - in := p[0].(map[string]interface{}) - - if v, ok := in["answers"].(map[string]interface{}); ok && len(v) > 0 { - obj.Answers = toMapString(v) - } - - if v, ok := in["version"].(string); ok && len(v) > 0 { - obj.Version = v - } - - return obj -} diff --git a/rancher2/structure_monitoring_input_test.go b/rancher2/structure_monitoring_input_test.go deleted file mode 100644 index 8f118a14..00000000 --- a/rancher2/structure_monitoring_input_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package rancher2 - -import ( - "testing" - - managementClient "github.com/rancher/rancher/pkg/client/generated/management/v3" - "github.com/stretchr/testify/assert" -) - -var ( - testMonitoringInputConf *managementClient.MonitoringInput - testMonitoringInputInterface []interface{} -) - -func init() { - testMonitoringInputConf = &managementClient.MonitoringInput{ - Answers: map[string]string{ - "answer_one": "one", - "answer_two": "two", - }, - } - testMonitoringInputInterface = []interface{}{ - map[string]interface{}{ - "answers": map[string]interface{}{ - "answer_one": "one", - "answer_two": "two", - }, - }, - } -} - -func TestFlattenMonitoringInput(t *testing.T) { - - cases := []struct { - Input *managementClient.MonitoringInput - ExpectedOutput []interface{} - }{ - { - testMonitoringInputConf, - testMonitoringInputInterface, - }, - } - for _, tc := range cases { - output := flattenMonitoringInput(tc.Input) - assert.Equal(t, tc.ExpectedOutput, output, "Unexpected output from flattener.") - } -} - -func TestExpandMonitoringInput(t *testing.T) { - - cases := []struct { - Input []interface{} - ExpectedOutput *managementClient.MonitoringInput - }{ - { - testMonitoringInputInterface, - testMonitoringInputConf, - }, - } - for _, tc := range cases { - output := expandMonitoringInput(tc.Input) - assert.Equal(t, tc.ExpectedOutput, output, "Unexpected output from expander.") - } -} diff --git a/rancher2/structure_project.go b/rancher2/structure_project.go index af9683cb..9a99103e 100644 --- a/rancher2/structure_project.go +++ b/rancher2/structure_project.go @@ -109,7 +109,7 @@ func flattenProjectResourceQuota(pQuota *managementClient.ProjectResourceQuota, return []interface{}{obj} } -func flattenProject(d *schema.ResourceData, in *managementClient.Project, monitoringInput *managementClient.MonitoringInput) error { +func flattenProject(d *schema.ResourceData, in *managementClient.Project) error { if in == nil { return nil } @@ -118,7 +118,6 @@ func 
flattenProject(d *schema.ResourceData, in *managementClient.Project, monito d.Set("cluster_id", in.ClusterID) d.Set("name", in.Name) d.Set("description", in.Description) - d.Set("enable_project_monitoring", in.EnableProjectMonitoring) if in.ContainerDefaultResourceLimit != nil { containerLimit := flattenProjectContainerResourceLimit(in.ContainerDefaultResourceLimit) @@ -138,12 +137,7 @@ func flattenProject(d *schema.ResourceData, in *managementClient.Project, monito } } - err := d.Set("project_monitoring_input", flattenMonitoringInput(monitoringInput)) - if err != nil { - return err - } - - err = d.Set("annotations", toMapInterface(in.Annotations)) + err := d.Set("annotations", toMapInterface(in.Annotations)) if err != nil { return err } @@ -286,10 +280,6 @@ func expandProject(in *schema.ResourceData) *managementClient.Project { obj.ContainerDefaultResourceLimit = containerLimit } - if v, ok := in.Get("enable_project_monitoring").(bool); ok { - obj.EnableProjectMonitoring = v - } - obj.PodSecurityPolicyTemplateName = in.Get("pod_security_policy_template_id").(string) if v, ok := in.Get("resource_quota").([]interface{}); ok && len(v) > 0 { diff --git a/rancher2/structure_project_test.go b/rancher2/structure_project_test.go index 37192bc4..7cea0191 100644 --- a/rancher2/structure_project_test.go +++ b/rancher2/structure_project_test.go @@ -119,7 +119,6 @@ func init() { Name: "test", ContainerDefaultResourceLimit: testProjectContainerResourceLimitConf, Description: "description", - EnableProjectMonitoring: true, PodSecurityPolicyTemplateName: "pod_security_policy_template_id", ResourceQuota: testProjectResourceQuotaConf, NamespaceDefaultResourceQuota: testProjectNamespaceResourceQuotaConf, @@ -129,7 +128,6 @@ func init() { "name": "test", "container_resource_limit": testProjectContainerResourceLimitInterface, "description": "description", - "enable_project_monitoring": true, "pod_security_policy_template_id": "pod_security_policy_template_id", "resource_quota": testProjectResourceQuotaInterface, }
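**Note** With the monitoring v1 arguments (`enable_cluster_monitoring`, `cluster_monitoring_input`, `enable_project_monitoring`, `project_monitoring_input` and `wait_monitoring`) removed by this patch, cluster monitoring is expected to be handled by Rancher Monitoring V2, which is installed as a regular chart rather than through cluster fields. A minimal sketch of that path using the provider's `rancher2_app_v2` resource follows; the repository name, namespace, chart version and values shown are illustrative assumptions, not part of this change.

```hcl
# Sketch only: deploy Monitoring V2 with rancher2_app_v2 instead of the
# removed enable_cluster_monitoring flag. Repo name, namespace, chart
# version and values are assumed defaults; adjust to your installation.
resource "rancher2_app_v2" "rancher_monitoring" {
  cluster_id    = rancher2_cluster.foo-custom.id
  name          = "rancher-monitoring"
  namespace     = "cattle-monitoring-system"
  repo_name     = "rancher-charts"
  chart_name    = "rancher-monitoring"
  chart_version = "102.0.1+up40.1.2" # assumed; pick a version available in your Rancher
  values        = <<EOF
prometheus:
  prometheusSpec:
    retention: 12h
EOF
}
```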