diff --git a/.changelog/2697.txt b/.changelog/2697.txt new file mode 100644 index 0000000000..5b1ca0d681 --- /dev/null +++ b/.changelog/2697.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +tencentcloud_teo_realtime_log_delivery +``` diff --git a/tencentcloud/provider.go b/tencentcloud/provider.go index 21fa657c95..1d70f214c7 100644 --- a/tencentcloud/provider.go +++ b/tencentcloud/provider.go @@ -1556,6 +1556,7 @@ func Provider() *schema.Provider { "tencentcloud_teo_acceleration_domain": teo.ResourceTencentCloudTeoAccelerationDomain(), "tencentcloud_teo_application_proxy": teo.ResourceTencentCloudTeoApplicationProxy(), "tencentcloud_teo_application_proxy_rule": teo.ResourceTencentCloudTeoApplicationProxyRule(), + "tencentcloud_teo_realtime_log_delivery": teo.ResourceTencentCloudTeoRealtimeLogDelivery(), "tencentcloud_tcm_mesh": tcm.ResourceTencentCloudTcmMesh(), "tencentcloud_tcm_cluster_attachment": tcm.ResourceTencentCloudTcmClusterAttachment(), "tencentcloud_tcm_prometheus_attachment": tcm.ResourceTencentCloudTcmPrometheusAttachment(), diff --git a/tencentcloud/provider.md b/tencentcloud/provider.md index 85d7694dd6..ed5594657a 100644 --- a/tencentcloud/provider.md +++ b/tencentcloud/provider.md @@ -1407,6 +1407,7 @@ TencentCloud EdgeOne(TEO) tencentcloud_teo_certificate_config tencentcloud_teo_acceleration_domain tencentcloud_teo_l4_proxy + tencentcloud_teo_realtime_log_delivery TencentCloud ServiceMesh(TCM) Data Source diff --git a/tencentcloud/services/teo/resource_tc_teo_realtime_log_delivery.go b/tencentcloud/services/teo/resource_tc_teo_realtime_log_delivery.go new file mode 100644 index 0000000000..f8fe278976 --- /dev/null +++ b/tencentcloud/services/teo/resource_tc_teo_realtime_log_delivery.go @@ -0,0 +1,1065 @@ +// Code generated by iacg; DO NOT EDIT. 
+package teo + +import ( + "context" + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + teo "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/teo/v20220901" + + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" +) + +func ResourceTencentCloudTeoRealtimeLogDelivery() *schema.Resource { + return &schema.Resource{ + Create: resourceTencentCloudTeoRealtimeLogDeliveryCreate, + Read: resourceTencentCloudTeoRealtimeLogDeliveryRead, + Update: resourceTencentCloudTeoRealtimeLogDeliveryUpdate, + Delete: resourceTencentCloudTeoRealtimeLogDeliveryDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "zone_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "ID of the site.", + }, + + "task_name": { + Type: schema.TypeString, + Required: true, + Description: "The name of the real-time log delivery task. The format is a combination of numbers, English, -, and _. The maximum length is 200 characters.", + }, + + "task_type": { + Type: schema.TypeString, + Required: true, + Description: "The real-time log delivery task type. The possible values are: `cls`: push to Tencent Cloud CLS; `custom_endpoint`: push to a custom HTTP(S) address; `s3`: push to an AWS S3 compatible storage bucket address.", + }, + + "entity_list": { + Type: schema.TypeList, + Required: true, + Description: "List of entities (seven-layer domain names or four-layer proxy instances) corresponding to real-time log delivery tasks. Example values are as follows: Seven-layer domain name: `domain.example.com`; four-layer proxy instance: sid-2s69eb5wcms7. 
For values, refer to: `https://cloud.tencent.com/document/api/1552/80690`, `https://cloud.tencent.com/document/api/1552/86336`.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "log_type": { + Type: schema.TypeString, + Required: true, + Description: "Data delivery type, the values are: `domain`: site acceleration log; `application`: four-layer proxy log; `web-rateLiming`: rate limit and CC attack protection log; `web-attack`: managed rule log; `web-rule`: custom rule log; `web-bot`: Bot management log.", + }, + + "area": { + Type: schema.TypeString, + Required: true, + Description: "Data delivery area, possible values are: `mainland`: within mainland China; `overseas`: worldwide (excluding mainland China).", + }, + + "fields": { + Type: schema.TypeList, + Required: true, + Description: "A list of preset fields for delivery.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "custom_fields": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: "The list of custom fields delivered supports extracting specified field values from HTTP request headers, response headers, and cookies. Custom field names cannot be repeated and cannot exceed 200 fields.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Extract data from the specified location in the HTTP request and response. The values are: `ReqHeader`: extract the specified field value from the HTTP request header; `RspHeader`: extract the specified field value from the HTTP response header; `Cookie`: extract the specified field value from the Cookie.", + }, + "value": { + Type: schema.TypeString, + Required: true, + Description: "The name of the parameter whose value needs to be extracted, for example: Accept-Language.", + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to deliver this field. 
If left blank, this field will not be delivered.", + }, + }, + }, + }, + + "delivery_conditions": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: "The filter condition for log delivery. If it is not filled, all logs will be delivered.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "conditions": { + Type: schema.TypeList, + Optional: true, + Description: "Log filtering conditions, the detailed filtering conditions are as follows: - `EdgeResponseStatusCode`: filter according to the status code returned by the EdgeOne node to the client. Supported operators: `equal`, `great`, `less`, `great_equal`, `less_equal`; Value range: any integer greater than or equal to 0; - `OriginResponseStatusCode`: filter according to the origin response status code. Supported operators: `equal`, `great`, `less`, `great_equal`, `less_equal`; Value range: any integer greater than or equal to -1; - `SecurityAction`: filter according to the final disposal action after the request hits the security rule. Supported operators: `equal`; Optional options are as follows: `-`: unknown/miss; `Monitor`: observe; `JSChallenge`: JavaScript challenge; `Deny`: intercept; `Allow`: allow; `BlockIP`: IP ban; `Redirect`: redirect; `ReturnCustomPage`: return to a custom page; `ManagedChallenge`: managed challenge; `Silence`: silent; `LongDelay`: respond after a long wait; `ShortDelay`: respond after a short wait; -`SecurityModule`: filter according to the name of the security module that finally handles the request. 
Supported operators: `equal`; Optional options: `-`: unknown/missed; `CustomRule`: Web Protection - Custom Rules; `RateLimitingCustomRule`: Web Protection - Rate Limiting Rules; `ManagedRule`: Web Protection - Managed Rules; `L7DDoS`: Web Protection - CC Attack Protection; `BotManagement`: Bot Management - Bot Basic Management; `BotClientReputation`: Bot Management - Client Profile Analysis; `BotBehaviorAnalysis`: Bot Management - Bot Intelligent Analysis; `BotCustomRule`: Bot Management - Custom Bot Rules; `BotActiveDetection`: Bot Management - Active Feature Recognition.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + Description: "The key of the filter condition.", + }, + "operator": { + Type: schema.TypeString, + Required: true, + Description: "Query condition operator, operation types are: `equals`: equal; `notEquals`: not equal; `include`: include; `notInclude`: not include; `startWith`: start with value; `notStartWith`: not start with value; `endWith`: end with value; `notEndWith`: not end with value.", + }, + "value": { + Type: schema.TypeList, + Required: true, + Description: "The value of the filter condition.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + + "sample": { + Type: schema.TypeInt, + Required: true, + Description: "The sampling ratio is in thousandths, with a value range of 1-1000. For example, filling in 605 means the sampling ratio is 60.5%. Leaving it blank means the sampling ratio is 100%.", + }, + + "log_format": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: "The output format of log delivery. If it is not filled, it means the default format. 
The default format logic is as follows: when TaskType is `custom_endpoint`, the default format is an array of multiple JSON objects, each JSON object is a log; when TaskType is `s3`, the default format is JSON Lines; in particular, when TaskType is `cls`, the value of LogFormat.FormatType can only be json, and other parameters in LogFormat will be ignored. It is recommended not to pass LogFormat.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "format_type": { + Type: schema.TypeString, + Required: true, + Description: "The default output format type for log delivery. The possible values are: `json`: Use the default log output format JSON Lines. The fields in a single log are presented as key-value pairs; `csv`: Use the default log output format csv. Only field values are presented in a single log, without field names.", + }, + "batch_prefix": { + Type: schema.TypeString, + Optional: true, + Description: "A string to be added before each log delivery batch. Each log delivery batch may contain multiple log records.", + }, + "batch_suffix": { + Type: schema.TypeString, + Optional: true, + Description: "A string to append after each log delivery batch.", + }, + "record_prefix": { + Type: schema.TypeString, + Optional: true, + Description: "A string to prepend to each log record.", + }, + "record_suffix": { + Type: schema.TypeString, + Optional: true, + Description: "A string to append to each log record.", + }, + "record_delimiter": { + Type: schema.TypeString, + Optional: true, + Description: "The string inserted between log records as a separator. The possible values are: `\n`: newline character; `\t`: tab character; `,`: comma.", + }, + "field_delimiter": { + Type: schema.TypeString, + Optional: true, + Description: "In a single log record, a string is inserted between fields as a separator. 
The possible values are: `\t`: tab character; `,`: comma; `;`: semicolon.", + }, + }, + }, + }, + + "cls": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: "CLS configuration information. This parameter is required when TaskType is cls.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "log_set_id": { + Type: schema.TypeString, + Required: true, + Description: "Tencent Cloud CLS log set ID.", + }, + "topic_id": { + Type: schema.TypeString, + Required: true, + Description: "Tencent Cloud CLS log topic ID.", + }, + "log_set_region": { + Type: schema.TypeString, + Required: true, + Description: "The region where the Tencent Cloud CLS log set is located.", + }, + }, + }, + }, + + "custom_endpoint": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: "Customize the configuration information of the HTTP service. This parameter is required when TaskType is set to custom_endpoint.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "url": { + Type: schema.TypeString, + Required: true, + Description: "The custom HTTP interface address for real-time log delivery. Currently, only HTTP/HTTPS protocols are supported.", + }, + "access_id": { + Type: schema.TypeString, + Optional: true, + Description: "Fill in a custom SecretId to generate an encrypted signature. This parameter is required if the source site requires authentication.", + }, + "access_key": { + Type: schema.TypeString, + Optional: true, + Description: "Fill in the custom SecretKey to generate the encrypted signature. This parameter is required if the source site requires authentication.", + }, + "compress_type": { + Type: schema.TypeString, + Optional: true, + Description: "Data compression type, the possible values are: `gzip`: use gzip compression. 
If it is not filled in, compression is not enabled.", + }, + "protocol": { + Type: schema.TypeString, + Optional: true, + Description: "When sending logs via POST request, the application layer protocol type used can be: `http`: HTTP protocol; `https`: HTTPS protocol. If not filled in, the protocol type will be parsed according to the filled in URL address.", + }, + "headers": { + Type: schema.TypeList, + Optional: true, + Description: "The custom request header carried when delivering logs. If the header name you fill in is the default header carried by EdgeOne log push, such as Content-Type, then the header value you fill in will overwrite the default value. The header value references a single variable ${batchSize} to obtain the number of logs included in each POST request.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "HTTP header name.", + }, + "value": { + Type: schema.TypeString, + Required: true, + Description: "HTTP header value.", + }, + }, + }, + }, + }, + }, + }, + + "s3": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: "Configuration information of AWS S3 compatible storage bucket. This parameter is required when TaskType is s3.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "endpoint": { + Type: schema.TypeString, + Required: true, + Description: "URLs that do not include bucket names or paths, for example: `https://storage.googleapis.com`, `https://s3.ap-northeast-2.amazonaws.com`, `https://cos.ap-nanjing.myqcloud.com`.", + }, + "region": { + Type: schema.TypeString, + Required: true, + Description: "The region where the bucket is located, for example: ap-northeast-2.", + }, + "bucket": { + Type: schema.TypeString, + Required: true, + Description: "Bucket name and log storage directory, for example: `your_bucket_name/EO-logs/`. 
If this directory does not exist in the bucket, it will be created automatically.", + }, + "access_id": { + Type: schema.TypeString, + Required: true, + Description: "The Access Key ID used to access the bucket.", + }, + "access_key": { + Type: schema.TypeString, + Required: true, + Sensitive: true, + Description: "The secret key used to access the bucket.", + }, + "compress_type": { + Type: schema.TypeString, + Optional: true, + Description: "Data compression type, the values are: gzip: gzip compression. If it is not filled in, compression is not enabled.", + }, + }, + }, + }, + + "delivery_status": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "The status of the real-time log delivery task. The values are: `enabled`: enabled; `disabled`: disabled. Leave it blank to keep the original configuration. Not required when creating.", + }, + + "task_id": { + Type: schema.TypeString, + Computed: true, + Description: "Real-time log delivery task ID.", + }, + }, + } +} + +func resourceTencentCloudTeoRealtimeLogDeliveryCreate(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_teo_realtime_log_delivery.create")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(tccommon.ContextNil) + + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + var ( + zoneId string + taskId string + ) + var ( + request = teo.NewCreateRealtimeLogDeliveryTaskRequest() + response = teo.NewCreateRealtimeLogDeliveryTaskResponse() + ) + + if v, ok := d.GetOk("zone_id"); ok { + zoneId = v.(string) + } + + request.ZoneId = helper.String(zoneId) + + if v, ok := d.GetOk("task_name"); ok { + request.TaskName = helper.String(v.(string)) + } + + if v, ok := d.GetOk("task_type"); ok { + request.TaskType = helper.String(v.(string)) + } + + if v, ok := d.GetOk("entity_list"); ok { + for _, item := range v.([]interface{}) { + request.EntityList = 
append(request.EntityList, helper.String(item.(string))) + } + } + + if v, ok := d.GetOk("log_type"); ok { + request.LogType = helper.String(v.(string)) + } + + if v, ok := d.GetOk("area"); ok { + request.Area = helper.String(v.(string)) + } + + if v, ok := d.GetOk("fields"); ok { + for _, item := range v.([]interface{}) { + request.Fields = append(request.Fields, helper.String(item.(string))) + } + } + + if v, ok := d.GetOk("custom_fields"); ok { + for _, item := range v.([]interface{}) { + customFieldsMap := item.(map[string]interface{}) + customField := teo.CustomField{} + if v, ok := customFieldsMap["name"]; ok { + customField.Name = helper.String(v.(string)) + } + if v, ok := customFieldsMap["value"]; ok { + customField.Value = helper.String(v.(string)) + } + if v, ok := customFieldsMap["enabled"]; ok { + customField.Enabled = helper.Bool(v.(bool)) + } + request.CustomFields = append(request.CustomFields, &customField) + } + } + + if v, ok := d.GetOk("delivery_conditions"); ok { + for _, item := range v.([]interface{}) { + deliveryConditionsMap := item.(map[string]interface{}) + deliveryCondition := teo.DeliveryCondition{} + if v, ok := deliveryConditionsMap["conditions"]; ok { + for _, item := range v.([]interface{}) { + conditionsMap := item.(map[string]interface{}) + queryCondition := teo.QueryCondition{} + if v, ok := conditionsMap["key"]; ok { + queryCondition.Key = helper.String(v.(string)) + } + if v, ok := conditionsMap["operator"]; ok { + queryCondition.Operator = helper.String(v.(string)) + } + if v, ok := conditionsMap["value"]; ok { + for _, item := range v.([]interface{}) { + queryCondition.Value = append(queryCondition.Value, helper.String(item.(string))) + } + } + deliveryCondition.Conditions = append(deliveryCondition.Conditions, &queryCondition) + } + } + request.DeliveryConditions = append(request.DeliveryConditions, &deliveryCondition) + } + } + + if v, ok := d.GetOkExists("sample"); ok { + request.Sample = helper.IntUint64(v.(int)) + } + + 
if logFormatMap, ok := helper.InterfacesHeadMap(d, "log_format"); ok { + logFormat := teo.LogFormat{} + if v, ok := logFormatMap["format_type"]; ok { + logFormat.FormatType = helper.String(v.(string)) + } + if v, ok := logFormatMap["batch_prefix"]; ok { + logFormat.BatchPrefix = helper.String(v.(string)) + } + if v, ok := logFormatMap["batch_suffix"]; ok { + logFormat.BatchSuffix = helper.String(v.(string)) + } + if v, ok := logFormatMap["record_prefix"]; ok { + logFormat.RecordPrefix = helper.String(v.(string)) + } + if v, ok := logFormatMap["record_suffix"]; ok { + logFormat.RecordSuffix = helper.String(v.(string)) + } + if v, ok := logFormatMap["record_delimiter"]; ok { + logFormat.RecordDelimiter = helper.String(v.(string)) + } + if v, ok := logFormatMap["field_delimiter"]; ok { + logFormat.FieldDelimiter = helper.String(v.(string)) + } + request.LogFormat = &logFormat + } + + if cLSMap, ok := helper.InterfacesHeadMap(d, "cls"); ok { + cLSTopic := teo.CLSTopic{} + if v, ok := cLSMap["log_set_id"]; ok { + cLSTopic.LogSetId = helper.String(v.(string)) + } + if v, ok := cLSMap["topic_id"]; ok { + cLSTopic.TopicId = helper.String(v.(string)) + } + if v, ok := cLSMap["log_set_region"]; ok { + cLSTopic.LogSetRegion = helper.String(v.(string)) + } + request.CLS = &cLSTopic + } + + if customEndpointMap, ok := helper.InterfacesHeadMap(d, "custom_endpoint"); ok { + customEndpoint := teo.CustomEndpoint{} + if v, ok := customEndpointMap["url"]; ok { + customEndpoint.Url = helper.String(v.(string)) + } + if v, ok := customEndpointMap["access_id"]; ok { + customEndpoint.AccessId = helper.String(v.(string)) + } + if v, ok := customEndpointMap["access_key"]; ok { + customEndpoint.AccessKey = helper.String(v.(string)) + } + if v, ok := customEndpointMap["compress_type"]; ok { + customEndpoint.CompressType = helper.String(v.(string)) + } + if v, ok := customEndpointMap["protocol"]; ok { + customEndpoint.Protocol = helper.String(v.(string)) + } + if v, ok := 
customEndpointMap["headers"]; ok { + for _, item := range v.([]interface{}) { + headersMap := item.(map[string]interface{}) + header := teo.Header{} + if v, ok := headersMap["name"]; ok { + header.Name = helper.String(v.(string)) + } + if v, ok := headersMap["value"]; ok { + header.Value = helper.String(v.(string)) + } + customEndpoint.Headers = append(customEndpoint.Headers, &header) + } + } + request.CustomEndpoint = &customEndpoint + } + + if s3Map, ok := helper.InterfacesHeadMap(d, "s3"); ok { + s3 := teo.S3{} + if v, ok := s3Map["endpoint"]; ok { + s3.Endpoint = helper.String(v.(string)) + } + if v, ok := s3Map["region"]; ok { + s3.Region = helper.String(v.(string)) + } + if v, ok := s3Map["bucket"]; ok { + s3.Bucket = helper.String(v.(string)) + } + if v, ok := s3Map["access_id"]; ok { + s3.AccessId = helper.String(v.(string)) + } + if v, ok := s3Map["access_key"]; ok { + s3.AccessKey = helper.String(v.(string)) + } + if v, ok := s3Map["compress_type"]; ok { + s3.CompressType = helper.String(v.(string)) + } + request.S3 = &s3 + } + + err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTeoClient().CreateRealtimeLogDeliveryTaskWithContext(ctx, request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + response = result + return nil + }) + if err != nil { + log.Printf("[CRITAL]%s create teo realtime log delivery failed, reason:%+v", logId, err) + return err + } + + taskId = *response.Response.TaskId + + if err := resourceTencentCloudTeoRealtimeLogDeliveryCreatePostHandleResponse0(ctx, response); err != nil { + return err + } + + d.SetId(strings.Join([]string{zoneId, taskId}, tccommon.FILED_SP)) + + return resourceTencentCloudTeoRealtimeLogDeliveryRead(d, meta) +} + +func 
resourceTencentCloudTeoRealtimeLogDeliveryRead(d *schema.ResourceData, meta interface{}) error {
	defer tccommon.LogElapsed("resource.tencentcloud_teo_realtime_log_delivery.read")()
	defer tccommon.InconsistentCheck(d, meta)()

	logId := tccommon.GetLogId(tccommon.ContextNil)

	ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta)

	service := TeoService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()}

	// Resource ID has the form "<zoneId><FILED_SP><taskId>" (set by Create/Import).
	idSplit := strings.Split(d.Id(), tccommon.FILED_SP)
	if len(idSplit) != 2 {
		return fmt.Errorf("id is broken,%s", d.Id())
	}
	zoneId := idSplit[0]
	taskId := idSplit[1]

	_ = d.Set("zone_id", zoneId)

	respData, err := service.DescribeTeoRealtimeLogDeliveryById(ctx, zoneId, taskId)
	if err != nil {
		return err
	}

	// Task no longer exists remotely: clear the ID so Terraform plans a re-create.
	if respData == nil {
		d.SetId("")
		log.Printf("[WARN]%s resource `teo_realtime_log_delivery` [%s] not found, please check if it has been deleted.\n", logId, d.Id())
		return nil
	}
	if respData.TaskId != nil {
		// NOTE(fix): the generated code also re-assigned the local `taskId` here,
		// but the value was never used afterwards — dead store removed.
		_ = d.Set("task_id", respData.TaskId)
	}

	if respData.TaskName != nil {
		_ = d.Set("task_name", respData.TaskName)
	}

	if respData.DeliveryStatus != nil {
		_ = d.Set("delivery_status", respData.DeliveryStatus)
	}

	// Fix: the generated code contained this block twice verbatim; set "task_type" once.
	if respData.TaskType != nil {
		_ = d.Set("task_type", respData.TaskType)
	}

	if respData.EntityList != nil {
		_ = d.Set("entity_list", respData.EntityList)
	}

	if respData.LogType != nil {
		_ = d.Set("log_type", respData.LogType)
	}

	if respData.Area != nil {
		_ = d.Set("area", respData.Area)
	}

	if respData.Fields != nil {
		_ = d.Set("fields", respData.Fields)
	}

	// Flatten CustomFields ([]*teo.CustomField) into the schema's list-of-maps shape.
	customFieldsList := make([]map[string]interface{}, 0, len(respData.CustomFields))
	if respData.CustomFields != nil {
		for _, customFields := range respData.CustomFields {
			customFieldsMap := map[string]interface{}{}

			if customFields.Name != nil {
customFieldsMap["name"] = customFields.Name + } + + if customFields.Value != nil { + customFieldsMap["value"] = customFields.Value + } + + if customFields.Enabled != nil { + customFieldsMap["enabled"] = customFields.Enabled + } + + customFieldsList = append(customFieldsList, customFieldsMap) + } + + _ = d.Set("custom_fields", customFieldsList) + } + + deliveryConditionsList := make([]map[string]interface{}, 0, len(respData.DeliveryConditions)) + if respData.DeliveryConditions != nil { + for _, deliveryConditions := range respData.DeliveryConditions { + deliveryConditionsMap := map[string]interface{}{} + + conditionsList := make([]map[string]interface{}, 0, len(deliveryConditions.Conditions)) + if deliveryConditions.Conditions != nil { + for _, conditions := range deliveryConditions.Conditions { + conditionsMap := map[string]interface{}{} + + if conditions.Key != nil { + conditionsMap["key"] = conditions.Key + } + + if conditions.Operator != nil { + conditionsMap["operator"] = conditions.Operator + } + + if conditions.Value != nil { + conditionsMap["value"] = conditions.Value + } + + conditionsList = append(conditionsList, conditionsMap) + } + + deliveryConditionsMap["conditions"] = conditionsList + } + deliveryConditionsList = append(deliveryConditionsList, deliveryConditionsMap) + } + + _ = d.Set("delivery_conditions", deliveryConditionsList) + } + + if respData.Sample != nil { + _ = d.Set("sample", respData.Sample) + } + + logFormatMap := map[string]interface{}{} + + if respData.LogFormat != nil { + if respData.LogFormat.FormatType != nil { + logFormatMap["format_type"] = respData.LogFormat.FormatType + } + + if respData.LogFormat.BatchPrefix != nil { + logFormatMap["batch_prefix"] = respData.LogFormat.BatchPrefix + } + + if respData.LogFormat.BatchSuffix != nil { + logFormatMap["batch_suffix"] = respData.LogFormat.BatchSuffix + } + + if respData.LogFormat.RecordPrefix != nil { + logFormatMap["record_prefix"] = respData.LogFormat.RecordPrefix + } + + if 
respData.LogFormat.RecordSuffix != nil { + logFormatMap["record_suffix"] = respData.LogFormat.RecordSuffix + } + + if respData.LogFormat.RecordDelimiter != nil { + logFormatMap["record_delimiter"] = respData.LogFormat.RecordDelimiter + } + + if respData.LogFormat.FieldDelimiter != nil { + logFormatMap["field_delimiter"] = respData.LogFormat.FieldDelimiter + } + + _ = d.Set("log_format", []interface{}{logFormatMap}) + } + + cLSMap := map[string]interface{}{} + + if respData.CLS != nil { + if respData.CLS.LogSetId != nil { + cLSMap["log_set_id"] = respData.CLS.LogSetId + } + + if respData.CLS.TopicId != nil { + cLSMap["topic_id"] = respData.CLS.TopicId + } + + if respData.CLS.LogSetRegion != nil { + cLSMap["log_set_region"] = respData.CLS.LogSetRegion + } + + _ = d.Set("cls", []interface{}{cLSMap}) + } + + customEndpointMap := map[string]interface{}{} + + if respData.CustomEndpoint != nil { + if respData.CustomEndpoint.Url != nil { + customEndpointMap["url"] = respData.CustomEndpoint.Url + } + + if respData.CustomEndpoint.AccessId != nil { + customEndpointMap["access_id"] = respData.CustomEndpoint.AccessId + } + + if respData.CustomEndpoint.AccessKey != nil { + customEndpointMap["access_key"] = respData.CustomEndpoint.AccessKey + } + + if respData.CustomEndpoint.CompressType != nil { + customEndpointMap["compress_type"] = respData.CustomEndpoint.CompressType + } + + if respData.CustomEndpoint.Protocol != nil { + customEndpointMap["protocol"] = respData.CustomEndpoint.Protocol + } + + headersList := make([]map[string]interface{}, 0, len(respData.CustomEndpoint.Headers)) + if respData.CustomEndpoint.Headers != nil { + for _, headers := range respData.CustomEndpoint.Headers { + headersMap := map[string]interface{}{} + + if headers.Name != nil { + headersMap["name"] = headers.Name + } + + if headers.Value != nil { + headersMap["value"] = headers.Value + } + + headersList = append(headersList, headersMap) + } + + customEndpointMap["headers"] = headersList + } + _ = 
d.Set("custom_endpoint", []interface{}{customEndpointMap}) + } + + s3Map := map[string]interface{}{} + + if respData.S3 != nil { + if respData.S3.Endpoint != nil { + s3Map["endpoint"] = respData.S3.Endpoint + } + + if respData.S3.Region != nil { + s3Map["region"] = respData.S3.Region + } + + if respData.S3.Bucket != nil { + s3Map["bucket"] = respData.S3.Bucket + } + + if respData.S3.AccessId != nil { + s3Map["access_id"] = respData.S3.AccessId + } + + if respData.S3.AccessKey != nil { + s3Map["access_key"] = respData.S3.AccessKey + } + + if respData.S3.CompressType != nil { + s3Map["compress_type"] = respData.S3.CompressType + } + + _ = d.Set("s3", []interface{}{s3Map}) + } + + return nil +} + +func resourceTencentCloudTeoRealtimeLogDeliveryUpdate(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_teo_realtime_log_delivery.update")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(tccommon.ContextNil) + + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + immutableArgs := []string{"task_type", "log_type", "area", "cls"} + for _, v := range immutableArgs { + if d.HasChange(v) { + return fmt.Errorf("argument `%s` cannot be changed", v) + } + } + idSplit := strings.Split(d.Id(), tccommon.FILED_SP) + if len(idSplit) != 2 { + return fmt.Errorf("id is broken,%s", d.Id()) + } + zoneId := idSplit[0] + taskId := idSplit[1] + + needChange := false + mutableArgs := []string{"task_name", "delivery_status", "entity_list", "fields", "custom_fields", "delivery_conditions", "sample", "log_format", "custom_endpoint", "s3"} + for _, v := range mutableArgs { + if d.HasChange(v) { + needChange = true + break + } + } + + if needChange { + request := teo.NewModifyRealtimeLogDeliveryTaskRequest() + + response := teo.NewModifyRealtimeLogDeliveryTaskResponse() + + request.ZoneId = helper.String(zoneId) + + request.TaskId = helper.String(taskId) + + if v, ok := 
d.GetOk("task_name"); ok { + request.TaskName = helper.String(v.(string)) + } + + if v, ok := d.GetOk("delivery_status"); ok { + request.DeliveryStatus = helper.String(v.(string)) + } + + if v, ok := d.GetOk("entity_list"); ok { + for _, item := range v.([]interface{}) { + request.EntityList = append(request.EntityList, helper.String(item.(string))) + } + } + + if v, ok := d.GetOk("fields"); ok { + for _, item := range v.([]interface{}) { + request.Fields = append(request.Fields, helper.String(item.(string))) + } + } + + if v, ok := d.GetOk("custom_fields"); ok { + for _, item := range v.([]interface{}) { + customFieldsMap := item.(map[string]interface{}) + customField := teo.CustomField{} + if v, ok := customFieldsMap["name"]; ok { + customField.Name = helper.String(v.(string)) + } + if v, ok := customFieldsMap["value"]; ok { + customField.Value = helper.String(v.(string)) + } + if v, ok := customFieldsMap["enabled"]; ok { + customField.Enabled = helper.Bool(v.(bool)) + } + request.CustomFields = append(request.CustomFields, &customField) + } + } + + if v, ok := d.GetOk("delivery_conditions"); ok { + for _, item := range v.([]interface{}) { + deliveryConditionsMap := item.(map[string]interface{}) + deliveryCondition := teo.DeliveryCondition{} + if v, ok := deliveryConditionsMap["conditions"]; ok { + for _, item := range v.([]interface{}) { + conditionsMap := item.(map[string]interface{}) + queryCondition := teo.QueryCondition{} + if v, ok := conditionsMap["key"]; ok { + queryCondition.Key = helper.String(v.(string)) + } + if v, ok := conditionsMap["operator"]; ok { + queryCondition.Operator = helper.String(v.(string)) + } + if v, ok := conditionsMap["value"]; ok { + for _, item := range v.([]interface{}) { + queryCondition.Value = append(queryCondition.Value, helper.String(item.(string))) + } + } + deliveryCondition.Conditions = append(deliveryCondition.Conditions, &queryCondition) + } + } + request.DeliveryConditions = append(request.DeliveryConditions, 
&deliveryCondition) + } + } + + if v, ok := d.GetOkExists("sample"); ok { + request.Sample = helper.IntUint64(v.(int)) + } + + if logFormatMap, ok := helper.InterfacesHeadMap(d, "log_format"); ok { + logFormat := teo.LogFormat{} + if v, ok := logFormatMap["format_type"]; ok { + logFormat.FormatType = helper.String(v.(string)) + } + if v, ok := logFormatMap["batch_prefix"]; ok { + logFormat.BatchPrefix = helper.String(v.(string)) + } + if v, ok := logFormatMap["batch_suffix"]; ok { + logFormat.BatchSuffix = helper.String(v.(string)) + } + if v, ok := logFormatMap["record_prefix"]; ok { + logFormat.RecordPrefix = helper.String(v.(string)) + } + if v, ok := logFormatMap["record_suffix"]; ok { + logFormat.RecordSuffix = helper.String(v.(string)) + } + if v, ok := logFormatMap["record_delimiter"]; ok { + logFormat.RecordDelimiter = helper.String(v.(string)) + } + if v, ok := logFormatMap["field_delimiter"]; ok { + logFormat.FieldDelimiter = helper.String(v.(string)) + } + request.LogFormat = &logFormat + } + + if customEndpointMap, ok := helper.InterfacesHeadMap(d, "custom_endpoint"); ok { + customEndpoint := teo.CustomEndpoint{} + if v, ok := customEndpointMap["url"]; ok { + customEndpoint.Url = helper.String(v.(string)) + } + if v, ok := customEndpointMap["access_id"]; ok { + customEndpoint.AccessId = helper.String(v.(string)) + } + if v, ok := customEndpointMap["access_key"]; ok { + customEndpoint.AccessKey = helper.String(v.(string)) + } + if v, ok := customEndpointMap["compress_type"]; ok { + customEndpoint.CompressType = helper.String(v.(string)) + } + if v, ok := customEndpointMap["protocol"]; ok { + customEndpoint.Protocol = helper.String(v.(string)) + } + if v, ok := customEndpointMap["headers"]; ok { + for _, item := range v.([]interface{}) { + headersMap := item.(map[string]interface{}) + header := teo.Header{} + if v, ok := headersMap["name"]; ok { + header.Name = helper.String(v.(string)) + } + if v, ok := headersMap["value"]; ok { + header.Value = 
helper.String(v.(string)) + } + customEndpoint.Headers = append(customEndpoint.Headers, &header) + } + } + request.CustomEndpoint = &customEndpoint + } + + if s3Map, ok := helper.InterfacesHeadMap(d, "s3"); ok { + s3 := teo.S3{} + if v, ok := s3Map["endpoint"]; ok { + s3.Endpoint = helper.String(v.(string)) + } + if v, ok := s3Map["region"]; ok { + s3.Region = helper.String(v.(string)) + } + if v, ok := s3Map["bucket"]; ok { + s3.Bucket = helper.String(v.(string)) + } + if v, ok := s3Map["access_id"]; ok { + s3.AccessId = helper.String(v.(string)) + } + if v, ok := s3Map["access_key"]; ok { + s3.AccessKey = helper.String(v.(string)) + } + if v, ok := s3Map["compress_type"]; ok { + s3.CompressType = helper.String(v.(string)) + } + request.S3 = &s3 + } + + err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTeoClient().ModifyRealtimeLogDeliveryTaskWithContext(ctx, request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + return nil + }) + if err != nil { + log.Printf("[CRITAL]%s update teo realtime log delivery failed, reason:%+v", logId, err) + return err + } + if err := resourceTencentCloudTeoRealtimeLogDeliveryUpdatePostHandleResponse0(ctx, response); err != nil { + return err + } + + } + + return resourceTencentCloudTeoRealtimeLogDeliveryRead(d, meta) +} + +func resourceTencentCloudTeoRealtimeLogDeliveryDelete(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_teo_realtime_log_delivery.delete")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(tccommon.ContextNil) + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + idSplit := strings.Split(d.Id(), tccommon.FILED_SP) + if 
len(idSplit) != 2 { + return fmt.Errorf("id is broken,%s", d.Id()) + } + zoneId := idSplit[0] + taskId := idSplit[1] + + var ( + request = teo.NewDeleteRealtimeLogDeliveryTaskRequest() + response = teo.NewDeleteRealtimeLogDeliveryTaskResponse() + ) + + request.ZoneId = helper.String(zoneId) + + request.TaskId = helper.String(taskId) + + err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTeoClient().DeleteRealtimeLogDeliveryTaskWithContext(ctx, request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + response = result + return nil + }) + if err != nil { + log.Printf("[CRITAL]%s delete teo realtime log delivery failed, reason:%+v", logId, err) + return err + } + + _ = response + if err := resourceTencentCloudTeoRealtimeLogDeliveryDeletePostHandleResponse0(ctx, response); err != nil { + return err + } + + return nil +} diff --git a/tencentcloud/services/teo/resource_tc_teo_realtime_log_delivery.md b/tencentcloud/services/teo/resource_tc_teo_realtime_log_delivery.md new file mode 100644 index 0000000000..3dcd933144 --- /dev/null +++ b/tencentcloud/services/teo/resource_tc_teo_realtime_log_delivery.md @@ -0,0 +1,57 @@ +Provides a resource to create a teo teo_realtime_log_delivery + +Example Usage + +```hcl +resource "tencentcloud_teo_realtime_log_delivery" "teo_realtime_log_delivery" { + area = "overseas" + delivery_status = "disabled" + entity_list = [ + "sid-2yvhjw98uaco", + ] + fields = [ + "ServiceID", + "ConnectTimeStamp", + "DisconnetTimeStamp", + "DisconnetReason", + "ClientRealIP", + "ClientRegion", + "EdgeIP", + "ForwardProtocol", + "ForwardPort", + "SentBytes", + "ReceivedBytes", + "LogTimeStamp", + ] + log_type = "application" + sample = 0 + task_name = "test" + task_type = "s3" + zone_id = 
"zone-2qtuhspy7cr6" + + log_format { + field_delimiter = "," + format_type = "json" + record_delimiter = "\n" + record_prefix = "{" + record_suffix = "}" + } + + s3 { + access_id = "xxxxxxxxxx" + access_key = "xxxxxxxxxx" + bucket = "test-1253833068" + compress_type = "gzip" + endpoint = "https://test-1253833068.cos.ap-nanjing.myqcloud.com" + region = "ap-nanjing" + } +} +``` + +Import + +teo teo_realtime_log_delivery can be imported using the id, e.g. + +``` +terraform import tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery zoneId#taskId +``` diff --git a/tencentcloud/services/teo/resource_tc_teo_realtime_log_delivery_extension.go b/tencentcloud/services/teo/resource_tc_teo_realtime_log_delivery_extension.go new file mode 100644 index 0000000000..ba17d0c90d --- /dev/null +++ b/tencentcloud/services/teo/resource_tc_teo_realtime_log_delivery_extension.go @@ -0,0 +1,83 @@ +package teo + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + teo "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/teo/v20220901" + + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" +) + +func resourceTencentCloudTeoRealtimeLogDeliveryCreatePostHandleResponse0(ctx context.Context, resp *teo.CreateRealtimeLogDeliveryTaskResponse) error { + taskId := *resp.Response.TaskId + return checkRealtimeLogDeliveryStatus(ctx, taskId, "enabled") +} + +func resourceTencentCloudTeoRealtimeLogDeliveryUpdatePostHandleResponse0(ctx context.Context, resp *teo.ModifyRealtimeLogDeliveryTaskResponse) error { + d := tccommon.ResourceDataFromContext(ctx) + if d == nil { + return fmt.Errorf("resource data can not be nil") + } + var taskId string + if v, ok := d.GetOk("task_id"); ok { + taskId = v.(string) + } + return checkRealtimeLogDeliveryStatus(ctx, taskId, "update") +} + +func resourceTencentCloudTeoRealtimeLogDeliveryDeletePostHandleResponse0(ctx context.Context, resp 
*teo.DeleteRealtimeLogDeliveryTaskResponse) error { + d := tccommon.ResourceDataFromContext(ctx) + if d == nil { + return fmt.Errorf("resource data can not be nil") + } + var taskId string + if v, ok := d.GetOk("task_id"); ok { + taskId = v.(string) + } + return checkRealtimeLogDeliveryStatus(ctx, taskId, "deleted") +} + +func checkRealtimeLogDeliveryStatus(ctx context.Context, taskId string, expectedStatuses ...string) error { + d := tccommon.ResourceDataFromContext(ctx) + if d == nil { + return fmt.Errorf("resource data can not be nil") + } + meta := tccommon.ProviderMetaFromContext(ctx) + if meta == nil { + return fmt.Errorf("provider meta can not be nil") + } + + var zoneId string + if v, ok := d.GetOk("zone_id"); ok { + zoneId = v.(string) + } + + if v, ok := d.GetOk("delivery_status"); ok && expectedStatuses[0] == "update" { + expectedStatuses = append(expectedStatuses, v.(string)) + } + + service := TeoService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + return resource.Retry(6*tccommon.ReadRetryTimeout, func() *resource.RetryError { + instance, errRet := service.DescribeTeoRealtimeLogDeliveryById(ctx, zoneId, taskId) + if errRet != nil { + return tccommon.RetryError(errRet, tccommon.InternalError) + } + + if instance == nil { + if expectedStatuses[0] != "deleted" { + return resource.NonRetryableError(fmt.Errorf("RealtimeLogDeliveryTask data not found, taskId: %v", taskId)) + } + return nil + } + + for _, s := range expectedStatuses { + if s == *instance.DeliveryStatus { + return nil + } + } + + return resource.RetryableError(fmt.Errorf("RealtimeLogDeliveryTask status is %v, retry...", *instance.DeliveryStatus)) + }) +} diff --git a/tencentcloud/services/teo/resource_tc_teo_realtime_log_delivery_test.go b/tencentcloud/services/teo/resource_tc_teo_realtime_log_delivery_test.go new file mode 100644 index 0000000000..7c2d57b6b9 --- /dev/null +++ b/tencentcloud/services/teo/resource_tc_teo_realtime_log_delivery_test.go @@ -0,0 +1,164 @@ +package 
teo_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + tcacctest "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/acctest" +) + +func TestAccTencentCloudTeoRealtimeLogDeliveryResource_basic(t *testing.T) { + t.Parallel() + resource.Test(t, resource.TestCase{ + PreCheck: func() { + tcacctest.AccPreCheck(t) + }, + Providers: tcacctest.AccProviders, + Steps: []resource.TestStep{ + { + Config: testAccTeoRealtimeLogDelivery, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "id"), + resource.TestCheckResourceAttr("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "area", "overseas"), + resource.TestCheckResourceAttr("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "delivery_status", "enabled"), + resource.TestCheckResourceAttr("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "entity_list.#", "1"), + resource.TestCheckResourceAttrSet("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "fields.#"), + resource.TestCheckResourceAttr("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "log_type", "application"), + resource.TestCheckResourceAttr("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "sample", "0"), + resource.TestCheckResourceAttr("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "task_name", "test"), + resource.TestCheckResourceAttr("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "task_type", "s3"), + resource.TestCheckResourceAttr("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "log_format.#", "1"), + resource.TestCheckResourceAttr("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "s3.#", "1"), + resource.TestCheckResourceAttrSet("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "s3.0.access_id"), + 
resource.TestCheckResourceAttrSet("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "s3.0.access_key"), + resource.TestCheckResourceAttrSet("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "s3.0.bucket"), + resource.TestCheckResourceAttrSet("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "s3.0.compress_type"), + resource.TestCheckResourceAttrSet("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "s3.0.endpoint"), + resource.TestCheckResourceAttrSet("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "s3.0.region"), + ), + }, + { + ResourceName: "tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccTeoRealtimeLogDeliveryUp, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "id"), + resource.TestCheckResourceAttr("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "area", "overseas"), + resource.TestCheckResourceAttr("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "delivery_status", "disabled"), + resource.TestCheckResourceAttr("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "entity_list.#", "1"), + resource.TestCheckResourceAttrSet("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "fields.#"), + resource.TestCheckResourceAttr("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "log_type", "application"), + resource.TestCheckResourceAttr("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "sample", "0"), + resource.TestCheckResourceAttr("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "task_name", "test"), + resource.TestCheckResourceAttr("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "task_type", "s3"), + 
resource.TestCheckResourceAttr("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "log_format.#", "1"), + resource.TestCheckResourceAttr("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "s3.#", "1"), + resource.TestCheckResourceAttrSet("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "s3.0.access_id"), + resource.TestCheckResourceAttrSet("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "s3.0.access_key"), + resource.TestCheckResourceAttrSet("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "s3.0.bucket"), + resource.TestCheckResourceAttrSet("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "s3.0.compress_type"), + resource.TestCheckResourceAttrSet("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "s3.0.endpoint"), + resource.TestCheckResourceAttrSet("tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery", "s3.0.region"), + ), + }, + }, + }) +} + +const testAccTeoRealtimeLogDelivery = ` + +resource "tencentcloud_teo_realtime_log_delivery" "teo_realtime_log_delivery" { + area = "overseas" + delivery_status = "enabled" + entity_list = [ + "sid-2yvhjw98uaco", + ] + fields = [ + "ServiceID", + "ConnectTimeStamp", + "DisconnetTimeStamp", + "DisconnetReason", + "ClientRealIP", + "ClientRegion", + "EdgeIP", + "ForwardProtocol", + "ForwardPort", + "SentBytes", + "ReceivedBytes", + "LogTimeStamp", + ] + log_type = "application" + sample = 0 + task_name = "test" + task_type = "s3" + zone_id = "zone-2qtuhspy7cr6" + + log_format { + field_delimiter = "," + format_type = "json" + record_delimiter = "\n" + record_prefix = "{" + record_suffix = "}" + } + + s3 { + access_id = "xxxxxxxxxx" + access_key = "xxxxxxxxxx" + bucket = "test-1253833068" + compress_type = "gzip" + endpoint = "https://test-1253833068.cos.ap-nanjing.myqcloud.com" + region = "ap-nanjing" + } +} +` + +const testAccTeoRealtimeLogDeliveryUp = ` + +resource 
"tencentcloud_teo_realtime_log_delivery" "teo_realtime_log_delivery" { + area = "overseas" + delivery_status = "disabled" + entity_list = [ + "sid-2yvhjw98uaco", + ] + fields = [ + "ServiceID", + "ConnectTimeStamp", + "DisconnetTimeStamp", + "DisconnetReason", + "ClientRealIP", + "ClientRegion", + "EdgeIP", + "ForwardProtocol", + "ForwardPort", + "SentBytes", + "ReceivedBytes", + "LogTimeStamp", + ] + log_type = "application" + sample = 0 + task_name = "test" + task_type = "s3" + zone_id = "zone-2qtuhspy7cr6" + + log_format { + field_delimiter = "," + format_type = "json" + record_delimiter = "\n" + record_prefix = "{" + record_suffix = "}" + } + + s3 { + access_id = "xxxxxxxxxx" + access_key = "xxxxxxxxxx" + bucket = "test-1253833068" + compress_type = "gzip" + endpoint = "https://test-1253833068.cos.ap-nanjing.myqcloud.com" + region = "ap-nanjing" + } +} +` diff --git a/tencentcloud/services/teo/service_tencentcloud_teo.go b/tencentcloud/services/teo/service_tencentcloud_teo.go index 40433cba28..8dca21c4c1 100644 --- a/tencentcloud/services/teo/service_tencentcloud_teo.go +++ b/tencentcloud/services/teo/service_tencentcloud_teo.go @@ -1104,3 +1104,37 @@ func (me *TeoService) DescribeTeoL4ProxyById(ctx context.Context, zoneId string, ret = response.Response.L4Proxies[0] return } + +func (me *TeoService) DescribeTeoRealtimeLogDeliveryById(ctx context.Context, zoneId string, taskId string) (ret *teo.RealtimeLogDeliveryTask, errRet error) { + logId := tccommon.GetLogId(ctx) + + request := teo.NewDescribeRealtimeLogDeliveryTasksRequest() + request.ZoneId = helper.String(zoneId) + advancedFilter := &teo.AdvancedFilter{ + Name: helper.String("task-id"), + Values: []*string{helper.String(taskId)}, + } + request.Filters = append(request.Filters, advancedFilter) + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + 
ratelimit.Check(request.GetAction()) + + response, err := me.client.UseTeoClient().DescribeRealtimeLogDeliveryTasks(request) + if err != nil { + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + if len(response.Response.RealtimeLogDeliveryTasks) < 1 { + return + } + + ret = response.Response.RealtimeLogDeliveryTasks[0] + return +} diff --git a/website/docs/r/teo_realtime_log_delivery.html.markdown b/website/docs/r/teo_realtime_log_delivery.html.markdown new file mode 100644 index 0000000000..29ff37f687 --- /dev/null +++ b/website/docs/r/teo_realtime_log_delivery.html.markdown @@ -0,0 +1,153 @@ +--- +subcategory: "TencentCloud EdgeOne(TEO)" +layout: "tencentcloud" +page_title: "TencentCloud: tencentcloud_teo_realtime_log_delivery" +sidebar_current: "docs-tencentcloud-resource-teo_realtime_log_delivery" +description: |- + Provides a resource to create a teo teo_realtime_log_delivery +--- + +# tencentcloud_teo_realtime_log_delivery + +Provides a resource to create a teo teo_realtime_log_delivery + +## Example Usage + +```hcl +resource "tencentcloud_teo_realtime_log_delivery" "teo_realtime_log_delivery" { + area = "overseas" + delivery_status = "disabled" + entity_list = [ + "sid-2yvhjw98uaco", + ] + fields = [ + "ServiceID", + "ConnectTimeStamp", + "DisconnetTimeStamp", + "DisconnetReason", + "ClientRealIP", + "ClientRegion", + "EdgeIP", + "ForwardProtocol", + "ForwardPort", + "SentBytes", + "ReceivedBytes", + "LogTimeStamp", + ] + log_type = "application" + sample = 0 + task_name = "test" + task_type = "s3" + zone_id = "zone-2qtuhspy7cr6" + + log_format { + field_delimiter = "," + format_type = "json" + record_delimiter = "\n" + record_prefix = "{" + record_suffix = "}" + } + + s3 { + access_id = "xxxxxxxxxx" + access_key = "xxxxxxxxxx" + bucket = "test-1253833068" + compress_type = "gzip" + endpoint = 
"https://test-1253833068.cos.ap-nanjing.myqcloud.com" + region = "ap-nanjing" + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `area` - (Required, String) Data delivery area, possible values are: `mainland`: within mainland China; `overseas`: worldwide (excluding mainland China). +* `entity_list` - (Required, List: [`String`]) List of entities (seven-layer domain names or four-layer proxy instances) corresponding to real-time log delivery tasks. Example values are as follows: Seven-layer domain name: `domain.example.com`; four-layer proxy instance: sid-2s69eb5wcms7. For values, refer to: `https://cloud.tencent.com/document/api/1552/80690`, `https://cloud.tencent.com/document/api/1552/86336`. +* `fields` - (Required, List: [`String`]) A list of preset fields for delivery. +* `log_type` - (Required, String) Data delivery type, the values are: `domain`: site acceleration log; `application`: four-layer proxy log; `web-rateLiming`: rate limit and CC attack protection log; `web-attack`: managed rule log; `web-rule`: custom rule log; `web-bot`: Bot management log. +* `sample` - (Required, Int) The sampling ratio is in thousandths, with a value range of 1-1000. For example, filling in 605 means the sampling ratio is 60.5%. Leaving it blank means the sampling ratio is 100%. +* `task_name` - (Required, String) The name of the real-time log delivery task. The format is a combination of numbers, English, -, and _. The maximum length is 200 characters. +* `task_type` - (Required, String) The real-time log delivery task type. The possible values are: `cls`: push to Tencent Cloud CLS; `custom_endpoint`: push to a custom HTTP(S) address; `s3`: push to an AWS S3 compatible storage bucket address. +* `zone_id` - (Required, String, ForceNew) ID of the site. +* `cls` - (Optional, List) CLS configuration information. This parameter is required when TaskType is cls. 
+* `custom_endpoint` - (Optional, List) Customize the configuration information of the HTTP service. This parameter is required when TaskType is set to custom_endpoint. +* `custom_fields` - (Optional, List) The list of custom fields delivered supports extracting specified field values from HTTP request headers, response headers, and cookies. Custom field names cannot be repeated and cannot exceed 200 fields. +* `delivery_conditions` - (Optional, List) The filter condition for log delivery. If it is not filled, all logs will be delivered. +* `delivery_status` - (Optional, String) The status of the real-time log delivery task. The values are: `enabled`: enabled; `disabled`: disabled. Leave it blank to keep the original configuration. Not required when creating. +* `log_format` - (Optional, List) The output format of log delivery. If it is not filled, it means the default format. The default format logic is as follows: when TaskType is `custom_endpoint`, the default format is an array of multiple JSON objects, each JSON object is a log; when TaskType is `s3`, the default format is JSON Lines; in particular, when TaskType is `cls`, the value of LogFormat.FormatType can only be json, and other parameters in LogFormat will be ignored. It is recommended not to pass LogFormat. +* `s3` - (Optional, List) Configuration information of AWS S3 compatible storage bucket. This parameter is required when TaskType is s3. + +The `cls` object supports the following: + +* `log_set_id` - (Required, String) Tencent Cloud CLS log set ID. +* `log_set_region` - (Required, String) The region where the Tencent Cloud CLS log set is located. +* `topic_id` - (Required, String) Tencent Cloud CLS log topic ID. + +The `conditions` object of `delivery_conditions` supports the following: + +* `key` - (Required, String) The key of the filter condition. 
+* `operator` - (Required, String) Query condition operator, operation types are: `equals`: equal; `notEquals`: not equal; `include`: include; `notInclude`: not include; `startWith`: start with value; `notStartWith`: not start with value; `endWith`: end with value; `notEndWith`: not end with value. +* `value` - (Required, List) The value of the filter condition. + +The `custom_endpoint` object supports the following: + +* `url` - (Required, String) The custom HTTP interface address for real-time log delivery. Currently, only HTTP/HTTPS protocols are supported. +* `access_id` - (Optional, String) Fill in a custom SecretId to generate an encrypted signature. This parameter is required if the source site requires authentication. +* `access_key` - (Optional, String) Fill in the custom SecretKey to generate the encrypted signature. This parameter is required if the source site requires authentication. +* `compress_type` - (Optional, String) Data compression type, the possible values are: `gzip`: use gzip compression. If it is not filled in, compression is not enabled. +* `headers` - (Optional, List) The custom request header carried when delivering logs. If the header name you fill in is the default header carried by EdgeOne log push, such as Content-Type, then the header value you fill in will overwrite the default value. The header value references a single variable ${batchSize} to obtain the number of logs included in each POST request. +* `protocol` - (Optional, String) When sending logs via POST request, the application layer protocol type used can be: `http`: HTTP protocol; `https`: HTTPS protocol. If not filled in, the protocol type will be parsed according to the filled in URL address. + +The `custom_fields` object supports the following: + +* `name` - (Required, String) Extract data from the specified location in the HTTP request and response. 
The values are: `ReqHeader`: extract the specified field value from the HTTP request header; `RspHeader`: extract the specified field value from the HTTP response header; `Cookie`: extract the specified field value from the Cookie. +* `value` - (Required, String) The name of the parameter whose value needs to be extracted, for example: Accept-Language. +* `enabled` - (Optional, Bool) Whether to deliver this field. If left blank, this field will not be delivered. + +The `delivery_conditions` object supports the following: + +* `conditions` - (Optional, List) Log filtering conditions, the detailed filtering conditions are as follows: - `EdgeResponseStatusCode`: filter according to the status code returned by the EdgeOne node to the client. Supported operators: `equal`, `great`, `less`, `great_equal`, `less_equal`; Value range: any integer greater than or equal to 0; - `OriginResponseStatusCode`: filter according to the origin response status code. Supported operators: `equal`, `great`, `less`, `great_equal`, `less_equal`; Value range: any integer greater than or equal to -1; - `SecurityAction`: filter according to the final disposal action after the request hits the security rule. Supported operators: `equal`; Optional options are as follows: `-`: unknown/miss; `Monitor`: observe; `JSChallenge`: JavaScript challenge; `Deny`: intercept; `Allow`: allow; `BlockIP`: IP ban; `Redirect`: redirect; `ReturnCustomPage`: return to a custom page; `ManagedChallenge`: managed challenge; `Silence`: silent; `LongDelay`: respond after a long wait; `ShortDelay`: respond after a short wait; -`SecurityModule`: filter according to the name of the security module that finally handles the request. 
Supported operators: `equal`; Optional options: `-`: unknown/missed; `CustomRule`: Web Protection - Custom Rules; `RateLimitingCustomRule`: Web Protection - Rate Limiting Rules; `ManagedRule`: Web Protection - Managed Rules; `L7DDoS`: Web Protection - CC Attack Protection; `BotManagement`: Bot Management - Bot Basic Management; `BotClientReputation`: Bot Management - Client Profile Analysis; `BotBehaviorAnalysis`: Bot Management - Bot Intelligent Analysis; `BotCustomRule`: Bot Management - Custom Bot Rules; `BotActiveDetection`: Bot Management - Active Feature Recognition. + +The `headers` object of `custom_endpoint` supports the following: + +* `name` - (Required, String) HTTP header name. +* `value` - (Required, String) HTTP header value. + +The `log_format` object supports the following: + +* `format_type` - (Required, String) The default output format type for log delivery. The possible values are: `json`: Use the default log output format JSON Lines. The fields in a single log are presented as key-value pairs; `csv`: Use the default log output format csv. Only field values are presented in a single log, without field names. +* `batch_prefix` - (Optional, String) A string to be added before each log delivery batch. Each log delivery batch may contain multiple log records. +* `batch_suffix` - (Optional, String) A string to append after each log delivery batch. +* `field_delimiter` - (Optional, String) In a single log record, a string is inserted between fields as a separator. The possible values are: ` `: tab character; `,`: comma; `;`: semicolon. +* `record_delimiter` - (Optional, String) The string inserted between log records as a separator. The possible values are: ` +`: newline character; ` `: tab character; `,`: comma. +* `record_prefix` - (Optional, String) A string to prepend to each log record. +* `record_suffix` - (Optional, String) A string to append to each log record. 
+ +The `s3` object supports the following: + +* `access_id` - (Required, String) The Access Key ID used to access the bucket. +* `access_key` - (Required, String) The secret key used to access the bucket. +* `bucket` - (Required, String) Bucket name and log storage directory, for example: `your_bucket_name/EO-logs/`. If this directory does not exist in the bucket, it will be created automatically. +* `endpoint` - (Required, String) URLs that do not include bucket names or paths, for example: `https://storage.googleapis.com`, `https://s3.ap-northeast-2.amazonaws.com`, `https://cos.ap-nanjing.myqcloud.com`. +* `region` - (Required, String) The region where the bucket is located, for example: ap-northeast-2. +* `compress_type` - (Optional, String) Data compression type, the values are: gzip: gzip compression. If it is not filled in, compression is not enabled. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - ID of the resource. +* `task_id` - Real-time log delivery task ID. + + +## Import + +teo teo_realtime_log_delivery can be imported using the id, e.g. + +``` +terraform import tencentcloud_teo_realtime_log_delivery.teo_realtime_log_delivery zoneId#taskId +``` + diff --git a/website/tencentcloud.erb b/website/tencentcloud.erb index 013fb9d099..44aaca6deb 100644 --- a/website/tencentcloud.erb +++ b/website/tencentcloud.erb @@ -4874,6 +4874,9 @@
  • tencentcloud_teo_ownership_verify
  • +
  • + tencentcloud_teo_realtime_log_delivery +
  • tencentcloud_teo_rule_engine