From d1bdb19294b460ca65ba8be84ee14c37c77b2b63 Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Fri, 9 Aug 2024 11:52:13 -0400 Subject: [PATCH 01/47] Extract service and environment attribute from incoming telemetry (#762) --- plugins/processors/awsentity/config.go | 8 +- plugins/processors/awsentity/factory.go | 7 +- plugins/processors/awsentity/processor.go | 127 +++++++++++++++++- .../processors/awsentity/processor_test.go | 121 ++++++++++++++++- 4 files changed, 253 insertions(+), 10 deletions(-) diff --git a/plugins/processors/awsentity/config.go b/plugins/processors/awsentity/config.go index de7724c13b..b2b8ddde51 100644 --- a/plugins/processors/awsentity/config.go +++ b/plugins/processors/awsentity/config.go @@ -7,7 +7,13 @@ import ( "go.opentelemetry.io/collector/component" ) -type Config struct{} +type Config struct { + // ScrapeDatapointAttribute determines if the processor should scrape OTEL datapoint + // attributes for entity related information. This option is mainly used for components + // that emit all attributes to datapoint level instead of resource level. All telegraf + // plugins have this behavior. + ScrapeDatapointAttribute bool `mapstructure:"scrape_datapoint_attribute,omitempty"` +} // Verify Config implements Processor interface. 
var _ component.Config = (*Config)(nil) diff --git a/plugins/processors/awsentity/factory.go b/plugins/processors/awsentity/factory.go index 1372032ecb..32887ffa6a 100644 --- a/plugins/processors/awsentity/factory.go +++ b/plugins/processors/awsentity/factory.go @@ -5,6 +5,7 @@ package awsentity import ( "context" + "errors" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" @@ -38,7 +39,11 @@ func createMetricsProcessor( cfg component.Config, nextConsumer consumer.Metrics, ) (processor.Metrics, error) { - metricsProcessor := newAwsEntityProcessor(set.Logger) + processorConfig, ok := cfg.(*Config) + if !ok { + return nil, errors.New("configuration parsing error") + } + metricsProcessor := newAwsEntityProcessor(processorConfig, set.Logger) return processorhelper.NewMetricsProcessor( ctx, diff --git a/plugins/processors/awsentity/processor.go b/plugins/processors/awsentity/processor.go index 9814e4abba..746c49885e 100644 --- a/plugins/processors/awsentity/processor.go +++ b/plugins/processors/awsentity/processor.go @@ -7,16 +7,22 @@ import ( "context" "strings" + "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" + semconv "go.opentelemetry.io/collector/semconv/v1.22.0" "go.uber.org/zap" "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" ) const ( - attributeAwsLogGroupNames = "aws.log.group.names" - attributeDeploymentEnvironment = "deployment.environment" - attributeServiceName = "service.name" + attributeAwsLogGroupNames = "aws.log.group.names" + attributeDeploymentEnvironment = "deployment.environment" + attributeServiceName = "service.name" + attributeService = "Service" + attributeEntityServiceName = "aws.entity.service.name" + attributeEntityDeploymentEnvironment = "aws.entity.deployment.environment" + EMPTY = "" ) // exposed as a variable for unit testing @@ -32,11 +38,13 @@ var addToEntityStore = func(logGroupName entitystore.LogGroupName, serviceName s // 
deployment.environment resource attributes set, then adds the association between the log group(s) and the // service/environment names to the entitystore extension. type awsEntityProcessor struct { + config *Config logger *zap.Logger } -func newAwsEntityProcessor(logger *zap.Logger) *awsEntityProcessor { +func newAwsEntityProcessor(config *Config, logger *zap.Logger) *awsEntityProcessor { return &awsEntityProcessor{ + config: config, logger: logger, } } @@ -49,13 +57,25 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric serviceName, _ := resourceAttrs.Get(attributeServiceName) environmentName, _ := resourceAttrs.Get(attributeDeploymentEnvironment) - if logGroupNames.Str() == "" || (serviceName.Str() == "" && environmentName.Str() == "") { + entityServiceName := getServiceAttributes(resourceAttrs) + entityEnvironmentName := environmentName.Str() + if (entityServiceName == EMPTY || entityEnvironmentName == EMPTY) && p.config.ScrapeDatapointAttribute { + entityServiceName, entityEnvironmentName = p.scrapeServiceAttribute(rm.At(i).ScopeMetrics()) + } + if entityServiceName != EMPTY { + resourceAttrs.PutStr(attributeEntityServiceName, entityServiceName) + } + if entityEnvironmentName != EMPTY { + resourceAttrs.PutStr(attributeEntityDeploymentEnvironment, entityEnvironmentName) + } + + if logGroupNames.Str() == EMPTY || (serviceName.Str() == EMPTY && environmentName.Str() == EMPTY) { continue } logGroupNamesSlice := strings.Split(logGroupNames.Str(), "&") for _, logGroupName := range logGroupNamesSlice { - if logGroupName == "" { + if logGroupName == EMPTY { continue } addToEntityStore(entitystore.LogGroupName(logGroupName), serviceName.Str(), environmentName.Str()) @@ -64,3 +84,98 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric return md, nil } + +// scrapeServiceAttribute expands the datapoint attributes and search for +// service name and environment attributes. 
This is only used for components +// that only emit attributes on datapoint level. +func (p *awsEntityProcessor) scrapeServiceAttribute(scopeMetric pmetric.ScopeMetricsSlice) (string, string) { + entityServiceName := EMPTY + entityEnvironmentName := EMPTY + for j := 0; j < scopeMetric.Len(); j++ { + metric := scopeMetric.At(j).Metrics() + for k := 0; k < metric.Len(); k++ { + if entityServiceName != EMPTY && entityEnvironmentName != EMPTY { + return entityServiceName, entityEnvironmentName + } + m := metric.At(k) + switch m.Type() { + case pmetric.MetricTypeGauge: + dps := m.Gauge().DataPoints() + for l := 0; l < dps.Len(); l++ { + dpService := getServiceAttributes(dps.At(l).Attributes()) + if dpService != EMPTY { + entityServiceName = dpService + } + if dpEnvironment, ok := dps.At(l).Attributes().Get(semconv.AttributeDeploymentEnvironment); ok { + entityEnvironmentName = dpEnvironment.Str() + } + } + case pmetric.MetricTypeSum: + dps := m.Sum().DataPoints() + for l := 0; l < dps.Len(); l++ { + dpService := getServiceAttributes(dps.At(l).Attributes()) + if dpService != EMPTY { + entityServiceName = dpService + } + if dpEnvironment, ok := dps.At(l).Attributes().Get(semconv.AttributeDeploymentEnvironment); ok { + entityEnvironmentName = dpEnvironment.Str() + } + } + case pmetric.MetricTypeHistogram: + dps := m.Histogram().DataPoints() + for l := 0; l < dps.Len(); l++ { + dpService := getServiceAttributes(dps.At(l).Attributes()) + if dpService != EMPTY { + entityServiceName = dpService + } + if dpEnvironment, ok := dps.At(l).Attributes().Get(semconv.AttributeDeploymentEnvironment); ok { + entityEnvironmentName = dpEnvironment.Str() + } + } + case pmetric.MetricTypeExponentialHistogram: + dps := m.ExponentialHistogram().DataPoints() + for l := 0; l < dps.Len(); l++ { + dpService := getServiceAttributes(dps.At(l).Attributes()) + if dpService != EMPTY { + entityServiceName = dpService + } + if dpEnvironment, ok := 
dps.At(l).Attributes().Get(semconv.AttributeDeploymentEnvironment); ok { + entityEnvironmentName = dpEnvironment.Str() + } + } + case pmetric.MetricTypeSummary: + dps := m.Summary().DataPoints() + for l := 0; l < dps.Len(); l++ { + dpService := getServiceAttributes(dps.At(l).Attributes()) + if dpService != EMPTY { + entityServiceName = dpService + } + if dpEnvironment, ok := dps.At(l).Attributes().Get(semconv.AttributeDeploymentEnvironment); ok { + entityEnvironmentName = dpEnvironment.Str() + } + } + default: + p.logger.Debug("Ignore unknown metric type", zap.String("type", m.Type().String())) + } + + } + } + return entityServiceName, entityEnvironmentName +} + +// getServiceAttributes prioritize service name retrieval based on +// following attribute priority +// 1. service.name +// 2. Service +// Service is needed because Container Insights mainly uses Service as +// attribute for customer workflows +// https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Container-Insights-metrics-EKS.html +func getServiceAttributes(p pcommon.Map) string { + if serviceName, ok := p.Get(semconv.AttributeServiceName); ok { + return serviceName.Str() + } + if serviceName, ok := p.Get(attributeService); ok { + return serviceName.Str() + } + return EMPTY +} diff --git a/plugins/processors/awsentity/processor_test.go b/plugins/processors/awsentity/processor_test.go index d785352a23..223c8b423b 100644 --- a/plugins/processors/awsentity/processor_test.go +++ b/plugins/processors/awsentity/processor_test.go @@ -40,9 +40,9 @@ func newAddToMockEntityStore(rs *mockEntityStore) func(entitystore.LogGroupName, } } -func TestProcessMetrics(t *testing.T) { +func TestProcessMetricsLogGroupAssociation(t *testing.T) { logger, _ := zap.NewDevelopment() - p := newAwsEntityProcessor(logger) + p := newAwsEntityProcessor(&Config{}, logger) ctx := context.Background() // empty metrics, no action @@ -125,6 +125,110 @@ func TestProcessMetrics(t *testing.T) { } } +func 
TestProcessMetricsResourceAttributeScraping(t *testing.T) { + logger, _ := zap.NewDevelopment() + ctx := context.Background() + tests := []struct { + name string + metrics pmetric.Metrics + want map[string]any + }{ + { + name: "EmptyMetrics", + metrics: pmetric.NewMetrics(), + want: map[string]any{}, + }, + { + name: "ResourceAttributeServiceNameOnly", + metrics: generateMetrics(attributeServiceName, "test-service"), + want: map[string]any{ + attributeEntityServiceName: "test-service", + attributeServiceName: "test-service", + }, + }, + { + name: "ResourceAttributeEnvironmentOnly", + metrics: generateMetrics(attributeDeploymentEnvironment, "test-environment"), + want: map[string]any{ + attributeEntityDeploymentEnvironment: "test-environment", + attributeDeploymentEnvironment: "test-environment", + }, + }, + { + name: "ResourceAttributeServiceNameAndEnvironment", + metrics: generateMetrics(attributeServiceName, "test-service", attributeDeploymentEnvironment, "test-environment"), + want: map[string]any{ + attributeEntityServiceName: "test-service", + attributeEntityDeploymentEnvironment: "test-environment", + attributeServiceName: "test-service", + attributeDeploymentEnvironment: "test-environment", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := newAwsEntityProcessor(&Config{}, logger) + _, err := p.processMetrics(ctx, tt.metrics) + assert.NoError(t, err) + rm := tt.metrics.ResourceMetrics() + if rm.Len() > 0 { + assert.Equal(t, tt.want, rm.At(0).Resource().Attributes().AsRaw()) + } + }) + } +} + +func TestProcessMetricsDatapointAttributeScraping(t *testing.T) { + logger, _ := zap.NewDevelopment() + ctx := context.Background() + tests := []struct { + name string + metrics pmetric.Metrics + want map[string]any + }{ + { + name: "EmptyMetrics", + metrics: pmetric.NewMetrics(), + want: map[string]any{}, + }, + { + name: "DatapointAttributeServiceNameOnly", + metrics: generateDatapointMetrics(attributeServiceName, 
"test-service"), + want: map[string]any{ + attributeEntityServiceName: "test-service", + }, + }, + { + name: "DatapointAttributeEnvironmentOnly", + metrics: generateDatapointMetrics(attributeDeploymentEnvironment, "test-environment"), + want: map[string]any{ + attributeEntityDeploymentEnvironment: "test-environment", + }, + }, + { + name: "DatapointAttributeServiceNameAndEnvironment", + metrics: generateDatapointMetrics(attributeServiceName, "test-service", attributeDeploymentEnvironment, "test-environment"), + want: map[string]any{ + attributeEntityServiceName: "test-service", + attributeEntityDeploymentEnvironment: "test-environment", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := newAwsEntityProcessor(&Config{ScrapeDatapointAttribute: true}, logger) + _, err := p.processMetrics(ctx, tt.metrics) + assert.NoError(t, err) + rm := tt.metrics.ResourceMetrics() + if rm.Len() > 0 { + assert.Equal(t, tt.want, rm.At(0).Resource().Attributes().AsRaw()) + } + }) + } +} + func generateMetrics(resourceAttrs ...string) pmetric.Metrics { md := pmetric.NewMetrics() generateResource(md, resourceAttrs...) @@ -138,9 +242,22 @@ func generateMetricsWithTwoResources() pmetric.Metrics { return md } +func generateDatapointMetrics(datapointAttrs ...string) pmetric.Metrics { + md := pmetric.NewMetrics() + generateDatapoints(md, datapointAttrs...) 
+ return md +} + func generateResource(md pmetric.Metrics, resourceAttrs ...string) { attrs := md.ResourceMetrics().AppendEmpty().Resource().Attributes() for i := 0; i < len(resourceAttrs); i += 2 { attrs.PutStr(resourceAttrs[i], resourceAttrs[i+1]) } } + +func generateDatapoints(md pmetric.Metrics, datapointAttrs ...string) { + attrs := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty().SetEmptyGauge().DataPoints().AppendEmpty().Attributes() + for i := 0; i < len(datapointAttrs); i += 2 { + attrs.PutStr(datapointAttrs[i], datapointAttrs[i+1]) + } +} From 76e2a9dfdbd9a2a4a3e704fb2bf6218c604fbf7f Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Wed, 14 Aug 2024 11:36:50 -0400 Subject: [PATCH 02/47] Scrape EKS related entity attributes from Application Signals (#776) --- plugins/processors/awsentity/config.go | 5 + .../eksattributescraper.go | 101 ++++++ .../eksattributescraper_test.go | 310 ++++++++++++++++++ .../entityattributes/entityattributes.go | 13 + plugins/processors/awsentity/processor.go | 33 +- .../processors/awsentity/processor_test.go | 25 +- .../appsignals_and_eks_config.yaml | 4 +- .../appsignals_fallback_and_eks_config.yaml | 4 +- .../appsignals_over_fallback_config.yaml | 4 +- .../otel/processor/awsentity/translator.go | 30 +- .../processor/awsentity/translator_test.go | 48 +++ 11 files changed, 547 insertions(+), 30 deletions(-) create mode 100644 plugins/processors/awsentity/internal/eksattributescraper/eksattributescraper.go create mode 100644 plugins/processors/awsentity/internal/eksattributescraper/eksattributescraper_test.go create mode 100644 plugins/processors/awsentity/internal/entityattributes/entityattributes.go create mode 100644 translator/translate/otel/processor/awsentity/translator_test.go diff --git a/plugins/processors/awsentity/config.go b/plugins/processors/awsentity/config.go index b2b8ddde51..2d2aafab8e 100644 --- 
a/plugins/processors/awsentity/config.go +++ b/plugins/processors/awsentity/config.go @@ -13,6 +13,11 @@ type Config struct { // that emit all attributes to datapoint level instead of resource level. All telegraf // plugins have this behavior. ScrapeDatapointAttribute bool `mapstructure:"scrape_datapoint_attribute,omitempty"` + // ClusterName can be used to explicitly provide the Cluster's Name for scenarios where it's not + // possible to auto-detect it using EC2 tags. + ClusterName string `mapstructure:"cluster_name,omitempty"` + // Mode is the platform that the component is being used on, such as EKS + Mode string `mapstructure:"mode,omitempty"` } // Verify Config implements Processor interface. diff --git a/plugins/processors/awsentity/internal/eksattributescraper/eksattributescraper.go b/plugins/processors/awsentity/internal/eksattributescraper/eksattributescraper.go new file mode 100644 index 0000000000..6c2f1c3c29 --- /dev/null +++ b/plugins/processors/awsentity/internal/eksattributescraper/eksattributescraper.go @@ -0,0 +1,101 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package eksattributescraper + +import ( + "go.opentelemetry.io/collector/pdata/pcommon" + semconv "go.opentelemetry.io/collector/semconv/v1.22.0" + + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/internal/entityattributes" +) + +var ( + clusterAllowlist = []string{ + semconv.AttributeK8SClusterName, + } + + namespaceAllowlist = []string{ + semconv.AttributeK8SNamespaceName, + } + + workloadAllowlist = []string{ + semconv.AttributeK8SDeploymentName, + semconv.AttributeK8SDaemonSetName, + semconv.AttributeK8SStatefulSetName, + semconv.AttributeK8SReplicaSetName, + semconv.AttributeK8SContainerName, + } + nodeAllowlist = []string{ + semconv.AttributeK8SNodeName, + } +) + +type eksattributescraper struct { + Cluster string + Namespace string + Workload string + Node string +} + +func NewEKSAttributeScraper(clusterName string) *eksattributescraper { + return &eksattributescraper{ + Cluster: clusterName, + } +} + +func (e *eksattributescraper) Scrape(rm pcommon.Resource) { + resourceAttrs := rm.Attributes() + e.scrapeNamespace(resourceAttrs) + e.scrapeWorkload(resourceAttrs) + e.scrapeNode(resourceAttrs) + e.decorateEntityAttributes(resourceAttrs) + e.reset() +} + +func (e *eksattributescraper) scrapeNamespace(p pcommon.Map) { + for _, namespace := range namespaceAllowlist { + if namespaceAttr, ok := p.Get(namespace); ok { + e.Namespace = namespaceAttr.Str() + return + } + } +} + +func (e *eksattributescraper) scrapeWorkload(p pcommon.Map) { + for _, workload := range workloadAllowlist { + if workloadAttr, ok := p.Get(workload); ok { + e.Workload = workloadAttr.Str() + return + } + } + +} + +func (e *eksattributescraper) scrapeNode(p pcommon.Map) { + for _, node := range nodeAllowlist { + if nodeAttr, ok := p.Get(node); ok { + e.Node = nodeAttr.Str() + return + } + } +} + +func (e *eksattributescraper) decorateEntityAttributes(p pcommon.Map) { + addAttributeIfNonEmpty(p, entityattributes.AttributeEntityCluster, 
e.Cluster) + addAttributeIfNonEmpty(p, entityattributes.AttributeEntityNamespace, e.Namespace) + addAttributeIfNonEmpty(p, entityattributes.AttributeEntityWorkload, e.Workload) + addAttributeIfNonEmpty(p, entityattributes.AttributeEntityNode, e.Node) +} + +func (e *eksattributescraper) reset() { + *e = eksattributescraper{ + Cluster: e.Cluster, + } +} + +func addAttributeIfNonEmpty(p pcommon.Map, key string, value string) { + if value != "" { + p.PutStr(key, value) + } +} diff --git a/plugins/processors/awsentity/internal/eksattributescraper/eksattributescraper_test.go b/plugins/processors/awsentity/internal/eksattributescraper/eksattributescraper_test.go new file mode 100644 index 0000000000..884023d2ad --- /dev/null +++ b/plugins/processors/awsentity/internal/eksattributescraper/eksattributescraper_test.go @@ -0,0 +1,310 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package eksattributescraper + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + semconv "go.opentelemetry.io/collector/semconv/v1.22.0" + + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/internal/entityattributes" +) + +func TestNewEKSAttributeScraper(t *testing.T) { + scraper := NewEKSAttributeScraper("test") + assert.Equal(t, "test", scraper.Cluster) +} + +func Test_eksattributescraper_Scrape(t *testing.T) { + + tests := []struct { + name string + clusterName string + args pcommon.Resource + want pcommon.Map + }{ + { + name: "Empty", + clusterName: "", + args: pcommon.NewResource(), + want: pcommon.NewMap(), + }, + { + name: "ClusterOnly", + clusterName: "test-cluster", + args: pcommon.NewResource(), + want: getAttributeMap(map[string]any{ + entityattributes.AttributeEntityCluster: "test-cluster", + }), + }, + { + name: "AllAttributes", + clusterName: "test-cluster", + args: 
generateResourceMetrics(semconv.AttributeK8SNamespaceName, "test-namespace", semconv.AttributeK8SDeploymentName, "test-workload", semconv.AttributeK8SNodeName, "test-node"), + want: getAttributeMap(map[string]any{ + semconv.AttributeK8SNamespaceName: "test-namespace", + semconv.AttributeK8SDeploymentName: "test-workload", + semconv.AttributeK8SNodeName: "test-node", + entityattributes.AttributeEntityCluster: "test-cluster", + entityattributes.AttributeEntityNamespace: "test-namespace", + entityattributes.AttributeEntityWorkload: "test-workload", + entityattributes.AttributeEntityNode: "test-node", + }), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := NewEKSAttributeScraper(tt.clusterName) + e.Scrape(tt.args) + assert.Equal(t, tt.want.AsRaw(), tt.args.Attributes().AsRaw()) + }) + } +} + +func Test_eksattributescraper_decorateEntityAttributes(t *testing.T) { + type fields struct { + Cluster string + Namespace string + Workload string + Node string + } + tests := []struct { + name string + fields fields + want pcommon.Map + }{ + { + name: "Empty", + fields: fields{}, + want: pcommon.NewMap(), + }, + { + name: "OneAttribute", + fields: fields{ + Cluster: "test-cluster", + }, + want: getAttributeMap(map[string]any{ + entityattributes.AttributeEntityCluster: "test-cluster", + }), + }, + { + name: "AllAttributes", + fields: fields{ + Cluster: "test-cluster", + Namespace: "test-namespace", + Workload: "test-workload", + Node: "test-node", + }, + want: getAttributeMap(map[string]any{ + entityattributes.AttributeEntityCluster: "test-cluster", + entityattributes.AttributeEntityNamespace: "test-namespace", + entityattributes.AttributeEntityWorkload: "test-workload", + entityattributes.AttributeEntityNode: "test-node", + }), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := pcommon.NewMap() + e := &eksattributescraper{ + Cluster: tt.fields.Cluster, + Namespace: tt.fields.Namespace, + Workload: 
tt.fields.Workload, + Node: tt.fields.Node, + } + e.decorateEntityAttributes(p) + assert.Equal(t, tt.want.AsRaw(), p.AsRaw()) + }) + } +} + +func Test_eksattributescraper_reset(t *testing.T) { + type fields struct { + Cluster string + Namespace string + Workload string + Node string + } + tests := []struct { + name string + fields fields + want *eksattributescraper + }{ + { + name: "Empty", + fields: fields{}, + want: &eksattributescraper{}, + }, + { + name: "ClusterExists", + fields: fields{ + Cluster: "test-cluster", + }, + want: &eksattributescraper{ + Cluster: "test-cluster", + }, + }, + { + name: "MultipleAttributeExists", + fields: fields{ + Cluster: "test-cluster", + Namespace: "test-namespace", + Workload: "test-workload", + Node: "test-node", + }, + want: &eksattributescraper{ + Cluster: "test-cluster", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &eksattributescraper{ + Cluster: tt.fields.Cluster, + Namespace: tt.fields.Namespace, + Workload: tt.fields.Workload, + Node: tt.fields.Node, + } + e.reset() + assert.Equal(t, tt.want, e) + }) + } +} + +func Test_eksattributescraper_scrapeNamespace(t *testing.T) { + tests := []struct { + name string + args pcommon.Map + want string + }{ + { + name: "Empty", + args: getAttributeMap(map[string]any{"": ""}), + want: "", + }, + { + name: "NodeExists", + args: getAttributeMap(map[string]any{semconv.AttributeK8SNamespaceName: "namespace-name"}), + want: "namespace-name", + }, + { + name: "NonmatchingNamespace", + args: getAttributeMap(map[string]any{"namespace": "namespace-name"}), + want: "", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &eksattributescraper{} + e.scrapeNamespace(tt.args) + assert.Equal(t, tt.want, e.Namespace) + }) + } +} + +func Test_eksattributescraper_scrapeNode(t *testing.T) { + tests := []struct { + name string + args pcommon.Map + want string + }{ + { + name: "Empty", + args: getAttributeMap(map[string]any{"": ""}), 
+ want: "", + }, + { + name: "NodeExists", + args: getAttributeMap(map[string]any{semconv.AttributeK8SNodeName: "node-name"}), + want: "node-name", + }, + { + name: "NonmatchingNode", + args: getAttributeMap(map[string]any{"node": "node-name"}), + want: "", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &eksattributescraper{} + e.scrapeNode(tt.args) + assert.Equal(t, tt.want, e.Node) + }) + } +} + +func Test_eksattributescraper_scrapeWorkload(t *testing.T) { + tests := []struct { + name string + args pcommon.Map + want string + }{ + { + name: "Empty", + args: getAttributeMap(map[string]any{"": ""}), + want: "", + }, + { + name: "DeploymentWorkload", + args: getAttributeMap(map[string]any{semconv.AttributeK8SDeploymentName: "test-deployment"}), + want: "test-deployment", + }, + { + name: "DaemonsetWorkload", + args: getAttributeMap(map[string]any{semconv.AttributeK8SDaemonSetName: "test-daemonset"}), + want: "test-daemonset", + }, + { + name: "StatefulSetWorkload", + args: getAttributeMap(map[string]any{semconv.AttributeK8SStatefulSetName: "test-statefulset"}), + want: "test-statefulset", + }, + { + name: "ReplicaSetWorkload", + args: getAttributeMap(map[string]any{semconv.AttributeK8SReplicaSetName: "test-replicaset"}), + want: "test-replicaset", + }, + { + name: "ContainerWorkload", + args: getAttributeMap(map[string]any{semconv.AttributeK8SContainerName: "test-container"}), + want: "test-container", + }, + { + name: "MultipleWorkloads", + args: getAttributeMap(map[string]any{ + semconv.AttributeK8SDeploymentName: "test-deployment", + semconv.AttributeK8SContainerName: "test-container"}), + want: "test-deployment", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &eksattributescraper{} + e.scrapeWorkload(tt.args) + assert.Equal(t, tt.want, e.Workload) + }) + } +} + +func getAttributeMap(attributes map[string]any) pcommon.Map { + attrMap := pcommon.NewMap() + attrMap.FromRaw(attributes) + return 
attrMap +} + +func generateResourceMetrics(resourceAttrs ...string) pcommon.Resource { + md := pmetric.NewMetrics() + generateResource(md, resourceAttrs...) + return md.ResourceMetrics().At(0).Resource() +} + +func generateResource(md pmetric.Metrics, resourceAttrs ...string) { + attrs := md.ResourceMetrics().AppendEmpty().Resource().Attributes() + for i := 0; i < len(resourceAttrs); i += 2 { + attrs.PutStr(resourceAttrs[i], resourceAttrs[i+1]) + } +} diff --git a/plugins/processors/awsentity/internal/entityattributes/entityattributes.go b/plugins/processors/awsentity/internal/entityattributes/entityattributes.go new file mode 100644 index 0000000000..7315fe11df --- /dev/null +++ b/plugins/processors/awsentity/internal/entityattributes/entityattributes.go @@ -0,0 +1,13 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package entityattributes + +const ( + AttributeEntityServiceName = "aws.entity.service.name" + AttributeEntityDeploymentEnvironment = "aws.entity.deployment.environment" + AttributeEntityCluster = "aws.entity.k8s.cluster.name" + AttributeEntityNamespace = "aws.entity.k8s.namespace.name" + AttributeEntityWorkload = "aws.entity.k8s.workload.name" + AttributeEntityNode = "aws.entity.k8s.node.name" +) diff --git a/plugins/processors/awsentity/processor.go b/plugins/processors/awsentity/processor.go index 746c49885e..af5e242a97 100644 --- a/plugins/processors/awsentity/processor.go +++ b/plugins/processors/awsentity/processor.go @@ -13,18 +13,22 @@ import ( "go.uber.org/zap" "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/internal/eksattributescraper" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/internal/entityattributes" ) const ( - attributeAwsLogGroupNames = "aws.log.group.names" - attributeDeploymentEnvironment = "deployment.environment" - attributeServiceName = "service.name" - 
attributeService = "Service" - attributeEntityServiceName = "aws.entity.service.name" - attributeEntityDeploymentEnvironment = "aws.entity.deployment.environment" - EMPTY = "" + attributeAwsLogGroupNames = "aws.log.group.names" + attributeDeploymentEnvironment = "deployment.environment" + attributeServiceName = "service.name" + attributeService = "Service" + EMPTY = "" ) +type scraper interface { + Scrape(rm pcommon.Resource) +} + // exposed as a variable for unit testing var addToEntityStore = func(logGroupName entitystore.LogGroupName, serviceName string, environmentName string) { es := entitystore.GetEntityStore() @@ -38,20 +42,23 @@ var addToEntityStore = func(logGroupName entitystore.LogGroupName, serviceName s // deployment.environment resource attributes set, then adds the association between the log group(s) and the // service/environment names to the entitystore extension. type awsEntityProcessor struct { - config *Config - logger *zap.Logger + config *Config + eksscraper scraper + logger *zap.Logger } func newAwsEntityProcessor(config *Config, logger *zap.Logger) *awsEntityProcessor { return &awsEntityProcessor{ - config: config, - logger: logger, + config: config, + eksscraper: eksattributescraper.NewEKSAttributeScraper(config.ClusterName), + logger: logger, } } func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { rm := md.ResourceMetrics() for i := 0; i < rm.Len(); i++ { + p.eksscraper.Scrape(rm.At(i).Resource()) resourceAttrs := rm.At(i).Resource().Attributes() logGroupNames, _ := resourceAttrs.Get(attributeAwsLogGroupNames) serviceName, _ := resourceAttrs.Get(attributeServiceName) @@ -63,10 +70,10 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric entityServiceName, entityEnvironmentName = p.scrapeServiceAttribute(rm.At(i).ScopeMetrics()) } if entityServiceName != EMPTY { - resourceAttrs.PutStr(attributeEntityServiceName, entityServiceName) + 
resourceAttrs.PutStr(entityattributes.AttributeEntityServiceName, entityServiceName) } if entityEnvironmentName != EMPTY { - resourceAttrs.PutStr(attributeEntityDeploymentEnvironment, entityEnvironmentName) + resourceAttrs.PutStr(entityattributes.AttributeEntityDeploymentEnvironment, entityEnvironmentName) } if logGroupNames.Str() == EMPTY || (serviceName.Str() == EMPTY && environmentName.Str() == EMPTY) { diff --git a/plugins/processors/awsentity/processor_test.go b/plugins/processors/awsentity/processor_test.go index 223c8b423b..bebd9f9205 100644 --- a/plugins/processors/awsentity/processor_test.go +++ b/plugins/processors/awsentity/processor_test.go @@ -12,6 +12,7 @@ import ( "go.uber.org/zap" "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/internal/entityattributes" ) type mockEntityStore struct { @@ -142,26 +143,26 @@ func TestProcessMetricsResourceAttributeScraping(t *testing.T) { name: "ResourceAttributeServiceNameOnly", metrics: generateMetrics(attributeServiceName, "test-service"), want: map[string]any{ - attributeEntityServiceName: "test-service", - attributeServiceName: "test-service", + entityattributes.AttributeEntityServiceName: "test-service", + attributeServiceName: "test-service", }, }, { name: "ResourceAttributeEnvironmentOnly", metrics: generateMetrics(attributeDeploymentEnvironment, "test-environment"), want: map[string]any{ - attributeEntityDeploymentEnvironment: "test-environment", - attributeDeploymentEnvironment: "test-environment", + entityattributes.AttributeEntityDeploymentEnvironment: "test-environment", + attributeDeploymentEnvironment: "test-environment", }, }, { name: "ResourceAttributeServiceNameAndEnvironment", metrics: generateMetrics(attributeServiceName, "test-service", attributeDeploymentEnvironment, "test-environment"), want: map[string]any{ - attributeEntityServiceName: "test-service", - attributeEntityDeploymentEnvironment: 
"test-environment", - attributeServiceName: "test-service", - attributeDeploymentEnvironment: "test-environment", + entityattributes.AttributeEntityServiceName: "test-service", + entityattributes.AttributeEntityDeploymentEnvironment: "test-environment", + attributeServiceName: "test-service", + attributeDeploymentEnvironment: "test-environment", }, }, } @@ -196,22 +197,22 @@ func TestProcessMetricsDatapointAttributeScraping(t *testing.T) { name: "DatapointAttributeServiceNameOnly", metrics: generateDatapointMetrics(attributeServiceName, "test-service"), want: map[string]any{ - attributeEntityServiceName: "test-service", + entityattributes.AttributeEntityServiceName: "test-service", }, }, { name: "DatapointAttributeEnvironmentOnly", metrics: generateDatapointMetrics(attributeDeploymentEnvironment, "test-environment"), want: map[string]any{ - attributeEntityDeploymentEnvironment: "test-environment", + entityattributes.AttributeEntityDeploymentEnvironment: "test-environment", }, }, { name: "DatapointAttributeServiceNameAndEnvironment", metrics: generateDatapointMetrics(attributeServiceName, "test-service", attributeDeploymentEnvironment, "test-environment"), want: map[string]any{ - attributeEntityServiceName: "test-service", - attributeEntityDeploymentEnvironment: "test-environment", + entityattributes.AttributeEntityServiceName: "test-service", + entityattributes.AttributeEntityDeploymentEnvironment: "test-environment", }, }, } diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml index fdd8cfaf5e..47377c916d 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml @@ -299,7 +299,9 @@ processors: resolvers: - name: TestCluster platform: eks - awsentity: {} + awsentity: + cluster_name: TestCluster + mode: EKS batch/containerinsights: metadata_cardinality_limit: 1000 
send_batch_max_size: 0 diff --git a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml index 9728e3a838..2636a565d5 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml @@ -299,7 +299,9 @@ processors: resolvers: - name: TestCluster platform: eks - awsentity: {} + awsentity: + cluster_name: TestCluster + mode: EKS batch/containerinsights: metadata_cardinality_limit: 1000 send_batch_max_size: 0 diff --git a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml index a0d628b79a..072c10ae60 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml @@ -299,7 +299,9 @@ processors: resolvers: - name: TestCluster platform: eks - awsentity: {} + awsentity: + cluster_name: TestCluster + mode: EKS batch/containerinsights: metadata_cardinality_limit: 1000 send_batch_max_size: 0 diff --git a/translator/translate/otel/processor/awsentity/translator.go b/translator/translate/otel/processor/awsentity/translator.go index cf887fdc9e..0ac761b24f 100644 --- a/translator/translate/otel/processor/awsentity/translator.go +++ b/translator/translate/otel/processor/awsentity/translator.go @@ -9,6 +9,9 @@ import ( "go.opentelemetry.io/collector/processor" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity" + "github.com/aws/amazon-cloudwatch-agent/translator/config" + "github.com/aws/amazon-cloudwatch-agent/translator/context" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/logs/util" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" ) @@ -28,6 +31,29 @@ func (t *translator) ID() component.ID { return 
component.NewIDWithName(t.factory.Type(), "") } -func (t *translator) Translate(_ *confmap.Conf) (component.Config, error) { - return t.factory.CreateDefaultConfig().(*awsentity.Config), nil +func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { + cfg := t.factory.CreateDefaultConfig().(*awsentity.Config) + + hostedInConfigKey := common.ConfigKey(common.LogsKey, common.MetricsCollectedKey, common.AppSignals, "hosted_in") + hostedIn, hostedInConfigured := common.GetString(conf, hostedInConfigKey) + if !hostedInConfigured { + hostedInConfigKey = common.ConfigKey(common.LogsKey, common.MetricsCollectedKey, common.AppSignalsFallback, "hosted_in") + hostedIn, hostedInConfigured = common.GetString(conf, hostedInConfigKey) + } + if common.IsAppSignalsKubernetes() { + if !hostedInConfigured { + hostedIn = util.GetClusterNameFromEc2Tagger() + } + } + + mode := context.CurrentContext().KubernetesMode() + if mode == "" { + mode = context.CurrentContext().Mode() + } + switch mode { + case config.ModeEKS: + cfg.ClusterName = hostedIn + cfg.Mode = config.ModeEKS + } + return cfg, nil } diff --git a/translator/translate/otel/processor/awsentity/translator_test.go b/translator/translate/otel/processor/awsentity/translator_test.go new file mode 100644 index 0000000000..4b4edc003f --- /dev/null +++ b/translator/translate/otel/processor/awsentity/translator_test.go @@ -0,0 +1,48 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package awsentity + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/confmap" + + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity" + "github.com/aws/amazon-cloudwatch-agent/translator/config" + "github.com/aws/amazon-cloudwatch-agent/translator/context" +) + +func TestTranslate(t *testing.T) { + context.CurrentContext().SetKubernetesMode(config.ModeEKS) + testCases := map[string]struct { + input map[string]interface{} + want *awsentity.Config + }{ + "OnlyProfile": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "app_signals": map[string]interface{}{ + "hosted_in": "test", + }, + }, + }}, + want: &awsentity.Config{ + ClusterName: "test", + Mode: config.ModeEKS, + }, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + tt := NewTranslator() + assert.Equal(t, "awsentity", tt.ID().String()) + conf := confmap.NewFromStringMap(testCase.input) + got, err := tt.Translate(conf) + assert.NoError(t, err) + assert.Equal(t, testCase.want, got) + }) + } +} From 28f7a2c194a734f093cc0289cc90b39455f5c4ce Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Fri, 16 Aug 2024 10:08:37 -0400 Subject: [PATCH 03/47] Scrape EKS related entity attributes from Container Insights (#777) --- .../eksattributescraper.go | 7 ++-- .../eksattributescraper_test.go | 35 +++++++++++++++++-- .../entityattributes/entityattributes.go | 8 +++++ 3 files changed, 43 insertions(+), 7 deletions(-) diff --git a/plugins/processors/awsentity/internal/eksattributescraper/eksattributescraper.go b/plugins/processors/awsentity/internal/eksattributescraper/eksattributescraper.go index 6c2f1c3c29..87eeb65528 100644 --- a/plugins/processors/awsentity/internal/eksattributescraper/eksattributescraper.go +++ 
b/plugins/processors/awsentity/internal/eksattributescraper/eksattributescraper.go @@ -11,12 +11,9 @@ import ( ) var ( - clusterAllowlist = []string{ - semconv.AttributeK8SClusterName, - } - namespaceAllowlist = []string{ semconv.AttributeK8SNamespaceName, + entityattributes.Namespace, } workloadAllowlist = []string{ @@ -25,9 +22,11 @@ var ( semconv.AttributeK8SStatefulSetName, semconv.AttributeK8SReplicaSetName, semconv.AttributeK8SContainerName, + entityattributes.PodName, } nodeAllowlist = []string{ semconv.AttributeK8SNodeName, + entityattributes.NodeName, } ) diff --git a/plugins/processors/awsentity/internal/eksattributescraper/eksattributescraper_test.go b/plugins/processors/awsentity/internal/eksattributescraper/eksattributescraper_test.go index 884023d2ad..809d96cddd 100644 --- a/plugins/processors/awsentity/internal/eksattributescraper/eksattributescraper_test.go +++ b/plugins/processors/awsentity/internal/eksattributescraper/eksattributescraper_test.go @@ -42,7 +42,7 @@ func Test_eksattributescraper_Scrape(t *testing.T) { }), }, { - name: "AllAttributes", + name: "AllAppSignalAttributes", clusterName: "test-cluster", args: generateResourceMetrics(semconv.AttributeK8SNamespaceName, "test-namespace", semconv.AttributeK8SDeploymentName, "test-workload", semconv.AttributeK8SNodeName, "test-node"), want: getAttributeMap(map[string]any{ @@ -55,6 +55,20 @@ func Test_eksattributescraper_Scrape(t *testing.T) { entityattributes.AttributeEntityNode: "test-node", }), }, + { + name: "AllContainerInsightsAttributes", + clusterName: "test-cluster", + args: generateResourceMetrics(entityattributes.Namespace, "test-namespace", entityattributes.PodName, "test-workload", entityattributes.NodeName, "test-node"), + want: getAttributeMap(map[string]any{ + entityattributes.Namespace: "test-namespace", + entityattributes.PodName: "test-workload", + entityattributes.NodeName: "test-node", + entityattributes.AttributeEntityCluster: "test-cluster", + 
entityattributes.AttributeEntityNamespace: "test-namespace", + entityattributes.AttributeEntityWorkload: "test-workload", + entityattributes.AttributeEntityNode: "test-node", + }), + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -187,10 +201,15 @@ func Test_eksattributescraper_scrapeNamespace(t *testing.T) { want: "", }, { - name: "NodeExists", + name: "AppSignalNodeExists", args: getAttributeMap(map[string]any{semconv.AttributeK8SNamespaceName: "namespace-name"}), want: "namespace-name", }, + { + name: "ContainerInsightsNodeExists", + args: getAttributeMap(map[string]any{entityattributes.Namespace: "namespace-name"}), + want: "namespace-name", + }, { name: "NonmatchingNamespace", args: getAttributeMap(map[string]any{"namespace": "namespace-name"}), @@ -218,10 +237,15 @@ func Test_eksattributescraper_scrapeNode(t *testing.T) { want: "", }, { - name: "NodeExists", + name: "AppsignalNodeExists", args: getAttributeMap(map[string]any{semconv.AttributeK8SNodeName: "node-name"}), want: "node-name", }, + { + name: "ContainerInsightNodeExists", + args: getAttributeMap(map[string]any{entityattributes.NodeName: "node-name"}), + want: "node-name", + }, { name: "NonmatchingNode", args: getAttributeMap(map[string]any{"node": "node-name"}), @@ -273,6 +297,11 @@ func Test_eksattributescraper_scrapeWorkload(t *testing.T) { args: getAttributeMap(map[string]any{semconv.AttributeK8SContainerName: "test-container"}), want: "test-container", }, + { + name: "ContainerInsightPodNameWorkload", + args: getAttributeMap(map[string]any{entityattributes.PodName: "test-workload"}), + want: "test-workload", + }, { name: "MultipleWorkloads", args: getAttributeMap(map[string]any{ diff --git a/plugins/processors/awsentity/internal/entityattributes/entityattributes.go b/plugins/processors/awsentity/internal/entityattributes/entityattributes.go index 7315fe11df..a12bd28027 100644 --- a/plugins/processors/awsentity/internal/entityattributes/entityattributes.go +++ 
b/plugins/processors/awsentity/internal/entityattributes/entityattributes.go @@ -11,3 +11,11 @@ const ( AttributeEntityWorkload = "aws.entity.k8s.workload.name" AttributeEntityNode = "aws.entity.k8s.node.name" ) + +// Container Insights attributes used for scraping EKS related information +const ( + NodeName = "NodeName" + Namespace = "Namespace" + // PodName in Container Insights is the workload(Deployment, Daemonset, etc) name + PodName = "PodName" +) From 33ec9e950e71e51f88a08bab3c30211a7b268cbb Mon Sep 17 00:00:00 2001 From: Jason Polanco Date: Mon, 9 Sep 2024 13:30:57 -0400 Subject: [PATCH 04/47] [Compass] Add customer specified service.name and deployment.environment fields to CWA metrics config (#794) --- translator/config/schema.json | 30 ++++++++++ .../sampleConfig/compass_linux_config.conf | 26 +++++++- .../sampleConfig/compass_linux_config.json | 54 +++++++++++++++++ .../sampleConfig/compass_linux_config.yaml | 59 +++++++++++++++++-- .../tomlConfigTemplate/tomlConfig.go | 10 ++++ translator/translate/metrics/metrics.go | 14 ++++- .../metrics_collect/collectd/collectd_test.go | 4 ++ .../collectd/ruleDeploymentEnvironment.go | 29 +++++++++ .../collectd/ruleServiceName.go | 29 +++++++++ .../statsd/ruleDeploymentEnvironment.go | 29 +++++++++ .../metrics_collect/statsd/ruleServiceName.go | 29 +++++++++ .../metrics_collect/statsd/statsd_test.go | 34 +++++++---- .../metrics/ruleDeploymentEnvironmentl.go | 29 +++++++++ .../translate/metrics/ruleServiceName.go | 28 +++++++++ 14 files changed, 383 insertions(+), 21 deletions(-) create mode 100644 translator/translate/metrics/metrics_collect/collectd/ruleDeploymentEnvironment.go create mode 100644 translator/translate/metrics/metrics_collect/collectd/ruleServiceName.go create mode 100644 translator/translate/metrics/metrics_collect/statsd/ruleDeploymentEnvironment.go create mode 100644 translator/translate/metrics/metrics_collect/statsd/ruleServiceName.go create mode 100644 
translator/translate/metrics/ruleDeploymentEnvironmentl.go create mode 100644 translator/translate/metrics/ruleServiceName.go diff --git a/translator/config/schema.json b/translator/config/schema.json index 40c9771a1f..e4d1834e8a 100644 --- a/translator/config/schema.json +++ b/translator/config/schema.json @@ -167,6 +167,16 @@ "endpoint_override": { "description": "The override endpoint to use to access cloudwatch", "$ref": "#/definitions/endpointOverrideDefinition" + }, + "service.name": { + "type": "string", + "minLength": 1, + "maxLength": 4096 + }, + "deployment.environment": { + "type": "string", + "minLength": 1, + "maxLength": 4096 } }, "additionalProperties": false, @@ -248,6 +258,16 @@ "items": { "type": "string" }, "minItems": 1, "uniqueItems": true + }, + "service.name": { + "type": "string", + "minLength": 1, + "maxLength": 4096 + }, + "deployment.environment": { + "type": "string", + "minLength": 1, + "maxLength": 4096 } }, "additionalProperties": false @@ -361,6 +381,16 @@ "items": { "type": "string" }, "minItems": 1, "uniqueItems": true + }, + "service.name": { + "type": "string", + "minLength": 1, + "maxLength": 4096 + }, + "deployment.environment": { + "type": "string", + "minLength": 1, + "maxLength": 4096 } }, "additionalProperties": false diff --git a/translator/tocwconfig/sampleConfig/compass_linux_config.conf b/translator/tocwconfig/sampleConfig/compass_linux_config.conf index ebf652fe84..01bca78370 100644 --- a/translator/tocwconfig/sampleConfig/compass_linux_config.conf +++ b/translator/tocwconfig/sampleConfig/compass_linux_config.conf @@ -45,8 +45,32 @@ service_name = "log-level-service" timezone = "UTC" + [[inputs.socket_listener]] + collectd_auth_file = "/etc/collectd/auth_file" + collectd_security_level = "encrypt" + collectd_typesdb = ["/usr/share/collectd/types.db"] + data_format = "collectd" + deployment_environment = "plugin-level-environment" + name_prefix = "collectd_" + service_address = "udp://127.0.0.1:25826" + service_name = 
"plugin-level-service" + [inputs.socket_listener.tags] + "aws:AggregationInterval" = "60s" + + [[inputs.statsd]] + deployment_environment = "agent-level-environment" + interval = "10s" + metric_separator = "_" + parse_data_dog_tags = true + service_address = ":8125" + service_name = "metric-level-service" + [inputs.statsd.tags] + "aws:AggregationInterval" = "60s" + [outputs] + [[outputs.cloudwatch]] + [[outputs.cloudwatchlogs]] endpoint_override = "https://logs-fips.us-west-2.amazonaws.com" force_flush_interval = "60s" @@ -54,4 +78,4 @@ mode = "EC2" region = "us-west-2" region_type = "ACJ" - role_arn = "log_role_arn_value_test" \ No newline at end of file + role_arn = "log_role_arn_value_test" diff --git a/translator/tocwconfig/sampleConfig/compass_linux_config.json b/translator/tocwconfig/sampleConfig/compass_linux_config.json index d7d1ba059f..b61f1884cb 100755 --- a/translator/tocwconfig/sampleConfig/compass_linux_config.json +++ b/translator/tocwconfig/sampleConfig/compass_linux_config.json @@ -44,5 +44,59 @@ }, "endpoint_override": "https://logs-fips.us-west-2.amazonaws.com", "service.name": "log-level-service" + }, + "metrics": { + "metrics_collected": { + "collectd": { + "service_address": "udp://127.0.0.1:25826", + "name_prefix": "collectd_", + "collectd_auth_file": "/etc/collectd/auth_file", + "collectd_security_level": "encrypt", + "collectd_typesdb": [ + "/usr/share/collectd/types.db" + ], + "metrics_aggregation_interval": 60, + "drop_original_metrics": [ + "collectd_drop" + ], + "service.name": "plugin-level-service", + "deployment.environment": "plugin-level-environment" + }, + "statsd": { + "service_address": ":8125", + "metrics_collection_interval": 10, + "metrics_aggregation_interval": 60, + "metric_separator": "_", + "drop_original_metrics": [ + "statsd_drop" + ] + } + }, + "append_dimensions": { + "ImageId": "${aws:ImageId}", + "InstanceId": "${aws:InstanceId}", + "InstanceType": "${aws:InstanceType}", + "AutoScalingGroupName": 
"${aws:AutoScalingGroupName}" + }, + "aggregation_dimensions": [ + [ + "ImageId" + ], + [ + "InstanceId", + "InstanceType" + ], + [ + "d1" + ], + [] + ], + "force_flush_interval": 60, + "credentials": { + "role_arn": "metrics_role_arn_value_test" + }, + "endpoint_override": "https://monitoring-fips.us-west-2.amazonaws.com", + "service.name": "metric-level-service" } + } \ No newline at end of file diff --git a/translator/tocwconfig/sampleConfig/compass_linux_config.yaml b/translator/tocwconfig/sampleConfig/compass_linux_config.yaml index be53979284..1f62ed41bd 100644 --- a/translator/tocwconfig/sampleConfig/compass_linux_config.yaml +++ b/translator/tocwconfig/sampleConfig/compass_linux_config.yaml @@ -1,21 +1,68 @@ exporters: - nop: {} + awscloudwatch: + drop_original_metrics: + collectd_drop: true + statsd_drop: true + endpoint_override: https://monitoring-fips.us-west-2.amazonaws.com + force_flush_interval: 1m0s + max_datums_per_call: 1000 + max_values_per_datum: 5000 + middleware: agenthealth/metrics + namespace: CWAgent + region: us-west-2 + resource_to_telemetry_conversion: + enabled: true + role_arn: metrics_role_arn_value_test + rollup_dimensions: + - - ImageId + - - InstanceId + - InstanceType + - - d1 + - [] extensions: + agenthealth/metrics: + is_usage_data_enabled: true + stats: + operations: + - PutMetricData + usage_flags: + mode: EC2 + region_type: ACJ entitystore: mode: ec2 region: us-west-2 +processors: + ec2tagger: + ec2_instance_tag_keys: + - AutoScalingGroupName + ec2_metadata_tags: + - ImageId + - InstanceId + - InstanceType + imds_retries: 1 + refresh_interval_seconds: 0s receivers: - nop: {} + telegraf_socket_listener: + collection_interval: 10s + initial_delay: 1s + timeout: 0s + telegraf_statsd: + collection_interval: 10s + initial_delay: 1s + timeout: 0s service: extensions: + - agenthealth/metrics - entitystore pipelines: - metrics/nop: + metrics/host: exporters: - - nop - processors: [] + - awscloudwatch + processors: + - ec2tagger 
receivers: - - nop + - telegraf_socket_listener + - telegraf_statsd telemetry: logs: development: false diff --git a/translator/tocwconfig/totomlconfig/tomlConfigTemplate/tomlConfig.go b/translator/tocwconfig/totomlconfig/tomlConfigTemplate/tomlConfig.go index 2a26ad9bb2..e0eee0978f 100644 --- a/translator/tocwconfig/totomlconfig/tomlConfigTemplate/tomlConfig.go +++ b/translator/tocwconfig/totomlconfig/tomlConfigTemplate/tomlConfig.go @@ -219,6 +219,11 @@ type ( NameOverride string `toml:"name_override"` ServiceAddress string `toml:"service_address"` Tags map[string]string + + //Customer specified service.name + ServiceName string `toml:"service_name"` + //Customer specified deployment.environment + DeploymentEnvironment string `toml:"deployment_environment"` } statsdConfig struct { @@ -228,6 +233,11 @@ type ( ParseDataDogTags bool `toml:"parse_data_dog_tags"` ServiceAddress string `toml:"service_address"` Tags map[string]string + + //Customer specified service.name + ServiceName string `toml:"service_name"` + //Customer specified deployment.environment + DeploymentEnvironment string `toml:"deployment_environment"` } swapConfig struct { diff --git a/translator/translate/metrics/metrics.go b/translator/translate/metrics/metrics.go index b6848248bd..7ef97a151a 100755 --- a/translator/translate/metrics/metrics.go +++ b/translator/translate/metrics/metrics.go @@ -16,7 +16,13 @@ import ( type Rule translator.Rule -var ChildRule = map[string]Rule{} +var ( + ChildRule = map[string]Rule{} + GlobalMetricConfig = Metrics{} + + serviceName ServiceName + deploymentEnvironment DeploymentEnvironment +) const ( SectionKey = "metrics" @@ -33,6 +39,8 @@ func RegisterRule(fieldName string, r Rule) { } type Metrics struct { + ServiceName string + DeploymentEnvironment string } func (m *Metrics) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { @@ -40,6 +48,10 @@ func (m *Metrics) ApplyRule(input interface{}) (returnKey string, returnVal inte result := 
map[string]interface{}{} outputPlugInfo := map[string]interface{}{} + //Apply Environment and ServiceName rules + serviceName.ApplyRule(im[SectionKey]) + deploymentEnvironment.ApplyRule(im[SectionKey]) + //Check if this plugin exist in the input instance //If not, not process if _, ok := im[SectionKey]; !ok { diff --git a/translator/translate/metrics/metrics_collect/collectd/collectd_test.go b/translator/translate/metrics/metrics_collect/collectd/collectd_test.go index a0ad8d3865..2407eabac1 100644 --- a/translator/translate/metrics/metrics_collect/collectd/collectd_test.go +++ b/translator/translate/metrics/metrics_collect/collectd/collectd_test.go @@ -34,6 +34,8 @@ func TestCollectD_HappyCase(t *testing.T) { "collectd_security_level": "none", "collectd_typesdb": []interface{}{"/usr/share/collectd/types.db", "/custom_location/types.db"}, "tags": map[string]interface{}{"aws:AggregationInterval": "30s"}, + "service_name": "", + "deployment_environment": "", }, } @@ -57,6 +59,8 @@ func TestCollectD_MinimumConfig(t *testing.T) { "collectd_security_level": "encrypt", "collectd_typesdb": []interface{}{"/usr/share/collectd/types.db"}, "tags": map[string]interface{}{"aws:AggregationInterval": "60s"}, + "service_name": "", + "deployment_environment": "", }, } diff --git a/translator/translate/metrics/metrics_collect/collectd/ruleDeploymentEnvironment.go b/translator/translate/metrics/metrics_collect/collectd/ruleDeploymentEnvironment.go new file mode 100644 index 0000000000..ab627fe041 --- /dev/null +++ b/translator/translate/metrics/metrics_collect/collectd/ruleDeploymentEnvironment.go @@ -0,0 +1,29 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package collected + +import ( + "github.com/aws/amazon-cloudwatch-agent/translator" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/metrics" +) + +type DeploymentEnvironment struct { +} + +const SectionkeyDeploymentEnvironment = "deployment.environment" + +func (obj *DeploymentEnvironment) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { + _, returnVal = translator.DefaultCase(SectionkeyDeploymentEnvironment, "", input) + returnKey = "deployment_environment" + + if returnVal == "" { + returnVal = metrics.GlobalMetricConfig.DeploymentEnvironment + } + return +} + +func init() { + obj := new(DeploymentEnvironment) + RegisterRule(SectionkeyDeploymentEnvironment, obj) +} diff --git a/translator/translate/metrics/metrics_collect/collectd/ruleServiceName.go b/translator/translate/metrics/metrics_collect/collectd/ruleServiceName.go new file mode 100644 index 0000000000..412cc920d1 --- /dev/null +++ b/translator/translate/metrics/metrics_collect/collectd/ruleServiceName.go @@ -0,0 +1,29 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package collected + +import ( + "github.com/aws/amazon-cloudwatch-agent/translator" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/metrics" +) + +type ServiceName struct { +} + +const SectionkeyServicename = "service.name" + +func (obj *ServiceName) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { + _, returnVal = translator.DefaultCase(SectionkeyServicename, "", input) + returnKey = "service_name" + + if returnVal == "" { + returnVal = metrics.GlobalMetricConfig.ServiceName + } + return +} + +func init() { + obj := new(ServiceName) + RegisterRule(SectionkeyServicename, obj) +} diff --git a/translator/translate/metrics/metrics_collect/statsd/ruleDeploymentEnvironment.go b/translator/translate/metrics/metrics_collect/statsd/ruleDeploymentEnvironment.go new file mode 100644 index 0000000000..48b3b38fc8 --- /dev/null +++ b/translator/translate/metrics/metrics_collect/statsd/ruleDeploymentEnvironment.go @@ -0,0 +1,29 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package statsd + +import ( + "github.com/aws/amazon-cloudwatch-agent/translator" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/metrics" +) + +type DeploymentEnvironment struct { +} + +const SectionkeyDeploymentEnvironment = "deployment.environment" + +func (obj *DeploymentEnvironment) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { + _, returnVal = translator.DefaultCase(SectionkeyDeploymentEnvironment, "", input) + returnKey = "deployment_environment" + + if returnVal == "" { + returnVal = metrics.GlobalMetricConfig.DeploymentEnvironment + } + return +} + +func init() { + obj := new(DeploymentEnvironment) + RegisterRule(SectionkeyDeploymentEnvironment, obj) +} diff --git a/translator/translate/metrics/metrics_collect/statsd/ruleServiceName.go b/translator/translate/metrics/metrics_collect/statsd/ruleServiceName.go new file mode 100644 index 0000000000..d6f1b88eb5 --- /dev/null +++ b/translator/translate/metrics/metrics_collect/statsd/ruleServiceName.go @@ -0,0 +1,29 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package statsd + +import ( + "github.com/aws/amazon-cloudwatch-agent/translator" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/metrics" +) + +type ServiceName struct { +} + +const SectionkeyServicename = "service.name" + +func (obj *ServiceName) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { + _, returnVal = translator.DefaultCase(SectionkeyServicename, "", input) + returnKey = "service_name" + + if returnVal == "" { + returnVal = metrics.GlobalMetricConfig.ServiceName + } + return +} + +func init() { + obj := new(ServiceName) + RegisterRule(SectionkeyServicename, obj) +} diff --git a/translator/translate/metrics/metrics_collect/statsd/statsd_test.go b/translator/translate/metrics/metrics_collect/statsd/statsd_test.go index 9b878277bf..9d5705a1ee 100644 --- a/translator/translate/metrics/metrics_collect/statsd/statsd_test.go +++ b/translator/translate/metrics/metrics_collect/statsd/statsd_test.go @@ -30,6 +30,8 @@ func TestStatsD_HappyCase(t *testing.T) { "interval": "5s", "parse_data_dog_tags": true, "tags": map[string]interface{}{"aws:AggregationInterval": "30s"}, + "service_name": "", + "deployment_environment": "", }, } @@ -46,10 +48,12 @@ func TestStatsD_MinimumConfig(t *testing.T) { expect := []interface{}{ map[string]interface{}{ - "service_address": ":8125", - "interval": "10s", - "parse_data_dog_tags": true, - "tags": map[string]interface{}{"aws:AggregationInterval": "60s"}, + "service_address": ":8125", + "interval": "10s", + "parse_data_dog_tags": true, + "tags": map[string]interface{}{"aws:AggregationInterval": "60s"}, + "service_name": "", + "deployment_environment": "", }, } @@ -68,10 +72,12 @@ func TestStatsD_DisableAggregation(t *testing.T) { expect := []interface{}{ map[string]interface{}{ - "service_address": ":8125", - "interval": "10s", - "parse_data_dog_tags": true, - "tags": map[string]interface{}{"aws:StorageResolution": "true"}, + "service_address": ":8125", + 
"interval": "10s", + "parse_data_dog_tags": true, + "tags": map[string]interface{}{"aws:StorageResolution": "true"}, + "service_name": "", + "deployment_environment": "", }, } @@ -90,11 +96,13 @@ func TestStatsD_MetricSeparator(t *testing.T) { expect := []interface{}{ map[string]interface{}{ - "service_address": ":8125", - "interval": "10s", - "parse_data_dog_tags": true, - "tags": map[string]interface{}{"aws:AggregationInterval": "60s"}, - "metric_separator": ".", + "service_address": ":8125", + "interval": "10s", + "parse_data_dog_tags": true, + "tags": map[string]interface{}{"aws:AggregationInterval": "60s"}, + "metric_separator": ".", + "service_name": "", + "deployment_environment": "", }, } diff --git a/translator/translate/metrics/ruleDeploymentEnvironmentl.go b/translator/translate/metrics/ruleDeploymentEnvironmentl.go new file mode 100644 index 0000000000..5d1d6de55b --- /dev/null +++ b/translator/translate/metrics/ruleDeploymentEnvironmentl.go @@ -0,0 +1,29 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package metrics + +import ( + "github.com/aws/amazon-cloudwatch-agent/translator" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" +) + +type DeploymentEnvironment struct { +} + +func (f *DeploymentEnvironment) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { + _, returnVal = translator.DefaultCase("deployment.environment", "", input) + returnKey = "deployment_environment" + + if returnVal == "" { + returnVal = agent.Global_Config.DeploymentEnvironment + } + + // Set global metric deployment environment + GlobalMetricConfig.DeploymentEnvironment = returnVal.(string) + return +} + +func init() { + RegisterRule("deployment.environment", new(DeploymentEnvironment)) +} diff --git a/translator/translate/metrics/ruleServiceName.go b/translator/translate/metrics/ruleServiceName.go new file mode 100644 index 0000000000..5728008eea --- /dev/null +++ b/translator/translate/metrics/ruleServiceName.go @@ -0,0 +1,28 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package metrics + +import ( + "github.com/aws/amazon-cloudwatch-agent/translator" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" +) + +type ServiceName struct{} + +func (f *ServiceName) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { + _, returnVal = translator.DefaultCase("service.name", "", input) + returnKey = "service_name" + + if returnVal == "" { + returnVal = agent.Global_Config.ServiceName + } + + // Set global metric service name + GlobalMetricConfig.ServiceName = returnVal.(string) + return +} + +func init() { + RegisterRule("service.name", new(ServiceName)) +} From 33ad73e6c22497166db2cab6359a2110b37de7de Mon Sep 17 00:00:00 2001 From: POOJA REDDY NATHALA Date: Wed, 11 Sep 2024 16:50:05 -0400 Subject: [PATCH 05/47] Create a web server in agent to send pod Service mapping (#792) --- extension/entitystore/config.go | 11 +- extension/entitystore/eksInfo.go | 37 ++++ extension/entitystore/eksInfo_test.go | 93 ++++++++++ extension/entitystore/extension.go | 24 ++- extension/entitystore/extension_test.go | 53 ++++++ extension/server/config.go | 14 ++ extension/server/config_test.go | 18 ++ extension/server/extension.go | 88 ++++++++++ extension/server/extension_test.go | 159 ++++++++++++++++++ extension/server/factory.go | 32 ++++ extension/server/factory_test.go | 33 ++++ go.mod | 23 ++- go.sum | 43 ++++- plugins/processors/awsentity/config.go | 4 +- plugins/processors/awsentity/processor.go | 29 +++- .../processors/awsentity/processor_test.go | 73 +++++++- service/defaultcomponents/components.go | 2 + service/defaultcomponents/components_test.go | 6 +- .../appsignals_and_eks_config.yaml | 6 +- .../appsignals_and_k8s_config.yaml | 7 +- .../appsignals_fallback_and_eks_config.yaml | 6 +- .../appsignals_over_fallback_config.yaml | 6 +- .../otel/extension/entitystore/translator.go | 1 + .../extension/entitystore/translator_test.go | 42 +++-- .../otel/extension/server/translator.go 
| 39 +++++ .../otel/extension/server/translator_test.go | 35 ++++ .../otel/processor/awsentity/translator.go | 8 +- .../processor/awsentity/translator_test.go | 4 +- translator/translate/otel/translate_otel.go | 4 + 29 files changed, 854 insertions(+), 46 deletions(-) create mode 100644 extension/entitystore/eksInfo.go create mode 100644 extension/entitystore/eksInfo_test.go create mode 100644 extension/server/config.go create mode 100644 extension/server/config_test.go create mode 100644 extension/server/extension.go create mode 100644 extension/server/extension_test.go create mode 100644 extension/server/factory.go create mode 100644 extension/server/factory_test.go create mode 100644 translator/translate/otel/extension/server/translator.go create mode 100644 translator/translate/otel/extension/server/translator_test.go diff --git a/extension/entitystore/config.go b/extension/entitystore/config.go index c866540bb0..47c588a98e 100644 --- a/extension/entitystore/config.go +++ b/extension/entitystore/config.go @@ -8,11 +8,12 @@ import ( ) type Config struct { - Mode string `mapstructure:"mode"` - Region string `mapstructure:"region"` - Profile string `mapstructure:"profile,omitempty"` - RoleARN string `mapstructure:"role_arn,omitempty"` - Filename string `mapstructure:"shared_credential_file,omitempty"` + Mode string `mapstructure:"mode"` + KubernetesMode string `mapstructure:"kubernetes_mode,omitempty"` + Region string `mapstructure:"region"` + Profile string `mapstructure:"profile,omitempty"` + RoleARN string `mapstructure:"role_arn,omitempty"` + Filename string `mapstructure:"shared_credential_file,omitempty"` } var _ component.Config = (*Config)(nil) diff --git a/extension/entitystore/eksInfo.go b/extension/entitystore/eksInfo.go new file mode 100644 index 0000000000..371f07f765 --- /dev/null +++ b/extension/entitystore/eksInfo.go @@ -0,0 +1,37 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package entitystore + +import "go.uber.org/zap" + +type ServiceEnvironment struct { + ServiceName string + Environment string +} + +type eksInfo struct { + logger *zap.Logger + podToServiceEnvMap map[string]ServiceEnvironment +} + +func newEKSInfo(logger *zap.Logger) *eksInfo { + podToServiceEnvMap := make(map[string]ServiceEnvironment) + return &eksInfo{ + logger: logger, + podToServiceEnvMap: podToServiceEnvMap, + } +} + +func (eks *eksInfo) AddPodServiceEnvironmentMapping(podName string, serviceName string, environmentName string) { + if eks.podToServiceEnvMap != nil { + eks.podToServiceEnvMap[podName] = ServiceEnvironment{ + ServiceName: serviceName, + Environment: environmentName, + } + } +} + +func (eks *eksInfo) GetPodServiceEnvironmentMapping() map[string]ServiceEnvironment { + return eks.podToServiceEnvMap +} diff --git a/extension/entitystore/eksInfo_test.go b/extension/entitystore/eksInfo_test.go new file mode 100644 index 0000000000..11804efef6 --- /dev/null +++ b/extension/entitystore/eksInfo_test.go @@ -0,0 +1,93 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package entitystore + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" +) + +func TestAddPodServiceEnvironmentMapping(t *testing.T) { + tests := []struct { + name string + want map[string]ServiceEnvironment + podName string + service string + env string + mapNil bool + }{ + { + name: "AddPodWithServiceMapping", + want: map[string]ServiceEnvironment{ + "test-pod": { + ServiceName: "test-service", + }, + }, + podName: "test-pod", + service: "test-service", + }, + { + name: "AddPodWithServiceEnvMapping", + want: map[string]ServiceEnvironment{ + "test-pod": { + ServiceName: "test-service", + Environment: "test-env", + }, + }, + podName: "test-pod", + service: "test-service", + env: "test-env", + }, + { + name: "AddWhenPodToServiceMapIsNil", + mapNil: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + logger, _ := zap.NewDevelopment() + ei := newEKSInfo(logger) + if tt.mapNil { + ei.podToServiceEnvMap = nil + } + ei.AddPodServiceEnvironmentMapping(tt.podName, tt.service, tt.env) + assert.Equal(t, tt.want, ei.podToServiceEnvMap) + }) + } +} + +func TestGetPodServiceEnvironmentMapping(t *testing.T) { + tests := []struct { + name string + want map[string]ServiceEnvironment + addMap bool + }{ + { + name: "GetPodWithServiceEnvMapping", + want: map[string]ServiceEnvironment{ + "test-pod": { + ServiceName: "test-service", + Environment: "test-env", + }, + }, + addMap: true, + }, + { + name: "GetWhenPodToServiceMapIsEmpty", + want: map[string]ServiceEnvironment{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + logger, _ := zap.NewDevelopment() + ei := newEKSInfo(logger) + if tt.addMap { + ei.AddPodServiceEnvironmentMapping("test-pod", "test-service", "test-env") + } + assert.Equal(t, tt.want, ei.GetPodServiceEnvironmentMapping()) + }) + } +} diff --git a/extension/entitystore/extension.go b/extension/entitystore/extension.go index 
c18e395649..71ec423d2c 100644 --- a/extension/entitystore/extension.go +++ b/extension/entitystore/extension.go @@ -44,10 +44,6 @@ type serviceProviderInterface interface { logFileServiceAttribute(LogFileGlob, LogGroupName) ServiceAttribute } -type eksInfo struct { - ClusterName string -} - type EntityStore struct { logger *zap.Logger config *Config @@ -56,11 +52,13 @@ type EntityStore struct { // mode should be EC2, ECS, EKS, and K8S mode string + kubernetesMode string + // ec2Info stores information about EC2 instances such as instance ID and // auto scaling groups ec2Info ec2Info - // ekeInfo stores information about EKS such as cluster + // eksInfo stores information about EKS such as pod to service Env map eksInfo eksInfo // serviceprovider stores information about possible service names @@ -85,6 +83,7 @@ func (e *EntityStore) Start(ctx context.Context, host component.Host) error { e.done = make(chan struct{}) e.metadataprovider = getMetaDataProvider() e.mode = e.config.Mode + e.kubernetesMode = e.config.KubernetesMode ec2CredentialConfig := &configaws.CredentialConfig{ Profile: e.config.Profile, Filename: e.config.Filename, @@ -94,6 +93,9 @@ func (e *EntityStore) Start(ctx context.Context, host component.Host) error { e.ec2Info = *newEC2Info(e.metadataprovider, getEC2Provider, ec2CredentialConfig, e.done, e.config.Region, e.logger) go e.ec2Info.initEc2Info() } + if e.kubernetesMode != "" { + e.eksInfo = *newEKSInfo(e.logger) + } e.serviceprovider = newServiceProvider(e.mode, e.config.Region, &e.ec2Info, e.metadataprovider, getEC2Provider, ec2CredentialConfig, e.done) go e.serviceprovider.startServiceProvider() return nil @@ -108,6 +110,10 @@ func (e *EntityStore) Mode() string { return e.mode } +func (e *EntityStore) KubernetesMode() string { + return e.kubernetesMode +} + func (e *EntityStore) EKSInfo() eksInfo { return e.eksInfo } @@ -162,6 +168,14 @@ func (e *EntityStore) AddServiceAttrEntryForLogGroup(logGroupName LogGroupName, }) } +func (e *EntityStore) 
AddPodServiceEnvironmentMapping(podName string, serviceName string, environmentName string) { + e.eksInfo.AddPodServiceEnvironmentMapping(podName, serviceName, environmentName) +} + +func (e *EntityStore) GetPodServiceEnvironmentMapping() map[string]ServiceEnvironment { + return e.eksInfo.GetPodServiceEnvironmentMapping() +} + func (e *EntityStore) createAttributeMap() map[string]*string { attributeMap := make(map[string]*string) diff --git a/extension/entitystore/extension_test.go b/extension/entitystore/extension_test.go index 3469ee8979..e41287d654 100644 --- a/extension/entitystore/extension_test.go +++ b/extension/entitystore/extension_test.go @@ -17,6 +17,7 @@ import ( "github.com/aws/aws-sdk-go/service/sts/stsiface" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "go.uber.org/zap" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatchlogs" @@ -146,6 +147,28 @@ func TestEntityStore_Mode(t *testing.T) { } } +func TestEntityStore_KubernetesMode(t *testing.T) { + tests := []struct { + name string + k8sModeInput string + want string + }{ + {name: "modeEKS", k8sModeInput: config.ModeEKS, want: config.ModeEKS}, + {name: "modeK8sEc2", k8sModeInput: config.ModeK8sEC2, want: config.ModeK8sEC2}, + {name: "modeK8sOnPrem", k8sModeInput: config.ModeK8sOnPrem, want: config.ModeK8sOnPrem}, + {name: "modeNotSet", want: ""}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &EntityStore{} + e.kubernetesMode = tt.k8sModeInput + if got := e.KubernetesMode(); got != tt.want { + t.Errorf("Kubernetes Mode() = %v, want %v", got, tt.want) + } + }) + } +} + func TestEntityStore_createAttributeMaps(t *testing.T) { type fields struct { ec2Info ec2Info @@ -381,3 +404,33 @@ func TestEntityStore_addServiceAttrEntryForLogGroup(t *testing.T) { sp.AssertExpectations(t) } + +func TestEntityStore_AddPodServiceEnvironmentMapping(t *testing.T) { + + logger, _ := 
zap.NewProduction() + eks := *newEKSInfo(logger) + e := EntityStore{eksInfo: eks} + e.AddPodServiceEnvironmentMapping("pod1", "service1", "env1") + expectedMap := map[string]ServiceEnvironment{ + "pod1": { + ServiceName: "service1", + Environment: "env1", + }, + } + assert.Equal(t, expectedMap, e.eksInfo.GetPodServiceEnvironmentMapping()) +} + +func TestEntityStore_GetPodServiceEnvironmentMapping(t *testing.T) { + + logger, _ := zap.NewProduction() + eks := *newEKSInfo(logger) + e := EntityStore{eksInfo: eks} + expectedMap := map[string]ServiceEnvironment{ + "pod1": { + ServiceName: "service1", + Environment: "env1", + }, + } + e.eksInfo.AddPodServiceEnvironmentMapping("pod1", "service1", "env1") + assert.Equal(t, expectedMap, e.GetPodServiceEnvironmentMapping()) +} diff --git a/extension/server/config.go b/extension/server/config.go new file mode 100644 index 0000000000..e2aad3043c --- /dev/null +++ b/extension/server/config.go @@ -0,0 +1,14 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package server + +import ( + "go.opentelemetry.io/collector/component" +) + +type Config struct { + ListenAddress string `mapstructure:"listen_addr"` +} + +var _ component.Config = (*Config)(nil) diff --git a/extension/server/config_test.go b/extension/server/config_test.go new file mode 100644 index 0000000000..e9862d2f6a --- /dev/null +++ b/extension/server/config_test.go @@ -0,0 +1,18 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package server + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/confmap" +) + +func TestUnmarshalDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + assert.NoError(t, confmap.New().Unmarshal(cfg)) + assert.Equal(t, factory.CreateDefaultConfig(), cfg) +} diff --git a/extension/server/extension.go b/extension/server/extension.go new file mode 100644 index 0000000000..e68d655207 --- /dev/null +++ b/extension/server/extension.go @@ -0,0 +1,88 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package server + +import ( + "context" + "net/http" + + "github.com/gin-gonic/gin" + jsoniter "github.com/json-iterator/go" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/extension" + "go.uber.org/zap" + + "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" +) + +type Server struct { + logger *zap.Logger + config *Config + server *http.Server + jsonMarshaller jsoniter.API +} + +var _ extension.Extension = (*Server)(nil) + +func (s *Server) setRouter(router *gin.Engine) { + router.Use(gin.Recovery()) + //disabling the gin default behavior of encoding/decoding the request path + router.UseRawPath = true + router.UnescapePathValues = false + router.GET("/kubernetes/pod-to-service-env-map", s.k8sPodToServiceMapHandler) +} + +func NewServer(logger *zap.Logger, config *Config) *Server { + s := &Server{ + logger: logger, + config: config, + jsonMarshaller: jsoniter.ConfigCompatibleWithStandardLibrary, + } + gin.SetMode(gin.ReleaseMode) + router := gin.New() + s.setRouter(router) + s.server = &http.Server{ + Addr: config.ListenAddress, + Handler: router, + } + return s +} + +func (s *Server) Start(context.Context, component.Host) error { + s.logger.Info("Starting server ...") + go func() { + err := s.server.ListenAndServe() + if err != nil { + 
s.logger.Error("failed to serve and listen", zap.Error(err)) + } + }() + return nil +} + +func (s *Server) Shutdown(ctx context.Context) error { + s.logger.Info("Shutting down server...") + return s.server.Shutdown(ctx) +} + +func (s *Server) k8sPodToServiceMapHandler(c *gin.Context) { + podServiceEnvironmentMap := getPodServiceEnvironmentMapping() + s.jsonHandler(c.Writer, podServiceEnvironmentMap) +} + +// Added this for testing purpose +var getPodServiceEnvironmentMapping = func() map[string]entitystore.ServiceEnvironment { + es := entitystore.GetEntityStore() + if es != nil { + return es.GetPodServiceEnvironmentMapping() + } + return map[string]entitystore.ServiceEnvironment{} +} + +func (s *Server) jsonHandler(w http.ResponseWriter, data interface{}) { + w.Header().Set("Content-Type", "application/json") + err := s.jsonMarshaller.NewEncoder(w).Encode(data) + if err != nil { + s.logger.Error("failed to encode data for http response", zap.Error(err)) + } +} diff --git a/extension/server/extension_test.go b/extension/server/extension_test.go new file mode 100644 index 0000000000..aa335aef6a --- /dev/null +++ b/extension/server/extension_test.go @@ -0,0 +1,159 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package server + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" +) + +type mockEntityStore struct { + podToServiceEnvironmentMap map[string]entitystore.ServiceEnvironment +} + +func newMockEntityStore() *mockEntityStore { + return &mockEntityStore{ + podToServiceEnvironmentMap: make(map[string]entitystore.ServiceEnvironment), + } +} + +func (es *mockEntityStore) AddPodServiceEnvironmentMapping(podName string, service string, env string) { + es.podToServiceEnvironmentMap[podName] = entitystore.ServiceEnvironment{ + ServiceName: service, + Environment: env, + } +} + +func newMockGetPodServiceEnvironmentMapping(es *mockEntityStore) func() map[string]entitystore.ServiceEnvironment { + return func() map[string]entitystore.ServiceEnvironment { + return es.podToServiceEnvironmentMap + } +} +func TestNewServer(t *testing.T) { + logger, _ := zap.NewProduction() + config := &Config{ + ListenAddress: ":8080", + } + server := NewServer(logger, config) + + assert.NotNil(t, server) + assert.Equal(t, config, server.config) + assert.NotNil(t, server.logger) + assert.NotNil(t, server.server) +} + +func TestK8sPodToServiceMapHandler(t *testing.T) { + logger, _ := zap.NewProduction() + config := &Config{ + ListenAddress: ":8080", + } + server := NewServer(logger, config) + + expectedMap := map[string]entitystore.ServiceEnvironment{ + "pod1": { + ServiceName: "service1", + Environment: "env1", + }, + "pod2": { + ServiceName: "service2", + Environment: "env2", + }, + } + es := newMockEntityStore() + getPodServiceEnvironmentMapping = newMockGetPodServiceEnvironmentMapping(es) + es.AddPodServiceEnvironmentMapping("pod1", "service1", "env1") + es.AddPodServiceEnvironmentMapping("pod2", "service2", "env2") + w := httptest.NewRecorder() + c, _ := 
gin.CreateTestContext(w) + server.k8sPodToServiceMapHandler(c) + + assert.Equal(t, http.StatusOK, w.Code) + + var actualMap map[string]entitystore.ServiceEnvironment + err := json.Unmarshal(w.Body.Bytes(), &actualMap) + assert.NoError(t, err) + assert.Equal(t, expectedMap, actualMap) +} + +func TestJSONHandler(t *testing.T) { + + tests := []struct { + name string + expectedData map[string]string + }{ + { + name: "EmptyData", + expectedData: map[string]string{}, + }, + { + name: "NonEmptyData", + expectedData: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + logger, _ := zap.NewProduction() + config := &Config{ + ListenAddress: ":8080", + } + server := NewServer(logger, config) + w := httptest.NewRecorder() + server.jsonHandler(w, tt.expectedData) + assert.Equal(t, http.StatusOK, w.Code) + assert.Equal(t, "application/json", w.Header().Get("Content-Type")) + + var actualData map[string]string + err := json.Unmarshal(w.Body.Bytes(), &actualData) + assert.NoError(t, err) + assert.Equal(t, tt.expectedData, actualData) + }) + } +} + +func TestServerStartAndShutdown(t *testing.T) { + logger, _ := zap.NewProduction() + config := &Config{ + ListenAddress: ":8080", + } + server := NewServer(logger, config) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + err := server.Start(ctx, nil) + assert.NoError(t, err) + + time.Sleep(1 * time.Second) + + // Make a request to the server to check if it's running + resp, err := http.Get("http://localhost:8080") + assert.NoError(t, err) + defer resp.Body.Close() + + // Check if the response status code is 404 (default route) + assert.Equal(t, http.StatusNotFound, resp.StatusCode) + + err = server.Shutdown(ctx) + assert.NoError(t, err) + + // Wait for the server to shut down + time.Sleep(1 * time.Second) + + // Make a request to the server to check if it's shutdown + _, err = 
http.Get("http://localhost:8080") + assert.Error(t, err) +} diff --git a/extension/server/factory.go b/extension/server/factory.go new file mode 100644 index 0000000000..9699366849 --- /dev/null +++ b/extension/server/factory.go @@ -0,0 +1,32 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package server + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/extension" +) + +var ( + TypeStr, _ = component.NewType("server") +) + +func NewFactory() extension.Factory { + return extension.NewFactory( + TypeStr, + createDefaultConfig, + createExtension, + component.StabilityLevelAlpha, + ) +} + +func createDefaultConfig() component.Config { + return &Config{} +} + +func createExtension(_ context.Context, settings extension.CreateSettings, cfg component.Config) (extension.Extension, error) { + return NewServer(settings.Logger, cfg.(*Config)), nil +} diff --git a/extension/server/factory_test.go b/extension/server/factory_test.go new file mode 100644 index 0000000000..aad68e8c49 --- /dev/null +++ b/extension/server/factory_test.go @@ -0,0 +1,33 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package server + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/extension/extensiontest" +) + +func TestCreateDefaultConfig(t *testing.T) { + cfg := NewFactory().CreateDefaultConfig() + assert.Equal(t, &Config{}, cfg) + assert.NoError(t, componenttest.CheckConfigStruct(cfg)) +} + +func TestCreateExtension(t *testing.T) { + cfg := &Config{} + got, err := NewFactory().CreateExtension(context.Background(), extensiontest.NewNopCreateSettings(), cfg) + assert.NoError(t, err) + assert.NotNil(t, got) +} + +func TestCreateExtensionWithConfig(t *testing.T) { + cfg := &Config{ListenAddress: ":8080"} + got, err := NewFactory().CreateExtension(context.Background(), extensiontest.NewNopCreateSettings(), cfg) + assert.NoError(t, err) + assert.NotNil(t, got) +} diff --git a/go.mod b/go.mod index 38cece21c1..cb47e79378 100644 --- a/go.mod +++ b/go.mod @@ -94,6 +94,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.30.2 github.com/bigkevmcd/go-configparser v0.0.0-20200217161103-d137835d2579 github.com/deckarep/golang-set/v2 v2.3.1 + github.com/gin-gonic/gin v1.10.0 github.com/go-kit/log v0.2.1 github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31 github.com/gobwas/glob v0.2.3 @@ -104,6 +105,7 @@ require ( github.com/influxdata/telegraf v0.0.0-00010101000000-000000000000 github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 github.com/jellydator/ttlcache/v3 v3.2.0 + github.com/json-iterator/go v1.1.12 github.com/kardianos/service v1.2.1 // Keep this pinned to v1.2.1. 
v1.2.2 causes the agent to not register as a service on Windows github.com/kr/pretty v0.3.1 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c @@ -143,12 +145,14 @@ require ( go.opentelemetry.io/collector/consumer v0.103.0 go.opentelemetry.io/collector/exporter v0.103.0 go.opentelemetry.io/collector/exporter/debugexporter v0.103.0 + go.opentelemetry.io/collector/exporter/nopexporter v0.103.0 go.opentelemetry.io/collector/extension v0.103.0 go.opentelemetry.io/collector/otelcol v0.103.0 go.opentelemetry.io/collector/pdata v1.10.0 go.opentelemetry.io/collector/processor v0.103.0 go.opentelemetry.io/collector/processor/batchprocessor v0.103.0 go.opentelemetry.io/collector/receiver v0.103.0 + go.opentelemetry.io/collector/receiver/nopreceiver v0.103.0 go.opentelemetry.io/collector/receiver/otlpreceiver v0.103.0 go.opentelemetry.io/collector/semconv v0.103.0 go.opentelemetry.io/collector/service v0.103.0 @@ -210,11 +214,15 @@ require ( github.com/benbjohnson/clock v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect + github.com/bytedance/sonic v1.11.6 // indirect + github.com/bytedance/sonic/loader v0.1.1 // indirect github.com/caio/go-tdigest v3.1.0+incompatible // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/checkpoint-restore/go-criu/v5 v5.3.0 // indirect github.com/cilium/ebpf v0.11.0 // indirect + github.com/cloudwego/base64x v0.1.4 // indirect + github.com/cloudwego/iasm v0.2.0 // indirect github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 // indirect github.com/containerd/cgroups/v3 v3.0.3 // indirect github.com/containerd/console v1.0.3 // indirect @@ -240,6 +248,8 @@ require ( github.com/fatih/color v1.16.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.3 // indirect + github.com/gin-contrib/sse v0.1.0 // 
indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -247,9 +257,13 @@ require ( github.com/go-openapi/jsonpointer v0.20.2 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect github.com/go-openapi/swag v0.22.9 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.20.0 // indirect github.com/go-resty/resty/v2 v2.12.0 // indirect github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect github.com/go-zookeeper/zk v1.0.3 // indirect + github.com/goccy/go-json v0.10.2 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect @@ -292,15 +306,16 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect github.com/karrick/godirwalk v1.17.0 // indirect github.com/klauspost/compress v1.17.8 // indirect + github.com/klauspost/cpuid/v2 v2.2.7 // indirect github.com/knadh/koanf v1.5.0 // indirect github.com/knadh/koanf/v2 v2.1.1 // indirect github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b // indirect github.com/kr/text v0.2.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/leodido/go-syslog/v4 v4.1.0 // indirect + github.com/leodido/go-urn v1.4.0 // indirect github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b // indirect github.com/linode/linodego v1.33.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect @@ -350,6 +365,7 @@ require ( github.com/openshift/api v3.9.0+incompatible // indirect github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 // indirect github.com/ovh/go-ovh v1.4.3 // indirect + 
github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/philhofer/fwd v1.1.1 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect @@ -376,6 +392,8 @@ require ( github.com/tinylib/msgp v1.1.6 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/ugorji/go/codec v1.2.12 // indirect github.com/valyala/fastjson v1.6.4 // indirect github.com/vishvananda/netlink v1.2.1-beta.2 // indirect github.com/vishvananda/netns v0.0.4 // indirect @@ -399,11 +417,9 @@ require ( go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.103.0 // indirect go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.103.0 // indirect go.opentelemetry.io/collector/connector v0.103.0 // indirect - go.opentelemetry.io/collector/exporter/nopexporter v0.103.0 // indirect go.opentelemetry.io/collector/extension/auth v0.103.0 // indirect go.opentelemetry.io/collector/featuregate v1.10.0 // indirect go.opentelemetry.io/collector/pdata/testdata v0.103.0 // indirect - go.opentelemetry.io/collector/receiver/nopreceiver v0.103.0 // indirect go.opentelemetry.io/contrib/config v0.7.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect @@ -423,6 +439,7 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.27.0 // indirect go.opentelemetry.io/otel/trace v1.27.0 // indirect go.opentelemetry.io/proto/otlp v1.2.0 // indirect + golang.org/x/arch v0.8.0 // indirect golang.org/x/crypto v0.24.0 // indirect golang.org/x/mod v0.17.0 // indirect golang.org/x/oauth2 v0.20.0 // indirect diff --git a/go.sum b/go.sum index 52096524a4..0ccd2044ea 100644 --- a/go.sum +++ b/go.sum @@ -341,6 +341,10 @@ github.com/blang/semver/v4 v4.0.0 
h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bmatcuk/doublestar/v3 v3.0.0 h1:TQtVPlDnAYwcrVNB2JiGuMc++H5qzWZd9PhkNo5WyHI= github.com/bmatcuk/doublestar/v3 v3.0.0/go.mod h1:6PcTVMw80pCY1RVuoqu3V++99uQB3vsSYKPTd8AWA0k= +github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0= +github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4= +github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM= +github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= github.com/caio/go-tdigest v3.1.0+incompatible h1:uoVMJ3Q5lXmVLCCqaMGHLBWnbGoN6Lpu7OAUPR60cds= github.com/caio/go-tdigest v3.1.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI= github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= @@ -370,6 +374,10 @@ github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5P github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg= github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= +github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y= +github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= +github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg= +github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go 
v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -503,11 +511,15 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= -github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= +github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= +github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU= +github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= github.com/go-asn1-ber/asn1-ber v1.5.1 h1:pDbRAunXzIUXfx4CB2QJFv5IuPiuoW+sWvr/Us009o8= github.com/go-asn1-ber/asn1-ber v1.5.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -554,6 +566,14 @@ github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZC github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= github.com/go-ping/ping 
v0.0.0-20210201095549-52eed920f98c h1:fWdhUpCuoeNIPiQ+pkAmmERYEjhVx5/cbVGK7T99OkI= github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c/go.mod h1:35JbSyV/BYqHwwRA6Zr1uVDm1637YlNOU61wI797NPI= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8= +github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-resty/resty/v2 v2.12.0 h1:rsVL8P90LFvkUYq/V5BTVe203WfRIU4gvcf+yfzJzGA= @@ -915,12 +935,14 @@ github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= -github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.7 
h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= +github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/knadh/koanf v1.5.0 h1:q2TSd/3Pyc/5yP9ldIrSdIz26MCcyNQzW0pEAugLPNs= github.com/knadh/koanf v1.5.0/go.mod h1:Hgyjp4y8v44hpZtPzs7JZfRAW5AhN7KfZcwv1RYggDs= github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= +github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -942,6 +964,8 @@ github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 h1:X/79QL0b4YJVO5+O github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy+E1PZ3C9ctsPRlvOPAFPkCNlcPBDkt0N3U= github.com/leodido/go-syslog/v4 v4.1.0 h1:Wsl194qyWXr7V6DrGWC3xmxA9Ra6XgWO+toNt2fmCaI= github.com/leodido/go-syslog/v4 v4.1.0/go.mod h1:eJ8rUfDN5OS6dOkCOBYlg2a+hbAg6pJa99QXXgMrd98= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b h1:11UHH39z1RhZ5dc4y4r/4koJo6IYFgTRMe/LlwRTEw0= github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= github.com/linode/linodego v1.33.0 h1:cX2FYry7r6CA1ujBMsdqiM4VhvIQtnWsOuVblzfBhCw= @@ -1178,6 +1202,8 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= 
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= @@ -1336,6 +1362,7 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= @@ -1361,12 +1388,16 @@ github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hM github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= 
github.com/twmb/murmur3 v1.1.7 h1:ULWBiM04n/XoN3YMSJ6Z2pHDFLf+MeIVQU71ZPrvbWg= github.com/twmb/murmur3 v1.1.7/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= +github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= github.com/vapourismo/knx-go v0.0.0-20211128234507-8198fa17db36 h1:JBj2CqnFwBhI3XsdMNn9MjKvehog+p5QZihotqq0Zuo= @@ -1571,6 +1602,9 @@ go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc= +golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -2100,6 +2134,7 @@ modernc.org/strutil 
v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY= modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/plugins/processors/awsentity/config.go b/plugins/processors/awsentity/config.go index 2d2aafab8e..8916376bda 100644 --- a/plugins/processors/awsentity/config.go +++ b/plugins/processors/awsentity/config.go @@ -16,8 +16,8 @@ type Config struct { // ClusterName can be used to explicitly provide the Cluster's Name for scenarios where it's not // possible to auto-detect it using EC2 tags. ClusterName string `mapstructure:"cluster_name,omitempty"` - // Mode is the platform that the component is being used on, such as EKS - Mode string `mapstructure:"mode,omitempty"` + // KubernetesMode + KubernetesMode string `mapstructure:"kubernetes_mode,omitempty"` } // Verify Config implements Processor interface. 
diff --git a/plugins/processors/awsentity/processor.go b/plugins/processors/awsentity/processor.go index af5e242a97..d4cdec45f8 100644 --- a/plugins/processors/awsentity/processor.go +++ b/plugins/processors/awsentity/processor.go @@ -15,6 +15,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/internal/eksattributescraper" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/internal/entityattributes" + "github.com/aws/amazon-cloudwatch-agent/translator/config" ) const ( @@ -38,6 +39,14 @@ var addToEntityStore = func(logGroupName entitystore.LogGroupName, serviceName s es.AddServiceAttrEntryForLogGroup(logGroupName, serviceName, environmentName) } +var addPodToServiceEnvironmentMap = func(podName string, serviceName string, environmentName string) { + es := entitystore.GetEntityStore() + if es == nil { + return + } + es.AddPodServiceEnvironmentMapping(podName, serviceName, environmentName) +} + // awsEntityProcessor looks for metrics that have the aws.log.group.names and either the service.name or // deployment.environment resource attributes set, then adds the association between the log group(s) and the // service/environment names to the entitystore extension. 
@@ -58,7 +67,9 @@ func newAwsEntityProcessor(config *Config, logger *zap.Logger) *awsEntityProcess func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { rm := md.ResourceMetrics() for i := 0; i < rm.Len(); i++ { - p.eksscraper.Scrape(rm.At(i).Resource()) + if p.config.KubernetesMode == config.ModeEKS { + p.eksscraper.Scrape(rm.At(i).Resource()) + } resourceAttrs := rm.At(i).Resource().Attributes() logGroupNames, _ := resourceAttrs.Get(attributeAwsLogGroupNames) serviceName, _ := resourceAttrs.Get(attributeServiceName) @@ -75,7 +86,12 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric if entityEnvironmentName != EMPTY { resourceAttrs.PutStr(entityattributes.AttributeEntityDeploymentEnvironment, entityEnvironmentName) } - + if p.config.KubernetesMode != "" { + fullPodName := scrapeK8sPodName(resourceAttrs) + if fullPodName != EMPTY && (entityServiceName != EMPTY || entityEnvironmentName != EMPTY) { + addPodToServiceEnvironmentMap(fullPodName, entityServiceName, entityEnvironmentName) + } + } if logGroupNames.Str() == EMPTY || (serviceName.Str() == EMPTY && environmentName.Str() == EMPTY) { continue } @@ -186,3 +202,12 @@ func getServiceAttributes(p pcommon.Map) string { } return EMPTY } + +// scrapeK8sPodName gets the k8s pod name which is full pod name from the resource attributes +// This is needed to map the pod to the service/environment +func scrapeK8sPodName(p pcommon.Map) string { + if podAttr, ok := p.Get(semconv.AttributeK8SPodName); ok { + return podAttr.Str() + } + return EMPTY +} diff --git a/plugins/processors/awsentity/processor_test.go b/plugins/processors/awsentity/processor_test.go index bebd9f9205..475766389a 100644 --- a/plugins/processors/awsentity/processor_test.go +++ b/plugins/processors/awsentity/processor_test.go @@ -9,14 +9,17 @@ import ( "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/pdata/pmetric" + semconv 
"go.opentelemetry.io/collector/semconv/v1.22.0" "go.uber.org/zap" "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/internal/entityattributes" + "github.com/aws/amazon-cloudwatch-agent/translator/config" ) type mockEntityStore struct { - entries []entityStoreEntry + entries []entityStoreEntry + podToServiceEnvironmentMap map[string]entitystore.ServiceEnvironment } type entityStoreEntry struct { @@ -27,7 +30,14 @@ type entityStoreEntry struct { func newMockEntityStore() *mockEntityStore { return &mockEntityStore{ - entries: make([]entityStoreEntry, 0), + entries: make([]entityStoreEntry, 0), + podToServiceEnvironmentMap: make(map[string]entitystore.ServiceEnvironment), + } +} + +func newMockAddPodServiceEnvironmentMapping(es *mockEntityStore) func(string, string, string) { + return func(podName string, serviceName string, deploymentName string) { + es.podToServiceEnvironmentMap[podName] = entitystore.ServiceEnvironment{ServiceName: serviceName, Environment: deploymentName} } } @@ -126,6 +136,65 @@ func TestProcessMetricsLogGroupAssociation(t *testing.T) { } } +func TestProcessMetricsForAddingPodToServiceMap(t *testing.T) { + logger, _ := zap.NewDevelopment() + p := newAwsEntityProcessor(&Config{}, logger) + ctx := context.Background() + tests := []struct { + name string + metrics pmetric.Metrics + k8sMode string + want map[string]entitystore.ServiceEnvironment + }{ + { + name: "WithPodNameAndServiceName", + metrics: generateMetrics(attributeServiceName, "test-service", semconv.AttributeK8SPodName, "cloudwatch-agent-adhgaf"), + want: map[string]entitystore.ServiceEnvironment{"cloudwatch-agent-adhgaf": {ServiceName: "test-service"}}, + k8sMode: config.ModeEKS, + }, + { + name: "WithPodNameAndServiceEnvironmentName", + metrics: generateMetrics(attributeServiceName, "test-service", semconv.AttributeK8SPodName, "cloudwatch-agent-adhgaf", attributeDeploymentEnvironment, "test-deployment"), + 
want: map[string]entitystore.ServiceEnvironment{"cloudwatch-agent-adhgaf": {ServiceName: "test-service", Environment: "test-deployment"}}, + k8sMode: config.ModeK8sEC2, + }, + { + name: "WithPodNameAndAttributeService", + metrics: generateMetrics(attributeService, "test-service", semconv.AttributeK8SPodName, "cloudwatch-agent-adhgaf"), + want: map[string]entitystore.ServiceEnvironment{"cloudwatch-agent-adhgaf": {ServiceName: "test-service"}}, + k8sMode: config.ModeK8sOnPrem, + }, + { + name: "WithPodNameAndEmptyServiceAndEnvironmentName", + metrics: generateMetrics(semconv.AttributeK8SPodName, "cloudwatch-agent-adhgaf"), + k8sMode: config.ModeEKS, + want: map[string]entitystore.ServiceEnvironment{}, + }, + { + name: "WithEmptyPodName", + metrics: generateMetrics(), + k8sMode: config.ModeEKS, + want: map[string]entitystore.ServiceEnvironment{}, + }, + { + name: "WithEmptyKubernetesMode", + metrics: generateMetrics(semconv.AttributeK8SPodName, "cloudwatch-agent-adhgaf"), + want: map[string]entitystore.ServiceEnvironment{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + es := newMockEntityStore() + addPodToServiceEnvironmentMap = newMockAddPodServiceEnvironmentMapping(es) + p.config.KubernetesMode = tt.k8sMode + _, err := p.processMetrics(ctx, tt.metrics) + assert.NoError(t, err) + assert.Equal(t, tt.want, es.podToServiceEnvironmentMap) + }) + } +} + func TestProcessMetricsResourceAttributeScraping(t *testing.T) { logger, _ := zap.NewDevelopment() ctx := context.Background() diff --git a/service/defaultcomponents/components.go b/service/defaultcomponents/components.go index 72a6c8b9c8..591f316e53 100644 --- a/service/defaultcomponents/components.go +++ b/service/defaultcomponents/components.go @@ -32,6 +32,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth" "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" + "github.com/aws/amazon-cloudwatch-agent/extension/server" 
"github.com/aws/amazon-cloudwatch-agent/plugins/outputs/cloudwatch" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsapplicationsignals" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity" @@ -86,6 +87,7 @@ func Factories() (otelcol.Factories, error) { agenthealth.NewFactory(), awsproxy.NewFactory(), entitystore.NewFactory(), + server.NewFactory(), ); err != nil { return otelcol.Factories{}, err } diff --git a/service/defaultcomponents/components_test.go b/service/defaultcomponents/components_test.go index a80f202860..db83ec95ab 100644 --- a/service/defaultcomponents/components_test.go +++ b/service/defaultcomponents/components_test.go @@ -14,7 +14,7 @@ const ( receiversCount = 7 processorCount = 11 exportersCount = 6 - extensionsCount = 3 + extensionsCount = 4 ) func TestComponents(t *testing.T) { @@ -73,6 +73,10 @@ func TestComponents(t *testing.T) { assert.Len(t, extensions, extensionsCount) agenthealthType, _ := component.NewType("agenthealth") awsproxyType, _ := component.NewType("awsproxy") + entitystore, _ := component.NewType("entitystore") + server, _ := component.NewType("server") assert.NotNil(t, extensions[agenthealthType]) assert.NotNil(t, extensions[awsproxyType]) + assert.NotNil(t, extensions[entitystore]) + assert.NotNil(t, extensions[server]) } diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml index 3a9f40fc9b..1af07fe757 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml @@ -288,6 +288,9 @@ extensions: entitystore: mode: ec2 region: us-east-1 + kubernetes_mode: EKS + server: + listen_addr: :4311 processors: awsapplicationsignals: limiter: @@ -301,7 +304,7 @@ processors: platform: eks awsentity: cluster_name: TestCluster - mode: EKS + kubernetes_mode: EKS batch/containerinsights: metadata_cardinality_limit: 1000 
send_batch_max_size: 0 @@ -668,6 +671,7 @@ service: - agenthealth/traces - agenthealth/logs - entitystore + - server pipelines: metrics/application_signals: exporters: diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml index 92f78be8e3..fa9ce39e16 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml @@ -288,6 +288,9 @@ extensions: entitystore: mode: ec2 region: us-east-1 + kubernetes_mode: K8sEC2 + server: + listen_addr: :4311 processors: awsapplicationsignals: limiter: @@ -299,7 +302,8 @@ processors: resolvers: - name: TestCluster platform: k8s - awsentity: {} + awsentity: + kubernetes_mode: K8sEC2 batch/containerinsights: metadata_cardinality_limit: 1000 send_batch_max_size: 0 @@ -646,6 +650,7 @@ service: - agenthealth/traces - agenthealth/logs - entitystore + - server pipelines: metrics/application_signals: exporters: diff --git a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml index 768c825f7b..acde2a898e 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml @@ -288,6 +288,9 @@ extensions: entitystore: mode: ec2 region: us-east-1 + kubernetes_mode: EKS + server: + listen_addr: :4311 processors: awsapplicationsignals: limiter: @@ -301,7 +304,7 @@ processors: platform: eks awsentity: cluster_name: TestCluster - mode: EKS + kubernetes_mode: EKS batch/containerinsights: metadata_cardinality_limit: 1000 send_batch_max_size: 0 @@ -668,6 +671,7 @@ service: - agenthealth/traces - agenthealth/logs - entitystore + - server pipelines: metrics/application_signals: exporters: diff --git a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml 
b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml index 70706db222..d386b7f6ce 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml @@ -288,6 +288,9 @@ extensions: entitystore: mode: ec2 region: us-east-1 + kubernetes_mode: EKS + server: + listen_addr: :4311 processors: awsapplicationsignals: limiter: @@ -301,7 +304,7 @@ processors: platform: eks awsentity: cluster_name: TestCluster - mode: EKS + kubernetes_mode: EKS batch/containerinsights: metadata_cardinality_limit: 1000 send_batch_max_size: 0 @@ -668,6 +671,7 @@ service: - agenthealth/traces - agenthealth/logs - entitystore + - server pipelines: metrics/application_signals: exporters: diff --git a/translator/translate/otel/extension/entitystore/translator.go b/translator/translate/otel/extension/entitystore/translator.go index ab80a851fa..e98d4ee549 100644 --- a/translator/translate/otel/extension/entitystore/translator.go +++ b/translator/translate/otel/extension/entitystore/translator.go @@ -35,6 +35,7 @@ func (t *translator) ID() component.ID { func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { cfg := t.factory.CreateDefaultConfig().(*entitystore.Config) cfg.Mode = context.CurrentContext().Mode() + cfg.KubernetesMode = context.CurrentContext().KubernetesMode() cfg.Region = agent.Global_Config.Region credentials := confmap.NewFromStringMap(agent.Global_Config.Credentials) _ = credentials.Unmarshal(cfg) diff --git a/translator/translate/otel/extension/entitystore/translator_test.go b/translator/translate/otel/extension/entitystore/translator_test.go index 74a11f8138..4180b02970 100644 --- a/translator/translate/otel/extension/entitystore/translator_test.go +++ b/translator/translate/otel/extension/entitystore/translator_test.go @@ -16,47 +16,69 @@ import ( ) func TestTranslate(t *testing.T) { - context.CurrentContext().SetMode(config.ModeEC2) 
translateagent.Global_Config.Credentials = make(map[string]interface{}) translateagent.Global_Config.Region = "us-east-1" testCases := map[string]struct { input map[string]interface{} + inputMode string + inputK8sMode string file_exists bool profile_exists bool want *entitystore.Config }{ "OnlyProfile": { input: map[string]interface{}{}, + inputMode: config.ModeEC2, + inputK8sMode: config.ModeEKS, profile_exists: true, want: &entitystore.Config{ - Mode: config.ModeEC2, - Region: "us-east-1", - Profile: "test_profile", + Mode: config.ModeEC2, + KubernetesMode: config.ModeEKS, + Region: "us-east-1", + Profile: "test_profile", + }, + }, + "OnlyProfileWithK8sOnPrem": { + input: map[string]interface{}{}, + inputMode: config.ModeEC2, + inputK8sMode: config.ModeK8sOnPrem, + profile_exists: true, + want: &entitystore.Config{ + Mode: config.ModeEC2, + KubernetesMode: config.ModeK8sOnPrem, + Region: "us-east-1", + Profile: "test_profile", }, }, "OnlyFile": { - input: map[string]interface{}{}, - file_exists: true, + input: map[string]interface{}{}, + inputMode: config.ModeEC2, + inputK8sMode: config.ModeK8sEC2, + file_exists: true, want: &entitystore.Config{ - Mode: config.ModeEC2, - Region: "us-east-1", - Filename: "test_file", + Mode: config.ModeEC2, + KubernetesMode: config.ModeK8sEC2, + Region: "us-east-1", + Filename: "test_file", }, }, } for name, testCase := range testCases { t.Run(name, func(t *testing.T) { - translateagent.Global_Config.Credentials[translateagent.Profile_Key] = "" translateagent.Global_Config.Credentials[translateagent.CredentialsSectionKey] = "" if testCase.file_exists { translateagent.Global_Config.Credentials[translateagent.CredentialsFile_Key] = "test_file" + translateagent.Global_Config.Credentials[translateagent.Profile_Key] = "" } if testCase.profile_exists { translateagent.Global_Config.Credentials[translateagent.Profile_Key] = "test_profile" + translateagent.Global_Config.Credentials[translateagent.CredentialsFile_Key] = "" } tt := 
NewTranslator().(*translator) assert.Equal(t, "entitystore", tt.ID().String()) conf := confmap.NewFromStringMap(testCase.input) + context.CurrentContext().SetMode(testCase.inputMode) + context.CurrentContext().SetKubernetesMode(testCase.inputK8sMode) got, err := tt.Translate(conf) assert.NoError(t, err) assert.Equal(t, testCase.want, got) diff --git a/translator/translate/otel/extension/server/translator.go b/translator/translate/otel/extension/server/translator.go new file mode 100644 index 0000000000..15e3186095 --- /dev/null +++ b/translator/translate/otel/extension/server/translator.go @@ -0,0 +1,39 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package server + +import ( + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/extension" + + "github.com/aws/amazon-cloudwatch-agent/extension/server" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" +) + +const defaultListenAddr = ":4311" + +type translator struct { + name string + factory extension.Factory +} + +var _ common.Translator[component.Config] = (*translator)(nil) + +func NewTranslator() common.Translator[component.Config] { + return &translator{ + factory: server.NewFactory(), + } +} + +func (t *translator) ID() component.ID { + return component.NewIDWithName(t.factory.Type(), t.name) +} + +// Translate creates an extension configuration. +func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { + cfg := t.factory.CreateDefaultConfig().(*server.Config) + cfg.ListenAddress = defaultListenAddr + return cfg, nil +} diff --git a/translator/translate/otel/extension/server/translator_test.go b/translator/translate/otel/extension/server/translator_test.go new file mode 100644 index 0000000000..666f3de868 --- /dev/null +++ b/translator/translate/otel/extension/server/translator_test.go @@ -0,0 +1,35 @@ +// Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package server + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/confmap" + + "github.com/aws/amazon-cloudwatch-agent/extension/server" +) + +func TestTranslate(t *testing.T) { + testCases := map[string]struct { + input map[string]interface{} + want *server.Config + }{ + "DefaultConfig": { + input: map[string]interface{}{}, + want: &server.Config{ListenAddress: defaultListenAddr}, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + tt := NewTranslator().(*translator) + assert.Equal(t, "server", tt.ID().String()) + conf := confmap.NewFromStringMap(testCase.input) + got, err := tt.Translate(conf) + assert.NoError(t, err) + assert.Equal(t, testCase.want, got) + }) + } +} diff --git a/translator/translate/otel/processor/awsentity/translator.go b/translator/translate/otel/processor/awsentity/translator.go index 0ac761b24f..dc6892bfb0 100644 --- a/translator/translate/otel/processor/awsentity/translator.go +++ b/translator/translate/otel/processor/awsentity/translator.go @@ -46,14 +46,10 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { } } - mode := context.CurrentContext().KubernetesMode() - if mode == "" { - mode = context.CurrentContext().Mode() - } - switch mode { + cfg.KubernetesMode = context.CurrentContext().KubernetesMode() + switch cfg.KubernetesMode { case config.ModeEKS: cfg.ClusterName = hostedIn - cfg.Mode = config.ModeEKS } return cfg, nil } diff --git a/translator/translate/otel/processor/awsentity/translator_test.go b/translator/translate/otel/processor/awsentity/translator_test.go index 4b4edc003f..e80ffda76e 100644 --- a/translator/translate/otel/processor/awsentity/translator_test.go +++ b/translator/translate/otel/processor/awsentity/translator_test.go @@ -30,8 +30,8 @@ func TestTranslate(t *testing.T) { }, }}, want: &awsentity.Config{ - ClusterName: "test", - 
Mode: config.ModeEKS, + ClusterName: "test", + KubernetesMode: config.ModeEKS, }, }, } diff --git a/translator/translate/otel/translate_otel.go b/translator/translate/otel/translate_otel.go index 14ad05d8e1..7d77d8f831 100644 --- a/translator/translate/otel/translate_otel.go +++ b/translator/translate/otel/translate_otel.go @@ -21,6 +21,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/entitystore" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/server" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/applicationsignals" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/containerinsights" @@ -73,6 +74,9 @@ func Translate(jsonConfig interface{}, os string) (*otelcol.Config, error) { } } pipelines.Translators.Extensions.Set(entitystore.NewTranslator()) + if context.CurrentContext().KubernetesMode() != "" { + pipelines.Translators.Extensions.Set(server.NewTranslator()) + } cfg := &otelcol.Config{ Receivers: map[component.ID]component.Config{}, Exporters: map[component.ID]component.Config{}, From 37ae6b346a8f56134467fb205008edd3f2db01fe Mon Sep 17 00:00:00 2001 From: POOJA REDDY NATHALA Date: Thu, 12 Sep 2024 13:37:20 -0400 Subject: [PATCH 06/47] Added safety checks in Add and Get pod to service env map (#796) --- extension/entitystore/extension.go | 15 ++++-- extension/entitystore/extension_test.go | 50 ++++++++++--------- extension/server/extension.go | 2 +- extension/server/extension_test.go | 65 ++++++++++++++++--------- 4 files changed, 79 insertions(+), 53 deletions(-) diff --git a/extension/entitystore/extension.go b/extension/entitystore/extension.go index 71ec423d2c..4d2b8adb08 100644 --- a/extension/entitystore/extension.go +++ 
b/extension/entitystore/extension.go @@ -59,7 +59,7 @@ type EntityStore struct { ec2Info ec2Info // eksInfo stores information about EKS such as pod to service Env map - eksInfo eksInfo + eksInfo *eksInfo // serviceprovider stores information about possible service names // that we can attach to the entity @@ -94,7 +94,7 @@ func (e *EntityStore) Start(ctx context.Context, host component.Host) error { go e.ec2Info.initEc2Info() } if e.kubernetesMode != "" { - e.eksInfo = *newEKSInfo(e.logger) + e.eksInfo = newEKSInfo(e.logger) } e.serviceprovider = newServiceProvider(e.mode, e.config.Region, &e.ec2Info, e.metadataprovider, getEC2Provider, ec2CredentialConfig, e.done) go e.serviceprovider.startServiceProvider() @@ -114,7 +114,7 @@ func (e *EntityStore) KubernetesMode() string { return e.kubernetesMode } -func (e *EntityStore) EKSInfo() eksInfo { +func (e *EntityStore) EKSInfo() *eksInfo { return e.eksInfo } @@ -169,11 +169,16 @@ func (e *EntityStore) AddServiceAttrEntryForLogGroup(logGroupName LogGroupName, } func (e *EntityStore) AddPodServiceEnvironmentMapping(podName string, serviceName string, environmentName string) { - e.eksInfo.AddPodServiceEnvironmentMapping(podName, serviceName, environmentName) + if e.eksInfo != nil { + e.eksInfo.AddPodServiceEnvironmentMapping(podName, serviceName, environmentName) + } } func (e *EntityStore) GetPodServiceEnvironmentMapping() map[string]ServiceEnvironment { - return e.eksInfo.GetPodServiceEnvironmentMapping() + if e.eksInfo != nil { + return e.eksInfo.GetPodServiceEnvironmentMapping() + } + return map[string]ServiceEnvironment{} } func (e *EntityStore) createAttributeMap() map[string]*string { diff --git a/extension/entitystore/extension_test.go b/extension/entitystore/extension_test.go index e41287d654..026866e163 100644 --- a/extension/entitystore/extension_test.go +++ b/extension/entitystore/extension_test.go @@ -405,32 +405,34 @@ func TestEntityStore_addServiceAttrEntryForLogGroup(t *testing.T) { 
sp.AssertExpectations(t) } -func TestEntityStore_AddPodServiceEnvironmentMapping(t *testing.T) { - +func TestEntityStore_AddAndGetPodServiceEnvironmentMapping(t *testing.T) { logger, _ := zap.NewProduction() - eks := *newEKSInfo(logger) - e := EntityStore{eksInfo: eks} - e.AddPodServiceEnvironmentMapping("pod1", "service1", "env1") - expectedMap := map[string]ServiceEnvironment{ - "pod1": { - ServiceName: "service1", - Environment: "env1", + tests := []struct { + name string + want map[string]ServiceEnvironment + eks *eksInfo + }{ + { + name: "HappyPath", + want: map[string]ServiceEnvironment{ + "pod1": { + ServiceName: "service1", + Environment: "env1", + }, + }, + eks: newEKSInfo(logger), }, - } - assert.Equal(t, expectedMap, e.eksInfo.GetPodServiceEnvironmentMapping()) -} - -func TestEntityStore_GetPodServiceEnvironmentMapping(t *testing.T) { - - logger, _ := zap.NewProduction() - eks := *newEKSInfo(logger) - e := EntityStore{eksInfo: eks} - expectedMap := map[string]ServiceEnvironment{ - "pod1": { - ServiceName: "service1", - Environment: "env1", + { + name: "Empty EKS Info", + want: map[string]ServiceEnvironment{}, + eks: nil, }, } - e.eksInfo.AddPodServiceEnvironmentMapping("pod1", "service1", "env1") - assert.Equal(t, expectedMap, e.GetPodServiceEnvironmentMapping()) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := EntityStore{eksInfo: tt.eks} + e.AddPodServiceEnvironmentMapping("pod1", "service1", "env1") + assert.Equal(t, tt.want, e.GetPodServiceEnvironmentMapping()) + }) + } } diff --git a/extension/server/extension.go b/extension/server/extension.go index e68d655207..bc90a44546 100644 --- a/extension/server/extension.go +++ b/extension/server/extension.go @@ -73,7 +73,7 @@ func (s *Server) k8sPodToServiceMapHandler(c *gin.Context) { // Added this for testing purpose var getPodServiceEnvironmentMapping = func() map[string]entitystore.ServiceEnvironment { es := entitystore.GetEntityStore() - if es != nil { + if es != nil && 
es.GetPodServiceEnvironmentMapping() != nil { return es.GetPodServiceEnvironmentMapping() } return map[string]entitystore.ServiceEnvironment{} diff --git a/extension/server/extension_test.go b/extension/server/extension_test.go index aa335aef6a..02a6f6acf6 100644 --- a/extension/server/extension_test.go +++ b/extension/server/extension_test.go @@ -58,32 +58,51 @@ func TestK8sPodToServiceMapHandler(t *testing.T) { config := &Config{ ListenAddress: ":8080", } - server := NewServer(logger, config) - - expectedMap := map[string]entitystore.ServiceEnvironment{ - "pod1": { - ServiceName: "service1", - Environment: "env1", + tests := []struct { + name string + want map[string]entitystore.ServiceEnvironment + emptyMap bool + }{ + { + name: "HappyPath", + want: map[string]entitystore.ServiceEnvironment{ + "pod1": { + ServiceName: "service1", + Environment: "env1", + }, + "pod2": { + ServiceName: "service2", + Environment: "env2", + }, + }, }, - "pod2": { - ServiceName: "service2", - Environment: "env2", + { + name: "Empty Map", + want: map[string]entitystore.ServiceEnvironment{}, + emptyMap: true, }, } - es := newMockEntityStore() - getPodServiceEnvironmentMapping = newMockGetPodServiceEnvironmentMapping(es) - es.AddPodServiceEnvironmentMapping("pod1", "service1", "env1") - es.AddPodServiceEnvironmentMapping("pod2", "service2", "env2") - w := httptest.NewRecorder() - c, _ := gin.CreateTestContext(w) - server.k8sPodToServiceMapHandler(c) - - assert.Equal(t, http.StatusOK, w.Code) - - var actualMap map[string]entitystore.ServiceEnvironment - err := json.Unmarshal(w.Body.Bytes(), &actualMap) - assert.NoError(t, err) - assert.Equal(t, expectedMap, actualMap) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + server := NewServer(logger, config) + es := newMockEntityStore() + getPodServiceEnvironmentMapping = newMockGetPodServiceEnvironmentMapping(es) + if !tt.emptyMap { + es.AddPodServiceEnvironmentMapping("pod1", "service1", "env1") + 
es.AddPodServiceEnvironmentMapping("pod2", "service2", "env2") + } + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + server.k8sPodToServiceMapHandler(c) + + assert.Equal(t, http.StatusOK, w.Code) + + var actualMap map[string]entitystore.ServiceEnvironment + err := json.Unmarshal(w.Body.Bytes(), &actualMap) + assert.NoError(t, err) + assert.Equal(t, tt.want, actualMap) + }) + } } func TestJSONHandler(t *testing.T) { From 2583a777d40a3924f95b8dd7c4f1e807f430c8ee Mon Sep 17 00:00:00 2001 From: Bryce Carey Date: Wed, 18 Sep 2024 13:56:52 -0400 Subject: [PATCH 07/47] Revert cross account safety check (#800) --- extension/entitystore/extension.go | 34 --- extension/entitystore/extension_test.go | 45 ---- .../outputs/cloudwatchlogs/cloudwatchlogs.go | 13 +- test/compass/compass_test.go | 229 ++---------------- .../resources/compass_role_arn_check.json | 24 -- 5 files changed, 26 insertions(+), 319 deletions(-) delete mode 100644 test/compass/resources/compass_role_arn_check.json diff --git a/extension/entitystore/extension.go b/extension/entitystore/extension.go index 4d2b8adb08..fe296d9be6 100644 --- a/extension/entitystore/extension.go +++ b/extension/entitystore/extension.go @@ -10,7 +10,6 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2/ec2iface" - "github.com/aws/aws-sdk-go/service/sts" "github.com/aws/aws-sdk-go/service/sts/stsiface" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/extension" @@ -132,10 +131,6 @@ func (e *EntityStore) NativeCredentialExists() bool { // CreateLogFileEntity creates the entity for log events that are being uploaded from a log file in the environment. 
func (e *EntityStore) CreateLogFileEntity(logFileGlob LogFileGlob, logGroupName LogGroupName) *cloudwatchlogs.Entity { - if !e.shouldReturnEntity() { - return nil - } - serviceAttr := e.serviceprovider.logFileServiceAttribute(logFileGlob, logGroupName) keyAttributes := e.createServiceKeyAttributes(serviceAttr) @@ -205,35 +200,6 @@ func (e *EntityStore) createServiceKeyAttributes(serviceAttr ServiceAttribute) m return serviceKeyAttr } -// shouldReturnEntity checks if the account ID for the instance is -// matching the account ID when assuming role for the current credential. -func (e *EntityStore) shouldReturnEntity() bool { - if e.nativeCredential == nil || e.metadataprovider == nil { - e.logger.Debug("there is no credential stored for cross-account checks") - return false - } - doc, err := e.metadataprovider.Get(context.Background()) - if err != nil { - e.logger.Debug("an error occurred when getting instance document for cross-account checks. Reason: %v\n", zap.Error(err)) - return false - } - instanceAccountID := doc.AccountID - if e.stsClient == nil { - e.stsClient = sts.New( - e.nativeCredential, - &aws.Config{ - LogLevel: configaws.SDKLogLevel(), - Logger: configaws.SDKLogger{}, - }) - } - assumedRoleIdentity, err := e.stsClient.GetCallerIdentity(&sts.GetCallerIdentityInput{}) - if err != nil { - e.logger.Debug("an error occurred when calling STS GetCallerIdentity for cross-account checks. 
Reason: ", zap.Error(err)) - return false - } - return instanceAccountID == *assumedRoleIdentity.Account -} - func getMetaDataProvider() ec2metadataprovider.MetadataProvider { mdCredentialConfig := &configaws.CredentialConfig{} return ec2metadataprovider.NewMetadataProvider(mdCredentialConfig.Credentials(), retryer.GetDefaultRetryNumber()) diff --git a/extension/entitystore/extension_test.go b/extension/entitystore/extension_test.go index 026866e163..30179b6b6f 100644 --- a/extension/entitystore/extension_test.go +++ b/extension/entitystore/extension_test.go @@ -10,7 +10,6 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/sts" @@ -19,7 +18,6 @@ import ( "github.com/stretchr/testify/mock" "go.uber.org/zap" - "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatchlogs" "github.com/aws/amazon-cloudwatch-agent/translator/config" ) @@ -318,49 +316,6 @@ func TestEntityStore_createLogFileRID(t *testing.T) { assert.Equal(t, dereferenceMap(expectedEntity.Attributes), dereferenceMap(entity.Attributes)) } -func TestEntityStore_shouldReturnRID(t *testing.T) { - type fields struct { - metadataprovider ec2metadataprovider.MetadataProvider - stsClient stsiface.STSAPI - nativeCredential client.ConfigProvider - } - tests := []struct { - name string - fields fields - want bool - }{ - // TODO need tests for when you can't fetch from IMDS or STS (fail closed) - { - name: "HappyPath_AccountIDMatches", - fields: fields{ - metadataprovider: mockMetadataProviderWithAccountId("123456789012"), - stsClient: &mockSTSClient{accountId: "123456789012"}, - nativeCredential: &session.Session{}, - }, - want: true, - }, - { - name: "HappyPath_AccountIDMismatches", - fields: fields{ - metadataprovider: mockMetadataProviderWithAccountId("210987654321"), - 
stsClient: &mockSTSClient{accountId: "123456789012"}, - nativeCredential: &session.Session{}, - }, - want: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - e := &EntityStore{ - metadataprovider: tt.fields.metadataprovider, - stsClient: tt.fields.stsClient, - nativeCredential: tt.fields.nativeCredential, - } - assert.Equalf(t, tt.want, e.shouldReturnEntity(), "shouldReturnEntity()") - }) - } -} - func dereferenceMap(input map[string]*string) map[string]string { result := make(map[string]string) for k, v := range input { diff --git a/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go b/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go index a75198f38d..e1b0f4f457 100644 --- a/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go +++ b/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go @@ -23,7 +23,6 @@ import ( "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth" "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/stats/agent" "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth/handler/useragent" - "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" "github.com/aws/amazon-cloudwatch-agent/handlers" "github.com/aws/amazon-cloudwatch-agent/internal" "github.com/aws/amazon-cloudwatch-agent/internal/retryer" @@ -126,6 +125,10 @@ func (c *CloudWatchLogs) CreateDest(group, stream string, retention int, logGrou } func (c *CloudWatchLogs) getDest(t Target, logSrc logs.LogSrc) *cwDest { + if cwd, ok := c.cwDests[t]; ok { + return cwd + } + credentialConfig := &configaws.CredentialConfig{ Region: c.Region, AccessKey: c.AccessKey, @@ -135,16 +138,8 @@ func (c *CloudWatchLogs) getDest(t Target, logSrc logs.LogSrc) *cwDest { Filename: c.Filename, Token: c.Token, } - es := entitystore.GetEntityStore() - if es != nil && !es.NativeCredentialExists() { - es.SetNativeCredential(credentialConfig.Credentials()) - } - if cwd, ok := c.cwDests[t]; ok { - return cwd - } logThrottleRetryer := 
retryer.NewLogThrottleRetryer(c.Log) - client := cloudwatchlogs.New( credentialConfig.Credentials(), &aws.Config{ diff --git a/test/compass/compass_test.go b/test/compass/compass_test.go index 711674c505..6343ef05e7 100644 --- a/test/compass/compass_test.go +++ b/test/compass/compass_test.go @@ -10,18 +10,15 @@ import ( "log" "os" "path/filepath" - "strings" "testing" "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/credentials/stscreds" "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs" cwlTypes "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types" "github.com/aws/aws-sdk-go-v2/service/ec2" ec2Types "github.com/aws/aws-sdk-go-v2/service/ec2/types" - "github.com/aws/aws-sdk-go-v2/service/sts" "github.com/google/uuid" "github.com/stretchr/testify/assert" @@ -44,18 +41,15 @@ const ( entityName = "@entity.KeyAttributes.Name" entityEnvironment = "@entity.KeyAttributes.Environment" - entityPlatform = "@entity.Attributes.PlatformType" - entityInstanceId = "@entity.Attributes.EC2.InstanceId" - credsDir = "/tmp/.aws" - testAccountRoleArn = "arn:aws:iam::874389809020:role/CloudWatchAgentServerRole" + entityPlatform = "@entity.Attributes.PlatformType" + entityInstanceId = "@entity.Attributes.EC2.InstanceId" ) var ( - logLineIds = []string{logLineId1, logLineId2} - rnf *cwlTypes.ResourceNotFoundException - cwlClient *cloudwatchlogs.Client - crossAccountLogClient *cloudwatchlogs.Client - ec2Client *ec2.Client + logLineIds = []string{logLineId1, logLineId2} + rnf *cwlTypes.ResourceNotFoundException + cwlClient *cloudwatchlogs.Client + ec2Client *ec2.Client ) type expectedEntity struct { @@ -83,28 +77,6 @@ func init() { }) ec2Client = ec2.NewFromConfig(awsCfg) - // Initialize STS client for cross-account checks - stsClient := sts.NewFromConfig(awsCfg) - - // Assume the role in the target account - appCreds := stscreds.NewAssumeRoleProvider(stsClient, testAccountRoleArn) - - // Create a new configuration 
using the assumed role credentials - assumedCfg, err := config.LoadDefaultConfig(context.TODO(), - config.WithRegion(iadRegionalCode), - config.WithCredentialsProvider( - appCreds, - ), - ) - if err != nil { - log.Fatalf("unable to load assumed role config, %v", err) - } - - // Create a CloudWatch Logs client with the assumed role credentials - crossAccountLogClient = cloudwatchlogs.NewFromConfig(assumedCfg, func(o *cloudwatchlogs.Options) { - o.BaseEndpoint = aws.String(cwlPerfEndpoint) - }) - } // TestWriteLogsToCloudWatch writes N number of logs, and then validates that the @@ -115,13 +87,7 @@ func TestWriteLogsToCloudWatch(t *testing.T) { instanceId := awsservice.GetInstanceId() log.Printf("Found instance id %s", instanceId) - err := ResetProfile() - // Truncate the common-config so we don't use the profile credential - if err != nil { - log.Fatalf("Error truncating file: %s", err) - } - - defer DeleteLogGroupAndStream(cwlClient, instanceId, instanceId) + defer awsservice.DeleteLogGroupAndStream(instanceId, instanceId) testCases := map[string]struct { agentConfigPath string @@ -166,7 +132,6 @@ func TestWriteLogsToCloudWatch(t *testing.T) { } for name, testCase := range testCases { t.Run(name, func(t *testing.T) { - if testCase.useEC2Tag { input := &ec2.CreateTagsInput{ Resources: []string{instanceId}, @@ -200,70 +165,7 @@ func TestWriteLogsToCloudWatch(t *testing.T) { end := time.Now() // check CWL to ensure we got the expected entities in the log group - ValidateEntity(t, cwlClient, instanceId, instanceId, &end, testCase.expectedEntity, false) - - f.Close() - os.Remove(logFilePath + "-" + id.String()) - }) - } -} - -// TestCrossAccount writes N number of logs, and then validates that the -// log events being sent to the other account are not associated with entity -func TestCrossAccount(t *testing.T) { - // this uses the {instance_id} placeholder in the agent configuration, - // so we need to determine the host's instance ID for validation - instanceId := 
awsservice.GetInstanceId() - log.Printf("Found instance id %s", instanceId) - defer DeleteLogGroupAndStream(crossAccountLogClient, instanceId, instanceId) - - testCases := map[string]struct { - agentConfigPath string - iterations int - setupFunction func() error - entityFieldsShouldMiss bool - expectedEntity expectedEntity - }{ - "Compass/RoleArnCrossAccount": { - agentConfigPath: filepath.Join("resources", "compass_role_arn_check.json"), - entityFieldsShouldMiss: true, - setupFunction: SetupRoleArnCredential, - iterations: 1000, - }, - "Compass/ProfileCrossAccount": { - agentConfigPath: filepath.Join("resources", "compass_default_log.json"), - entityFieldsShouldMiss: true, - setupFunction: SetupProfileCredential, - iterations: 1000, - }, - } - for name, testCase := range testCases { - t.Run(name, func(t *testing.T) { - err := testCase.setupFunction() - if err != nil { - t.Fatalf("Error setting up cross-account credential: %v", err) - } - id := uuid.New() - f, err := os.Create(logFilePath + "-" + id.String()) - if err != nil { - t.Fatalf("Error occurred creating log file for writing: %v", err) - } - common.DeleteFile(common.AgentLogFile) - common.TouchFile(common.AgentLogFile) - - common.CopyFile(testCase.agentConfigPath, configOutputPath) - - common.StartAgent(configOutputPath, true, false) - - // ensure that there is enough time from the "start" time and the first log line, - // so we don't miss it in the GetLogEvents call - writeLogLines(t, f, testCase.iterations) - time.Sleep(sleepForFlush) - common.StopAgent() - end := time.Now() - - // check CWL to ensure we got the expected entities in the log group - ValidateEntity(t, crossAccountLogClient, instanceId, instanceId, &end, testCase.expectedEntity, testCase.entityFieldsShouldMiss) + ValidateEntity(t, instanceId, instanceId, &end, testCase.expectedEntity) f.Close() os.Remove(logFilePath + "-" + id.String()) @@ -271,61 +173,6 @@ func TestCrossAccount(t *testing.T) { } } -func SetupRoleArnCredential() error { - 
err := ResetProfile() - // Truncate the common-config so we don't use the profile credential - if err != nil { - return fmt.Errorf("error truncating file: %s", err) - } - log.Println("common-config has been emptied successfully") - - jsonPath := filepath.Join("resources", "compass_role_arn_check.json") - // Read the JSON file - fileContent, err := os.ReadFile(jsonPath) - if err != nil { - return fmt.Errorf("error reading file: %s", err) - } - // Convert the file content to a string - jsonString := string(fileContent) - - // Replace the placeholder with the actual role ARN - updatedJsonString := strings.ReplaceAll(jsonString, "{integ-test-role-arn}", testAccountRoleArn) - - // Write the updated JSON string back to the file - err = os.WriteFile(jsonPath, []byte(updatedJsonString), 0644) - if err != nil { - return fmt.Errorf("error writing file: %s", err) - } - - log.Println("Successfully updated the role ARN in the JSON file") - return nil -} - -func SetupProfileCredential() error { - err := common.RunCommands(profileSetupCommand(testAccountRoleArn)) - return err -} - -func ResetProfile() error { - err := common.RunCommands(profileResetCommand()) - // Truncate the common-config so we don't use the profile credential - return err -} - -func profileSetupCommand(roleArn string) []string { - return []string{ - "mkdir -p " + credsDir, - "printf '[default]\naws_access_key_id=%s\naws_secret_access_key=%s\naws_session_token=%s' $(aws sts assume-role --role-arn " + roleArn + " --role-session-name test --query 'Credentials.[AccessKeyId,SecretAccessKey,SessionToken]' --output text) | tee " + credsDir + "/credentials>/dev/null", - "printf '[credentials]\n shared_credential_profile = \"default\"\n shared_credential_file = \"" + credsDir + "/credentials\"' | sudo tee /opt/aws/amazon-cloudwatch-agent/etc/common-config.toml>/dev/null", - } -} - -func profileResetCommand() []string { - return []string{ - "sudo truncate -s 0 /opt/aws/amazon-cloudwatch-agent/etc/common-config.toml", - 
} -} - func writeLogLines(t *testing.T, f *os.File, iterations int) { log.Printf("Writing %d lines to %s", iterations*len(logLineIds), f.Name()) @@ -345,10 +192,10 @@ func writeLogLines(t *testing.T, f *os.File, iterations int) { // ValidateLogs queries a given LogGroup/LogStream combination given the start and end times, and executes an // arbitrary validator function on the found logs. -func ValidateEntity(t *testing.T, logClient *cloudwatchlogs.Client, logGroup, logStream string, end *time.Time, expectedEntity expectedEntity, entityFieldsShouldMiss bool) { +func ValidateEntity(t *testing.T, logGroup, logStream string, end *time.Time, expectedEntity expectedEntity) { log.Printf("Checking log group/stream: %s/%s", logGroup, logStream) - logGroupInfo, err := getLogGroup(logClient) + logGroupInfo, err := getLogGroup() for _, lg := range logGroupInfo { if *lg.LogGroupName == logGroup { log.Println("Log group " + *lg.LogGroupName + " exists") @@ -358,10 +205,10 @@ func ValidateEntity(t *testing.T, logClient *cloudwatchlogs.Client, logGroup, lo assert.NoError(t, err) begin := end.Add(-sleepForFlush * 2) log.Printf("Start time is " + begin.String() + " and end time is " + end.String()) - queryId, err := getLogQueryId(logClient, logGroup, &begin, end) + queryId, err := getLogQueryId(logGroup, &begin, end) assert.NoError(t, err) log.Printf("queryId is " + *queryId) - result, err := getQueryResult(logClient, queryId) + result, err := getQueryResult(queryId) assert.NoError(t, err) if !assert.NotZero(t, len(result)) { return @@ -394,20 +241,16 @@ func ValidateEntity(t *testing.T, logClient *cloudwatchlogs.Client, logGroup, lo } fmt.Printf("%s: %s\n", aws.ToString(field.Field), aws.ToString(field.Value)) } - entityFieldFoundCount := 0 + allEntityFieldsFound := true for _, value := range requiredEntityFields { - if value { - entityFieldFoundCount += 1 + if !value { + allEntityFieldsFound = false } } - if entityFieldsShouldMiss { - assert.Equal(t, 0, entityFieldFoundCount) - } 
else { - assert.Equal(t, 5, entityFieldFoundCount) - } + assert.True(t, allEntityFieldsFound) } -func getLogQueryId(logClient *cloudwatchlogs.Client, logGroup string, since, until *time.Time) (*string, error) { +func getLogQueryId(logGroup string, since, until *time.Time) (*string, error) { var queryId *string params := &cloudwatchlogs.StartQueryInput{ QueryString: aws.String("fields @message, @entity.KeyAttributes.Type, @entity.KeyAttributes.Name, @entity.KeyAttributes.Environment, @entity.Attributes.PlatformType, @entity.Attributes.EC2.InstanceId"), @@ -422,7 +265,7 @@ func getLogQueryId(logClient *cloudwatchlogs.Client, logGroup string, since, unt attempts := 0 for { - output, err := logClient.StartQuery(context.Background(), params) + output, err := cwlClient.StartQuery(context.Background(), params) attempts += 1 if err != nil { @@ -440,7 +283,7 @@ func getLogQueryId(logClient *cloudwatchlogs.Client, logGroup string, since, unt } } -func getQueryResult(logClient *cloudwatchlogs.Client, queryId *string) ([][]cwlTypes.ResultField, error) { +func getQueryResult(queryId *string) ([][]cwlTypes.ResultField, error) { attempts := 0 var results [][]cwlTypes.ResultField params := &cloudwatchlogs.GetQueryResultsInput{ @@ -450,7 +293,7 @@ func getQueryResult(logClient *cloudwatchlogs.Client, queryId *string) ([][]cwlT if attempts > awsservice.StandardRetries { return results, errors.New("exceeded retry count") } - result, err := logClient.GetQueryResults(context.Background(), params) + result, err := cwlClient.GetQueryResults(context.Background(), params) log.Printf("GetQueryResult status is: %v", result.Status) attempts += 1 if result.Status != cwlTypes.QueryStatusComplete { @@ -474,12 +317,12 @@ func getQueryResult(logClient *cloudwatchlogs.Client, queryId *string) ([][]cwlT } } -func getLogGroup(logClient *cloudwatchlogs.Client) ([]cwlTypes.LogGroup, error) { +func getLogGroup() ([]cwlTypes.LogGroup, error) { attempts := 0 var logGroups []cwlTypes.LogGroup params := 
&cloudwatchlogs.DescribeLogGroupsInput{} for { - output, err := logClient.DescribeLogGroups(context.Background(), params) + output, err := cwlClient.DescribeLogGroups(context.Background(), params) attempts += 1 @@ -497,31 +340,3 @@ func getLogGroup(logClient *cloudwatchlogs.Client) ([]cwlTypes.LogGroup, error) return logGroups, err } } - -// DeleteLogGroupAndStream cleans up a log group and stream by name. This gracefully handles -// ResourceNotFoundException errors from calling the APIs -func DeleteLogGroupAndStream(logClient *cloudwatchlogs.Client, logGroupName, logStreamName string) { - DeleteLogStream(logClient, logGroupName, logStreamName) - DeleteLogGroup(logClient, logGroupName) -} - -// DeleteLogStream cleans up log stream by name -func DeleteLogStream(logClient *cloudwatchlogs.Client, logGroupName, logStreamName string) { - _, err := logClient.DeleteLogStream(context.TODO(), &cloudwatchlogs.DeleteLogStreamInput{ - LogGroupName: aws.String(logGroupName), - LogStreamName: aws.String(logStreamName), - }) - if err != nil && !errors.As(err, &rnf) { - log.Printf("Error occurred while deleting log stream %s: %v", logStreamName, err) - } -} - -// DeleteLogGroup cleans up log group by name -func DeleteLogGroup(logClient *cloudwatchlogs.Client, logGroupName string) { - _, err := logClient.DeleteLogGroup(context.TODO(), &cloudwatchlogs.DeleteLogGroupInput{ - LogGroupName: aws.String(logGroupName), - }) - if err != nil && !errors.As(err, &rnf) { - log.Printf("Error occurred while deleting log group %s: %v", logGroupName, err) - } -} diff --git a/test/compass/resources/compass_role_arn_check.json b/test/compass/resources/compass_role_arn_check.json deleted file mode 100644 index 90b1eaaabb..0000000000 --- a/test/compass/resources/compass_role_arn_check.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "agent": { - "run_as_user": "root", - "debug": true, - "credentials": { - "role_arn": "{integ-test-role-arn}" - } - }, - "logs": { - "endpoint_override": 
"https://logs-perf.us-east-1.amazonaws.com", - "logs_collected": { - "files": { - "collect_list": [ - { - "file_path": "/tmp/cwagent_log_test.log*", - "log_group_name": "{instance_id}", - "log_stream_name": "{instance_id}", - "timezone": "UTC" - } - ] - } - } - } -} From e39b118492cdee42f623192715d70dbe06411cc0 Mon Sep 17 00:00:00 2001 From: Chad Patel Date: Wed, 18 Sep 2024 14:04:40 -0500 Subject: [PATCH 08/47] Add entity cloudwatch sdk v1 changes to amazon-cloudwatch-agent (#802) --- plugins/outputs/cloudwatch/aggregator.go | 3 +- plugins/outputs/cloudwatch/aggregator_test.go | 2 +- plugins/outputs/cloudwatch/cloudwatch.go | 4 +- plugins/outputs/cloudwatch/cloudwatch_test.go | 4 +- plugins/outputs/cloudwatch/convert_otel.go | 2 +- plugins/outputs/cloudwatch/util.go | 3 +- plugins/outputs/cloudwatch/util_test.go | 2 +- sdk/service/cloudwatch/api.go | 12746 ++++++++++++++++ .../cloudwatch/cloudwatchiface/interface.go | 239 + sdk/service/cloudwatch/doc.go | 43 + sdk/service/cloudwatch/errors.go | 84 + sdk/service/cloudwatch/integ_test.go | 70 + sdk/service/cloudwatch/service.go | 104 + sdk/service/cloudwatch/waiters.go | 102 + 14 files changed, 13397 insertions(+), 11 deletions(-) create mode 100644 sdk/service/cloudwatch/api.go create mode 100644 sdk/service/cloudwatch/cloudwatchiface/interface.go create mode 100644 sdk/service/cloudwatch/doc.go create mode 100644 sdk/service/cloudwatch/errors.go create mode 100644 sdk/service/cloudwatch/integ_test.go create mode 100644 sdk/service/cloudwatch/service.go create mode 100644 sdk/service/cloudwatch/waiters.go diff --git a/plugins/outputs/cloudwatch/aggregator.go b/plugins/outputs/cloudwatch/aggregator.go index 43c0a253c7..3295f93d87 100644 --- a/plugins/outputs/cloudwatch/aggregator.go +++ b/plugins/outputs/cloudwatch/aggregator.go @@ -11,9 +11,8 @@ import ( "sync" "time" - "github.com/aws/aws-sdk-go/service/cloudwatch" - "github.com/aws/amazon-cloudwatch-agent/metric/distribution" + 
"github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatch" ) const ( diff --git a/plugins/outputs/cloudwatch/aggregator_test.go b/plugins/outputs/cloudwatch/aggregator_test.go index c41af65677..c504f9c692 100644 --- a/plugins/outputs/cloudwatch/aggregator_test.go +++ b/plugins/outputs/cloudwatch/aggregator_test.go @@ -9,11 +9,11 @@ import ( "testing" "time" - "github.com/aws/aws-sdk-go/service/cloudwatch" "github.com/stretchr/testify/assert" "github.com/aws/amazon-cloudwatch-agent/metric/distribution" "github.com/aws/amazon-cloudwatch-agent/metric/distribution/seh1" + "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatch" ) var wg sync.WaitGroup diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index 4cc9a4276d..e81605edfe 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -14,8 +14,6 @@ import ( "github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/cloudwatch" - "github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/outputs" @@ -32,6 +30,8 @@ import ( "github.com/aws/amazon-cloudwatch-agent/internal/retryer" "github.com/aws/amazon-cloudwatch-agent/internal/util/collections" "github.com/aws/amazon-cloudwatch-agent/metric/distribution" + "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatch" + "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatch/cloudwatchiface" ) const ( diff --git a/plugins/outputs/cloudwatch/cloudwatch_test.go b/plugins/outputs/cloudwatch/cloudwatch_test.go index b3aada3801..2c3ea73a22 100644 --- a/plugins/outputs/cloudwatch/cloudwatch_test.go +++ b/plugins/outputs/cloudwatch/cloudwatch_test.go @@ -17,8 +17,6 @@ import ( 
"github.com/amazon-contributing/opentelemetry-collector-contrib/extension/awsmiddleware" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/cloudwatch" - "github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/stretchr/testify/assert" @@ -29,6 +27,8 @@ import ( "github.com/aws/amazon-cloudwatch-agent/internal/publisher" "github.com/aws/amazon-cloudwatch-agent/metric/distribution" + "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatch" + "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatch/cloudwatchiface" ) // Return true if found. diff --git a/plugins/outputs/cloudwatch/convert_otel.go b/plugins/outputs/cloudwatch/convert_otel.go index f9471c309c..6df75a36bc 100644 --- a/plugins/outputs/cloudwatch/convert_otel.go +++ b/plugins/outputs/cloudwatch/convert_otel.go @@ -9,12 +9,12 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudwatch" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" cloudwatchutil "github.com/aws/amazon-cloudwatch-agent/internal/cloudwatch" "github.com/aws/amazon-cloudwatch-agent/metric/distribution" + "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatch" ) // ConvertOtelDimensions will returns a sorted list of dimensions. 
diff --git a/plugins/outputs/cloudwatch/util.go b/plugins/outputs/cloudwatch/util.go index 8ffc81c325..805c22bb82 100644 --- a/plugins/outputs/cloudwatch/util.go +++ b/plugins/outputs/cloudwatch/util.go @@ -9,11 +9,10 @@ import ( "sort" "time" - "github.com/aws/aws-sdk-go/service/cloudwatch" - "github.com/aws/amazon-cloudwatch-agent/metric/distribution" "github.com/aws/amazon-cloudwatch-agent/metric/distribution/regular" "github.com/aws/amazon-cloudwatch-agent/metric/distribution/seh1" + "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatch" ) const ( diff --git a/plugins/outputs/cloudwatch/util_test.go b/plugins/outputs/cloudwatch/util_test.go index 8f022b736c..36acc387cd 100644 --- a/plugins/outputs/cloudwatch/util_test.go +++ b/plugins/outputs/cloudwatch/util_test.go @@ -10,12 +10,12 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudwatch" "github.com/stretchr/testify/assert" "github.com/aws/amazon-cloudwatch-agent/metric/distribution" "github.com/aws/amazon-cloudwatch-agent/metric/distribution/regular" "github.com/aws/amazon-cloudwatch-agent/metric/distribution/seh1" + "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatch" ) func TestPublishJitter(t *testing.T) { diff --git a/sdk/service/cloudwatch/api.go b/sdk/service/cloudwatch/api.go new file mode 100644 index 0000000000..db2109d91f --- /dev/null +++ b/sdk/service/cloudwatch/api.go @@ -0,0 +1,12746 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudwatch + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opDeleteAlarms = "DeleteAlarms" + +// DeleteAlarmsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAlarms operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteAlarms for more information on using the DeleteAlarms +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteAlarmsRequest method. +// req, resp := client.DeleteAlarmsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DeleteAlarms +func (c *CloudWatch) DeleteAlarmsRequest(input *DeleteAlarmsInput) (req *request.Request, output *DeleteAlarmsOutput) { + op := &request.Operation{ + Name: opDeleteAlarms, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAlarmsInput{} + } + + output = &DeleteAlarmsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteAlarms API operation for Amazon CloudWatch. +// +// Deletes the specified alarms. You can delete up to 100 alarms in one operation. +// However, this total can include no more than one composite alarm. For example, +// you could delete 99 metric alarms and one composite alarms with one operation, +// but you can't delete two composite alarms with one operation. +// +// If you specify an incorrect alarm name or make any other error in the operation, +// no alarms are deleted. 
To confirm that alarms were deleted successfully, +// you can use the DescribeAlarms (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeAlarms.html) +// operation after using DeleteAlarms. +// +// It is possible to create a loop or cycle of composite alarms, where composite +// alarm A depends on composite alarm B, and composite alarm B also depends +// on composite alarm A. In this scenario, you can't delete any composite alarm +// that is part of the cycle because there is always still a composite alarm +// that depends on that alarm that you want to delete. +// +// To get out of such a situation, you must break the cycle by changing the +// rule of one of the composite alarms in the cycle to remove a dependency that +// creates the cycle. The simplest change to make to break a cycle is to change +// the AlarmRule of one of the alarms to false. +// +// Additionally, the evaluation of composite alarms stops if CloudWatch detects +// a cycle in the evaluation path. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation DeleteAlarms for usage and error information. +// +// Returned Error Codes: +// - ErrCodeResourceNotFound "ResourceNotFound" +// The named resource does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DeleteAlarms +func (c *CloudWatch) DeleteAlarms(input *DeleteAlarmsInput) (*DeleteAlarmsOutput, error) { + req, out := c.DeleteAlarmsRequest(input) + return out, req.Send() +} + +// DeleteAlarmsWithContext is the same as DeleteAlarms with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteAlarms for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) DeleteAlarmsWithContext(ctx aws.Context, input *DeleteAlarmsInput, opts ...request.Option) (*DeleteAlarmsOutput, error) { + req, out := c.DeleteAlarmsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteAnomalyDetector = "DeleteAnomalyDetector" + +// DeleteAnomalyDetectorRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAnomalyDetector operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteAnomalyDetector for more information on using the DeleteAnomalyDetector +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteAnomalyDetectorRequest method. 
+// req, resp := client.DeleteAnomalyDetectorRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DeleteAnomalyDetector +func (c *CloudWatch) DeleteAnomalyDetectorRequest(input *DeleteAnomalyDetectorInput) (req *request.Request, output *DeleteAnomalyDetectorOutput) { + op := &request.Operation{ + Name: opDeleteAnomalyDetector, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAnomalyDetectorInput{} + } + + output = &DeleteAnomalyDetectorOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteAnomalyDetector API operation for Amazon CloudWatch. +// +// Deletes the specified anomaly detection model from your account. For more +// information about how to delete an anomaly detection model, see Deleting +// an anomaly detection model (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Create_Anomaly_Detection_Alarm.html#Delete_Anomaly_Detection_Model) +// in the CloudWatch User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation DeleteAnomalyDetector for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The named resource does not exist. +// +// - ErrCodeInternalServiceFault "InternalServiceError" +// Request processing has failed due to some unknown error, exception, or failure. +// +// - ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. 
+// +// - ErrCodeMissingRequiredParameterException "MissingParameter" +// An input parameter that is required is missing. +// +// - ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" +// Parameters were used together that cannot be used together. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DeleteAnomalyDetector +func (c *CloudWatch) DeleteAnomalyDetector(input *DeleteAnomalyDetectorInput) (*DeleteAnomalyDetectorOutput, error) { + req, out := c.DeleteAnomalyDetectorRequest(input) + return out, req.Send() +} + +// DeleteAnomalyDetectorWithContext is the same as DeleteAnomalyDetector with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteAnomalyDetector for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) DeleteAnomalyDetectorWithContext(ctx aws.Context, input *DeleteAnomalyDetectorInput, opts ...request.Option) (*DeleteAnomalyDetectorOutput, error) { + req, out := c.DeleteAnomalyDetectorRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteDashboards = "DeleteDashboards" + +// DeleteDashboardsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDashboards operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteDashboards for more information on using the DeleteDashboards +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteDashboardsRequest method. +// req, resp := client.DeleteDashboardsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DeleteDashboards +func (c *CloudWatch) DeleteDashboardsRequest(input *DeleteDashboardsInput) (req *request.Request, output *DeleteDashboardsOutput) { + op := &request.Operation{ + Name: opDeleteDashboards, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDashboardsInput{} + } + + output = &DeleteDashboardsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteDashboards API operation for Amazon CloudWatch. +// +// Deletes all dashboards that you specify. You can specify up to 100 dashboards +// to delete. If there is an error during this call, no dashboards are deleted. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation DeleteDashboards for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. +// +// - ErrCodeDashboardNotFoundError "ResourceNotFound" +// The specified dashboard does not exist. +// +// - ErrCodeInternalServiceFault "InternalServiceError" +// Request processing has failed due to some unknown error, exception, or failure. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DeleteDashboards +func (c *CloudWatch) DeleteDashboards(input *DeleteDashboardsInput) (*DeleteDashboardsOutput, error) { + req, out := c.DeleteDashboardsRequest(input) + return out, req.Send() +} + +// DeleteDashboardsWithContext is the same as DeleteDashboards with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDashboards for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) DeleteDashboardsWithContext(ctx aws.Context, input *DeleteDashboardsInput, opts ...request.Option) (*DeleteDashboardsOutput, error) { + req, out := c.DeleteDashboardsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteInsightRules = "DeleteInsightRules" + +// DeleteInsightRulesRequest generates a "aws/request.Request" representing the +// client's request for the DeleteInsightRules operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteInsightRules for more information on using the DeleteInsightRules +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteInsightRulesRequest method. 
+// req, resp := client.DeleteInsightRulesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DeleteInsightRules +func (c *CloudWatch) DeleteInsightRulesRequest(input *DeleteInsightRulesInput) (req *request.Request, output *DeleteInsightRulesOutput) { + op := &request.Operation{ + Name: opDeleteInsightRules, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteInsightRulesInput{} + } + + output = &DeleteInsightRulesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteInsightRules API operation for Amazon CloudWatch. +// +// Permanently deletes the specified Contributor Insights rules. +// +// If you create a rule, delete it, and then re-create it with the same name, +// historical data from the first time the rule was created might not be available. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation DeleteInsightRules for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. +// +// - ErrCodeMissingRequiredParameterException "MissingParameter" +// An input parameter that is required is missing. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DeleteInsightRules +func (c *CloudWatch) DeleteInsightRules(input *DeleteInsightRulesInput) (*DeleteInsightRulesOutput, error) { + req, out := c.DeleteInsightRulesRequest(input) + return out, req.Send() +} + +// DeleteInsightRulesWithContext is the same as DeleteInsightRules with the addition of +// the ability to pass a context and additional request options. 
+// +// See DeleteInsightRules for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) DeleteInsightRulesWithContext(ctx aws.Context, input *DeleteInsightRulesInput, opts ...request.Option) (*DeleteInsightRulesOutput, error) { + req, out := c.DeleteInsightRulesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteMetricStream = "DeleteMetricStream" + +// DeleteMetricStreamRequest generates a "aws/request.Request" representing the +// client's request for the DeleteMetricStream operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteMetricStream for more information on using the DeleteMetricStream +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteMetricStreamRequest method. 
+// req, resp := client.DeleteMetricStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DeleteMetricStream +func (c *CloudWatch) DeleteMetricStreamRequest(input *DeleteMetricStreamInput) (req *request.Request, output *DeleteMetricStreamOutput) { + op := &request.Operation{ + Name: opDeleteMetricStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteMetricStreamInput{} + } + + output = &DeleteMetricStreamOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteMetricStream API operation for Amazon CloudWatch. +// +// Permanently deletes the metric stream that you specify. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation DeleteMetricStream for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeInternalServiceFault "InternalServiceError" +// Request processing has failed due to some unknown error, exception, or failure. +// +// - ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. +// +// - ErrCodeMissingRequiredParameterException "MissingParameter" +// An input parameter that is required is missing. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DeleteMetricStream +func (c *CloudWatch) DeleteMetricStream(input *DeleteMetricStreamInput) (*DeleteMetricStreamOutput, error) { + req, out := c.DeleteMetricStreamRequest(input) + return out, req.Send() +} + +// DeleteMetricStreamWithContext is the same as DeleteMetricStream with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteMetricStream for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) DeleteMetricStreamWithContext(ctx aws.Context, input *DeleteMetricStreamInput, opts ...request.Option) (*DeleteMetricStreamOutput, error) { + req, out := c.DeleteMetricStreamRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeAlarmHistory = "DescribeAlarmHistory" + +// DescribeAlarmHistoryRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAlarmHistory operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeAlarmHistory for more information on using the DescribeAlarmHistory +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeAlarmHistoryRequest method. 
+// req, resp := client.DescribeAlarmHistoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DescribeAlarmHistory +func (c *CloudWatch) DescribeAlarmHistoryRequest(input *DescribeAlarmHistoryInput) (req *request.Request, output *DescribeAlarmHistoryOutput) { + op := &request.Operation{ + Name: opDescribeAlarmHistory, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAlarmHistoryInput{} + } + + output = &DescribeAlarmHistoryOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAlarmHistory API operation for Amazon CloudWatch. +// +// Retrieves the history for the specified alarm. You can filter the results +// by date range or item type. If an alarm name is not specified, the histories +// for either all metric alarms or all composite alarms are returned. +// +// CloudWatch retains the history of an alarm even if you delete the alarm. +// +// To use this operation and return information about a composite alarm, you +// must be signed on with the cloudwatch:DescribeAlarmHistory permission that +// is scoped to *. You can't return information about composite alarms if your +// cloudwatch:DescribeAlarmHistory permission has a narrower scope. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation DescribeAlarmHistory for usage and error information. +// +// Returned Error Codes: +// - ErrCodeInvalidNextToken "InvalidNextToken" +// The next token specified is invalid. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DescribeAlarmHistory +func (c *CloudWatch) DescribeAlarmHistory(input *DescribeAlarmHistoryInput) (*DescribeAlarmHistoryOutput, error) { + req, out := c.DescribeAlarmHistoryRequest(input) + return out, req.Send() +} + +// DescribeAlarmHistoryWithContext is the same as DescribeAlarmHistory with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAlarmHistory for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) DescribeAlarmHistoryWithContext(ctx aws.Context, input *DescribeAlarmHistoryInput, opts ...request.Option) (*DescribeAlarmHistoryOutput, error) { + req, out := c.DescribeAlarmHistoryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeAlarmHistoryPages iterates over the pages of a DescribeAlarmHistory operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeAlarmHistory method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeAlarmHistory operation. 
+// pageNum := 0 +// err := client.DescribeAlarmHistoryPages(params, +// func(page *cloudwatch.DescribeAlarmHistoryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *CloudWatch) DescribeAlarmHistoryPages(input *DescribeAlarmHistoryInput, fn func(*DescribeAlarmHistoryOutput, bool) bool) error { + return c.DescribeAlarmHistoryPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeAlarmHistoryPagesWithContext same as DescribeAlarmHistoryPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) DescribeAlarmHistoryPagesWithContext(ctx aws.Context, input *DescribeAlarmHistoryInput, fn func(*DescribeAlarmHistoryOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeAlarmHistoryInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeAlarmHistoryRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeAlarmHistoryOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeAlarms = "DescribeAlarms" + +// DescribeAlarmsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAlarms operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeAlarms for more information on using the DescribeAlarms +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeAlarmsRequest method. +// req, resp := client.DescribeAlarmsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DescribeAlarms +func (c *CloudWatch) DescribeAlarmsRequest(input *DescribeAlarmsInput) (req *request.Request, output *DescribeAlarmsOutput) { + op := &request.Operation{ + Name: opDescribeAlarms, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAlarmsInput{} + } + + output = &DescribeAlarmsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAlarms API operation for Amazon CloudWatch. +// +// Retrieves the specified alarms. You can filter the results by specifying +// a prefix for the alarm name, the alarm state, or a prefix for any action. +// +// To use this operation and return information about composite alarms, you +// must be signed on with the cloudwatch:DescribeAlarms permission that is scoped +// to *. You can't return information about composite alarms if your cloudwatch:DescribeAlarms +// permission has a narrower scope. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation DescribeAlarms for usage and error information. 
+// +// Returned Error Codes: +// - ErrCodeInvalidNextToken "InvalidNextToken" +// The next token specified is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DescribeAlarms +func (c *CloudWatch) DescribeAlarms(input *DescribeAlarmsInput) (*DescribeAlarmsOutput, error) { + req, out := c.DescribeAlarmsRequest(input) + return out, req.Send() +} + +// DescribeAlarmsWithContext is the same as DescribeAlarms with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAlarms for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) DescribeAlarmsWithContext(ctx aws.Context, input *DescribeAlarmsInput, opts ...request.Option) (*DescribeAlarmsOutput, error) { + req, out := c.DescribeAlarmsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeAlarmsPages iterates over the pages of a DescribeAlarms operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeAlarms method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeAlarms operation. 
+// pageNum := 0 +// err := client.DescribeAlarmsPages(params, +// func(page *cloudwatch.DescribeAlarmsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *CloudWatch) DescribeAlarmsPages(input *DescribeAlarmsInput, fn func(*DescribeAlarmsOutput, bool) bool) error { + return c.DescribeAlarmsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeAlarmsPagesWithContext same as DescribeAlarmsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) DescribeAlarmsPagesWithContext(ctx aws.Context, input *DescribeAlarmsInput, fn func(*DescribeAlarmsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeAlarmsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeAlarmsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeAlarmsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeAlarmsForMetric = "DescribeAlarmsForMetric" + +// DescribeAlarmsForMetricRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAlarmsForMetric operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeAlarmsForMetric for more information on using the DescribeAlarmsForMetric +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeAlarmsForMetricRequest method. +// req, resp := client.DescribeAlarmsForMetricRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DescribeAlarmsForMetric +func (c *CloudWatch) DescribeAlarmsForMetricRequest(input *DescribeAlarmsForMetricInput) (req *request.Request, output *DescribeAlarmsForMetricOutput) { + op := &request.Operation{ + Name: opDescribeAlarmsForMetric, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAlarmsForMetricInput{} + } + + output = &DescribeAlarmsForMetricOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAlarmsForMetric API operation for Amazon CloudWatch. +// +// Retrieves the alarms for the specified metric. To filter the results, specify +// a statistic, period, or unit. +// +// This operation retrieves only standard alarms that are based on the specified +// metric. It does not return alarms based on math expressions that use the +// specified metric, or composite alarms that use the specified metric. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation DescribeAlarmsForMetric for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DescribeAlarmsForMetric +func (c *CloudWatch) DescribeAlarmsForMetric(input *DescribeAlarmsForMetricInput) (*DescribeAlarmsForMetricOutput, error) { + req, out := c.DescribeAlarmsForMetricRequest(input) + return out, req.Send() +} + +// DescribeAlarmsForMetricWithContext is the same as DescribeAlarmsForMetric with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAlarmsForMetric for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) DescribeAlarmsForMetricWithContext(ctx aws.Context, input *DescribeAlarmsForMetricInput, opts ...request.Option) (*DescribeAlarmsForMetricOutput, error) { + req, out := c.DescribeAlarmsForMetricRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeAnomalyDetectors = "DescribeAnomalyDetectors" + +// DescribeAnomalyDetectorsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAnomalyDetectors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeAnomalyDetectors for more information on using the DescribeAnomalyDetectors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// // Example sending a request using the DescribeAnomalyDetectorsRequest method. +// req, resp := client.DescribeAnomalyDetectorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DescribeAnomalyDetectors +func (c *CloudWatch) DescribeAnomalyDetectorsRequest(input *DescribeAnomalyDetectorsInput) (req *request.Request, output *DescribeAnomalyDetectorsOutput) { + op := &request.Operation{ + Name: opDescribeAnomalyDetectors, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeAnomalyDetectorsInput{} + } + + output = &DescribeAnomalyDetectorsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAnomalyDetectors API operation for Amazon CloudWatch. +// +// Lists the anomaly detection models that you have created in your account. +// For single metric anomaly detectors, you can list all of the models in your +// account or filter the results to only the models that are related to a certain +// namespace, metric name, or metric dimension. For metric math anomaly detectors, +// you can list them by adding METRIC_MATH to the AnomalyDetectorTypes array. +// This will return all metric math anomaly detectors in your account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation DescribeAnomalyDetectors for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeInvalidNextToken "InvalidNextToken" +// The next token specified is invalid. 
+// +// - ErrCodeInternalServiceFault "InternalServiceError" +// Request processing has failed due to some unknown error, exception, or failure. +// +// - ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. +// +// - ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" +// Parameters were used together that cannot be used together. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DescribeAnomalyDetectors +func (c *CloudWatch) DescribeAnomalyDetectors(input *DescribeAnomalyDetectorsInput) (*DescribeAnomalyDetectorsOutput, error) { + req, out := c.DescribeAnomalyDetectorsRequest(input) + return out, req.Send() +} + +// DescribeAnomalyDetectorsWithContext is the same as DescribeAnomalyDetectors with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAnomalyDetectors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) DescribeAnomalyDetectorsWithContext(ctx aws.Context, input *DescribeAnomalyDetectorsInput, opts ...request.Option) (*DescribeAnomalyDetectorsOutput, error) { + req, out := c.DescribeAnomalyDetectorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeAnomalyDetectorsPages iterates over the pages of a DescribeAnomalyDetectors operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeAnomalyDetectors method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a DescribeAnomalyDetectors operation. +// pageNum := 0 +// err := client.DescribeAnomalyDetectorsPages(params, +// func(page *cloudwatch.DescribeAnomalyDetectorsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *CloudWatch) DescribeAnomalyDetectorsPages(input *DescribeAnomalyDetectorsInput, fn func(*DescribeAnomalyDetectorsOutput, bool) bool) error { + return c.DescribeAnomalyDetectorsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeAnomalyDetectorsPagesWithContext same as DescribeAnomalyDetectorsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) DescribeAnomalyDetectorsPagesWithContext(ctx aws.Context, input *DescribeAnomalyDetectorsInput, fn func(*DescribeAnomalyDetectorsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeAnomalyDetectorsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeAnomalyDetectorsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeAnomalyDetectorsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeInsightRules = "DescribeInsightRules" + +// DescribeInsightRulesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInsightRules operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInsightRules for more information on using the DescribeInsightRules +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeInsightRulesRequest method. +// req, resp := client.DescribeInsightRulesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DescribeInsightRules +func (c *CloudWatch) DescribeInsightRulesRequest(input *DescribeInsightRulesInput) (req *request.Request, output *DescribeInsightRulesOutput) { + op := &request.Operation{ + Name: opDescribeInsightRules, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInsightRulesInput{} + } + + output = &DescribeInsightRulesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInsightRules API operation for Amazon CloudWatch. +// +// Returns a list of all the Contributor Insights rules in your account. +// +// For more information about Contributor Insights, see Using Contributor Insights +// to Analyze High-Cardinality Data (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation DescribeInsightRules for usage and error information. +// +// Returned Error Codes: +// - ErrCodeInvalidNextToken "InvalidNextToken" +// The next token specified is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DescribeInsightRules +func (c *CloudWatch) DescribeInsightRules(input *DescribeInsightRulesInput) (*DescribeInsightRulesOutput, error) { + req, out := c.DescribeInsightRulesRequest(input) + return out, req.Send() +} + +// DescribeInsightRulesWithContext is the same as DescribeInsightRules with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInsightRules for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) DescribeInsightRulesWithContext(ctx aws.Context, input *DescribeInsightRulesInput, opts ...request.Option) (*DescribeInsightRulesOutput, error) { + req, out := c.DescribeInsightRulesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeInsightRulesPages iterates over the pages of a DescribeInsightRules operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInsightRules method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInsightRules operation. 
+// pageNum := 0 +// err := client.DescribeInsightRulesPages(params, +// func(page *cloudwatch.DescribeInsightRulesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *CloudWatch) DescribeInsightRulesPages(input *DescribeInsightRulesInput, fn func(*DescribeInsightRulesOutput, bool) bool) error { + return c.DescribeInsightRulesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInsightRulesPagesWithContext same as DescribeInsightRulesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) DescribeInsightRulesPagesWithContext(ctx aws.Context, input *DescribeInsightRulesInput, fn func(*DescribeInsightRulesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInsightRulesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInsightRulesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInsightRulesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDisableAlarmActions = "DisableAlarmActions" + +// DisableAlarmActionsRequest generates a "aws/request.Request" representing the +// client's request for the DisableAlarmActions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DisableAlarmActions for more information on using the DisableAlarmActions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DisableAlarmActionsRequest method. +// req, resp := client.DisableAlarmActionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DisableAlarmActions +func (c *CloudWatch) DisableAlarmActionsRequest(input *DisableAlarmActionsInput) (req *request.Request, output *DisableAlarmActionsOutput) { + op := &request.Operation{ + Name: opDisableAlarmActions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableAlarmActionsInput{} + } + + output = &DisableAlarmActionsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DisableAlarmActions API operation for Amazon CloudWatch. +// +// Disables the actions for the specified alarms. When an alarm's actions are +// disabled, the alarm actions do not execute when the alarm state changes. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation DisableAlarmActions for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DisableAlarmActions +func (c *CloudWatch) DisableAlarmActions(input *DisableAlarmActionsInput) (*DisableAlarmActionsOutput, error) { + req, out := c.DisableAlarmActionsRequest(input) + return out, req.Send() +} + +// DisableAlarmActionsWithContext is the same as DisableAlarmActions with the addition of +// the ability to pass a context and additional request options. +// +// See DisableAlarmActions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) DisableAlarmActionsWithContext(ctx aws.Context, input *DisableAlarmActionsInput, opts ...request.Option) (*DisableAlarmActionsOutput, error) { + req, out := c.DisableAlarmActionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDisableInsightRules = "DisableInsightRules" + +// DisableInsightRulesRequest generates a "aws/request.Request" representing the +// client's request for the DisableInsightRules operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisableInsightRules for more information on using the DisableInsightRules +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DisableInsightRulesRequest method. 
+// req, resp := client.DisableInsightRulesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DisableInsightRules +func (c *CloudWatch) DisableInsightRulesRequest(input *DisableInsightRulesInput) (req *request.Request, output *DisableInsightRulesOutput) { + op := &request.Operation{ + Name: opDisableInsightRules, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableInsightRulesInput{} + } + + output = &DisableInsightRulesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisableInsightRules API operation for Amazon CloudWatch. +// +// Disables the specified Contributor Insights rules. When rules are disabled, +// they do not analyze log groups and do not incur costs. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation DisableInsightRules for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. +// +// - ErrCodeMissingRequiredParameterException "MissingParameter" +// An input parameter that is required is missing. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/DisableInsightRules +func (c *CloudWatch) DisableInsightRules(input *DisableInsightRulesInput) (*DisableInsightRulesOutput, error) { + req, out := c.DisableInsightRulesRequest(input) + return out, req.Send() +} + +// DisableInsightRulesWithContext is the same as DisableInsightRules with the addition of +// the ability to pass a context and additional request options. +// +// See DisableInsightRules for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) DisableInsightRulesWithContext(ctx aws.Context, input *DisableInsightRulesInput, opts ...request.Option) (*DisableInsightRulesOutput, error) { + req, out := c.DisableInsightRulesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opEnableAlarmActions = "EnableAlarmActions" + +// EnableAlarmActionsRequest generates a "aws/request.Request" representing the +// client's request for the EnableAlarmActions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See EnableAlarmActions for more information on using the EnableAlarmActions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the EnableAlarmActionsRequest method. 
+// req, resp := client.EnableAlarmActionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/EnableAlarmActions +func (c *CloudWatch) EnableAlarmActionsRequest(input *EnableAlarmActionsInput) (req *request.Request, output *EnableAlarmActionsOutput) { + op := &request.Operation{ + Name: opEnableAlarmActions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableAlarmActionsInput{} + } + + output = &EnableAlarmActionsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// EnableAlarmActions API operation for Amazon CloudWatch. +// +// Enables the actions for the specified alarms. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation EnableAlarmActions for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/EnableAlarmActions +func (c *CloudWatch) EnableAlarmActions(input *EnableAlarmActionsInput) (*EnableAlarmActionsOutput, error) { + req, out := c.EnableAlarmActionsRequest(input) + return out, req.Send() +} + +// EnableAlarmActionsWithContext is the same as EnableAlarmActions with the addition of +// the ability to pass a context and additional request options. +// +// See EnableAlarmActions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *CloudWatch) EnableAlarmActionsWithContext(ctx aws.Context, input *EnableAlarmActionsInput, opts ...request.Option) (*EnableAlarmActionsOutput, error) { + req, out := c.EnableAlarmActionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opEnableInsightRules = "EnableInsightRules" + +// EnableInsightRulesRequest generates a "aws/request.Request" representing the +// client's request for the EnableInsightRules operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See EnableInsightRules for more information on using the EnableInsightRules +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the EnableInsightRulesRequest method. +// req, resp := client.EnableInsightRulesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/EnableInsightRules +func (c *CloudWatch) EnableInsightRulesRequest(input *EnableInsightRulesInput) (req *request.Request, output *EnableInsightRulesOutput) { + op := &request.Operation{ + Name: opEnableInsightRules, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableInsightRulesInput{} + } + + output = &EnableInsightRulesOutput{} + req = c.newRequest(op, input, output) + return +} + +// EnableInsightRules API operation for Amazon CloudWatch. +// +// Enables the specified Contributor Insights rules. When rules are enabled, +// they immediately begin analyzing log data. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation EnableInsightRules for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. +// +// - ErrCodeMissingRequiredParameterException "MissingParameter" +// An input parameter that is required is missing. +// +// - ErrCodeLimitExceededException "LimitExceededException" +// The operation exceeded one or more limits. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/EnableInsightRules +func (c *CloudWatch) EnableInsightRules(input *EnableInsightRulesInput) (*EnableInsightRulesOutput, error) { + req, out := c.EnableInsightRulesRequest(input) + return out, req.Send() +} + +// EnableInsightRulesWithContext is the same as EnableInsightRules with the addition of +// the ability to pass a context and additional request options. +// +// See EnableInsightRules for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) EnableInsightRulesWithContext(ctx aws.Context, input *EnableInsightRulesInput, opts ...request.Option) (*EnableInsightRulesOutput, error) { + req, out := c.EnableInsightRulesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetDashboard = "GetDashboard" + +// GetDashboardRequest generates a "aws/request.Request" representing the +// client's request for the GetDashboard operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetDashboard for more information on using the GetDashboard +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetDashboardRequest method. +// req, resp := client.GetDashboardRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/GetDashboard +func (c *CloudWatch) GetDashboardRequest(input *GetDashboardInput) (req *request.Request, output *GetDashboardOutput) { + op := &request.Operation{ + Name: opGetDashboard, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDashboardInput{} + } + + output = &GetDashboardOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetDashboard API operation for Amazon CloudWatch. +// +// Displays the details of the dashboard that you specify. +// +// To copy an existing dashboard, use GetDashboard, and then use the data returned +// within DashboardBody as the template for the new dashboard when you call +// PutDashboard to create the copy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation GetDashboard for usage and error information. 
+// +// Returned Error Codes: +// +// - ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. +// +// - ErrCodeDashboardNotFoundError "ResourceNotFound" +// The specified dashboard does not exist. +// +// - ErrCodeInternalServiceFault "InternalServiceError" +// Request processing has failed due to some unknown error, exception, or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/GetDashboard +func (c *CloudWatch) GetDashboard(input *GetDashboardInput) (*GetDashboardOutput, error) { + req, out := c.GetDashboardRequest(input) + return out, req.Send() +} + +// GetDashboardWithContext is the same as GetDashboard with the addition of +// the ability to pass a context and additional request options. +// +// See GetDashboard for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) GetDashboardWithContext(ctx aws.Context, input *GetDashboardInput, opts ...request.Option) (*GetDashboardOutput, error) { + req, out := c.GetDashboardRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetInsightRuleReport = "GetInsightRuleReport" + +// GetInsightRuleReportRequest generates a "aws/request.Request" representing the +// client's request for the GetInsightRuleReport operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See GetInsightRuleReport for more information on using the GetInsightRuleReport +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetInsightRuleReportRequest method. +// req, resp := client.GetInsightRuleReportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/GetInsightRuleReport +func (c *CloudWatch) GetInsightRuleReportRequest(input *GetInsightRuleReportInput) (req *request.Request, output *GetInsightRuleReportOutput) { + op := &request.Operation{ + Name: opGetInsightRuleReport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetInsightRuleReportInput{} + } + + output = &GetInsightRuleReportOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetInsightRuleReport API operation for Amazon CloudWatch. +// +// This operation returns the time series data collected by a Contributor Insights +// rule. The data includes the identity and number of contributors to the log +// group. +// +// You can also optionally return one or more statistics about each data point +// in the time series. These statistics can include the following: +// +// - UniqueContributors -- the number of unique contributors for each data +// point. +// +// - MaxContributorValue -- the value of the top contributor for each data +// point. The identity of the contributor might change for each data point +// in the graph. If this rule aggregates by COUNT, the top contributor for +// each data point is the contributor with the most occurrences in that period. 
+// If the rule aggregates by SUM, the top contributor is the contributor +// with the highest sum in the log field specified by the rule's Value, during +// that period. +// +// - SampleCount -- the number of data points matched by the rule. +// +// - Sum -- the sum of the values from all contributors during the time period +// represented by that data point. +// +// - Minimum -- the minimum value from a single observation during the time +// period represented by that data point. +// +// - Maximum -- the maximum value from a single observation during the time +// period represented by that data point. +// +// - Average -- the average value from all contributors during the time period +// represented by that data point. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation GetInsightRuleReport for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. +// +// - ErrCodeMissingRequiredParameterException "MissingParameter" +// An input parameter that is required is missing. +// +// - ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The named resource does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/GetInsightRuleReport +func (c *CloudWatch) GetInsightRuleReport(input *GetInsightRuleReportInput) (*GetInsightRuleReportOutput, error) { + req, out := c.GetInsightRuleReportRequest(input) + return out, req.Send() +} + +// GetInsightRuleReportWithContext is the same as GetInsightRuleReport with the addition of +// the ability to pass a context and additional request options. +// +// See GetInsightRuleReport for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) GetInsightRuleReportWithContext(ctx aws.Context, input *GetInsightRuleReportInput, opts ...request.Option) (*GetInsightRuleReportOutput, error) { + req, out := c.GetInsightRuleReportRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetMetricData = "GetMetricData" + +// GetMetricDataRequest generates a "aws/request.Request" representing the +// client's request for the GetMetricData operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetMetricData for more information on using the GetMetricData +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetMetricDataRequest method. 
+// req, resp := client.GetMetricDataRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/GetMetricData +func (c *CloudWatch) GetMetricDataRequest(input *GetMetricDataInput) (req *request.Request, output *GetMetricDataOutput) { + op := &request.Operation{ + Name: opGetMetricData, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxDatapoints", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetMetricDataInput{} + } + + output = &GetMetricDataOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetMetricData API operation for Amazon CloudWatch. +// +// You can use the GetMetricData API to retrieve CloudWatch metric values. The +// operation can also include a CloudWatch Metrics Insights query, and one or +// more metric math functions. +// +// A GetMetricData operation that does not include a query can retrieve as many +// as 500 different metrics in a single request, with a total of as many as +// 100,800 data points. You can also optionally perform metric math expressions +// on the values of the returned statistics, to create new time series that +// represent new insights into your data. For example, using Lambda metrics, +// you could divide the Errors metric by the Invocations metric to get an error +// rate time series. For more information about metric math expressions, see +// Metric Math Syntax and Functions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax) +// in the Amazon CloudWatch User Guide. +// +// If you include a Metrics Insights query, each GetMetricData operation can +// include only one query. But the same GetMetricData operation can also retrieve +// other metrics. 
Metrics Insights queries can query only the most recent three +// hours of metric data. For more information about Metrics Insights, see Query +// your metrics with CloudWatch Metrics Insights (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/query_with_cloudwatch-metrics-insights.html). +// +// Calls to the GetMetricData API have a different pricing structure than calls +// to GetMetricStatistics. For more information about pricing, see Amazon CloudWatch +// Pricing (https://aws.amazon.com/cloudwatch/pricing/). +// +// Amazon CloudWatch retains metric data as follows: +// +// - Data points with a period of less than 60 seconds are available for +// 3 hours. These data points are high-resolution metrics and are available +// only for custom metrics that have been defined with a StorageResolution +// of 1. +// +// - Data points with a period of 60 seconds (1-minute) are available for +// 15 days. +// +// - Data points with a period of 300 seconds (5-minute) are available for +// 63 days. +// +// - Data points with a period of 3600 seconds (1 hour) are available for +// 455 days (15 months). +// +// Data points that are initially published with a shorter period are aggregated +// together for long-term storage. For example, if you collect data using a +// period of 1 minute, the data remains available for 15 days with 1-minute +// resolution. After 15 days, this data is still available, but is aggregated +// and retrievable only with a resolution of 5 minutes. After 63 days, the data +// is further aggregated and is available with a resolution of 1 hour. +// +// If you omit Unit in your request, all data that was collected with any unit +// is returned, along with the corresponding units that were specified when +// the data was reported to CloudWatch. If you specify a unit, the operation +// returns only data that was collected with that unit specified. 
If you specify +// a unit that does not match the data collected, the results of the operation +// are null. CloudWatch does not perform unit conversions. +// +// # Using Metrics Insights queries with metric math +// +// You can't mix a Metric Insights query and metric math syntax in the same +// expression, but you can reference results from a Metrics Insights query within +// other Metric math expressions. A Metrics Insights query without a GROUP BY +// clause returns a single time-series (TS), and can be used as input for a +// metric math expression that expects a single time series. A Metrics Insights +// query with a GROUP BY clause returns an array of time-series (TS[]), and +// can be used as input for a metric math expression that expects an array of +// time series. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation GetMetricData for usage and error information. +// +// Returned Error Codes: +// - ErrCodeInvalidNextToken "InvalidNextToken" +// The next token specified is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/GetMetricData +func (c *CloudWatch) GetMetricData(input *GetMetricDataInput) (*GetMetricDataOutput, error) { + req, out := c.GetMetricDataRequest(input) + return out, req.Send() +} + +// GetMetricDataWithContext is the same as GetMetricData with the addition of +// the ability to pass a context and additional request options. +// +// See GetMetricData for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *CloudWatch) GetMetricDataWithContext(ctx aws.Context, input *GetMetricDataInput, opts ...request.Option) (*GetMetricDataOutput, error) { + req, out := c.GetMetricDataRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetMetricDataPages iterates over the pages of a GetMetricData operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetMetricData method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetMetricData operation. +// pageNum := 0 +// err := client.GetMetricDataPages(params, +// func(page *cloudwatch.GetMetricDataOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *CloudWatch) GetMetricDataPages(input *GetMetricDataInput, fn func(*GetMetricDataOutput, bool) bool) error { + return c.GetMetricDataPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetMetricDataPagesWithContext same as GetMetricDataPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) GetMetricDataPagesWithContext(ctx aws.Context, input *GetMetricDataInput, fn func(*GetMetricDataOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetMetricDataInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetMetricDataRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetMetricDataOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetMetricStatistics = "GetMetricStatistics" + +// GetMetricStatisticsRequest generates a "aws/request.Request" representing the +// client's request for the GetMetricStatistics operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetMetricStatistics for more information on using the GetMetricStatistics +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetMetricStatisticsRequest method. +// req, resp := client.GetMetricStatisticsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/GetMetricStatistics +func (c *CloudWatch) GetMetricStatisticsRequest(input *GetMetricStatisticsInput) (req *request.Request, output *GetMetricStatisticsOutput) { + op := &request.Operation{ + Name: opGetMetricStatistics, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetMetricStatisticsInput{} + } + + output = &GetMetricStatisticsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetMetricStatistics API operation for Amazon CloudWatch. +// +// Gets statistics for the specified metric. +// +// The maximum number of data points returned from a single call is 1,440. If +// you request more than 1,440 data points, CloudWatch returns an error. 
To +// reduce the number of data points, you can narrow the specified time range +// and make multiple requests across adjacent time ranges, or you can increase +// the specified period. Data points are not returned in chronological order. +// +// CloudWatch aggregates data points based on the length of the period that +// you specify. For example, if you request statistics with a one-hour period, +// CloudWatch aggregates all data points with time stamps that fall within each +// one-hour period. Therefore, the number of values aggregated by CloudWatch +// is larger than the number of data points returned. +// +// CloudWatch needs raw data points to calculate percentile statistics. If you +// publish data using a statistic set instead, you can only retrieve percentile +// statistics for this data if one of the following conditions is true: +// +// - The SampleCount value of the statistic set is 1. +// +// - The Min and the Max values of the statistic set are equal. +// +// Percentile statistics are not available for metrics when any of the metric +// values are negative numbers. +// +// Amazon CloudWatch retains metric data as follows: +// +// - Data points with a period of less than 60 seconds are available for +// 3 hours. These data points are high-resolution metrics and are available +// only for custom metrics that have been defined with a StorageResolution +// of 1. +// +// - Data points with a period of 60 seconds (1-minute) are available for +// 15 days. +// +// - Data points with a period of 300 seconds (5-minute) are available for +// 63 days. +// +// - Data points with a period of 3600 seconds (1 hour) are available for +// 455 days (15 months). +// +// Data points that are initially published with a shorter period are aggregated +// together for long-term storage. For example, if you collect data using a +// period of 1 minute, the data remains available for 15 days with 1-minute +// resolution. 
After 15 days, this data is still available, but is aggregated +// and retrievable only with a resolution of 5 minutes. After 63 days, the data +// is further aggregated and is available with a resolution of 1 hour. +// +// CloudWatch started retaining 5-minute and 1-hour metric data as of July 9, +// 2016. +// +// For information about metrics and dimensions supported by Amazon Web Services +// services, see the Amazon CloudWatch Metrics and Dimensions Reference (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CW_Support_For_AWS.html) +// in the Amazon CloudWatch User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation GetMetricStatistics for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. +// +// - ErrCodeMissingRequiredParameterException "MissingParameter" +// An input parameter that is required is missing. +// +// - ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" +// Parameters were used together that cannot be used together. +// +// - ErrCodeInternalServiceFault "InternalServiceError" +// Request processing has failed due to some unknown error, exception, or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/GetMetricStatistics +func (c *CloudWatch) GetMetricStatistics(input *GetMetricStatisticsInput) (*GetMetricStatisticsOutput, error) { + req, out := c.GetMetricStatisticsRequest(input) + return out, req.Send() +} + +// GetMetricStatisticsWithContext is the same as GetMetricStatistics with the addition of +// the ability to pass a context and additional request options. 
+// +// See GetMetricStatistics for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) GetMetricStatisticsWithContext(ctx aws.Context, input *GetMetricStatisticsInput, opts ...request.Option) (*GetMetricStatisticsOutput, error) { + req, out := c.GetMetricStatisticsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetMetricStream = "GetMetricStream" + +// GetMetricStreamRequest generates a "aws/request.Request" representing the +// client's request for the GetMetricStream operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetMetricStream for more information on using the GetMetricStream +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetMetricStreamRequest method. 
+// req, resp := client.GetMetricStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/GetMetricStream +func (c *CloudWatch) GetMetricStreamRequest(input *GetMetricStreamInput) (req *request.Request, output *GetMetricStreamOutput) { + op := &request.Operation{ + Name: opGetMetricStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetMetricStreamInput{} + } + + output = &GetMetricStreamOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetMetricStream API operation for Amazon CloudWatch. +// +// Returns information about the metric stream that you specify. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation GetMetricStream for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The named resource does not exist. +// +// - ErrCodeInternalServiceFault "InternalServiceError" +// Request processing has failed due to some unknown error, exception, or failure. +// +// - ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. +// +// - ErrCodeMissingRequiredParameterException "MissingParameter" +// An input parameter that is required is missing. +// +// - ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" +// Parameters were used together that cannot be used together. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/GetMetricStream +func (c *CloudWatch) GetMetricStream(input *GetMetricStreamInput) (*GetMetricStreamOutput, error) { + req, out := c.GetMetricStreamRequest(input) + return out, req.Send() +} + +// GetMetricStreamWithContext is the same as GetMetricStream with the addition of +// the ability to pass a context and additional request options. +// +// See GetMetricStream for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) GetMetricStreamWithContext(ctx aws.Context, input *GetMetricStreamInput, opts ...request.Option) (*GetMetricStreamOutput, error) { + req, out := c.GetMetricStreamRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetMetricWidgetImage = "GetMetricWidgetImage" + +// GetMetricWidgetImageRequest generates a "aws/request.Request" representing the +// client's request for the GetMetricWidgetImage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetMetricWidgetImage for more information on using the GetMetricWidgetImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetMetricWidgetImageRequest method. 
+// req, resp := client.GetMetricWidgetImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/GetMetricWidgetImage +func (c *CloudWatch) GetMetricWidgetImageRequest(input *GetMetricWidgetImageInput) (req *request.Request, output *GetMetricWidgetImageOutput) { + op := &request.Operation{ + Name: opGetMetricWidgetImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetMetricWidgetImageInput{} + } + + output = &GetMetricWidgetImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetMetricWidgetImage API operation for Amazon CloudWatch. +// +// You can use the GetMetricWidgetImage API to retrieve a snapshot graph of +// one or more Amazon CloudWatch metrics as a bitmap image. You can then embed +// this image into your services and products, such as wiki pages, reports, +// and documents. You could also retrieve images regularly, such as every minute, +// and create your own custom live dashboard. +// +// The graph you retrieve can include all CloudWatch metric graph features, +// including metric math and horizontal and vertical annotations. +// +// There is a limit of 20 transactions per second for this API. Each GetMetricWidgetImage +// action has the following limits: +// +// - As many as 100 metrics in the graph. +// +// - Up to 100 KB uncompressed payload. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation GetMetricWidgetImage for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/GetMetricWidgetImage +func (c *CloudWatch) GetMetricWidgetImage(input *GetMetricWidgetImageInput) (*GetMetricWidgetImageOutput, error) { + req, out := c.GetMetricWidgetImageRequest(input) + return out, req.Send() +} + +// GetMetricWidgetImageWithContext is the same as GetMetricWidgetImage with the addition of +// the ability to pass a context and additional request options. +// +// See GetMetricWidgetImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) GetMetricWidgetImageWithContext(ctx aws.Context, input *GetMetricWidgetImageInput, opts ...request.Option) (*GetMetricWidgetImageOutput, error) { + req, out := c.GetMetricWidgetImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListDashboards = "ListDashboards" + +// ListDashboardsRequest generates a "aws/request.Request" representing the +// client's request for the ListDashboards operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDashboards for more information on using the ListDashboards +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListDashboardsRequest method. 
+// req, resp := client.ListDashboardsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/ListDashboards +func (c *CloudWatch) ListDashboardsRequest(input *ListDashboardsInput) (req *request.Request, output *ListDashboardsOutput) { + op := &request.Operation{ + Name: opListDashboards, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDashboardsInput{} + } + + output = &ListDashboardsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDashboards API operation for Amazon CloudWatch. +// +// Returns a list of the dashboards for your account. If you include DashboardNamePrefix, +// only those dashboards with names starting with the prefix are listed. Otherwise, +// all dashboards in your account are listed. +// +// ListDashboards returns up to 1000 results on one page. If there are more +// than 1000 dashboards, you can call ListDashboards again and include the value +// you received for NextToken in the first call, to receive the next 1000 results. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation ListDashboards for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. +// +// - ErrCodeInternalServiceFault "InternalServiceError" +// Request processing has failed due to some unknown error, exception, or failure. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/ListDashboards +func (c *CloudWatch) ListDashboards(input *ListDashboardsInput) (*ListDashboardsOutput, error) { + req, out := c.ListDashboardsRequest(input) + return out, req.Send() +} + +// ListDashboardsWithContext is the same as ListDashboards with the addition of +// the ability to pass a context and additional request options. +// +// See ListDashboards for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) ListDashboardsWithContext(ctx aws.Context, input *ListDashboardsInput, opts ...request.Option) (*ListDashboardsOutput, error) { + req, out := c.ListDashboardsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListDashboardsPages iterates over the pages of a ListDashboards operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDashboards method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDashboards operation. 
+// pageNum := 0 +// err := client.ListDashboardsPages(params, +// func(page *cloudwatch.ListDashboardsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *CloudWatch) ListDashboardsPages(input *ListDashboardsInput, fn func(*ListDashboardsOutput, bool) bool) error { + return c.ListDashboardsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDashboardsPagesWithContext same as ListDashboardsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) ListDashboardsPagesWithContext(ctx aws.Context, input *ListDashboardsInput, fn func(*ListDashboardsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDashboardsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDashboardsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDashboardsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListMetricStreams = "ListMetricStreams" + +// ListMetricStreamsRequest generates a "aws/request.Request" representing the +// client's request for the ListMetricStreams operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListMetricStreams for more information on using the ListMetricStreams +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListMetricStreamsRequest method. +// req, resp := client.ListMetricStreamsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/ListMetricStreams +func (c *CloudWatch) ListMetricStreamsRequest(input *ListMetricStreamsInput) (req *request.Request, output *ListMetricStreamsOutput) { + op := &request.Operation{ + Name: opListMetricStreams, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListMetricStreamsInput{} + } + + output = &ListMetricStreamsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListMetricStreams API operation for Amazon CloudWatch. +// +// Returns a list of metric streams in this account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation ListMetricStreams for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeInvalidNextToken "InvalidNextToken" +// The next token specified is invalid. +// +// - ErrCodeInternalServiceFault "InternalServiceError" +// Request processing has failed due to some unknown error, exception, or failure. +// +// - ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. 
+// +// - ErrCodeMissingRequiredParameterException "MissingParameter" +// An input parameter that is required is missing. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/ListMetricStreams +func (c *CloudWatch) ListMetricStreams(input *ListMetricStreamsInput) (*ListMetricStreamsOutput, error) { + req, out := c.ListMetricStreamsRequest(input) + return out, req.Send() +} + +// ListMetricStreamsWithContext is the same as ListMetricStreams with the addition of +// the ability to pass a context and additional request options. +// +// See ListMetricStreams for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) ListMetricStreamsWithContext(ctx aws.Context, input *ListMetricStreamsInput, opts ...request.Option) (*ListMetricStreamsOutput, error) { + req, out := c.ListMetricStreamsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListMetricStreamsPages iterates over the pages of a ListMetricStreams operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListMetricStreams method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListMetricStreams operation. 
+// pageNum := 0 +// err := client.ListMetricStreamsPages(params, +// func(page *cloudwatch.ListMetricStreamsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *CloudWatch) ListMetricStreamsPages(input *ListMetricStreamsInput, fn func(*ListMetricStreamsOutput, bool) bool) error { + return c.ListMetricStreamsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListMetricStreamsPagesWithContext same as ListMetricStreamsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) ListMetricStreamsPagesWithContext(ctx aws.Context, input *ListMetricStreamsInput, fn func(*ListMetricStreamsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListMetricStreamsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListMetricStreamsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListMetricStreamsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListMetrics = "ListMetrics" + +// ListMetricsRequest generates a "aws/request.Request" representing the +// client's request for the ListMetrics operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See ListMetrics for more information on using the ListMetrics +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListMetricsRequest method. +// req, resp := client.ListMetricsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/ListMetrics +func (c *CloudWatch) ListMetricsRequest(input *ListMetricsInput) (req *request.Request, output *ListMetricsOutput) { + op := &request.Operation{ + Name: opListMetrics, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListMetricsInput{} + } + + output = &ListMetricsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListMetrics API operation for Amazon CloudWatch. +// +// List the specified metrics. You can use the returned metrics with GetMetricData +// (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html) +// or GetMetricStatistics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricStatistics.html) +// to get statistical data. +// +// Up to 500 results are returned for any one call. To retrieve additional results, +// use the returned token with subsequent calls. +// +// After you create a metric, allow up to 15 minutes for the metric to appear. +// To see metric statistics sooner, use GetMetricData (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html) +// or GetMetricStatistics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricStatistics.html). 
+// +// If you are using CloudWatch cross-account observability, you can use this +// operation in a monitoring account and view metrics from the linked source +// accounts. For more information, see CloudWatch cross-account observability +// (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html). +// +// ListMetrics doesn't return information about metrics if those metrics haven't +// reported data in the past two weeks. To retrieve those metrics, use GetMetricData +// (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html) +// or GetMetricStatistics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricStatistics.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation ListMetrics for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeInternalServiceFault "InternalServiceError" +// Request processing has failed due to some unknown error, exception, or failure. +// +// - ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/ListMetrics +func (c *CloudWatch) ListMetrics(input *ListMetricsInput) (*ListMetricsOutput, error) { + req, out := c.ListMetricsRequest(input) + return out, req.Send() +} + +// ListMetricsWithContext is the same as ListMetrics with the addition of +// the ability to pass a context and additional request options. +// +// See ListMetrics for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) ListMetricsWithContext(ctx aws.Context, input *ListMetricsInput, opts ...request.Option) (*ListMetricsOutput, error) { + req, out := c.ListMetricsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListMetricsPages iterates over the pages of a ListMetrics operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListMetrics method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListMetrics operation. +// pageNum := 0 +// err := client.ListMetricsPages(params, +// func(page *cloudwatch.ListMetricsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *CloudWatch) ListMetricsPages(input *ListMetricsInput, fn func(*ListMetricsOutput, bool) bool) error { + return c.ListMetricsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListMetricsPagesWithContext same as ListMetricsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *CloudWatch) ListMetricsPagesWithContext(ctx aws.Context, input *ListMetricsInput, fn func(*ListMetricsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListMetricsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListMetricsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListMetricsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListTagsForResourceRequest method. 
+// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/ListTagsForResource +func (c *CloudWatch) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for Amazon CloudWatch. +// +// Displays the tags associated with a CloudWatch resource. Currently, alarms +// and Contributor Insights rules support tagging. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. +// +// - ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The named resource does not exist. +// +// - ErrCodeInternalServiceFault "InternalServiceError" +// Request processing has failed due to some unknown error, exception, or failure. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/ListTagsForResource +func (c *CloudWatch) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutAnomalyDetector = "PutAnomalyDetector" + +// PutAnomalyDetectorRequest generates a "aws/request.Request" representing the +// client's request for the PutAnomalyDetector operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutAnomalyDetector for more information on using the PutAnomalyDetector +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutAnomalyDetectorRequest method. 
+// req, resp := client.PutAnomalyDetectorRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/PutAnomalyDetector +func (c *CloudWatch) PutAnomalyDetectorRequest(input *PutAnomalyDetectorInput) (req *request.Request, output *PutAnomalyDetectorOutput) { + op := &request.Operation{ + Name: opPutAnomalyDetector, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutAnomalyDetectorInput{} + } + + output = &PutAnomalyDetectorOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutAnomalyDetector API operation for Amazon CloudWatch. +// +// Creates an anomaly detection model for a CloudWatch metric. You can use the +// model to display a band of expected normal values when the metric is graphed. +// +// If you have enabled unified cross-account observability, and this account +// is a monitoring account, the metric can be in the same account or a source +// account. You can specify the account ID in the object you specify in the +// SingleMetricAnomalyDetector parameter. +// +// For more information, see CloudWatch Anomaly Detection (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Anomaly_Detection.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation PutAnomalyDetector for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeLimitExceededException "LimitExceededException" +// The operation exceeded one or more limits. 
+// +// - ErrCodeInternalServiceFault "InternalServiceError" +// Request processing has failed due to some unknown error, exception, or failure. +// +// - ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. +// +// - ErrCodeMissingRequiredParameterException "MissingParameter" +// An input parameter that is required is missing. +// +// - ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" +// Parameters were used together that cannot be used together. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/PutAnomalyDetector +func (c *CloudWatch) PutAnomalyDetector(input *PutAnomalyDetectorInput) (*PutAnomalyDetectorOutput, error) { + req, out := c.PutAnomalyDetectorRequest(input) + return out, req.Send() +} + +// PutAnomalyDetectorWithContext is the same as PutAnomalyDetector with the addition of +// the ability to pass a context and additional request options. +// +// See PutAnomalyDetector for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) PutAnomalyDetectorWithContext(ctx aws.Context, input *PutAnomalyDetectorInput, opts ...request.Option) (*PutAnomalyDetectorOutput, error) { + req, out := c.PutAnomalyDetectorRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutCompositeAlarm = "PutCompositeAlarm" + +// PutCompositeAlarmRequest generates a "aws/request.Request" representing the +// client's request for the PutCompositeAlarm operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutCompositeAlarm for more information on using the PutCompositeAlarm +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutCompositeAlarmRequest method. +// req, resp := client.PutCompositeAlarmRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/PutCompositeAlarm +func (c *CloudWatch) PutCompositeAlarmRequest(input *PutCompositeAlarmInput) (req *request.Request, output *PutCompositeAlarmOutput) { + op := &request.Operation{ + Name: opPutCompositeAlarm, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutCompositeAlarmInput{} + } + + output = &PutCompositeAlarmOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutCompositeAlarm API operation for Amazon CloudWatch. +// +// Creates or updates a composite alarm. When you create a composite alarm, +// you specify a rule expression for the alarm that takes into account the alarm +// states of other alarms that you have created. The composite alarm goes into +// ALARM state only if all conditions of the rule are met. +// +// The alarms specified in a composite alarm's rule expression can include metric +// alarms and other composite alarms. The rule expression of a composite alarm +// can include as many as 100 underlying alarms. Any single alarm can be included +// in the rule expressions of as many as 150 composite alarms. +// +// Using composite alarms can reduce alarm noise. 
You can create multiple metric +// alarms, and also create a composite alarm and set up alerts only for the +// composite alarm. For example, you could create a composite alarm that goes +// into ALARM state only when more than one of the underlying metric alarms +// are in ALARM state. +// +// Composite alarms can take the following actions: +// +// - Notify Amazon SNS topics. +// +// - Invoke Lambda functions. +// +// - Create OpsItems in Systems Manager Ops Center. +// +// - Create incidents in Systems Manager Incident Manager. +// +// It is possible to create a loop or cycle of composite alarms, where composite +// alarm A depends on composite alarm B, and composite alarm B also depends +// on composite alarm A. In this scenario, you can't delete any composite alarm +// that is part of the cycle because there is always still a composite alarm +// that depends on that alarm that you want to delete. +// +// To get out of such a situation, you must break the cycle by changing the +// rule of one of the composite alarms in the cycle to remove a dependency that +// creates the cycle. The simplest change to make to break a cycle is to change +// the AlarmRule of one of the alarms to false. +// +// Additionally, the evaluation of composite alarms stops if CloudWatch detects +// a cycle in the evaluation path. +// +// When this operation creates an alarm, the alarm state is immediately set +// to INSUFFICIENT_DATA. The alarm is then evaluated and its state is set appropriately. +// Any actions associated with the new state are then executed. For a composite +// alarm, this initial time after creation is the only time that the alarm can +// be in INSUFFICIENT_DATA state. +// +// When you update an existing alarm, its state is left unchanged, but the update +// completely overwrites the previous configuration of the alarm. +// +// To use this operation, you must be signed on with the cloudwatch:PutCompositeAlarm +// permission that is scoped to *. 
You can't create a composite alarms if your +// cloudwatch:PutCompositeAlarm permission has a narrower scope. +// +// If you are an IAM user, you must have iam:CreateServiceLinkedRole to create +// a composite alarm that has Systems Manager OpsItem actions. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation PutCompositeAlarm for usage and error information. +// +// Returned Error Codes: +// - ErrCodeLimitExceededFault "LimitExceeded" +// The quota for alarms for this customer has already been reached. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/PutCompositeAlarm +func (c *CloudWatch) PutCompositeAlarm(input *PutCompositeAlarmInput) (*PutCompositeAlarmOutput, error) { + req, out := c.PutCompositeAlarmRequest(input) + return out, req.Send() +} + +// PutCompositeAlarmWithContext is the same as PutCompositeAlarm with the addition of +// the ability to pass a context and additional request options. +// +// See PutCompositeAlarm for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) PutCompositeAlarmWithContext(ctx aws.Context, input *PutCompositeAlarmInput, opts ...request.Option) (*PutCompositeAlarmOutput, error) { + req, out := c.PutCompositeAlarmRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutDashboard = "PutDashboard" + +// PutDashboardRequest generates a "aws/request.Request" representing the +// client's request for the PutDashboard operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutDashboard for more information on using the PutDashboard +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutDashboardRequest method. +// req, resp := client.PutDashboardRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/PutDashboard +func (c *CloudWatch) PutDashboardRequest(input *PutDashboardInput) (req *request.Request, output *PutDashboardOutput) { + op := &request.Operation{ + Name: opPutDashboard, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutDashboardInput{} + } + + output = &PutDashboardOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutDashboard API operation for Amazon CloudWatch. +// +// Creates a dashboard if it does not already exist, or updates an existing +// dashboard. If you update a dashboard, the entire contents are replaced with +// what you specify here. +// +// All dashboards in your account are global, not region-specific. +// +// A simple way to create a dashboard using PutDashboard is to copy an existing +// dashboard. To copy an existing dashboard using the console, you can load +// the dashboard and then use the View/edit source command in the Actions menu +// to display the JSON block for that dashboard. 
Another way to copy a dashboard +// is to use GetDashboard, and then use the data returned within DashboardBody +// as the template for the new dashboard when you call PutDashboard. +// +// When you create a dashboard with PutDashboard, a good practice is to add +// a text widget at the top of the dashboard with a message that the dashboard +// was created by script and should not be changed in the console. This message +// could also point console users to the location of the DashboardBody script +// or the CloudFormation template used to create the dashboard. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation PutDashboard for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeDashboardInvalidInputError "InvalidParameterInput" +// Some part of the dashboard data is invalid. +// +// - ErrCodeInternalServiceFault "InternalServiceError" +// Request processing has failed due to some unknown error, exception, or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/PutDashboard +func (c *CloudWatch) PutDashboard(input *PutDashboardInput) (*PutDashboardOutput, error) { + req, out := c.PutDashboardRequest(input) + return out, req.Send() +} + +// PutDashboardWithContext is the same as PutDashboard with the addition of +// the ability to pass a context and additional request options. +// +// See PutDashboard for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *CloudWatch) PutDashboardWithContext(ctx aws.Context, input *PutDashboardInput, opts ...request.Option) (*PutDashboardOutput, error) { + req, out := c.PutDashboardRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutInsightRule = "PutInsightRule" + +// PutInsightRuleRequest generates a "aws/request.Request" representing the +// client's request for the PutInsightRule operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutInsightRule for more information on using the PutInsightRule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutInsightRuleRequest method. +// req, resp := client.PutInsightRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/PutInsightRule +func (c *CloudWatch) PutInsightRuleRequest(input *PutInsightRuleInput) (req *request.Request, output *PutInsightRuleOutput) { + op := &request.Operation{ + Name: opPutInsightRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutInsightRuleInput{} + } + + output = &PutInsightRuleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutInsightRule API operation for Amazon CloudWatch. +// +// Creates a Contributor Insights rule. 
Rules evaluate log events in a CloudWatch +// Logs log group, enabling you to find contributor data for the log events +// in that log group. For more information, see Using Contributor Insights to +// Analyze High-Cardinality Data (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights.html). +// +// If you create a rule, delete it, and then re-create it with the same name, +// historical data from the first time the rule was created might not be available. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation PutInsightRule for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. +// +// - ErrCodeMissingRequiredParameterException "MissingParameter" +// An input parameter that is required is missing. +// +// - ErrCodeLimitExceededException "LimitExceededException" +// The operation exceeded one or more limits. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/PutInsightRule +func (c *CloudWatch) PutInsightRule(input *PutInsightRuleInput) (*PutInsightRuleOutput, error) { + req, out := c.PutInsightRuleRequest(input) + return out, req.Send() +} + +// PutInsightRuleWithContext is the same as PutInsightRule with the addition of +// the ability to pass a context and additional request options. +// +// See PutInsightRule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *CloudWatch) PutInsightRuleWithContext(ctx aws.Context, input *PutInsightRuleInput, opts ...request.Option) (*PutInsightRuleOutput, error) { + req, out := c.PutInsightRuleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutMetricAlarm = "PutMetricAlarm" + +// PutMetricAlarmRequest generates a "aws/request.Request" representing the +// client's request for the PutMetricAlarm operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutMetricAlarm for more information on using the PutMetricAlarm +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutMetricAlarmRequest method. +// req, resp := client.PutMetricAlarmRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/PutMetricAlarm +func (c *CloudWatch) PutMetricAlarmRequest(input *PutMetricAlarmInput) (req *request.Request, output *PutMetricAlarmOutput) { + op := &request.Operation{ + Name: opPutMetricAlarm, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutMetricAlarmInput{} + } + + output = &PutMetricAlarmOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutMetricAlarm API operation for Amazon CloudWatch. 
+// +// Creates or updates an alarm and associates it with the specified metric, +// metric math expression, anomaly detection model, or Metrics Insights query. +// For more information about using a Metrics Insights query for an alarm, see +// Create alarms on Metrics Insights queries (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Create_Metrics_Insights_Alarm.html). +// +// Alarms based on anomaly detection models cannot have Auto Scaling actions. +// +// When this operation creates an alarm, the alarm state is immediately set +// to INSUFFICIENT_DATA. The alarm is then evaluated and its state is set appropriately. +// Any actions associated with the new state are then executed. +// +// When you update an existing alarm, its state is left unchanged, but the update +// completely overwrites the previous configuration of the alarm. +// +// If you are an IAM user, you must have Amazon EC2 permissions for some alarm +// operations: +// +// - The iam:CreateServiceLinkedRole permission for all alarms with EC2 actions +// +// - The iam:CreateServiceLinkedRole permissions to create an alarm with +// Systems Manager OpsItem or response plan actions. +// +// The first time you create an alarm in the Amazon Web Services Management +// Console, the CLI, or by using the PutMetricAlarm API, CloudWatch creates +// the necessary service-linked role for you. The service-linked roles are called +// AWSServiceRoleForCloudWatchEvents and AWSServiceRoleForCloudWatchAlarms_ActionSSM. +// For more information, see Amazon Web Services service-linked role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-service-linked-role). +// +// Each PutMetricAlarm action has a maximum uncompressed payload of 120 KB. +// +// # Cross-account alarms +// +// You can set an alarm on metrics in the current account, or in another account. 
+// To create a cross-account alarm that watches a metric in a different account, +// you must have completed the following pre-requisites: +// +// - The account where the metrics are located (the sharing account) must +// already have a sharing role named CloudWatch-CrossAccountSharingRole. +// If it does not already have this role, you must create it using the instructions +// in Set up a sharing account in Cross-account cross-Region CloudWatch console +// (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Cross-Account-Cross-Region.html#enable-cross-account-cross-Region). +// The policy for that role must grant access to the ID of the account where +// you are creating the alarm. +// +// - The account where you are creating the alarm (the monitoring account) +// must already have a service-linked role named AWSServiceRoleForCloudWatchCrossAccount +// to allow CloudWatch to assume the sharing role in the sharing account. +// If it does not, you must create it following the directions in Set up +// a monitoring account in Cross-account cross-Region CloudWatch console +// (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Cross-Account-Cross-Region.html#enable-cross-account-cross-Region). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation PutMetricAlarm for usage and error information. +// +// Returned Error Codes: +// - ErrCodeLimitExceededFault "LimitExceeded" +// The quota for alarms for this customer has already been reached. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/PutMetricAlarm +func (c *CloudWatch) PutMetricAlarm(input *PutMetricAlarmInput) (*PutMetricAlarmOutput, error) { + req, out := c.PutMetricAlarmRequest(input) + return out, req.Send() +} + +// PutMetricAlarmWithContext is the same as PutMetricAlarm with the addition of +// the ability to pass a context and additional request options. +// +// See PutMetricAlarm for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) PutMetricAlarmWithContext(ctx aws.Context, input *PutMetricAlarmInput, opts ...request.Option) (*PutMetricAlarmOutput, error) { + req, out := c.PutMetricAlarmRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutMetricData = "PutMetricData" + +// PutMetricDataRequest generates a "aws/request.Request" representing the +// client's request for the PutMetricData operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutMetricData for more information on using the PutMetricData +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutMetricDataRequest method. 
+// req, resp := client.PutMetricDataRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/PutMetricData +func (c *CloudWatch) PutMetricDataRequest(input *PutMetricDataInput) (req *request.Request, output *PutMetricDataOutput) { + op := &request.Operation{ + Name: opPutMetricData, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutMetricDataInput{} + } + + output = &PutMetricDataOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutMetricData API operation for Amazon CloudWatch. +// +// Publishes metric data to Amazon CloudWatch. CloudWatch associates the data +// with the specified metric. If the specified metric does not exist, CloudWatch +// creates the metric. When CloudWatch creates a metric, it can take up to fifteen +// minutes for the metric to appear in calls to ListMetrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html). +// +// You can publish either individual values in the Value field, or arrays of +// values and the number of times each value occurred during the period by using +// the Values and Counts fields in the MetricData structure. Using the Values +// and Counts method enables you to publish up to 150 values per metric with +// one PutMetricData request, and supports retrieving percentile statistics +// on this data. +// +// Each PutMetricData request is limited to 1 MB in size for HTTP POST requests. +// You can send a payload compressed by gzip. Each request is also limited to +// no more than 1000 different metrics. +// +// Although the Value parameter accepts numbers of type Double, CloudWatch rejects +// values that are either too small or too large. Values must be in the range +// of -2^360 to 2^360. 
In addition, special values (for example, NaN, +Infinity,
+// -Infinity) are not supported.
+//
+// You can use up to 30 dimensions per metric to further clarify what data the
+// metric collects. Each dimension consists of a Name and Value pair. For more
+// information about specifying dimensions, see Publishing Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html)
+// in the Amazon CloudWatch User Guide.
+//
+// You specify the time stamp to be associated with each data point. You can
+// specify time stamps that are as much as two weeks before the current date,
+// and as much as 2 hours after the current day and time.
+//
+// Data points with time stamps from 24 hours ago or longer can take at least
+// 48 hours to become available for GetMetricData (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html)
+// or GetMetricStatistics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricStatistics.html)
+// from the time they are submitted. Data points with time stamps between 3
+// and 24 hours ago can take as much as 2 hours to become available for
+// GetMetricData (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html)
+// or GetMetricStatistics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricStatistics.html).
+//
+// CloudWatch needs raw data points to calculate percentile statistics. If you
+// publish data using a statistic set instead, you can only retrieve percentile
+// statistics for this data if one of the following conditions is true:
+//
+// - The SampleCount value of the statistic set is 1 and Min, Max, and Sum
+// are all equal.
+//
+// - The Min and Max are equal, and Sum is equal to Min multiplied by SampleCount.
+//
+// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation PutMetricData for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. +// +// - ErrCodeMissingRequiredParameterException "MissingParameter" +// An input parameter that is required is missing. +// +// - ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" +// Parameters were used together that cannot be used together. +// +// - ErrCodeInternalServiceFault "InternalServiceError" +// Request processing has failed due to some unknown error, exception, or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/PutMetricData +func (c *CloudWatch) PutMetricData(input *PutMetricDataInput) (*PutMetricDataOutput, error) { + req, out := c.PutMetricDataRequest(input) + return out, req.Send() +} + +// PutMetricDataWithContext is the same as PutMetricData with the addition of +// the ability to pass a context and additional request options. +// +// See PutMetricData for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) PutMetricDataWithContext(ctx aws.Context, input *PutMetricDataInput, opts ...request.Option) (*PutMetricDataOutput, error) { + req, out := c.PutMetricDataRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opPutMetricStream = "PutMetricStream" + +// PutMetricStreamRequest generates a "aws/request.Request" representing the +// client's request for the PutMetricStream operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutMetricStream for more information on using the PutMetricStream +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutMetricStreamRequest method. +// req, resp := client.PutMetricStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/PutMetricStream +func (c *CloudWatch) PutMetricStreamRequest(input *PutMetricStreamInput) (req *request.Request, output *PutMetricStreamOutput) { + op := &request.Operation{ + Name: opPutMetricStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutMetricStreamInput{} + } + + output = &PutMetricStreamOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutMetricStream API operation for Amazon CloudWatch. +// +// Creates or updates a metric stream. Metric streams can automatically stream +// CloudWatch metrics to Amazon Web Services destinations, including Amazon +// S3, and to many third-party solutions. +// +// For more information, see Using Metric Streams (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Metric-Streams.html). 
+// +// To create a metric stream, you must be signed in to an account that has the +// iam:PassRole permission and either the CloudWatchFullAccess policy or the +// cloudwatch:PutMetricStream permission. +// +// When you create or update a metric stream, you choose one of the following: +// +// - Stream metrics from all metric namespaces in the account. +// +// - Stream metrics from all metric namespaces in the account, except for +// the namespaces that you list in ExcludeFilters. +// +// - Stream metrics from only the metric namespaces that you list in IncludeFilters. +// +// By default, a metric stream always sends the MAX, MIN, SUM, and SAMPLECOUNT +// statistics for each metric that is streamed. You can use the StatisticsConfigurations +// parameter to have the metric stream send additional statistics in the stream. +// Streaming additional statistics incurs additional costs. For more information, +// see Amazon CloudWatch Pricing (https://aws.amazon.com/cloudwatch/pricing/). +// +// When you use PutMetricStream to create a new metric stream, the stream is +// created in the running state. If you use it to update an existing stream, +// the state of the stream is not changed. +// +// If you are using CloudWatch cross-account observability and you create a +// metric stream in a monitoring account, you can choose whether to include +// metrics from source accounts in the stream. For more information, see CloudWatch +// cross-account observability (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation PutMetricStream for usage and error information. 
+// +// Returned Error Codes: +// +// - ErrCodeConcurrentModificationException "ConcurrentModificationException" +// More than one process tried to modify a resource at the same time. +// +// - ErrCodeInternalServiceFault "InternalServiceError" +// Request processing has failed due to some unknown error, exception, or failure. +// +// - ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. +// +// - ErrCodeMissingRequiredParameterException "MissingParameter" +// An input parameter that is required is missing. +// +// - ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" +// Parameters were used together that cannot be used together. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/PutMetricStream +func (c *CloudWatch) PutMetricStream(input *PutMetricStreamInput) (*PutMetricStreamOutput, error) { + req, out := c.PutMetricStreamRequest(input) + return out, req.Send() +} + +// PutMetricStreamWithContext is the same as PutMetricStream with the addition of +// the ability to pass a context and additional request options. +// +// See PutMetricStream for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) PutMetricStreamWithContext(ctx aws.Context, input *PutMetricStreamInput, opts ...request.Option) (*PutMetricStreamOutput, error) { + req, out := c.PutMetricStreamRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSetAlarmState = "SetAlarmState" + +// SetAlarmStateRequest generates a "aws/request.Request" representing the +// client's request for the SetAlarmState operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SetAlarmState for more information on using the SetAlarmState +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the SetAlarmStateRequest method. +// req, resp := client.SetAlarmStateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/SetAlarmState +func (c *CloudWatch) SetAlarmStateRequest(input *SetAlarmStateInput) (req *request.Request, output *SetAlarmStateOutput) { + op := &request.Operation{ + Name: opSetAlarmState, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetAlarmStateInput{} + } + + output = &SetAlarmStateOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// SetAlarmState API operation for Amazon CloudWatch. +// +// Temporarily sets the state of an alarm for testing purposes. When the updated +// state differs from the previous value, the action configured for the appropriate +// state is invoked. For example, if your alarm is configured to send an Amazon +// SNS message when an alarm is triggered, temporarily changing the alarm state +// to ALARM sends an SNS message. +// +// Metric alarms returns to their actual state quickly, often within seconds. 
+// Because the metric alarm state change happens quickly, it is typically only +// visible in the alarm's History tab in the Amazon CloudWatch console or through +// DescribeAlarmHistory (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeAlarmHistory.html). +// +// If you use SetAlarmState on a composite alarm, the composite alarm is not +// guaranteed to return to its actual state. It returns to its actual state +// only once any of its children alarms change state. It is also reevaluated +// if you update its configuration. +// +// If an alarm triggers EC2 Auto Scaling policies or application Auto Scaling +// policies, you must include information in the StateReasonData parameter to +// enable the policy to take the correct action. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation SetAlarmState for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeResourceNotFound "ResourceNotFound" +// The named resource does not exist. +// +// - ErrCodeInvalidFormatFault "InvalidFormat" +// Data was not syntactically valid JSON. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/SetAlarmState +func (c *CloudWatch) SetAlarmState(input *SetAlarmStateInput) (*SetAlarmStateOutput, error) { + req, out := c.SetAlarmStateRequest(input) + return out, req.Send() +} + +// SetAlarmStateWithContext is the same as SetAlarmState with the addition of +// the ability to pass a context and additional request options. +// +// See SetAlarmState for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) SetAlarmStateWithContext(ctx aws.Context, input *SetAlarmStateInput, opts ...request.Option) (*SetAlarmStateOutput, error) { + req, out := c.SetAlarmStateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartMetricStreams = "StartMetricStreams" + +// StartMetricStreamsRequest generates a "aws/request.Request" representing the +// client's request for the StartMetricStreams operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartMetricStreams for more information on using the StartMetricStreams +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the StartMetricStreamsRequest method. 
+// req, resp := client.StartMetricStreamsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/StartMetricStreams +func (c *CloudWatch) StartMetricStreamsRequest(input *StartMetricStreamsInput) (req *request.Request, output *StartMetricStreamsOutput) { + op := &request.Operation{ + Name: opStartMetricStreams, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartMetricStreamsInput{} + } + + output = &StartMetricStreamsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// StartMetricStreams API operation for Amazon CloudWatch. +// +// Starts the streaming of metrics for one or more of your metric streams. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation StartMetricStreams for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeInternalServiceFault "InternalServiceError" +// Request processing has failed due to some unknown error, exception, or failure. +// +// - ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. +// +// - ErrCodeMissingRequiredParameterException "MissingParameter" +// An input parameter that is required is missing. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/StartMetricStreams +func (c *CloudWatch) StartMetricStreams(input *StartMetricStreamsInput) (*StartMetricStreamsOutput, error) { + req, out := c.StartMetricStreamsRequest(input) + return out, req.Send() +} + +// StartMetricStreamsWithContext is the same as StartMetricStreams with the addition of +// the ability to pass a context and additional request options. +// +// See StartMetricStreams for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) StartMetricStreamsWithContext(ctx aws.Context, input *StartMetricStreamsInput, opts ...request.Option) (*StartMetricStreamsOutput, error) { + req, out := c.StartMetricStreamsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStopMetricStreams = "StopMetricStreams" + +// StopMetricStreamsRequest generates a "aws/request.Request" representing the +// client's request for the StopMetricStreams operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopMetricStreams for more information on using the StopMetricStreams +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the StopMetricStreamsRequest method. 
+// req, resp := client.StopMetricStreamsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/StopMetricStreams +func (c *CloudWatch) StopMetricStreamsRequest(input *StopMetricStreamsInput) (req *request.Request, output *StopMetricStreamsOutput) { + op := &request.Operation{ + Name: opStopMetricStreams, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopMetricStreamsInput{} + } + + output = &StopMetricStreamsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// StopMetricStreams API operation for Amazon CloudWatch. +// +// Stops the streaming of metrics for one or more of your metric streams. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation StopMetricStreams for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeInternalServiceFault "InternalServiceError" +// Request processing has failed due to some unknown error, exception, or failure. +// +// - ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. +// +// - ErrCodeMissingRequiredParameterException "MissingParameter" +// An input parameter that is required is missing. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/StopMetricStreams +func (c *CloudWatch) StopMetricStreams(input *StopMetricStreamsInput) (*StopMetricStreamsOutput, error) { + req, out := c.StopMetricStreamsRequest(input) + return out, req.Send() +} + +// StopMetricStreamsWithContext is the same as StopMetricStreams with the addition of +// the ability to pass a context and additional request options. +// +// See StopMetricStreams for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) StopMetricStreamsWithContext(ctx aws.Context, input *StopMetricStreamsInput, opts ...request.Option) (*StopMetricStreamsOutput, error) { + req, out := c.StopMetricStreamsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the TagResourceRequest method. 
+// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/TagResource +func (c *CloudWatch) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for Amazon CloudWatch. +// +// Assigns one or more tags (key-value pairs) to the specified CloudWatch resource. +// Currently, the only CloudWatch resources that can be tagged are alarms and +// Contributor Insights rules. +// +// Tags can help you organize and categorize your resources. You can also use +// them to scope user permissions by granting a user permission to access or +// change only resources with certain tag values. +// +// Tags don't have any semantic meaning to Amazon Web Services and are interpreted +// strictly as strings of characters. +// +// You can use the TagResource action with an alarm that already has tags. If +// you specify a new tag key for the alarm, this tag is appended to the list +// of tags associated with the alarm. If you specify a tag key that is already +// associated with the alarm, the new tag value that you specify replaces the +// previous value for that tag. +// +// You can associate as many as 50 tags with a CloudWatch resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. +// +// - ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The named resource does not exist. +// +// - ErrCodeConcurrentModificationException "ConcurrentModificationException" +// More than one process tried to modify a resource at the same time. +// +// - ErrCodeInternalServiceFault "InternalServiceError" +// Request processing has failed due to some unknown error, exception, or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/TagResource +func (c *CloudWatch) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/UntagResource +func (c *CloudWatch) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Amazon CloudWatch. +// +// Removes one or more tags from the specified resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value of an input parameter is bad or out-of-range. +// +// - ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The named resource does not exist. 
+// +// - ErrCodeConcurrentModificationException "ConcurrentModificationException" +// More than one process tried to modify a resource at the same time. +// +// - ErrCodeInternalServiceFault "InternalServiceError" +// Request processing has failed due to some unknown error, exception, or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01/UntagResource +func (c *CloudWatch) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Represents the history of a specific alarm. +type AlarmHistoryItem struct { + _ struct{} `type:"structure"` + + // The descriptive name for the alarm. + AlarmName *string `min:"1" type:"string"` + + // The type of alarm, either metric alarm or composite alarm. + AlarmType *string `type:"string" enum:"AlarmType"` + + // Data about the alarm, in JSON format. + HistoryData *string `min:"1" type:"string"` + + // The type of alarm history item. + HistoryItemType *string `type:"string" enum:"HistoryItemType"` + + // A summary of the alarm history, in text format. + HistorySummary *string `min:"1" type:"string"` + + // The time stamp for the alarm history item. 
+ Timestamp *time.Time `type:"timestamp"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AlarmHistoryItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AlarmHistoryItem) GoString() string { + return s.String() +} + +// SetAlarmName sets the AlarmName field's value. +func (s *AlarmHistoryItem) SetAlarmName(v string) *AlarmHistoryItem { + s.AlarmName = &v + return s +} + +// SetAlarmType sets the AlarmType field's value. +func (s *AlarmHistoryItem) SetAlarmType(v string) *AlarmHistoryItem { + s.AlarmType = &v + return s +} + +// SetHistoryData sets the HistoryData field's value. +func (s *AlarmHistoryItem) SetHistoryData(v string) *AlarmHistoryItem { + s.HistoryData = &v + return s +} + +// SetHistoryItemType sets the HistoryItemType field's value. +func (s *AlarmHistoryItem) SetHistoryItemType(v string) *AlarmHistoryItem { + s.HistoryItemType = &v + return s +} + +// SetHistorySummary sets the HistorySummary field's value. +func (s *AlarmHistoryItem) SetHistorySummary(v string) *AlarmHistoryItem { + s.HistorySummary = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *AlarmHistoryItem) SetTimestamp(v time.Time) *AlarmHistoryItem { + s.Timestamp = &v + return s +} + +// An anomaly detection model associated with a particular CloudWatch metric, +// statistic, or metric math expression. You can use the model to display a +// band of expected, normal values when the metric is graphed. 
+// +// If you have enabled unified cross-account observability, and this account +// is a monitoring account, the metric can be in the same account or a source +// account. +type AnomalyDetector struct { + _ struct{} `type:"structure"` + + // The configuration specifies details about how the anomaly detection model + // is to be trained, including time ranges to exclude from use for training + // the model, and the time zone to use for the metric. + Configuration *AnomalyDetectorConfiguration `type:"structure"` + + // The metric dimensions associated with the anomaly detection model. + // + // Deprecated: Use SingleMetricAnomalyDetector.Dimensions property. + Dimensions []*Dimension `deprecated:"true" type:"list"` + + // This object includes parameters that you can use to provide information about + // your metric to CloudWatch to help it build more accurate anomaly detection + // models. Currently, it includes the PeriodicSpikes parameter. + MetricCharacteristics *MetricCharacteristics `type:"structure"` + + // The CloudWatch metric math expression for this anomaly detector. + MetricMathAnomalyDetector *MetricMathAnomalyDetector `type:"structure"` + + // The name of the metric associated with the anomaly detection model. + // + // Deprecated: Use SingleMetricAnomalyDetector.MetricName property. + MetricName *string `min:"1" deprecated:"true" type:"string"` + + // The namespace of the metric associated with the anomaly detection model. + // + // Deprecated: Use SingleMetricAnomalyDetector.Namespace property. + Namespace *string `min:"1" deprecated:"true" type:"string"` + + // The CloudWatch metric and statistic for this anomaly detector. + SingleMetricAnomalyDetector *SingleMetricAnomalyDetector `type:"structure"` + + // The statistic associated with the anomaly detection model. + // + // Deprecated: Use SingleMetricAnomalyDetector.Stat property. + Stat *string `deprecated:"true" type:"string"` + + // The current status of the anomaly detector's training. 
+ StateValue *string `type:"string" enum:"AnomalyDetectorStateValue"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnomalyDetector) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnomalyDetector) GoString() string { + return s.String() +} + +// SetConfiguration sets the Configuration field's value. +func (s *AnomalyDetector) SetConfiguration(v *AnomalyDetectorConfiguration) *AnomalyDetector { + s.Configuration = v + return s +} + +// SetDimensions sets the Dimensions field's value. +func (s *AnomalyDetector) SetDimensions(v []*Dimension) *AnomalyDetector { + s.Dimensions = v + return s +} + +// SetMetricCharacteristics sets the MetricCharacteristics field's value. +func (s *AnomalyDetector) SetMetricCharacteristics(v *MetricCharacteristics) *AnomalyDetector { + s.MetricCharacteristics = v + return s +} + +// SetMetricMathAnomalyDetector sets the MetricMathAnomalyDetector field's value. +func (s *AnomalyDetector) SetMetricMathAnomalyDetector(v *MetricMathAnomalyDetector) *AnomalyDetector { + s.MetricMathAnomalyDetector = v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *AnomalyDetector) SetMetricName(v string) *AnomalyDetector { + s.MetricName = &v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *AnomalyDetector) SetNamespace(v string) *AnomalyDetector { + s.Namespace = &v + return s +} + +// SetSingleMetricAnomalyDetector sets the SingleMetricAnomalyDetector field's value. 
+func (s *AnomalyDetector) SetSingleMetricAnomalyDetector(v *SingleMetricAnomalyDetector) *AnomalyDetector { + s.SingleMetricAnomalyDetector = v + return s +} + +// SetStat sets the Stat field's value. +func (s *AnomalyDetector) SetStat(v string) *AnomalyDetector { + s.Stat = &v + return s +} + +// SetStateValue sets the StateValue field's value. +func (s *AnomalyDetector) SetStateValue(v string) *AnomalyDetector { + s.StateValue = &v + return s +} + +// The configuration specifies details about how the anomaly detection model +// is to be trained, including time ranges to exclude from use for training +// the model and the time zone to use for the metric. +type AnomalyDetectorConfiguration struct { + _ struct{} `type:"structure"` + + // An array of time ranges to exclude from use when the anomaly detection model + // is trained. Use this to make sure that events that could cause unusual values + // for the metric, such as deployments, aren't used when CloudWatch creates + // the model. + ExcludedTimeRanges []*Range `type:"list"` + + // The time zone to use for the metric. This is useful to enable the model to + // automatically account for daylight savings time changes if the metric is + // sensitive to such time changes. + // + // To specify a time zone, use the name of the time zone as specified in the + // standard tz database. For more information, see tz database (https://en.wikipedia.org/wiki/Tz_database). + MetricTimezone *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnomalyDetectorConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnomalyDetectorConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnomalyDetectorConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnomalyDetectorConfiguration"} + if s.ExcludedTimeRanges != nil { + for i, v := range s.ExcludedTimeRanges { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ExcludedTimeRanges", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExcludedTimeRanges sets the ExcludedTimeRanges field's value. +func (s *AnomalyDetectorConfiguration) SetExcludedTimeRanges(v []*Range) *AnomalyDetectorConfiguration { + s.ExcludedTimeRanges = v + return s +} + +// SetMetricTimezone sets the MetricTimezone field's value. +func (s *AnomalyDetectorConfiguration) SetMetricTimezone(v string) *AnomalyDetectorConfiguration { + s.MetricTimezone = &v + return s +} + +// The details about a composite alarm. +type CompositeAlarm struct { + _ struct{} `type:"structure"` + + // Indicates whether actions should be executed during any changes to the alarm + // state. + ActionsEnabled *bool `type:"boolean"` + + // When the value is ALARM, it means that the actions are suppressed because + // the suppressor alarm is in ALARM When the value is WaitPeriod, it means that + // the actions are suppressed because the composite alarm is waiting for the + // suppressor alarm to go into into the ALARM state. The maximum waiting time + // is as specified in ActionsSuppressorWaitPeriod. After this time, the composite + // alarm performs its actions. 
When the value is ExtensionPeriod, it means that + // the actions are suppressed because the composite alarm is waiting after the + // suppressor alarm went out of the ALARM state. The maximum waiting time is + // as specified in ActionsSuppressorExtensionPeriod. After this time, the composite + // alarm performs its actions. + ActionsSuppressedBy *string `type:"string" enum:"ActionsSuppressedBy"` + + // Captures the reason for action suppression. + ActionsSuppressedReason *string `type:"string"` + + // Actions will be suppressed if the suppressor alarm is in the ALARM state. + // ActionsSuppressor can be an AlarmName or an Amazon Resource Name (ARN) from + // an existing alarm. + ActionsSuppressor *string `min:"1" type:"string"` + + // The maximum time in seconds that the composite alarm waits after suppressor + // alarm goes out of the ALARM state. After this time, the composite alarm performs + // its actions. + // + // ExtensionPeriod is required only when ActionsSuppressor is specified. + ActionsSuppressorExtensionPeriod *int64 `type:"integer"` + + // The maximum time in seconds that the composite alarm waits for the suppressor + // alarm to go into the ALARM state. After this time, the composite alarm performs + // its actions. + // + // WaitPeriod is required only when ActionsSuppressor is specified. + ActionsSuppressorWaitPeriod *int64 `type:"integer"` + + // The actions to execute when this alarm transitions to the ALARM state from + // any other state. Each action is specified as an Amazon Resource Name (ARN). + AlarmActions []*string `type:"list"` + + // The Amazon Resource Name (ARN) of the alarm. + AlarmArn *string `min:"1" type:"string"` + + // The time stamp of the last update to the alarm configuration. + AlarmConfigurationUpdatedTimestamp *time.Time `type:"timestamp"` + + // The description of the alarm. + AlarmDescription *string `type:"string"` + + // The name of the alarm. 
+ AlarmName *string `min:"1" type:"string"` + + // The rule that this alarm uses to evaluate its alarm state. + AlarmRule *string `min:"1" type:"string"` + + // The actions to execute when this alarm transitions to the INSUFFICIENT_DATA + // state from any other state. Each action is specified as an Amazon Resource + // Name (ARN). + InsufficientDataActions []*string `type:"list"` + + // The actions to execute when this alarm transitions to the OK state from any + // other state. Each action is specified as an Amazon Resource Name (ARN). + OKActions []*string `type:"list"` + + // An explanation for the alarm state, in text format. + StateReason *string `type:"string"` + + // An explanation for the alarm state, in JSON format. + StateReasonData *string `type:"string"` + + // The timestamp of the last change to the alarm's StateValue. + StateTransitionedTimestamp *time.Time `type:"timestamp"` + + // Tracks the timestamp of any state update, even if StateValue doesn't change. + StateUpdatedTimestamp *time.Time `type:"timestamp"` + + // The state value for the alarm. + StateValue *string `type:"string" enum:"StateValue"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompositeAlarm) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompositeAlarm) GoString() string { + return s.String() +} + +// SetActionsEnabled sets the ActionsEnabled field's value. 
+func (s *CompositeAlarm) SetActionsEnabled(v bool) *CompositeAlarm { + s.ActionsEnabled = &v + return s +} + +// SetActionsSuppressedBy sets the ActionsSuppressedBy field's value. +func (s *CompositeAlarm) SetActionsSuppressedBy(v string) *CompositeAlarm { + s.ActionsSuppressedBy = &v + return s +} + +// SetActionsSuppressedReason sets the ActionsSuppressedReason field's value. +func (s *CompositeAlarm) SetActionsSuppressedReason(v string) *CompositeAlarm { + s.ActionsSuppressedReason = &v + return s +} + +// SetActionsSuppressor sets the ActionsSuppressor field's value. +func (s *CompositeAlarm) SetActionsSuppressor(v string) *CompositeAlarm { + s.ActionsSuppressor = &v + return s +} + +// SetActionsSuppressorExtensionPeriod sets the ActionsSuppressorExtensionPeriod field's value. +func (s *CompositeAlarm) SetActionsSuppressorExtensionPeriod(v int64) *CompositeAlarm { + s.ActionsSuppressorExtensionPeriod = &v + return s +} + +// SetActionsSuppressorWaitPeriod sets the ActionsSuppressorWaitPeriod field's value. +func (s *CompositeAlarm) SetActionsSuppressorWaitPeriod(v int64) *CompositeAlarm { + s.ActionsSuppressorWaitPeriod = &v + return s +} + +// SetAlarmActions sets the AlarmActions field's value. +func (s *CompositeAlarm) SetAlarmActions(v []*string) *CompositeAlarm { + s.AlarmActions = v + return s +} + +// SetAlarmArn sets the AlarmArn field's value. +func (s *CompositeAlarm) SetAlarmArn(v string) *CompositeAlarm { + s.AlarmArn = &v + return s +} + +// SetAlarmConfigurationUpdatedTimestamp sets the AlarmConfigurationUpdatedTimestamp field's value. +func (s *CompositeAlarm) SetAlarmConfigurationUpdatedTimestamp(v time.Time) *CompositeAlarm { + s.AlarmConfigurationUpdatedTimestamp = &v + return s +} + +// SetAlarmDescription sets the AlarmDescription field's value. +func (s *CompositeAlarm) SetAlarmDescription(v string) *CompositeAlarm { + s.AlarmDescription = &v + return s +} + +// SetAlarmName sets the AlarmName field's value. 
+func (s *CompositeAlarm) SetAlarmName(v string) *CompositeAlarm { + s.AlarmName = &v + return s +} + +// SetAlarmRule sets the AlarmRule field's value. +func (s *CompositeAlarm) SetAlarmRule(v string) *CompositeAlarm { + s.AlarmRule = &v + return s +} + +// SetInsufficientDataActions sets the InsufficientDataActions field's value. +func (s *CompositeAlarm) SetInsufficientDataActions(v []*string) *CompositeAlarm { + s.InsufficientDataActions = v + return s +} + +// SetOKActions sets the OKActions field's value. +func (s *CompositeAlarm) SetOKActions(v []*string) *CompositeAlarm { + s.OKActions = v + return s +} + +// SetStateReason sets the StateReason field's value. +func (s *CompositeAlarm) SetStateReason(v string) *CompositeAlarm { + s.StateReason = &v + return s +} + +// SetStateReasonData sets the StateReasonData field's value. +func (s *CompositeAlarm) SetStateReasonData(v string) *CompositeAlarm { + s.StateReasonData = &v + return s +} + +// SetStateTransitionedTimestamp sets the StateTransitionedTimestamp field's value. +func (s *CompositeAlarm) SetStateTransitionedTimestamp(v time.Time) *CompositeAlarm { + s.StateTransitionedTimestamp = &v + return s +} + +// SetStateUpdatedTimestamp sets the StateUpdatedTimestamp field's value. +func (s *CompositeAlarm) SetStateUpdatedTimestamp(v time.Time) *CompositeAlarm { + s.StateUpdatedTimestamp = &v + return s +} + +// SetStateValue sets the StateValue field's value. +func (s *CompositeAlarm) SetStateValue(v string) *CompositeAlarm { + s.StateValue = &v + return s +} + +// Represents a specific dashboard. +type DashboardEntry struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dashboard. + DashboardArn *string `type:"string"` + + // The name of the dashboard. + DashboardName *string `type:"string"` + + // The time stamp of when the dashboard was last modified, either by an API + // call or through the console. 
This number is expressed as the number of milliseconds + // since Jan 1, 1970 00:00:00 UTC. + LastModified *time.Time `type:"timestamp"` + + // The size of the dashboard, in bytes. + Size *int64 `type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DashboardEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DashboardEntry) GoString() string { + return s.String() +} + +// SetDashboardArn sets the DashboardArn field's value. +func (s *DashboardEntry) SetDashboardArn(v string) *DashboardEntry { + s.DashboardArn = &v + return s +} + +// SetDashboardName sets the DashboardName field's value. +func (s *DashboardEntry) SetDashboardName(v string) *DashboardEntry { + s.DashboardName = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *DashboardEntry) SetLastModified(v time.Time) *DashboardEntry { + s.LastModified = &v + return s +} + +// SetSize sets the Size field's value. +func (s *DashboardEntry) SetSize(v int64) *DashboardEntry { + s.Size = &v + return s +} + +// An error or warning for the operation. +type DashboardValidationMessage struct { + _ struct{} `type:"structure"` + + // The data path related to the message. + DataPath *string `type:"string"` + + // A message describing the error or warning. + Message *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s DashboardValidationMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DashboardValidationMessage) GoString() string { + return s.String() +} + +// SetDataPath sets the DataPath field's value. +func (s *DashboardValidationMessage) SetDataPath(v string) *DashboardValidationMessage { + s.DataPath = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *DashboardValidationMessage) SetMessage(v string) *DashboardValidationMessage { + s.Message = &v + return s +} + +// Encapsulates the statistical data that CloudWatch computes from metric data. +type Datapoint struct { + _ struct{} `type:"structure"` + + // The average of the metric values that correspond to the data point. + Average *float64 `type:"double"` + + // The percentile statistic for the data point. + ExtendedStatistics map[string]*float64 `type:"map"` + + // The maximum metric value for the data point. + Maximum *float64 `type:"double"` + + // The minimum metric value for the data point. + Minimum *float64 `type:"double"` + + // The number of metric values that contributed to the aggregate value of this + // data point. + SampleCount *float64 `type:"double"` + + // The sum of the metric values for the data point. + Sum *float64 `type:"double"` + + // The time stamp used for the data point. + Timestamp *time.Time `type:"timestamp"` + + // The standard unit for the data point. + Unit *string `type:"string" enum:"StandardUnit"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s Datapoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Datapoint) GoString() string { + return s.String() +} + +// SetAverage sets the Average field's value. +func (s *Datapoint) SetAverage(v float64) *Datapoint { + s.Average = &v + return s +} + +// SetExtendedStatistics sets the ExtendedStatistics field's value. +func (s *Datapoint) SetExtendedStatistics(v map[string]*float64) *Datapoint { + s.ExtendedStatistics = v + return s +} + +// SetMaximum sets the Maximum field's value. +func (s *Datapoint) SetMaximum(v float64) *Datapoint { + s.Maximum = &v + return s +} + +// SetMinimum sets the Minimum field's value. +func (s *Datapoint) SetMinimum(v float64) *Datapoint { + s.Minimum = &v + return s +} + +// SetSampleCount sets the SampleCount field's value. +func (s *Datapoint) SetSampleCount(v float64) *Datapoint { + s.SampleCount = &v + return s +} + +// SetSum sets the Sum field's value. +func (s *Datapoint) SetSum(v float64) *Datapoint { + s.Sum = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *Datapoint) SetTimestamp(v time.Time) *Datapoint { + s.Timestamp = &v + return s +} + +// SetUnit sets the Unit field's value. +func (s *Datapoint) SetUnit(v string) *Datapoint { + s.Unit = &v + return s +} + +type DeleteAlarmsInput struct { + _ struct{} `type:"structure"` + + // The alarms to be deleted. Do not enclose the alarm names in quote marks. + // + // AlarmNames is a required field + AlarmNames []*string `type:"list" required:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteAlarmsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteAlarmsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAlarmsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAlarmsInput"} + if s.AlarmNames == nil { + invalidParams.Add(request.NewErrParamRequired("AlarmNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAlarmNames sets the AlarmNames field's value. +func (s *DeleteAlarmsInput) SetAlarmNames(v []*string) *DeleteAlarmsInput { + s.AlarmNames = v + return s +} + +type DeleteAlarmsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteAlarmsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DeleteAlarmsOutput) GoString() string { + return s.String() +} + +type DeleteAnomalyDetectorInput struct { + _ struct{} `type:"structure"` + + // The metric dimensions associated with the anomaly detection model to delete. + // + // Deprecated: Use SingleMetricAnomalyDetector. + Dimensions []*Dimension `deprecated:"true" type:"list"` + + // The metric math anomaly detector to be deleted. + // + // When using MetricMathAnomalyDetector, you cannot include following parameters + // in the same operation: + // + // * Dimensions, + // + // * MetricName + // + // * Namespace + // + // * Stat + // + // * the SingleMetricAnomalyDetector parameters of DeleteAnomalyDetectorInput + // + // Instead, specify the metric math anomaly detector attributes as part of the + // MetricMathAnomalyDetector property. + MetricMathAnomalyDetector *MetricMathAnomalyDetector `type:"structure"` + + // The metric name associated with the anomaly detection model to delete. + // + // Deprecated: Use SingleMetricAnomalyDetector. + MetricName *string `min:"1" deprecated:"true" type:"string"` + + // The namespace associated with the anomaly detection model to delete. + // + // Deprecated: Use SingleMetricAnomalyDetector. + Namespace *string `min:"1" deprecated:"true" type:"string"` + + // A single metric anomaly detector to be deleted. + // + // When using SingleMetricAnomalyDetector, you cannot include the following + // parameters in the same operation: + // + // * Dimensions, + // + // * MetricName + // + // * Namespace + // + // * Stat + // + // * the MetricMathAnomalyDetector parameters of DeleteAnomalyDetectorInput + // + // Instead, specify the single metric anomaly detector attributes as part of + // the SingleMetricAnomalyDetector property. + SingleMetricAnomalyDetector *SingleMetricAnomalyDetector `type:"structure"` + + // The statistic associated with the anomaly detection model to delete. + // + // Deprecated: Use SingleMetricAnomalyDetector. 
+ Stat *string `deprecated:"true" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteAnomalyDetectorInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteAnomalyDetectorInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAnomalyDetectorInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAnomalyDetectorInput"} + if s.MetricName != nil && len(*s.MetricName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricName", 1)) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } + if s.Dimensions != nil { + for i, v := range s.Dimensions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.MetricMathAnomalyDetector != nil { + if err := s.MetricMathAnomalyDetector.Validate(); err != nil { + invalidParams.AddNested("MetricMathAnomalyDetector", err.(request.ErrInvalidParams)) + } + } + if s.SingleMetricAnomalyDetector != nil { + if err := s.SingleMetricAnomalyDetector.Validate(); err != nil { + invalidParams.AddNested("SingleMetricAnomalyDetector", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDimensions sets the Dimensions field's value. 
+func (s *DeleteAnomalyDetectorInput) SetDimensions(v []*Dimension) *DeleteAnomalyDetectorInput { + s.Dimensions = v + return s +} + +// SetMetricMathAnomalyDetector sets the MetricMathAnomalyDetector field's value. +func (s *DeleteAnomalyDetectorInput) SetMetricMathAnomalyDetector(v *MetricMathAnomalyDetector) *DeleteAnomalyDetectorInput { + s.MetricMathAnomalyDetector = v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *DeleteAnomalyDetectorInput) SetMetricName(v string) *DeleteAnomalyDetectorInput { + s.MetricName = &v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *DeleteAnomalyDetectorInput) SetNamespace(v string) *DeleteAnomalyDetectorInput { + s.Namespace = &v + return s +} + +// SetSingleMetricAnomalyDetector sets the SingleMetricAnomalyDetector field's value. +func (s *DeleteAnomalyDetectorInput) SetSingleMetricAnomalyDetector(v *SingleMetricAnomalyDetector) *DeleteAnomalyDetectorInput { + s.SingleMetricAnomalyDetector = v + return s +} + +// SetStat sets the Stat field's value. +func (s *DeleteAnomalyDetectorInput) SetStat(v string) *DeleteAnomalyDetectorInput { + s.Stat = &v + return s +} + +type DeleteAnomalyDetectorOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteAnomalyDetectorOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DeleteAnomalyDetectorOutput) GoString() string { + return s.String() +} + +type DeleteDashboardsInput struct { + _ struct{} `type:"structure"` + + // The dashboards to be deleted. This parameter is required. + // + // DashboardNames is a required field + DashboardNames []*string `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDashboardsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDashboardsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDashboardsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDashboardsInput"} + if s.DashboardNames == nil { + invalidParams.Add(request.NewErrParamRequired("DashboardNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDashboardNames sets the DashboardNames field's value. +func (s *DeleteDashboardsInput) SetDashboardNames(v []*string) *DeleteDashboardsInput { + s.DashboardNames = v + return s +} + +type DeleteDashboardsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DeleteDashboardsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDashboardsOutput) GoString() string { + return s.String() +} + +type DeleteInsightRulesInput struct { + _ struct{} `type:"structure"` + + // An array of the rule names to delete. If you need to find out the names of + // your rules, use DescribeInsightRules (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeInsightRules.html). + // + // RuleNames is a required field + RuleNames []*string `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteInsightRulesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteInsightRulesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteInsightRulesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteInsightRulesInput"} + if s.RuleNames == nil { + invalidParams.Add(request.NewErrParamRequired("RuleNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRuleNames sets the RuleNames field's value. 
+func (s *DeleteInsightRulesInput) SetRuleNames(v []*string) *DeleteInsightRulesInput { + s.RuleNames = v + return s +} + +type DeleteInsightRulesOutput struct { + _ struct{} `type:"structure"` + + // An array listing the rules that could not be deleted. You cannot delete built-in + // rules. + Failures []*PartialFailure `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteInsightRulesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteInsightRulesOutput) GoString() string { + return s.String() +} + +// SetFailures sets the Failures field's value. +func (s *DeleteInsightRulesOutput) SetFailures(v []*PartialFailure) *DeleteInsightRulesOutput { + s.Failures = v + return s +} + +type DeleteMetricStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the metric stream to delete. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteMetricStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteMetricStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteMetricStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteMetricStreamInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DeleteMetricStreamInput) SetName(v string) *DeleteMetricStreamInput { + s.Name = &v + return s +} + +type DeleteMetricStreamOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteMetricStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteMetricStreamOutput) GoString() string { + return s.String() +} + +type DescribeAlarmHistoryInput struct { + _ struct{} `type:"structure"` + + // The name of the alarm. + AlarmName *string `min:"1" type:"string"` + + // Use this parameter to specify whether you want the operation to return metric + // alarms or composite alarms. If you omit this parameter, only metric alarms + // are returned. + AlarmTypes []*string `type:"list" enum:"AlarmType"` + + // The ending date to retrieve alarm history. 
+ EndDate *time.Time `type:"timestamp"` + + // The type of alarm histories to retrieve. + HistoryItemType *string `type:"string" enum:"HistoryItemType"` + + // The maximum number of alarm history records to retrieve. + MaxRecords *int64 `min:"1" type:"integer"` + + // The token returned by a previous call to indicate that there is more data + // available. + NextToken *string `type:"string"` + + // Specified whether to return the newest or oldest alarm history first. Specify + // TimestampDescending to have the newest event history returned first, and + // specify TimestampAscending to have the oldest history returned first. + ScanBy *string `type:"string" enum:"ScanBy"` + + // The starting date to retrieve alarm history. + StartDate *time.Time `type:"timestamp"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAlarmHistoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAlarmHistoryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeAlarmHistoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAlarmHistoryInput"} + if s.AlarmName != nil && len(*s.AlarmName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AlarmName", 1)) + } + if s.MaxRecords != nil && *s.MaxRecords < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxRecords", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAlarmName sets the AlarmName field's value. +func (s *DescribeAlarmHistoryInput) SetAlarmName(v string) *DescribeAlarmHistoryInput { + s.AlarmName = &v + return s +} + +// SetAlarmTypes sets the AlarmTypes field's value. +func (s *DescribeAlarmHistoryInput) SetAlarmTypes(v []*string) *DescribeAlarmHistoryInput { + s.AlarmTypes = v + return s +} + +// SetEndDate sets the EndDate field's value. +func (s *DescribeAlarmHistoryInput) SetEndDate(v time.Time) *DescribeAlarmHistoryInput { + s.EndDate = &v + return s +} + +// SetHistoryItemType sets the HistoryItemType field's value. +func (s *DescribeAlarmHistoryInput) SetHistoryItemType(v string) *DescribeAlarmHistoryInput { + s.HistoryItemType = &v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeAlarmHistoryInput) SetMaxRecords(v int64) *DescribeAlarmHistoryInput { + s.MaxRecords = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeAlarmHistoryInput) SetNextToken(v string) *DescribeAlarmHistoryInput { + s.NextToken = &v + return s +} + +// SetScanBy sets the ScanBy field's value. +func (s *DescribeAlarmHistoryInput) SetScanBy(v string) *DescribeAlarmHistoryInput { + s.ScanBy = &v + return s +} + +// SetStartDate sets the StartDate field's value. +func (s *DescribeAlarmHistoryInput) SetStartDate(v time.Time) *DescribeAlarmHistoryInput { + s.StartDate = &v + return s +} + +type DescribeAlarmHistoryOutput struct { + _ struct{} `type:"structure"` + + // The alarm histories, in JSON format. 
+ AlarmHistoryItems []*AlarmHistoryItem `type:"list"` + + // The token that marks the start of the next batch of returned results. + NextToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAlarmHistoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAlarmHistoryOutput) GoString() string { + return s.String() +} + +// SetAlarmHistoryItems sets the AlarmHistoryItems field's value. +func (s *DescribeAlarmHistoryOutput) SetAlarmHistoryItems(v []*AlarmHistoryItem) *DescribeAlarmHistoryOutput { + s.AlarmHistoryItems = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeAlarmHistoryOutput) SetNextToken(v string) *DescribeAlarmHistoryOutput { + s.NextToken = &v + return s +} + +type DescribeAlarmsForMetricInput struct { + _ struct{} `type:"structure"` + + // The dimensions associated with the metric. If the metric has any associated + // dimensions, you must specify them in order for the call to succeed. + Dimensions []*Dimension `type:"list"` + + // The percentile statistic for the metric. Specify a value between p0.0 and + // p100. + ExtendedStatistic *string `type:"string"` + + // The name of the metric. + // + // MetricName is a required field + MetricName *string `min:"1" type:"string" required:"true"` + + // The namespace of the metric. + // + // Namespace is a required field + Namespace *string `min:"1" type:"string" required:"true"` + + // The period, in seconds, over which the statistic is applied. 
+ Period *int64 `min:"1" type:"integer"` + + // The statistic for the metric, other than percentiles. For percentile statistics, + // use ExtendedStatistics. + Statistic *string `type:"string" enum:"Statistic"` + + // The unit for the metric. + Unit *string `type:"string" enum:"StandardUnit"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAlarmsForMetricInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAlarmsForMetricInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeAlarmsForMetricInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAlarmsForMetricInput"} + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.MetricName != nil && len(*s.MetricName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricName", 1)) + } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } + if s.Period != nil && *s.Period < 1 { + invalidParams.Add(request.NewErrParamMinValue("Period", 1)) + } + if s.Dimensions != nil { + for i, v := range s.Dimensions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDimensions sets the Dimensions field's value. +func (s *DescribeAlarmsForMetricInput) SetDimensions(v []*Dimension) *DescribeAlarmsForMetricInput { + s.Dimensions = v + return s +} + +// SetExtendedStatistic sets the ExtendedStatistic field's value. +func (s *DescribeAlarmsForMetricInput) SetExtendedStatistic(v string) *DescribeAlarmsForMetricInput { + s.ExtendedStatistic = &v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *DescribeAlarmsForMetricInput) SetMetricName(v string) *DescribeAlarmsForMetricInput { + s.MetricName = &v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *DescribeAlarmsForMetricInput) SetNamespace(v string) *DescribeAlarmsForMetricInput { + s.Namespace = &v + return s +} + +// SetPeriod sets the Period field's value. +func (s *DescribeAlarmsForMetricInput) SetPeriod(v int64) *DescribeAlarmsForMetricInput { + s.Period = &v + return s +} + +// SetStatistic sets the Statistic field's value. 
+func (s *DescribeAlarmsForMetricInput) SetStatistic(v string) *DescribeAlarmsForMetricInput { + s.Statistic = &v + return s +} + +// SetUnit sets the Unit field's value. +func (s *DescribeAlarmsForMetricInput) SetUnit(v string) *DescribeAlarmsForMetricInput { + s.Unit = &v + return s +} + +type DescribeAlarmsForMetricOutput struct { + _ struct{} `type:"structure"` + + // The information for each alarm with the specified metric. + MetricAlarms []*MetricAlarm `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAlarmsForMetricOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAlarmsForMetricOutput) GoString() string { + return s.String() +} + +// SetMetricAlarms sets the MetricAlarms field's value. +func (s *DescribeAlarmsForMetricOutput) SetMetricAlarms(v []*MetricAlarm) *DescribeAlarmsForMetricOutput { + s.MetricAlarms = v + return s +} + +type DescribeAlarmsInput struct { + _ struct{} `type:"structure"` + + // Use this parameter to filter the results of the operation to only those alarms + // that use a certain alarm action. For example, you could specify the ARN of + // an SNS topic to find all alarms that send notifications to that topic. + ActionPrefix *string `min:"1" type:"string"` + + // An alarm name prefix. If you specify this parameter, you receive information + // about all alarms that have names that start with this prefix. + // + // If this parameter is specified, you cannot specify AlarmNames. 
+ AlarmNamePrefix *string `min:"1" type:"string"` + + // The names of the alarms to retrieve information about. + AlarmNames []*string `type:"list"` + + // Use this parameter to specify whether you want the operation to return metric + // alarms or composite alarms. If you omit this parameter, only metric alarms + // are returned, even if composite alarms exist in the account. + // + // For example, if you omit this parameter or specify MetricAlarms, the operation + // returns only a list of metric alarms. It does not return any composite alarms, + // even if composite alarms exist in the account. + // + // If you specify CompositeAlarms, the operation returns only a list of composite + // alarms, and does not return any metric alarms. + AlarmTypes []*string `type:"list" enum:"AlarmType"` + + // If you use this parameter and specify the name of a composite alarm, the + // operation returns information about the "children" alarms of the alarm you + // specify. These are the metric alarms and composite alarms referenced in the + // AlarmRule field of the composite alarm that you specify in ChildrenOfAlarmName. + // Information about the composite alarm that you name in ChildrenOfAlarmName + // is not returned. + // + // If you specify ChildrenOfAlarmName, you cannot specify any other parameters + // in the request except for MaxRecords and NextToken. If you do so, you receive + // a validation error. + // + // Only the Alarm Name, ARN, StateValue (OK/ALARM/INSUFFICIENT_DATA), and StateUpdatedTimestamp + // information are returned by this operation when you use this parameter. To + // get complete information about these alarms, perform another DescribeAlarms + // operation and specify the parent alarm names in the AlarmNames parameter. + ChildrenOfAlarmName *string `min:"1" type:"string"` + + // The maximum number of alarm descriptions to retrieve. 
+ MaxRecords *int64 `min:"1" type:"integer"` + + // The token returned by a previous call to indicate that there is more data + // available. + NextToken *string `type:"string"` + + // If you use this parameter and specify the name of a metric or composite alarm, + // the operation returns information about the "parent" alarms of the alarm + // you specify. These are the composite alarms that have AlarmRule parameters + // that reference the alarm named in ParentsOfAlarmName. Information about the + // alarm that you specify in ParentsOfAlarmName is not returned. + // + // If you specify ParentsOfAlarmName, you cannot specify any other parameters + // in the request except for MaxRecords and NextToken. If you do so, you receive + // a validation error. + // + // Only the Alarm Name and ARN are returned by this operation when you use this + // parameter. To get complete information about these alarms, perform another + // DescribeAlarms operation and specify the parent alarm names in the AlarmNames + // parameter. + ParentsOfAlarmName *string `min:"1" type:"string"` + + // Specify this parameter to receive information only about alarms that are + // currently in the state that you specify. + StateValue *string `type:"string" enum:"StateValue"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAlarmsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DescribeAlarmsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAlarmsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAlarmsInput"} + if s.ActionPrefix != nil && len(*s.ActionPrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ActionPrefix", 1)) + } + if s.AlarmNamePrefix != nil && len(*s.AlarmNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AlarmNamePrefix", 1)) + } + if s.ChildrenOfAlarmName != nil && len(*s.ChildrenOfAlarmName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChildrenOfAlarmName", 1)) + } + if s.MaxRecords != nil && *s.MaxRecords < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxRecords", 1)) + } + if s.ParentsOfAlarmName != nil && len(*s.ParentsOfAlarmName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ParentsOfAlarmName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActionPrefix sets the ActionPrefix field's value. +func (s *DescribeAlarmsInput) SetActionPrefix(v string) *DescribeAlarmsInput { + s.ActionPrefix = &v + return s +} + +// SetAlarmNamePrefix sets the AlarmNamePrefix field's value. +func (s *DescribeAlarmsInput) SetAlarmNamePrefix(v string) *DescribeAlarmsInput { + s.AlarmNamePrefix = &v + return s +} + +// SetAlarmNames sets the AlarmNames field's value. +func (s *DescribeAlarmsInput) SetAlarmNames(v []*string) *DescribeAlarmsInput { + s.AlarmNames = v + return s +} + +// SetAlarmTypes sets the AlarmTypes field's value. +func (s *DescribeAlarmsInput) SetAlarmTypes(v []*string) *DescribeAlarmsInput { + s.AlarmTypes = v + return s +} + +// SetChildrenOfAlarmName sets the ChildrenOfAlarmName field's value. +func (s *DescribeAlarmsInput) SetChildrenOfAlarmName(v string) *DescribeAlarmsInput { + s.ChildrenOfAlarmName = &v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. 
+func (s *DescribeAlarmsInput) SetMaxRecords(v int64) *DescribeAlarmsInput { + s.MaxRecords = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeAlarmsInput) SetNextToken(v string) *DescribeAlarmsInput { + s.NextToken = &v + return s +} + +// SetParentsOfAlarmName sets the ParentsOfAlarmName field's value. +func (s *DescribeAlarmsInput) SetParentsOfAlarmName(v string) *DescribeAlarmsInput { + s.ParentsOfAlarmName = &v + return s +} + +// SetStateValue sets the StateValue field's value. +func (s *DescribeAlarmsInput) SetStateValue(v string) *DescribeAlarmsInput { + s.StateValue = &v + return s +} + +type DescribeAlarmsOutput struct { + _ struct{} `type:"structure"` + + // The information about any composite alarms returned by the operation. + CompositeAlarms []*CompositeAlarm `type:"list"` + + // The information about any metric alarms returned by the operation. + MetricAlarms []*MetricAlarm `type:"list"` + + // The token that marks the start of the next batch of returned results. + NextToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAlarmsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAlarmsOutput) GoString() string { + return s.String() +} + +// SetCompositeAlarms sets the CompositeAlarms field's value. +func (s *DescribeAlarmsOutput) SetCompositeAlarms(v []*CompositeAlarm) *DescribeAlarmsOutput { + s.CompositeAlarms = v + return s +} + +// SetMetricAlarms sets the MetricAlarms field's value. 
+func (s *DescribeAlarmsOutput) SetMetricAlarms(v []*MetricAlarm) *DescribeAlarmsOutput { + s.MetricAlarms = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeAlarmsOutput) SetNextToken(v string) *DescribeAlarmsOutput { + s.NextToken = &v + return s +} + +type DescribeAnomalyDetectorsInput struct { + _ struct{} `type:"structure"` + + // The anomaly detector types to request when using DescribeAnomalyDetectorsInput. + // If empty, defaults to SINGLE_METRIC. + AnomalyDetectorTypes []*string `type:"list" enum:"AnomalyDetectorType"` + + // Limits the results to only the anomaly detection models that are associated + // with the specified metric dimensions. If there are multiple metrics that + // have these dimensions and have anomaly detection models associated, they're + // all returned. + Dimensions []*Dimension `type:"list"` + + // The maximum number of results to return in one operation. The maximum value + // that you can specify is 100. + // + // To retrieve the remaining results, make another call with the returned NextToken + // value. + MaxResults *int64 `min:"1" type:"integer"` + + // Limits the results to only the anomaly detection models that are associated + // with the specified metric name. If there are multiple metrics with this name + // in different namespaces that have anomaly detection models, they're all returned. + MetricName *string `min:"1" type:"string"` + + // Limits the results to only the anomaly detection models that are associated + // with the specified namespace. + Namespace *string `min:"1" type:"string"` + + // Use the token returned by the previous operation to request the next page + // of results. + NextToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DescribeAnomalyDetectorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAnomalyDetectorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAnomalyDetectorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAnomalyDetectorsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.MetricName != nil && len(*s.MetricName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricName", 1)) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } + if s.Dimensions != nil { + for i, v := range s.Dimensions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnomalyDetectorTypes sets the AnomalyDetectorTypes field's value. +func (s *DescribeAnomalyDetectorsInput) SetAnomalyDetectorTypes(v []*string) *DescribeAnomalyDetectorsInput { + s.AnomalyDetectorTypes = v + return s +} + +// SetDimensions sets the Dimensions field's value. +func (s *DescribeAnomalyDetectorsInput) SetDimensions(v []*Dimension) *DescribeAnomalyDetectorsInput { + s.Dimensions = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeAnomalyDetectorsInput) SetMaxResults(v int64) *DescribeAnomalyDetectorsInput { + s.MaxResults = &v + return s +} + +// SetMetricName sets the MetricName field's value. 
+func (s *DescribeAnomalyDetectorsInput) SetMetricName(v string) *DescribeAnomalyDetectorsInput { + s.MetricName = &v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *DescribeAnomalyDetectorsInput) SetNamespace(v string) *DescribeAnomalyDetectorsInput { + s.Namespace = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeAnomalyDetectorsInput) SetNextToken(v string) *DescribeAnomalyDetectorsInput { + s.NextToken = &v + return s +} + +type DescribeAnomalyDetectorsOutput struct { + _ struct{} `type:"structure"` + + // The list of anomaly detection models returned by the operation. + AnomalyDetectors []*AnomalyDetector `type:"list"` + + // A token that you can use in a subsequent operation to retrieve the next set + // of results. + NextToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAnomalyDetectorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeAnomalyDetectorsOutput) GoString() string { + return s.String() +} + +// SetAnomalyDetectors sets the AnomalyDetectors field's value. +func (s *DescribeAnomalyDetectorsOutput) SetAnomalyDetectors(v []*AnomalyDetector) *DescribeAnomalyDetectorsOutput { + s.AnomalyDetectors = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeAnomalyDetectorsOutput) SetNextToken(v string) *DescribeAnomalyDetectorsOutput { + s.NextToken = &v + return s +} + +type DescribeInsightRulesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return in one operation. If you omit this + // parameter, the default of 500 is used. + MaxResults *int64 `min:"1" type:"integer"` + + // Include this value, if it was returned by the previous operation, to get + // the next set of rules. + NextToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeInsightRulesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeInsightRulesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeInsightRulesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInsightRulesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeInsightRulesInput) SetMaxResults(v int64) *DescribeInsightRulesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeInsightRulesInput) SetNextToken(v string) *DescribeInsightRulesInput { + s.NextToken = &v + return s +} + +type DescribeInsightRulesOutput struct { + _ struct{} `type:"structure"` + + // The rules returned by the operation. + InsightRules []*InsightRule `type:"list"` + + // If this parameter is present, it is a token that marks the start of the next + // batch of returned results. + NextToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeInsightRulesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeInsightRulesOutput) GoString() string { + return s.String() +} + +// SetInsightRules sets the InsightRules field's value. +func (s *DescribeInsightRulesOutput) SetInsightRules(v []*InsightRule) *DescribeInsightRulesOutput { + s.InsightRules = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInsightRulesOutput) SetNextToken(v string) *DescribeInsightRulesOutput { + s.NextToken = &v + return s +} + +// A dimension is a name/value pair that is part of the identity of a metric. +// Because dimensions are part of the unique identifier for a metric, whenever +// you add a unique name/value pair to one of your metrics, you are creating +// a new variation of that metric. For example, many Amazon EC2 metrics publish +// InstanceId as a dimension name, and the actual instance ID as the value for +// that dimension. +// +// You can assign up to 30 dimensions to a metric. 
+type Dimension struct { + _ struct{} `type:"structure"` + + // The name of the dimension. Dimension names must contain only ASCII characters, + // must include at least one non-whitespace character, and cannot start with + // a colon (:). ASCII control characters are not supported as part of dimension + // names. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The value of the dimension. Dimension values must contain only ASCII characters + // and must include at least one non-whitespace character. ASCII control characters + // are not supported as part of dimension values. + // + // Value is a required field + Value *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Dimension) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Dimension) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Dimension) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Dimension"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + if s.Value != nil && len(*s.Value) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Value", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *Dimension) SetName(v string) *Dimension { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Dimension) SetValue(v string) *Dimension { + s.Value = &v + return s +} + +// Represents filters for a dimension. +type DimensionFilter struct { + _ struct{} `type:"structure"` + + // The dimension name to be matched. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The value of the dimension to be matched. + Value *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DimensionFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DimensionFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DimensionFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DimensionFilter"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Value != nil && len(*s.Value) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Value", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DimensionFilter) SetName(v string) *DimensionFilter { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *DimensionFilter) SetValue(v string) *DimensionFilter { + s.Value = &v + return s +} + +type DisableAlarmActionsInput struct { + _ struct{} `type:"structure"` + + // The names of the alarms. + // + // AlarmNames is a required field + AlarmNames []*string `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableAlarmActionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableAlarmActionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DisableAlarmActionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableAlarmActionsInput"} + if s.AlarmNames == nil { + invalidParams.Add(request.NewErrParamRequired("AlarmNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAlarmNames sets the AlarmNames field's value. +func (s *DisableAlarmActionsInput) SetAlarmNames(v []*string) *DisableAlarmActionsInput { + s.AlarmNames = v + return s +} + +type DisableAlarmActionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableAlarmActionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableAlarmActionsOutput) GoString() string { + return s.String() +} + +type DisableInsightRulesInput struct { + _ struct{} `type:"structure"` + + // An array of the rule names to disable. If you need to find out the names + // of your rules, use DescribeInsightRules (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeInsightRules.html). + // + // RuleNames is a required field + RuleNames []*string `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DisableInsightRulesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableInsightRulesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisableInsightRulesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableInsightRulesInput"} + if s.RuleNames == nil { + invalidParams.Add(request.NewErrParamRequired("RuleNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRuleNames sets the RuleNames field's value. +func (s *DisableInsightRulesInput) SetRuleNames(v []*string) *DisableInsightRulesInput { + s.RuleNames = v + return s +} + +type DisableInsightRulesOutput struct { + _ struct{} `type:"structure"` + + // An array listing the rules that could not be disabled. You cannot disable + // built-in rules. + Failures []*PartialFailure `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableInsightRulesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DisableInsightRulesOutput) GoString() string { + return s.String() +} + +// SetFailures sets the Failures field's value. 
+func (s *DisableInsightRulesOutput) SetFailures(v []*PartialFailure) *DisableInsightRulesOutput { + s.Failures = v + return s +} + +type EnableAlarmActionsInput struct { + _ struct{} `type:"structure"` + + // The names of the alarms. + // + // AlarmNames is a required field + AlarmNames []*string `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableAlarmActionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableAlarmActionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EnableAlarmActionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableAlarmActionsInput"} + if s.AlarmNames == nil { + invalidParams.Add(request.NewErrParamRequired("AlarmNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAlarmNames sets the AlarmNames field's value. +func (s *EnableAlarmActionsInput) SetAlarmNames(v []*string) *EnableAlarmActionsInput { + s.AlarmNames = v + return s +} + +type EnableAlarmActionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s EnableAlarmActionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableAlarmActionsOutput) GoString() string { + return s.String() +} + +type EnableInsightRulesInput struct { + _ struct{} `type:"structure"` + + // An array of the rule names to enable. If you need to find out the names of + // your rules, use DescribeInsightRules (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeInsightRules.html). + // + // RuleNames is a required field + RuleNames []*string `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableInsightRulesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EnableInsightRulesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EnableInsightRulesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableInsightRulesInput"} + if s.RuleNames == nil { + invalidParams.Add(request.NewErrParamRequired("RuleNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRuleNames sets the RuleNames field's value. 
+func (s *EnableInsightRulesInput) SetRuleNames(v []*string) *EnableInsightRulesInput {
+	s.RuleNames = v
+	return s
+}
+
+type EnableInsightRulesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// An array listing the rules that could not be enabled. You cannot disable
+	// or enable built-in rules.
+	Failures []*PartialFailure `type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s EnableInsightRulesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s EnableInsightRulesOutput) GoString() string {
+	return s.String()
+}
+
+// SetFailures sets the Failures field's value.
+func (s *EnableInsightRulesOutput) SetFailures(v []*PartialFailure) *EnableInsightRulesOutput {
+	s.Failures = v
+	return s
+}
+
+// Entity describes the entity that emitted the metric data, as attached to a
+// PutMetricData call via EntityMetricData.
+type Entity struct {
+	_ struct{} `type:"structure"`
+
+	// Additional free-form attributes of the entity, as key-value pairs.
+	// NOTE(review): presumably contextual metadata (e.g. platform details) as
+	// opposed to identity — confirm against the CloudWatch Entity API docs.
+	Attributes map[string]*string `type:"map"`
+
+	// Key-value pairs that identify the entity. Per the `min:"2"` constraint
+	// (enforced by Validate), at least two key attributes must be supplied
+	// when this field is set.
+	KeyAttributes map[string]*string `min:"2" type:"map"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Entity) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Entity) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Entity) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "Entity"}
+	// KeyAttributes is optional, but when present must carry at least two
+	// entries (the `min:"2"` constraint on the field).
+	if s.KeyAttributes != nil && len(s.KeyAttributes) < 2 {
+		invalidParams.Add(request.NewErrParamMinLen("KeyAttributes", 2))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAttributes sets the Attributes field's value.
+func (s *Entity) SetAttributes(v map[string]*string) *Entity {
+	s.Attributes = v
+	return s
+}
+
+// SetKeyAttributes sets the KeyAttributes field's value.
+func (s *Entity) SetKeyAttributes(v map[string]*string) *Entity {
+	s.KeyAttributes = v
+	return s
+}
+
+// EntityMetricData associates a batch of metric data points with the entity
+// that emitted them.
+type EntityMetricData struct {
+	_ struct{} `type:"structure"`
+
+	// The entity the metric data in MetricData belongs to.
+	Entity *Entity `type:"structure"`
+
+	// The metric data to associate with Entity.
+	MetricData []*MetricDatum `type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s EntityMetricData) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s EntityMetricData) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *EntityMetricData) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EntityMetricData"} + if s.Entity != nil { + if err := s.Entity.Validate(); err != nil { + invalidParams.AddNested("Entity", err.(request.ErrInvalidParams)) + } + } + if s.MetricData != nil { + for i, v := range s.MetricData { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MetricData", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEntity sets the Entity field's value. +func (s *EntityMetricData) SetEntity(v *Entity) *EntityMetricData { + s.Entity = v + return s +} + +// SetMetricData sets the MetricData field's value. +func (s *EntityMetricData) SetMetricData(v []*MetricDatum) *EntityMetricData { + s.MetricData = v + return s +} + +type GetDashboardInput struct { + _ struct{} `type:"structure"` + + // The name of the dashboard to be described. + // + // DashboardName is a required field + DashboardName *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDashboardInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDashboardInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetDashboardInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDashboardInput"} + if s.DashboardName == nil { + invalidParams.Add(request.NewErrParamRequired("DashboardName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDashboardName sets the DashboardName field's value. +func (s *GetDashboardInput) SetDashboardName(v string) *GetDashboardInput { + s.DashboardName = &v + return s +} + +type GetDashboardOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dashboard. + DashboardArn *string `type:"string"` + + // The detailed information about the dashboard, including what widgets are + // included and their location on the dashboard. For more information about + // the DashboardBody syntax, see Dashboard Body Structure and Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CloudWatch-Dashboard-Body-Structure.html). + DashboardBody *string `type:"string"` + + // The name of the dashboard. + DashboardName *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDashboardOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDashboardOutput) GoString() string { + return s.String() +} + +// SetDashboardArn sets the DashboardArn field's value. +func (s *GetDashboardOutput) SetDashboardArn(v string) *GetDashboardOutput { + s.DashboardArn = &v + return s +} + +// SetDashboardBody sets the DashboardBody field's value. 
+func (s *GetDashboardOutput) SetDashboardBody(v string) *GetDashboardOutput { + s.DashboardBody = &v + return s +} + +// SetDashboardName sets the DashboardName field's value. +func (s *GetDashboardOutput) SetDashboardName(v string) *GetDashboardOutput { + s.DashboardName = &v + return s +} + +type GetInsightRuleReportInput struct { + _ struct{} `type:"structure"` + + // The end time of the data to use in the report. When used in a raw HTTP Query + // API, it is formatted as yyyy-MM-dd'T'HH:mm:ss. For example, 2019-07-01T23:59:59. + // + // EndTime is a required field + EndTime *time.Time `type:"timestamp" required:"true"` + + // The maximum number of contributors to include in the report. The range is + // 1 to 100. If you omit this, the default of 10 is used. + MaxContributorCount *int64 `type:"integer"` + + // Specifies which metrics to use for aggregation of contributor values for + // the report. You can specify one or more of the following metrics: + // + // * UniqueContributors -- the number of unique contributors for each data + // point. + // + // * MaxContributorValue -- the value of the top contributor for each data + // point. The identity of the contributor might change for each data point + // in the graph. If this rule aggregates by COUNT, the top contributor for + // each data point is the contributor with the most occurrences in that period. + // If the rule aggregates by SUM, the top contributor is the contributor + // with the highest sum in the log field specified by the rule's Value, during + // that period. + // + // * SampleCount -- the number of data points matched by the rule. + // + // * Sum -- the sum of the values from all contributors during the time period + // represented by that data point. + // + // * Minimum -- the minimum value from a single observation during the time + // period represented by that data point. 
+ // + // * Maximum -- the maximum value from a single observation during the time + // period represented by that data point. + // + // * Average -- the average value from all contributors during the time period + // represented by that data point. + Metrics []*string `type:"list"` + + // Determines what statistic to use to rank the contributors. Valid values are + // Sum and Maximum. + OrderBy *string `min:"1" type:"string"` + + // The period, in seconds, to use for the statistics in the InsightRuleMetricDatapoint + // results. + // + // Period is a required field + Period *int64 `min:"1" type:"integer" required:"true"` + + // The name of the rule that you want to see data from. + // + // RuleName is a required field + RuleName *string `min:"1" type:"string" required:"true"` + + // The start time of the data to use in the report. When used in a raw HTTP + // Query API, it is formatted as yyyy-MM-dd'T'HH:mm:ss. For example, 2019-07-01T23:59:59. + // + // StartTime is a required field + StartTime *time.Time `type:"timestamp" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetInsightRuleReportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetInsightRuleReportInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetInsightRuleReportInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetInsightRuleReportInput"} + if s.EndTime == nil { + invalidParams.Add(request.NewErrParamRequired("EndTime")) + } + if s.OrderBy != nil && len(*s.OrderBy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OrderBy", 1)) + } + if s.Period == nil { + invalidParams.Add(request.NewErrParamRequired("Period")) + } + if s.Period != nil && *s.Period < 1 { + invalidParams.Add(request.NewErrParamMinValue("Period", 1)) + } + if s.RuleName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleName")) + } + if s.RuleName != nil && len(*s.RuleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleName", 1)) + } + if s.StartTime == nil { + invalidParams.Add(request.NewErrParamRequired("StartTime")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndTime sets the EndTime field's value. +func (s *GetInsightRuleReportInput) SetEndTime(v time.Time) *GetInsightRuleReportInput { + s.EndTime = &v + return s +} + +// SetMaxContributorCount sets the MaxContributorCount field's value. +func (s *GetInsightRuleReportInput) SetMaxContributorCount(v int64) *GetInsightRuleReportInput { + s.MaxContributorCount = &v + return s +} + +// SetMetrics sets the Metrics field's value. +func (s *GetInsightRuleReportInput) SetMetrics(v []*string) *GetInsightRuleReportInput { + s.Metrics = v + return s +} + +// SetOrderBy sets the OrderBy field's value. +func (s *GetInsightRuleReportInput) SetOrderBy(v string) *GetInsightRuleReportInput { + s.OrderBy = &v + return s +} + +// SetPeriod sets the Period field's value. +func (s *GetInsightRuleReportInput) SetPeriod(v int64) *GetInsightRuleReportInput { + s.Period = &v + return s +} + +// SetRuleName sets the RuleName field's value. 
+func (s *GetInsightRuleReportInput) SetRuleName(v string) *GetInsightRuleReportInput { + s.RuleName = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *GetInsightRuleReportInput) SetStartTime(v time.Time) *GetInsightRuleReportInput { + s.StartTime = &v + return s +} + +type GetInsightRuleReportOutput struct { + _ struct{} `type:"structure"` + + // The sum of the values from all individual contributors that match the rule. + AggregateValue *float64 `type:"double"` + + // Specifies whether this rule aggregates contributor data by COUNT or SUM. + AggregationStatistic *string `type:"string"` + + // An approximate count of the unique contributors found by this rule in this + // time period. + ApproximateUniqueCount *int64 `type:"long"` + + // An array of the unique contributors found by this rule in this time period. + // If the rule contains multiple keys, each combination of values for the keys + // counts as a unique contributor. + Contributors []*InsightRuleContributor `type:"list"` + + // An array of the strings used as the keys for this rule. The keys are the + // dimensions used to classify contributors. If the rule contains more than + // one key, then each unique combination of values for the keys is counted as + // a unique contributor. + KeyLabels []*string `type:"list"` + + // A time series of metric data points that matches the time period in the rule + // request. + MetricDatapoints []*InsightRuleMetricDatapoint `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetInsightRuleReportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetInsightRuleReportOutput) GoString() string { + return s.String() +} + +// SetAggregateValue sets the AggregateValue field's value. +func (s *GetInsightRuleReportOutput) SetAggregateValue(v float64) *GetInsightRuleReportOutput { + s.AggregateValue = &v + return s +} + +// SetAggregationStatistic sets the AggregationStatistic field's value. +func (s *GetInsightRuleReportOutput) SetAggregationStatistic(v string) *GetInsightRuleReportOutput { + s.AggregationStatistic = &v + return s +} + +// SetApproximateUniqueCount sets the ApproximateUniqueCount field's value. +func (s *GetInsightRuleReportOutput) SetApproximateUniqueCount(v int64) *GetInsightRuleReportOutput { + s.ApproximateUniqueCount = &v + return s +} + +// SetContributors sets the Contributors field's value. +func (s *GetInsightRuleReportOutput) SetContributors(v []*InsightRuleContributor) *GetInsightRuleReportOutput { + s.Contributors = v + return s +} + +// SetKeyLabels sets the KeyLabels field's value. +func (s *GetInsightRuleReportOutput) SetKeyLabels(v []*string) *GetInsightRuleReportOutput { + s.KeyLabels = v + return s +} + +// SetMetricDatapoints sets the MetricDatapoints field's value. +func (s *GetInsightRuleReportOutput) SetMetricDatapoints(v []*InsightRuleMetricDatapoint) *GetInsightRuleReportOutput { + s.MetricDatapoints = v + return s +} + +type GetMetricDataInput struct { + _ struct{} `type:"structure"` + + // The time stamp indicating the latest data to be returned. + // + // The value specified is exclusive; results include data points up to the specified + // time stamp. + // + // For better performance, specify StartTime and EndTime values that align with + // the value of the metric's Period and sync up with the beginning and end of + // an hour. 
For example, if the Period of a metric is 5 minutes, specifying + // 12:05 or 12:30 as EndTime can get a faster response from CloudWatch than + // setting 12:07 or 12:29 as the EndTime. + // + // EndTime is a required field + EndTime *time.Time `type:"timestamp" required:"true"` + + // This structure includes the Timezone parameter, which you can use to specify + // your time zone so that the labels of returned data display the correct time + // for your time zone. + LabelOptions *LabelOptions `type:"structure"` + + // The maximum number of data points the request should return before paginating. + // If you omit this, the default of 100,800 is used. + MaxDatapoints *int64 `type:"integer"` + + // The metric queries to be returned. A single GetMetricData call can include + // as many as 500 MetricDataQuery structures. Each of these structures can specify + // either a metric to retrieve, a Metrics Insights query, or a math expression + // to perform on retrieved data. + // + // MetricDataQueries is a required field + MetricDataQueries []*MetricDataQuery `type:"list" required:"true"` + + // Include this value, if it was returned by the previous GetMetricData operation, + // to get the next set of data points. + NextToken *string `type:"string"` + + // The order in which data points should be returned. TimestampDescending returns + // the newest data first and paginates when the MaxDatapoints limit is reached. + // TimestampAscending returns the oldest data first and paginates when the MaxDatapoints + // limit is reached. + // + // If you omit this parameter, the default of TimestampDescending is used. + ScanBy *string `type:"string" enum:"ScanBy"` + + // The time stamp indicating the earliest data to be returned. + // + // The value specified is inclusive; results include data points with the specified + // time stamp. 
+ // + // CloudWatch rounds the specified time stamp as follows: + // + // * Start time less than 15 days ago - Round down to the nearest whole minute. + // For example, 12:32:34 is rounded down to 12:32:00. + // + // * Start time between 15 and 63 days ago - Round down to the nearest 5-minute + // clock interval. For example, 12:32:34 is rounded down to 12:30:00. + // + // * Start time greater than 63 days ago - Round down to the nearest 1-hour + // clock interval. For example, 12:32:34 is rounded down to 12:00:00. + // + // If you set Period to 5, 10, or 30, the start time of your request is rounded + // down to the nearest time that corresponds to even 5-, 10-, or 30-second divisions + // of a minute. For example, if you make a query at (HH:mm:ss) 01:05:23 for + // the previous 10-second period, the start time of your request is rounded + // down and you receive data from 01:05:10 to 01:05:20. If you make a query + // at 15:07:17 for the previous 5 minutes of data, using a period of 5 seconds, + // you receive data timestamped between 15:02:15 and 15:07:15. + // + // For better performance, specify StartTime and EndTime values that align with + // the value of the metric's Period and sync up with the beginning and end of + // an hour. For example, if the Period of a metric is 5 minutes, specifying + // 12:05 or 12:30 as StartTime can get a faster response from CloudWatch than + // setting 12:07 or 12:29 as the StartTime. + // + // StartTime is a required field + StartTime *time.Time `type:"timestamp" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetMetricDataInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetMetricDataInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetMetricDataInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMetricDataInput"} + if s.EndTime == nil { + invalidParams.Add(request.NewErrParamRequired("EndTime")) + } + if s.MetricDataQueries == nil { + invalidParams.Add(request.NewErrParamRequired("MetricDataQueries")) + } + if s.StartTime == nil { + invalidParams.Add(request.NewErrParamRequired("StartTime")) + } + if s.MetricDataQueries != nil { + for i, v := range s.MetricDataQueries { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MetricDataQueries", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndTime sets the EndTime field's value. +func (s *GetMetricDataInput) SetEndTime(v time.Time) *GetMetricDataInput { + s.EndTime = &v + return s +} + +// SetLabelOptions sets the LabelOptions field's value. +func (s *GetMetricDataInput) SetLabelOptions(v *LabelOptions) *GetMetricDataInput { + s.LabelOptions = v + return s +} + +// SetMaxDatapoints sets the MaxDatapoints field's value. +func (s *GetMetricDataInput) SetMaxDatapoints(v int64) *GetMetricDataInput { + s.MaxDatapoints = &v + return s +} + +// SetMetricDataQueries sets the MetricDataQueries field's value. +func (s *GetMetricDataInput) SetMetricDataQueries(v []*MetricDataQuery) *GetMetricDataInput { + s.MetricDataQueries = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *GetMetricDataInput) SetNextToken(v string) *GetMetricDataInput { + s.NextToken = &v + return s +} + +// SetScanBy sets the ScanBy field's value. +func (s *GetMetricDataInput) SetScanBy(v string) *GetMetricDataInput { + s.ScanBy = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *GetMetricDataInput) SetStartTime(v time.Time) *GetMetricDataInput { + s.StartTime = &v + return s +} + +type GetMetricDataOutput struct { + _ struct{} `type:"structure"` + + // Contains a message about this GetMetricData operation, if the operation results + // in such a message. An example of a message that might be returned is Maximum + // number of allowed metrics exceeded. If there is a message, as much of the + // operation as possible is still executed. + // + // A message appears here only if it is related to the global GetMetricData + // operation. Any message about a specific metric returned by the operation + // appears in the MetricDataResult object returned for that metric. + Messages []*MessageData `type:"list"` + + // The metrics that are returned, including the metric name, namespace, and + // dimensions. + MetricDataResults []*MetricDataResult `type:"list"` + + // A token that marks the next batch of returned results. + NextToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetMetricDataOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s GetMetricDataOutput) GoString() string { + return s.String() +} + +// SetMessages sets the Messages field's value. +func (s *GetMetricDataOutput) SetMessages(v []*MessageData) *GetMetricDataOutput { + s.Messages = v + return s +} + +// SetMetricDataResults sets the MetricDataResults field's value. +func (s *GetMetricDataOutput) SetMetricDataResults(v []*MetricDataResult) *GetMetricDataOutput { + s.MetricDataResults = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetMetricDataOutput) SetNextToken(v string) *GetMetricDataOutput { + s.NextToken = &v + return s +} + +type GetMetricStatisticsInput struct { + _ struct{} `type:"structure"` + + // The dimensions. If the metric contains multiple dimensions, you must include + // a value for each dimension. CloudWatch treats each unique combination of + // dimensions as a separate metric. If a specific combination of dimensions + // was not published, you can't retrieve statistics for it. You must specify + // the same dimensions that were used when the metrics were created. For an + // example, see Dimension Combinations (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#dimension-combinations) + // in the Amazon CloudWatch User Guide. For more information about specifying + // dimensions, see Publishing Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html) + // in the Amazon CloudWatch User Guide. + Dimensions []*Dimension `type:"list"` + + // The time stamp that determines the last data point to return. + // + // The value specified is exclusive; results include data points up to the specified + // time stamp. In a raw HTTP query, the time stamp must be in ISO 8601 UTC format + // (for example, 2016-10-10T23:00:00Z). + // + // EndTime is a required field + EndTime *time.Time `type:"timestamp" required:"true"` + + // The percentile statistics. Specify values between p0.0 and p100. 
When calling
+	// GetMetricStatistics, you must specify either Statistics or ExtendedStatistics,
+	// but not both. Percentile statistics are not available for metrics when any
+	// of the metric values are negative numbers.
+	ExtendedStatistics []*string `min:"1" type:"list"`
+
+	// The name of the metric, with or without spaces.
+	//
+	// MetricName is a required field
+	MetricName *string `min:"1" type:"string" required:"true"`
+
+	// The namespace of the metric, with or without spaces.
+	//
+	// Namespace is a required field
+	Namespace *string `min:"1" type:"string" required:"true"`
+
+	// The granularity, in seconds, of the returned data points. For metrics with
+	// regular resolution, a period can be as short as one minute (60 seconds) and
+	// must be a multiple of 60. For high-resolution metrics that are collected
+	// at intervals of less than one minute, the period can be 1, 5, 10, 30, 60,
+	// or any multiple of 60. High-resolution metrics are those metrics stored by
+	// a PutMetricData call that includes a StorageResolution of 1 second.
+	//
+	// If the StartTime parameter specifies a time stamp that is greater than 3
+	// hours ago, you must specify the period as follows or no data points in that
+	// time range are returned:
+	//
+	// * Start time between 3 hours and 15 days ago - Use a multiple of 60 seconds
+	// (1 minute).
+	//
+	// * Start time between 15 and 63 days ago - Use a multiple of 300 seconds
+	// (5 minutes).
+	//
+	// * Start time greater than 63 days ago - Use a multiple of 3600 seconds
+	// (1 hour).
+	//
+	// Period is a required field
+	Period *int64 `min:"1" type:"integer" required:"true"`
+
+	// The time stamp that determines the first data point to return. Start times
+	// are evaluated relative to the time that CloudWatch receives the request.
+	//
+	// The value specified is inclusive; results include data points with the specified
+	// time stamp.
In a raw HTTP query, the time stamp must be in ISO 8601 UTC format + // (for example, 2016-10-03T23:00:00Z). + // + // CloudWatch rounds the specified time stamp as follows: + // + // * Start time less than 15 days ago - Round down to the nearest whole minute. + // For example, 12:32:34 is rounded down to 12:32:00. + // + // * Start time between 15 and 63 days ago - Round down to the nearest 5-minute + // clock interval. For example, 12:32:34 is rounded down to 12:30:00. + // + // * Start time greater than 63 days ago - Round down to the nearest 1-hour + // clock interval. For example, 12:32:34 is rounded down to 12:00:00. + // + // If you set Period to 5, 10, or 30, the start time of your request is rounded + // down to the nearest time that corresponds to even 5-, 10-, or 30-second divisions + // of a minute. For example, if you make a query at (HH:mm:ss) 01:05:23 for + // the previous 10-second period, the start time of your request is rounded + // down and you receive data from 01:05:10 to 01:05:20. If you make a query + // at 15:07:17 for the previous 5 minutes of data, using a period of 5 seconds, + // you receive data timestamped between 15:02:15 and 15:07:15. + // + // StartTime is a required field + StartTime *time.Time `type:"timestamp" required:"true"` + + // The metric statistics, other than percentile. For percentile statistics, + // use ExtendedStatistics. When calling GetMetricStatistics, you must specify + // either Statistics or ExtendedStatistics, but not both. + Statistics []*string `min:"1" type:"list" enum:"Statistic"` + + // The unit for a given metric. If you omit Unit, all data that was collected + // with any unit is returned, along with the corresponding units that were specified + // when the data was reported to CloudWatch. If you specify a unit, the operation + // returns only data that was collected with that unit specified. If you specify + // a unit that does not match the data collected, the results of the operation + // are null. 
CloudWatch does not perform unit conversions. + Unit *string `type:"string" enum:"StandardUnit"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetMetricStatisticsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetMetricStatisticsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetMetricStatisticsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMetricStatisticsInput"} + if s.EndTime == nil { + invalidParams.Add(request.NewErrParamRequired("EndTime")) + } + if s.ExtendedStatistics != nil && len(s.ExtendedStatistics) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ExtendedStatistics", 1)) + } + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.MetricName != nil && len(*s.MetricName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricName", 1)) + } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } + if s.Period == nil { + invalidParams.Add(request.NewErrParamRequired("Period")) + } + if s.Period != nil && *s.Period < 1 { + invalidParams.Add(request.NewErrParamMinValue("Period", 1)) + } + if s.StartTime == nil { + invalidParams.Add(request.NewErrParamRequired("StartTime")) + } + if s.Statistics != nil && len(s.Statistics) < 1 { + 
invalidParams.Add(request.NewErrParamMinLen("Statistics", 1)) + } + if s.Dimensions != nil { + for i, v := range s.Dimensions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDimensions sets the Dimensions field's value. +func (s *GetMetricStatisticsInput) SetDimensions(v []*Dimension) *GetMetricStatisticsInput { + s.Dimensions = v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *GetMetricStatisticsInput) SetEndTime(v time.Time) *GetMetricStatisticsInput { + s.EndTime = &v + return s +} + +// SetExtendedStatistics sets the ExtendedStatistics field's value. +func (s *GetMetricStatisticsInput) SetExtendedStatistics(v []*string) *GetMetricStatisticsInput { + s.ExtendedStatistics = v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *GetMetricStatisticsInput) SetMetricName(v string) *GetMetricStatisticsInput { + s.MetricName = &v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *GetMetricStatisticsInput) SetNamespace(v string) *GetMetricStatisticsInput { + s.Namespace = &v + return s +} + +// SetPeriod sets the Period field's value. +func (s *GetMetricStatisticsInput) SetPeriod(v int64) *GetMetricStatisticsInput { + s.Period = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *GetMetricStatisticsInput) SetStartTime(v time.Time) *GetMetricStatisticsInput { + s.StartTime = &v + return s +} + +// SetStatistics sets the Statistics field's value. +func (s *GetMetricStatisticsInput) SetStatistics(v []*string) *GetMetricStatisticsInput { + s.Statistics = v + return s +} + +// SetUnit sets the Unit field's value. 
+func (s *GetMetricStatisticsInput) SetUnit(v string) *GetMetricStatisticsInput { + s.Unit = &v + return s +} + +type GetMetricStatisticsOutput struct { + _ struct{} `type:"structure"` + + // The data points for the specified metric. + Datapoints []*Datapoint `type:"list"` + + // A label for the specified metric. + Label *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetMetricStatisticsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetMetricStatisticsOutput) GoString() string { + return s.String() +} + +// SetDatapoints sets the Datapoints field's value. +func (s *GetMetricStatisticsOutput) SetDatapoints(v []*Datapoint) *GetMetricStatisticsOutput { + s.Datapoints = v + return s +} + +// SetLabel sets the Label field's value. +func (s *GetMetricStatisticsOutput) SetLabel(v string) *GetMetricStatisticsOutput { + s.Label = &v + return s +} + +type GetMetricStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the metric stream to retrieve information about. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetMetricStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetMetricStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetMetricStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMetricStreamInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *GetMetricStreamInput) SetName(v string) *GetMetricStreamInput { + s.Name = &v + return s +} + +type GetMetricStreamOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the metric stream. + Arn *string `min:"1" type:"string"` + + // The date that the metric stream was created. + CreationDate *time.Time `type:"timestamp"` + + // If this array of metric namespaces is present, then these namespaces are + // the only metric namespaces that are not streamed by this metric stream. In + // this case, all other metric namespaces in the account are streamed by this + // metric stream. + ExcludeFilters []*MetricStreamFilter `type:"list"` + + // The ARN of the Amazon Kinesis Data Firehose delivery stream that is used + // by this metric stream. + FirehoseArn *string `min:"1" type:"string"` + + // If this array of metric namespaces is present, then these namespaces are + // the only metric namespaces that are streamed by this metric stream. + IncludeFilters []*MetricStreamFilter `type:"list"` + + // If this is true and this metric stream is in a monitoring account, then the + // stream includes metrics from source accounts that the monitoring account + // is linked to. 
+	IncludeLinkedAccountsMetrics *bool `type:"boolean"`
+
+	// The date of the most recent update to the metric stream's configuration.
+	LastUpdateDate *time.Time `type:"timestamp"`
+
+	// The name of the metric stream.
+	Name *string `min:"1" type:"string"`
+
+	// The output format for the stream. Valid values are json, opentelemetry1.0,
+	// and opentelemetry0.7. For more information about metric stream output formats,
+	// see Metric streams output formats (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-formats.html).
+	OutputFormat *string `min:"1" type:"string"`
+
+	// The ARN of the IAM role that is used by this metric stream.
+	RoleArn *string `min:"1" type:"string"`
+
+	// The state of the metric stream. The possible values are running and stopped.
+	State *string `type:"string"`
+
+	// Each entry in this array displays information about one or more metrics that
+	// include additional statistics in the metric stream. For more information
+	// about the additional statistics, see CloudWatch statistics definitions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html).
+	StatisticsConfigurations []*MetricStreamStatisticsConfiguration `type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetMetricStreamOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetMetricStreamOutput) GoString() string {
+	return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *GetMetricStreamOutput) SetArn(v string) *GetMetricStreamOutput { + s.Arn = &v + return s +} + +// SetCreationDate sets the CreationDate field's value. +func (s *GetMetricStreamOutput) SetCreationDate(v time.Time) *GetMetricStreamOutput { + s.CreationDate = &v + return s +} + +// SetExcludeFilters sets the ExcludeFilters field's value. +func (s *GetMetricStreamOutput) SetExcludeFilters(v []*MetricStreamFilter) *GetMetricStreamOutput { + s.ExcludeFilters = v + return s +} + +// SetFirehoseArn sets the FirehoseArn field's value. +func (s *GetMetricStreamOutput) SetFirehoseArn(v string) *GetMetricStreamOutput { + s.FirehoseArn = &v + return s +} + +// SetIncludeFilters sets the IncludeFilters field's value. +func (s *GetMetricStreamOutput) SetIncludeFilters(v []*MetricStreamFilter) *GetMetricStreamOutput { + s.IncludeFilters = v + return s +} + +// SetIncludeLinkedAccountsMetrics sets the IncludeLinkedAccountsMetrics field's value. +func (s *GetMetricStreamOutput) SetIncludeLinkedAccountsMetrics(v bool) *GetMetricStreamOutput { + s.IncludeLinkedAccountsMetrics = &v + return s +} + +// SetLastUpdateDate sets the LastUpdateDate field's value. +func (s *GetMetricStreamOutput) SetLastUpdateDate(v time.Time) *GetMetricStreamOutput { + s.LastUpdateDate = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetMetricStreamOutput) SetName(v string) *GetMetricStreamOutput { + s.Name = &v + return s +} + +// SetOutputFormat sets the OutputFormat field's value. +func (s *GetMetricStreamOutput) SetOutputFormat(v string) *GetMetricStreamOutput { + s.OutputFormat = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *GetMetricStreamOutput) SetRoleArn(v string) *GetMetricStreamOutput { + s.RoleArn = &v + return s +} + +// SetState sets the State field's value. 
+func (s *GetMetricStreamOutput) SetState(v string) *GetMetricStreamOutput { + s.State = &v + return s +} + +// SetStatisticsConfigurations sets the StatisticsConfigurations field's value. +func (s *GetMetricStreamOutput) SetStatisticsConfigurations(v []*MetricStreamStatisticsConfiguration) *GetMetricStreamOutput { + s.StatisticsConfigurations = v + return s +} + +type GetMetricWidgetImageInput struct { + _ struct{} `type:"structure"` + + // A JSON string that defines the bitmap graph to be retrieved. The string includes + // the metrics to include in the graph, statistics, annotations, title, axis + // limits, and so on. You can include only one MetricWidget parameter in each + // GetMetricWidgetImage call. + // + // For more information about the syntax of MetricWidget see GetMetricWidgetImage: + // Metric Widget Structure and Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CloudWatch-Metric-Widget-Structure.html). + // + // If any metric on the graph could not load all the requested data points, + // an orange triangle with an exclamation point appears next to the graph legend. + // + // MetricWidget is a required field + MetricWidget *string `type:"string" required:"true"` + + // The format of the resulting image. Only PNG images are supported. + // + // The default is png. If you specify png, the API returns an HTTP response + // with the content-type set to text/xml. The image data is in a MetricWidgetImage + // field. For example: + // + // > + // + // + // + // + // + // iVBORw0KGgoAAAANSUhEUgAAAlgAAAGQEAYAAAAip... + // + // + // + // + // + // + // + // 6f0d4192-4d42-11e8-82c1-f539a07e0e3b + // + // + // + // + // + // The image/png setting is intended only for custom HTTP requests. For most + // use cases, and all actions using an Amazon Web Services SDK, you should use + // png. If you specify image/png, the HTTP response has a content-type set to + // image/png, and the body of the response is a PNG image. 
+ OutputFormat *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetMetricWidgetImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetMetricWidgetImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetMetricWidgetImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMetricWidgetImageInput"} + if s.MetricWidget == nil { + invalidParams.Add(request.NewErrParamRequired("MetricWidget")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMetricWidget sets the MetricWidget field's value. +func (s *GetMetricWidgetImageInput) SetMetricWidget(v string) *GetMetricWidgetImageInput { + s.MetricWidget = &v + return s +} + +// SetOutputFormat sets the OutputFormat field's value. +func (s *GetMetricWidgetImageInput) SetOutputFormat(v string) *GetMetricWidgetImageInput { + s.OutputFormat = &v + return s +} + +type GetMetricWidgetImageOutput struct { + _ struct{} `type:"structure"` + + // The image of the graph, in the output format specified. The output is base64-encoded. + // MetricWidgetImage is automatically base64 encoded/decoded by the SDK. + MetricWidgetImage []byte `type:"blob"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetMetricWidgetImageOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetMetricWidgetImageOutput) GoString() string {
+	return s.String()
+}
+
+// SetMetricWidgetImage sets the MetricWidgetImage field's value.
+func (s *GetMetricWidgetImageOutput) SetMetricWidgetImage(v []byte) *GetMetricWidgetImageOutput {
+	s.MetricWidgetImage = v
+	return s
+}
+
+// This structure contains the definition for a Contributor Insights rule. For
+// more information about this rule, see Using Contributor Insights to analyze
+// high-cardinality data (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights.html)
+// in the Amazon CloudWatch User Guide.
+type InsightRule struct {
+	_ struct{} `type:"structure"`
+
+	// The definition of the rule, as a JSON object. The definition contains the
+	// keywords used to define contributors, the value to aggregate on if this rule
+	// returns a sum instead of a count, and the filters. For details on the valid
+	// syntax, see Contributor Insights Rule Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights-RuleSyntax.html).
+	//
+	// Definition is a required field
+	Definition *string `min:"1" type:"string" required:"true"`
+
+	DeleteAt *time.Time `type:"timestamp"`
+
+	DisableAt *time.Time `type:"timestamp"`
+
+	// An optional built-in rule that Amazon Web Services manages.
+	ManagedRule *bool `type:"boolean"`
+
+	// The name of the rule.
+ // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // For rules that you create, this is always {"Name": "CloudWatchLogRule", "Version": + // 1}. For managed rules, this is {"Name": "ServiceLogRule", "Version": 1} + // + // Schema is a required field + Schema *string `type:"string" required:"true"` + + // Indicates whether the rule is enabled or disabled. + // + // State is a required field + State *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InsightRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InsightRule) GoString() string { + return s.String() +} + +// SetDefinition sets the Definition field's value. +func (s *InsightRule) SetDefinition(v string) *InsightRule { + s.Definition = &v + return s +} + +// SetDeleteAt sets the DeleteAt field's value. +func (s *InsightRule) SetDeleteAt(v time.Time) *InsightRule { + s.DeleteAt = &v + return s +} + +// SetDisableAt sets the DisableAt field's value. +func (s *InsightRule) SetDisableAt(v time.Time) *InsightRule { + s.DisableAt = &v + return s +} + +// SetManagedRule sets the ManagedRule field's value. +func (s *InsightRule) SetManagedRule(v bool) *InsightRule { + s.ManagedRule = &v + return s +} + +// SetName sets the Name field's value. +func (s *InsightRule) SetName(v string) *InsightRule { + s.Name = &v + return s +} + +// SetSchema sets the Schema field's value. 
+func (s *InsightRule) SetSchema(v string) *InsightRule { + s.Schema = &v + return s +} + +// SetState sets the State field's value. +func (s *InsightRule) SetState(v string) *InsightRule { + s.State = &v + return s +} + +// One of the unique contributors found by a Contributor Insights rule. If the +// rule contains multiple keys, then a unique contributor is a unique combination +// of values from all the keys in the rule. +// +// If the rule contains a single key, then each unique contributor is each unique +// value for this key. +// +// For more information, see GetInsightRuleReport (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetInsightRuleReport.html). +type InsightRuleContributor struct { + _ struct{} `type:"structure"` + + // An approximation of the aggregate value that comes from this contributor. + // + // ApproximateAggregateValue is a required field + ApproximateAggregateValue *float64 `type:"double" required:"true"` + + // An array of the data points where this contributor is present. Only the data + // points when this contributor appeared are included in the array. + // + // Datapoints is a required field + Datapoints []*InsightRuleContributorDatapoint `type:"list" required:"true"` + + // One of the log entry field keywords that is used to define contributors for + // this rule. + // + // Keys is a required field + Keys []*string `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InsightRuleContributor) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s InsightRuleContributor) GoString() string { + return s.String() +} + +// SetApproximateAggregateValue sets the ApproximateAggregateValue field's value. +func (s *InsightRuleContributor) SetApproximateAggregateValue(v float64) *InsightRuleContributor { + s.ApproximateAggregateValue = &v + return s +} + +// SetDatapoints sets the Datapoints field's value. +func (s *InsightRuleContributor) SetDatapoints(v []*InsightRuleContributorDatapoint) *InsightRuleContributor { + s.Datapoints = v + return s +} + +// SetKeys sets the Keys field's value. +func (s *InsightRuleContributor) SetKeys(v []*string) *InsightRuleContributor { + s.Keys = v + return s +} + +// One data point related to one contributor. +// +// For more information, see GetInsightRuleReport (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetInsightRuleReport.html) +// and InsightRuleContributor (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_InsightRuleContributor.html). +type InsightRuleContributorDatapoint struct { + _ struct{} `type:"structure"` + + // The approximate value that this contributor added during this timestamp. + // + // ApproximateValue is a required field + ApproximateValue *float64 `type:"double" required:"true"` + + // The timestamp of the data point. + // + // Timestamp is a required field + Timestamp *time.Time `type:"timestamp" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InsightRuleContributorDatapoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InsightRuleContributorDatapoint) GoString() string {
+	return s.String()
+}
+
+// SetApproximateValue sets the ApproximateValue field's value.
+func (s *InsightRuleContributorDatapoint) SetApproximateValue(v float64) *InsightRuleContributorDatapoint {
+	s.ApproximateValue = &v
+	return s
+}
+
+// SetTimestamp sets the Timestamp field's value.
+func (s *InsightRuleContributorDatapoint) SetTimestamp(v time.Time) *InsightRuleContributorDatapoint {
+	s.Timestamp = &v
+	return s
+}
+
+// One data point from the metric time series returned in a Contributor Insights
+// rule report.
+//
+// For more information, see GetInsightRuleReport (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetInsightRuleReport.html).
+type InsightRuleMetricDatapoint struct {
+	_ struct{} `type:"structure"`
+
+	// The average value from all contributors during the time period represented
+	// by that data point.
+	//
+	// This statistic is returned only if you included it in the Metrics array in
+	// your request.
+	Average *float64 `type:"double"`
+
+	// The maximum value provided by one contributor during this timestamp. Each
+	// timestamp is evaluated separately, so the identity of the max contributor
+	// could be different for each timestamp.
+	//
+	// This statistic is returned only if you included it in the Metrics array in
+	// your request.
+	MaxContributorValue *float64 `type:"double"`
+
+	// The maximum value from a single occurrence from a single contributor during
+	// the time period represented by that data point.
+	//
+	// This statistic is returned only if you included it in the Metrics array in
+	// your request.
+ Maximum *float64 `type:"double"` + + // The minimum value from a single contributor during the time period represented + // by that data point. + // + // This statistic is returned only if you included it in the Metrics array in + // your request. + Minimum *float64 `type:"double"` + + // The number of occurrences that matched the rule during this data point. + // + // This statistic is returned only if you included it in the Metrics array in + // your request. + SampleCount *float64 `type:"double"` + + // The sum of the values from all contributors during the time period represented + // by that data point. + // + // This statistic is returned only if you included it in the Metrics array in + // your request. + Sum *float64 `type:"double"` + + // The timestamp of the data point. + // + // Timestamp is a required field + Timestamp *time.Time `type:"timestamp" required:"true"` + + // The number of unique contributors who published data during this timestamp. + // + // This statistic is returned only if you included it in the Metrics array in + // your request. + UniqueContributors *float64 `type:"double"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InsightRuleMetricDatapoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InsightRuleMetricDatapoint) GoString() string { + return s.String() +} + +// SetAverage sets the Average field's value. 
+func (s *InsightRuleMetricDatapoint) SetAverage(v float64) *InsightRuleMetricDatapoint { + s.Average = &v + return s +} + +// SetMaxContributorValue sets the MaxContributorValue field's value. +func (s *InsightRuleMetricDatapoint) SetMaxContributorValue(v float64) *InsightRuleMetricDatapoint { + s.MaxContributorValue = &v + return s +} + +// SetMaximum sets the Maximum field's value. +func (s *InsightRuleMetricDatapoint) SetMaximum(v float64) *InsightRuleMetricDatapoint { + s.Maximum = &v + return s +} + +// SetMinimum sets the Minimum field's value. +func (s *InsightRuleMetricDatapoint) SetMinimum(v float64) *InsightRuleMetricDatapoint { + s.Minimum = &v + return s +} + +// SetSampleCount sets the SampleCount field's value. +func (s *InsightRuleMetricDatapoint) SetSampleCount(v float64) *InsightRuleMetricDatapoint { + s.SampleCount = &v + return s +} + +// SetSum sets the Sum field's value. +func (s *InsightRuleMetricDatapoint) SetSum(v float64) *InsightRuleMetricDatapoint { + s.Sum = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *InsightRuleMetricDatapoint) SetTimestamp(v time.Time) *InsightRuleMetricDatapoint { + s.Timestamp = &v + return s +} + +// SetUniqueContributors sets the UniqueContributors field's value. +func (s *InsightRuleMetricDatapoint) SetUniqueContributors(v float64) *InsightRuleMetricDatapoint { + s.UniqueContributors = &v + return s +} + +// This structure includes the Timezone parameter, which you can use to specify +// your time zone so that the labels that are associated with returned metrics +// display the correct time for your time zone. +// +// The Timezone value affects a label only if you have a time-based dynamic +// expression in the label. For more information about dynamic expressions in +// labels, see Using Dynamic Labels (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html). 
+type LabelOptions struct {
+	_ struct{} `type:"structure"`
+
+	// The time zone to use for metric data returned by this operation. The format
+	// is + or - followed by four digits. The first two digits indicate the number
+	// of hours ahead or behind of UTC, and the final two digits are the number
+	// of minutes. For example, +0130 indicates a time zone that is 1 hour and 30
+	// minutes ahead of UTC. The default is +0000.
+	Timezone *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LabelOptions) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LabelOptions) GoString() string {
+	return s.String()
+}
+
+// SetTimezone sets the Timezone field's value.
+func (s *LabelOptions) SetTimezone(v string) *LabelOptions {
+	s.Timezone = &v
+	return s
+}
+
+type ListDashboardsInput struct {
+	_ struct{} `type:"structure"`
+
+	// If you specify this parameter, only the dashboards with names starting with
+	// the specified string are listed. The maximum length is 255, and valid characters
+	// are A-Z, a-z, 0-9, ".", "-", and "_".
+	DashboardNamePrefix *string `type:"string"`
+
+	// The token returned by a previous call to indicate that there is more data
+	// available.
+	NextToken *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListDashboardsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListDashboardsInput) GoString() string {
+	return s.String()
+}
+
+// SetDashboardNamePrefix sets the DashboardNamePrefix field's value.
+func (s *ListDashboardsInput) SetDashboardNamePrefix(v string) *ListDashboardsInput {
+	s.DashboardNamePrefix = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListDashboardsInput) SetNextToken(v string) *ListDashboardsInput {
+	s.NextToken = &v
+	return s
+}
+
+type ListDashboardsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The list of matching dashboards.
+	DashboardEntries []*DashboardEntry `type:"list"`
+
+	// The token that marks the start of the next batch of returned results.
+	NextToken *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListDashboardsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListDashboardsOutput) GoString() string {
+	return s.String()
+}
+
+// SetDashboardEntries sets the DashboardEntries field's value.
+func (s *ListDashboardsOutput) SetDashboardEntries(v []*DashboardEntry) *ListDashboardsOutput {
+	s.DashboardEntries = v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListDashboardsOutput) SetNextToken(v string) *ListDashboardsOutput {
+	s.NextToken = &v
+	return s
+}
+
+type ListMetricStreamsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The maximum number of results to return in one operation.
+	MaxResults *int64 `min:"1" type:"integer"`
+
+	// Include this value, if it was returned by the previous call, to get the next
+	// set of metric streams.
+	NextToken *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListMetricStreamsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListMetricStreamsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListMetricStreamsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListMetricStreamsInput"}
+	if s.MaxResults != nil && *s.MaxResults < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListMetricStreamsInput) SetMaxResults(v int64) *ListMetricStreamsInput {
+	s.MaxResults = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListMetricStreamsInput) SetNextToken(v string) *ListMetricStreamsInput {
+	s.NextToken = &v
+	return s
+}
+
+type ListMetricStreamsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The array of metric stream entries.
+	Entries []*MetricStreamEntry `type:"list"`
+
+	// The token that marks the start of the next batch of returned results. You
+	// can use this token in a subsequent operation to get the next batch of results.
+	NextToken *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListMetricStreamsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListMetricStreamsOutput) GoString() string {
+	return s.String()
+}
+
+// SetEntries sets the Entries field's value.
+func (s *ListMetricStreamsOutput) SetEntries(v []*MetricStreamEntry) *ListMetricStreamsOutput {
+	s.Entries = v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListMetricStreamsOutput) SetNextToken(v string) *ListMetricStreamsOutput {
+	s.NextToken = &v
+	return s
+}
+
+type ListMetricsInput struct {
+	_ struct{} `type:"structure"`
+
+	// The dimensions to filter against. Only the dimensions that match exactly
+	// will be returned.
+	Dimensions []*DimensionFilter `type:"list"`
+
+	// If you are using this operation in a monitoring account, specify true to
+	// include metrics from source accounts in the returned data.
+	//
+	// The default is false.
+	IncludeLinkedAccounts *bool `type:"boolean"`
+
+	// The name of the metric to filter against. Only the metrics with names that
+	// match exactly will be returned.
+	MetricName *string `min:"1" type:"string"`
+
+	// The metric namespace to filter against. Only the namespace that matches exactly
+	// will be returned.
+	Namespace *string `min:"1" type:"string"`
+
+	// The token returned by a previous call to indicate that there is more data
+	// available.
+	NextToken *string `type:"string"`
+
+	// When you use this operation in a monitoring account, use this field to return
+	// metrics only from one source account. To do so, specify that source account
+	// ID in this field, and also specify true for IncludeLinkedAccounts.
+	OwningAccount *string `min:"1" type:"string"`
+
+	// To filter the results to show only metrics that have had data points published
+	// in the past three hours, specify this parameter with a value of PT3H. This
+	// is the only valid value for this parameter.
+	//
+	// The results that are returned are an approximation of the value you specify.
+	// There is a low probability that the returned results include metrics with
+	// last published data as much as 40 minutes more than the specified time interval.
+	RecentlyActive *string `type:"string" enum:"RecentlyActive"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListMetricsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListMetricsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListMetricsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListMetricsInput"}
+	if s.MetricName != nil && len(*s.MetricName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("MetricName", 1))
+	}
+	if s.Namespace != nil && len(*s.Namespace) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Namespace", 1))
+	}
+	if s.OwningAccount != nil && len(*s.OwningAccount) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("OwningAccount", 1))
+	}
+	if s.Dimensions != nil {
+		for i, v := range s.Dimensions {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDimensions sets the Dimensions field's value.
+func (s *ListMetricsInput) SetDimensions(v []*DimensionFilter) *ListMetricsInput {
+	s.Dimensions = v
+	return s
+}
+
+// SetIncludeLinkedAccounts sets the IncludeLinkedAccounts field's value.
+func (s *ListMetricsInput) SetIncludeLinkedAccounts(v bool) *ListMetricsInput {
+	s.IncludeLinkedAccounts = &v
+	return s
+}
+
+// SetMetricName sets the MetricName field's value.
+func (s *ListMetricsInput) SetMetricName(v string) *ListMetricsInput {
+	s.MetricName = &v
+	return s
+}
+
+// SetNamespace sets the Namespace field's value.
+func (s *ListMetricsInput) SetNamespace(v string) *ListMetricsInput {
+	s.Namespace = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListMetricsInput) SetNextToken(v string) *ListMetricsInput {
+	s.NextToken = &v
+	return s
+}
+
+// SetOwningAccount sets the OwningAccount field's value.
+func (s *ListMetricsInput) SetOwningAccount(v string) *ListMetricsInput {
+	s.OwningAccount = &v
+	return s
+}
+
+// SetRecentlyActive sets the RecentlyActive field's value.
+func (s *ListMetricsInput) SetRecentlyActive(v string) *ListMetricsInput {
+	s.RecentlyActive = &v
+	return s
+}
+
+type ListMetricsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The metrics that match your request.
+	Metrics []*Metric `type:"list"`
+
+	// The token that marks the start of the next batch of returned results.
+	NextToken *string `type:"string"`
+
+	// If you are using this operation in a monitoring account, this array contains
+	// the account IDs of the source accounts where the metrics in the returned
+	// data are from.
+	//
+	// This field is a 1:1 mapping between each metric that is returned and the
+	// ID of the owning account.
+	OwningAccounts []*string `type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListMetricsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListMetricsOutput) GoString() string {
+	return s.String()
+}
+
+// SetMetrics sets the Metrics field's value.
+func (s *ListMetricsOutput) SetMetrics(v []*Metric) *ListMetricsOutput {
+	s.Metrics = v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListMetricsOutput) SetNextToken(v string) *ListMetricsOutput {
+	s.NextToken = &v
+	return s
+}
+
+// SetOwningAccounts sets the OwningAccounts field's value.
+func (s *ListMetricsOutput) SetOwningAccounts(v []*string) *ListMetricsOutput {
+	s.OwningAccounts = v
+	return s
+}
+
+type ListTagsForResourceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ARN of the CloudWatch resource that you want to view tags for.
+	//
+	// The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name
+	//
+	// The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule/insight-rule-name
+	//
+	// For more information about ARN format, see Resource Types Defined by Amazon
+	// CloudWatch (https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatch.html#amazoncloudwatch-resources-for-iam-policies)
+	// in the Amazon Web Services General Reference.
+	//
+	// ResourceARN is a required field
+	ResourceARN *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListTagsForResourceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListTagsForResourceInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListTagsForResourceInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"}
+	if s.ResourceARN == nil {
+		invalidParams.Add(request.NewErrParamRequired("ResourceARN"))
+	}
+	if s.ResourceARN != nil && len(*s.ResourceARN) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetResourceARN sets the ResourceARN field's value.
+func (s *ListTagsForResourceInput) SetResourceARN(v string) *ListTagsForResourceInput {
+	s.ResourceARN = &v
+	return s
+}
+
+type ListTagsForResourceOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The list of tag keys and values associated with the resource you specified.
+	Tags []*Tag `type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListTagsForResourceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListTagsForResourceOutput) GoString() string {
+	return s.String()
+}
+
+// SetTags sets the Tags field's value.
+func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput {
+	s.Tags = v
+	return s
+}
+
+// A message returned by the GetMetricData API, including a code and a description.
+//
+// If a cross-Region GetMetricData operation fails with a code of Forbidden
+// and a value of Authentication too complex to retrieve cross region data,
+// you can correct the problem by running the GetMetricData operation in the
+// same Region where the metric data is.
+type MessageData struct {
+	_ struct{} `type:"structure"`
+
+	// The error code or status code associated with the message.
+	Code *string `type:"string"`
+
+	// The message text.
+	Value *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s MessageData) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s MessageData) GoString() string {
+	return s.String()
+}
+
+// SetCode sets the Code field's value.
+func (s *MessageData) SetCode(v string) *MessageData {
+	s.Code = &v
+	return s
+}
+
+// SetValue sets the Value field's value.
+func (s *MessageData) SetValue(v string) *MessageData {
+	s.Value = &v
+	return s
+}
+
+// Represents a specific metric.
+type Metric struct {
+	_ struct{} `type:"structure"`
+
+	// The dimensions for the metric.
+	Dimensions []*Dimension `type:"list"`
+
+	// The name of the metric. This is a required field.
+	MetricName *string `min:"1" type:"string"`
+
+	// The namespace of the metric.
+	Namespace *string `min:"1" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Metric) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Metric) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Metric) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "Metric"}
+	if s.MetricName != nil && len(*s.MetricName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("MetricName", 1))
+	}
+	if s.Namespace != nil && len(*s.Namespace) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Namespace", 1))
+	}
+	if s.Dimensions != nil {
+		for i, v := range s.Dimensions {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDimensions sets the Dimensions field's value.
+func (s *Metric) SetDimensions(v []*Dimension) *Metric {
+	s.Dimensions = v
+	return s
+}
+
+// SetMetricName sets the MetricName field's value.
+func (s *Metric) SetMetricName(v string) *Metric {
+	s.MetricName = &v
+	return s
+}
+
+// SetNamespace sets the Namespace field's value.
+func (s *Metric) SetNamespace(v string) *Metric {
+	s.Namespace = &v
+	return s
+}
+
+// The details about a metric alarm.
+type MetricAlarm struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates whether actions should be executed during any changes to the alarm
+	// state.
+	ActionsEnabled *bool `type:"boolean"`
+
+	// The actions to execute when this alarm transitions to the ALARM state from
+	// any other state. Each action is specified as an Amazon Resource Name (ARN).
+	AlarmActions []*string `type:"list"`
+
+	// The Amazon Resource Name (ARN) of the alarm.
+	AlarmArn *string `min:"1" type:"string"`
+
+	// The time stamp of the last update to the alarm configuration.
+	AlarmConfigurationUpdatedTimestamp *time.Time `type:"timestamp"`
+
+	// The description of the alarm.
+	AlarmDescription *string `type:"string"`
+
+	// The name of the alarm.
+	AlarmName *string `min:"1" type:"string"`
+
+	// The arithmetic operation to use when comparing the specified statistic and
+	// threshold. The specified statistic value is used as the first operand.
+	ComparisonOperator *string `type:"string" enum:"ComparisonOperator"`
+
+	// The number of data points that must be breaching to trigger the alarm.
+	DatapointsToAlarm *int64 `min:"1" type:"integer"`
+
+	// The dimensions for the metric associated with the alarm.
+	Dimensions []*Dimension `type:"list"`
+
+	// Used only for alarms based on percentiles. If ignore, the alarm state does
+	// not change during periods with too few data points to be statistically significant.
+	// If evaluate or this parameter is not used, the alarm is always evaluated
+	// and possibly changes state no matter how many data points are available.
+	EvaluateLowSampleCountPercentile *string `min:"1" type:"string"`
+
+	// The number of periods over which data is compared to the specified threshold.
+	EvaluationPeriods *int64 `min:"1" type:"integer"`
+
+	// The percentile statistic for the metric associated with the alarm. Specify
+	// a value between p0.0 and p100.
+	ExtendedStatistic *string `type:"string"`
+
+	// The actions to execute when this alarm transitions to the INSUFFICIENT_DATA
+	// state from any other state. Each action is specified as an Amazon Resource
+	// Name (ARN).
+	InsufficientDataActions []*string `type:"list"`
+
+	// The name of the metric associated with the alarm, if this is an alarm based
+	// on a single metric.
+	MetricName *string `min:"1" type:"string"`
+
+	// An array of MetricDataQuery structures, used in an alarm based on a metric
+	// math expression. Each structure either retrieves a metric or performs a math
+	// expression. One item in the Metrics array is the math expression that the
+	// alarm watches. This expression is designated by having ReturnData set to
+	// true.
+	Metrics []*MetricDataQuery `type:"list"`
+
+	// The namespace of the metric associated with the alarm.
+	Namespace *string `min:"1" type:"string"`
+
+	// The actions to execute when this alarm transitions to the OK state from any
+	// other state. Each action is specified as an Amazon Resource Name (ARN).
+	OKActions []*string `type:"list"`
+
+	// The period, in seconds, over which the statistic is applied.
+	Period *int64 `min:"1" type:"integer"`
+
+	// An explanation for the alarm state, in text format.
+	StateReason *string `type:"string"`
+
+	// An explanation for the alarm state, in JSON format.
+	StateReasonData *string `type:"string"`
+
+	// The date and time that the alarm's StateValue most recently changed.
+	StateTransitionedTimestamp *time.Time `type:"timestamp"`
+
+	// The time stamp of the last update to the value of either the StateValue or
+	// EvaluationState parameters.
+	StateUpdatedTimestamp *time.Time `type:"timestamp"`
+
+	// The state value for the alarm.
+	StateValue *string `type:"string" enum:"StateValue"`
+
+	// The statistic for the metric associated with the alarm, other than percentile.
+	// For percentile statistics, use ExtendedStatistic.
+	Statistic *string `type:"string" enum:"Statistic"`
+
+	// The value to compare with the specified statistic.
+	Threshold *float64 `type:"double"`
+
+	// In an alarm based on an anomaly detection model, this is the ID of the ANOMALY_DETECTION_BAND
+	// function used as the threshold for the alarm.
+	ThresholdMetricId *string `min:"1" type:"string"`
+
+	// Sets how this alarm is to handle missing data points. The valid values are
+	// breaching, notBreaching, ignore, and missing. For more information, see Configuring
+	// how CloudWatch alarms treat missing data (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data).
+	//
+	// If this parameter is omitted, the default behavior of missing is used.
+	TreatMissingData *string `min:"1" type:"string"`
+
+	// The unit of the metric associated with the alarm.
+	Unit *string `type:"string" enum:"StandardUnit"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s MetricAlarm) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s MetricAlarm) GoString() string {
+	return s.String()
+}
+
+// SetActionsEnabled sets the ActionsEnabled field's value.
+func (s *MetricAlarm) SetActionsEnabled(v bool) *MetricAlarm {
+	s.ActionsEnabled = &v
+	return s
+}
+
+// SetAlarmActions sets the AlarmActions field's value.
+func (s *MetricAlarm) SetAlarmActions(v []*string) *MetricAlarm {
+	s.AlarmActions = v
+	return s
+}
+
+// SetAlarmArn sets the AlarmArn field's value.
+func (s *MetricAlarm) SetAlarmArn(v string) *MetricAlarm {
+	s.AlarmArn = &v
+	return s
+}
+
+// SetAlarmConfigurationUpdatedTimestamp sets the AlarmConfigurationUpdatedTimestamp field's value.
+func (s *MetricAlarm) SetAlarmConfigurationUpdatedTimestamp(v time.Time) *MetricAlarm {
+	s.AlarmConfigurationUpdatedTimestamp = &v
+	return s
+}
+
+// SetAlarmDescription sets the AlarmDescription field's value.
+func (s *MetricAlarm) SetAlarmDescription(v string) *MetricAlarm {
+	s.AlarmDescription = &v
+	return s
+}
+
+// SetAlarmName sets the AlarmName field's value.
+func (s *MetricAlarm) SetAlarmName(v string) *MetricAlarm {
+	s.AlarmName = &v
+	return s
+}
+
+// SetComparisonOperator sets the ComparisonOperator field's value.
+func (s *MetricAlarm) SetComparisonOperator(v string) *MetricAlarm {
+	s.ComparisonOperator = &v
+	return s
+}
+
+// SetDatapointsToAlarm sets the DatapointsToAlarm field's value.
+func (s *MetricAlarm) SetDatapointsToAlarm(v int64) *MetricAlarm {
+	s.DatapointsToAlarm = &v
+	return s
+}
+
+// SetDimensions sets the Dimensions field's value.
+func (s *MetricAlarm) SetDimensions(v []*Dimension) *MetricAlarm {
+	s.Dimensions = v
+	return s
+}
+
+// SetEvaluateLowSampleCountPercentile sets the EvaluateLowSampleCountPercentile field's value.
+func (s *MetricAlarm) SetEvaluateLowSampleCountPercentile(v string) *MetricAlarm {
+	s.EvaluateLowSampleCountPercentile = &v
+	return s
+}
+
+// SetEvaluationPeriods sets the EvaluationPeriods field's value.
+func (s *MetricAlarm) SetEvaluationPeriods(v int64) *MetricAlarm {
+	s.EvaluationPeriods = &v
+	return s
+}
+
+// SetExtendedStatistic sets the ExtendedStatistic field's value.
+func (s *MetricAlarm) SetExtendedStatistic(v string) *MetricAlarm {
+	s.ExtendedStatistic = &v
+	return s
+}
+
+// SetInsufficientDataActions sets the InsufficientDataActions field's value.
+func (s *MetricAlarm) SetInsufficientDataActions(v []*string) *MetricAlarm {
+	s.InsufficientDataActions = v
+	return s
+}
+
+// SetMetricName sets the MetricName field's value.
+func (s *MetricAlarm) SetMetricName(v string) *MetricAlarm {
+	s.MetricName = &v
+	return s
+}
+
+// SetMetrics sets the Metrics field's value.
+func (s *MetricAlarm) SetMetrics(v []*MetricDataQuery) *MetricAlarm {
+	s.Metrics = v
+	return s
+}
+
+// SetNamespace sets the Namespace field's value.
+func (s *MetricAlarm) SetNamespace(v string) *MetricAlarm {
+	s.Namespace = &v
+	return s
+}
+
+// SetOKActions sets the OKActions field's value.
+func (s *MetricAlarm) SetOKActions(v []*string) *MetricAlarm {
+	s.OKActions = v
+	return s
+}
+
+// SetPeriod sets the Period field's value.
+func (s *MetricAlarm) SetPeriod(v int64) *MetricAlarm {
+	s.Period = &v
+	return s
+}
+
+// SetStateReason sets the StateReason field's value.
+func (s *MetricAlarm) SetStateReason(v string) *MetricAlarm {
+	s.StateReason = &v
+	return s
+}
+
+// SetStateReasonData sets the StateReasonData field's value.
+func (s *MetricAlarm) SetStateReasonData(v string) *MetricAlarm {
+	s.StateReasonData = &v
+	return s
+}
+
+// SetStateTransitionedTimestamp sets the StateTransitionedTimestamp field's value.
+func (s *MetricAlarm) SetStateTransitionedTimestamp(v time.Time) *MetricAlarm {
+	s.StateTransitionedTimestamp = &v
+	return s
+}
+
+// SetStateUpdatedTimestamp sets the StateUpdatedTimestamp field's value.
+func (s *MetricAlarm) SetStateUpdatedTimestamp(v time.Time) *MetricAlarm {
+	s.StateUpdatedTimestamp = &v
+	return s
+}
+
+// SetStateValue sets the StateValue field's value.
+func (s *MetricAlarm) SetStateValue(v string) *MetricAlarm {
+	s.StateValue = &v
+	return s
+}
+
+// SetStatistic sets the Statistic field's value.
+func (s *MetricAlarm) SetStatistic(v string) *MetricAlarm {
+	s.Statistic = &v
+	return s
+}
+
+// SetThreshold sets the Threshold field's value.
+func (s *MetricAlarm) SetThreshold(v float64) *MetricAlarm {
+	s.Threshold = &v
+	return s
+}
+
+// SetThresholdMetricId sets the ThresholdMetricId field's value.
+func (s *MetricAlarm) SetThresholdMetricId(v string) *MetricAlarm {
+	s.ThresholdMetricId = &v
+	return s
+}
+
+// SetTreatMissingData sets the TreatMissingData field's value.
+func (s *MetricAlarm) SetTreatMissingData(v string) *MetricAlarm {
+	s.TreatMissingData = &v
+	return s
+}
+
+// SetUnit sets the Unit field's value.
+func (s *MetricAlarm) SetUnit(v string) *MetricAlarm {
+	s.Unit = &v
+	return s
+}
+
+// This object includes parameters that you can use to provide information to
+// CloudWatch to help it build more accurate anomaly detection models.
+type MetricCharacteristics struct {
+	_ struct{} `type:"structure"`
+
+	// Set this parameter to true if values for this metric consistently include
+	// spikes that should not be considered anomalies. With this set to true,
+	// CloudWatch will expect to see spikes that occurred consistently during the
+	// model training period, and won't flag future similar spikes as anomalies.
+	PeriodicSpikes *bool `type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s MetricCharacteristics) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s MetricCharacteristics) GoString() string {
+	return s.String()
+}
+
+// SetPeriodicSpikes sets the PeriodicSpikes field's value.
+func (s *MetricCharacteristics) SetPeriodicSpikes(v bool) *MetricCharacteristics {
+	s.PeriodicSpikes = &v
+	return s
+}
+
+// This structure is used in both GetMetricData and PutMetricAlarm. The supported
+// use of this structure is different for those two operations.
+//
+// When used in GetMetricData, it indicates the metric data to return, and whether
+// this call is just retrieving a batch set of data for one metric, or is performing
+// a Metrics Insights query or a math expression. A single GetMetricData call
+// can include up to 500 MetricDataQuery structures.
+// +// When used in PutMetricAlarm, it enables you to create an alarm based on a +// metric math expression. Each MetricDataQuery in the array specifies either +// a metric to retrieve, or a math expression to be performed on retrieved metrics. +// A single PutMetricAlarm call can include up to 20 MetricDataQuery structures +// in the array. The 20 structures can include as many as 10 structures that +// contain a MetricStat parameter to retrieve a metric, and as many as 10 structures +// that contain the Expression parameter to perform a math expression. Of those +// Expression structures, one must have true as the value for ReturnData. The +// result of this expression is the value the alarm watches. +// +// Any expression used in a PutMetricAlarm operation must return a single time +// series. For more information, see Metric Math Syntax and Functions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax) +// in the Amazon CloudWatch User Guide. +// +// Some of the parameters of this structure also have different uses whether +// you are using this structure in a GetMetricData operation or a PutMetricAlarm +// operation. These differences are explained in the following parameter list. +type MetricDataQuery struct { + _ struct{} `type:"structure"` + + // The ID of the account where the metrics are located. + // + // If you are performing a GetMetricData operation in a monitoring account, + // use this to specify which account to retrieve this metric from. + // + // If you are performing a PutMetricAlarm operation, use this to specify which + // account contains the metric that the alarm is watching. + AccountId *string `min:"1" type:"string"` + + // This field can contain either a Metrics Insights query, or a metric math + // expression to be performed on the returned data. 
For more information about + // Metrics Insights queries, see Metrics Insights query components and syntax + // (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch-metrics-insights-querylanguage) + // in the Amazon CloudWatch User Guide. + // + // A math expression can use the Id of the other metrics or queries to refer + // to those metrics, and can also use the Id of other expressions to use the + // result of those expressions. For more information about metric math expressions, + // see Metric Math Syntax and Functions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax) + // in the Amazon CloudWatch User Guide. + // + // Within each MetricDataQuery object, you must specify either Expression or + // MetricStat but not both. + Expression *string `min:"1" type:"string"` + + // A short name used to tie this object to the results in the response. This + // name must be unique within a single call to GetMetricData. If you are performing + // math expressions on this set of data, this name represents that data and + // can serve as a variable in the mathematical expression. The valid characters + // are letters, numbers, and underscore. The first character must be a lowercase + // letter. + // + // Id is a required field + Id *string `min:"1" type:"string" required:"true"` + + // A human-readable label for this metric or expression. This is especially + // useful if this is an expression, so that you know what the value represents. + // If the metric or expression is shown in a CloudWatch dashboard widget, the + // label is shown. If Label is omitted, CloudWatch generates a default. + // + // You can put dynamic expressions into a label, so that it is more descriptive. + // For more information, see Using Dynamic Labels (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html). 
+ Label *string `type:"string"` + + // The metric to be returned, along with statistics, period, and units. Use + // this parameter only if this object is retrieving a metric and not performing + // a math expression on returned data. + // + // Within one MetricDataQuery object, you must specify either Expression or + // MetricStat but not both. + MetricStat *MetricStat `type:"structure"` + + // The granularity, in seconds, of the returned data points. For metrics with + // regular resolution, a period can be as short as one minute (60 seconds) and + // must be a multiple of 60. For high-resolution metrics that are collected + // at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, + // or any multiple of 60. High-resolution metrics are those metrics stored by + // a PutMetricData operation that includes a StorageResolution of 1 second. + Period *int64 `min:"1" type:"integer"` + + // When used in GetMetricData, this option indicates whether to return the timestamps + // and raw data values of this metric. If you are performing this call just + // to do math expressions and do not also need the raw data returned, you can + // specify false. If you omit this, the default of true is used. + // + // When used in PutMetricAlarm, specify true for the one expression result to + // use as the alarm. For all other metrics and expressions in the same PutMetricAlarm + // operation, specify ReturnData as False. + ReturnData *bool `type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricDataQuery) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricDataQuery) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricDataQuery) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricDataQuery"} + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + if s.Expression != nil && len(*s.Expression) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Expression", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + if s.Period != nil && *s.Period < 1 { + invalidParams.Add(request.NewErrParamMinValue("Period", 1)) + } + if s.MetricStat != nil { + if err := s.MetricStat.Validate(); err != nil { + invalidParams.AddNested("MetricStat", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *MetricDataQuery) SetAccountId(v string) *MetricDataQuery { + s.AccountId = &v + return s +} + +// SetExpression sets the Expression field's value. +func (s *MetricDataQuery) SetExpression(v string) *MetricDataQuery { + s.Expression = &v + return s +} + +// SetId sets the Id field's value. +func (s *MetricDataQuery) SetId(v string) *MetricDataQuery { + s.Id = &v + return s +} + +// SetLabel sets the Label field's value. +func (s *MetricDataQuery) SetLabel(v string) *MetricDataQuery { + s.Label = &v + return s +} + +// SetMetricStat sets the MetricStat field's value. +func (s *MetricDataQuery) SetMetricStat(v *MetricStat) *MetricDataQuery { + s.MetricStat = v + return s +} + +// SetPeriod sets the Period field's value. 
+func (s *MetricDataQuery) SetPeriod(v int64) *MetricDataQuery { + s.Period = &v + return s +} + +// SetReturnData sets the ReturnData field's value. +func (s *MetricDataQuery) SetReturnData(v bool) *MetricDataQuery { + s.ReturnData = &v + return s +} + +// A GetMetricData call returns an array of MetricDataResult structures. Each +// of these structures includes the data points for that metric, along with +// the timestamps of those data points and other identifying information. +type MetricDataResult struct { + _ struct{} `type:"structure"` + + // The short name you specified to represent this metric. + Id *string `min:"1" type:"string"` + + // The human-readable label associated with the data. + Label *string `type:"string"` + + // A list of messages with additional information about the data returned. + Messages []*MessageData `type:"list"` + + // The status of the returned data. Complete indicates that all data points + // in the requested time range were returned. PartialData means that an incomplete + // set of data points were returned. You can use the NextToken value that was + // returned and repeat your request to get more data points. NextToken is not + // returned if you are performing a math expression. InternalError indicates + // that an error occurred. Retry your request using NextToken, if present. + StatusCode *string `type:"string" enum:"StatusCode"` + + // The timestamps for the data points, formatted in Unix timestamp format. The + // number of timestamps always matches the number of values and the value for + // Timestamps[x] is Values[x]. + Timestamps []*time.Time `type:"list"` + + // The data points for the metric corresponding to Timestamps. The number of + // values always matches the number of timestamps and the timestamp for Values[x] + // is Timestamps[x]. + Values []*float64 `type:"list"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricDataResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricDataResult) GoString() string { + return s.String() +} + +// SetId sets the Id field's value. +func (s *MetricDataResult) SetId(v string) *MetricDataResult { + s.Id = &v + return s +} + +// SetLabel sets the Label field's value. +func (s *MetricDataResult) SetLabel(v string) *MetricDataResult { + s.Label = &v + return s +} + +// SetMessages sets the Messages field's value. +func (s *MetricDataResult) SetMessages(v []*MessageData) *MetricDataResult { + s.Messages = v + return s +} + +// SetStatusCode sets the StatusCode field's value. +func (s *MetricDataResult) SetStatusCode(v string) *MetricDataResult { + s.StatusCode = &v + return s +} + +// SetTimestamps sets the Timestamps field's value. +func (s *MetricDataResult) SetTimestamps(v []*time.Time) *MetricDataResult { + s.Timestamps = v + return s +} + +// SetValues sets the Values field's value. +func (s *MetricDataResult) SetValues(v []*float64) *MetricDataResult { + s.Values = v + return s +} + +// Encapsulates the information sent to either create a metric or add new values +// to be aggregated into an existing metric. +type MetricDatum struct { + _ struct{} `type:"structure"` + + // Array of numbers that is used along with the Values array. Each number in + // the Count array is the number of times the corresponding value in the Values + // array occurred during the period. 
+ // + // If you omit the Counts array, the default of 1 is used as the value for each + // count. If you include a Counts array, it must include the same amount of + // values as the Values array. + Counts []*float64 `type:"list"` + + // The dimensions associated with the metric. + Dimensions []*Dimension `type:"list"` + + // The name of the metric. + // + // MetricName is a required field + MetricName *string `min:"1" type:"string" required:"true"` + + // The statistical values for the metric. + StatisticValues *StatisticSet `type:"structure"` + + // Valid values are 1 and 60. Setting this to 1 specifies this metric as a high-resolution + // metric, so that CloudWatch stores the metric with sub-minute resolution down + // to one second. Setting this to 60 specifies this metric as a regular-resolution + // metric, which CloudWatch stores at 1-minute resolution. Currently, high resolution + // is available only for custom metrics. For more information about high-resolution + // metrics, see High-Resolution Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html#high-resolution-metrics) + // in the Amazon CloudWatch User Guide. + // + // This field is optional, if you do not specify it the default of 60 is used. + StorageResolution *int64 `min:"1" type:"integer"` + + // The time the metric data was received, expressed as the number of milliseconds + // since Jan 1, 1970 00:00:00 UTC. + Timestamp *time.Time `type:"timestamp"` + + // When you are using a Put operation, this defines what unit you want to use + // when storing the metric. + // + // In a Get operation, this displays the unit that is used for the metric. + Unit *string `type:"string" enum:"StandardUnit"` + + // The value for the metric. + // + // Although the parameter accepts numbers of type Double, CloudWatch rejects + // values that are either too small or too large. Values must be in the range + // of -2^360 to 2^360. 
In addition, special values (for example, NaN, +Infinity, + // -Infinity) are not supported. + Value *float64 `type:"double"` + + // Array of numbers representing the values for the metric during the period. + // Each unique value is listed just once in this array, and the corresponding + // number in the Counts array specifies the number of times that value occurred + // during the period. You can include up to 150 unique values in each PutMetricData + // action that specifies a Values array. + // + // Although the Values array accepts numbers of type Double, CloudWatch rejects + // values that are either too small or too large. Values must be in the range + // of -2^360 to 2^360. In addition, special values (for example, NaN, +Infinity, + // -Infinity) are not supported. + Values []*float64 `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricDatum) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricDatum) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *MetricDatum) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricDatum"} + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.MetricName != nil && len(*s.MetricName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricName", 1)) + } + if s.StorageResolution != nil && *s.StorageResolution < 1 { + invalidParams.Add(request.NewErrParamMinValue("StorageResolution", 1)) + } + if s.Dimensions != nil { + for i, v := range s.Dimensions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.StatisticValues != nil { + if err := s.StatisticValues.Validate(); err != nil { + invalidParams.AddNested("StatisticValues", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCounts sets the Counts field's value. +func (s *MetricDatum) SetCounts(v []*float64) *MetricDatum { + s.Counts = v + return s +} + +// SetDimensions sets the Dimensions field's value. +func (s *MetricDatum) SetDimensions(v []*Dimension) *MetricDatum { + s.Dimensions = v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *MetricDatum) SetMetricName(v string) *MetricDatum { + s.MetricName = &v + return s +} + +// SetStatisticValues sets the StatisticValues field's value. +func (s *MetricDatum) SetStatisticValues(v *StatisticSet) *MetricDatum { + s.StatisticValues = v + return s +} + +// SetStorageResolution sets the StorageResolution field's value. +func (s *MetricDatum) SetStorageResolution(v int64) *MetricDatum { + s.StorageResolution = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *MetricDatum) SetTimestamp(v time.Time) *MetricDatum { + s.Timestamp = &v + return s +} + +// SetUnit sets the Unit field's value. 
+func (s *MetricDatum) SetUnit(v string) *MetricDatum { + s.Unit = &v + return s +} + +// SetValue sets the Value field's value. +func (s *MetricDatum) SetValue(v float64) *MetricDatum { + s.Value = &v + return s +} + +// SetValues sets the Values field's value. +func (s *MetricDatum) SetValues(v []*float64) *MetricDatum { + s.Values = v + return s +} + +// Indicates the CloudWatch math expression that provides the time series the +// anomaly detector uses as input. The designated math expression must return +// a single time series. +type MetricMathAnomalyDetector struct { + _ struct{} `type:"structure"` + + // An array of metric data query structures that enables you to create an anomaly + // detector based on the result of a metric math expression. Each item in MetricDataQueries + // gets a metric or performs a math expression. One item in MetricDataQueries + // is the expression that provides the time series that the anomaly detector + // uses as input. Designate the expression by setting ReturnData to true for + // this object in the array. For all other expressions and metrics, set ReturnData + // to false. The designated expression must return a single time series. + MetricDataQueries []*MetricDataQuery `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricMathAnomalyDetector) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s MetricMathAnomalyDetector) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricMathAnomalyDetector) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricMathAnomalyDetector"} + if s.MetricDataQueries != nil { + for i, v := range s.MetricDataQueries { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MetricDataQueries", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMetricDataQueries sets the MetricDataQueries field's value. +func (s *MetricMathAnomalyDetector) SetMetricDataQueries(v []*MetricDataQuery) *MetricMathAnomalyDetector { + s.MetricDataQueries = v + return s +} + +// This structure defines the metric to be returned, along with the statistics, +// period, and units. +type MetricStat struct { + _ struct{} `type:"structure"` + + // The metric to return, including the metric name, namespace, and dimensions. + // + // Metric is a required field + Metric *Metric `type:"structure" required:"true"` + + // The granularity, in seconds, of the returned data points. For metrics with + // regular resolution, a period can be as short as one minute (60 seconds) and + // must be a multiple of 60. For high-resolution metrics that are collected + // at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, + // or any multiple of 60. High-resolution metrics are those metrics stored by + // a PutMetricData call that includes a StorageResolution of 1 second. + // + // If the StartTime parameter specifies a time stamp that is greater than 3 + // hours ago, you must specify the period as follows or no data points in that + // time range is returned: + // + // * Start time between 3 hours and 15 days ago - Use a multiple of 60 seconds + // (1 minute). 
+ // + // * Start time between 15 and 63 days ago - Use a multiple of 300 seconds + // (5 minutes). + // + // * Start time greater than 63 days ago - Use a multiple of 3600 seconds + // (1 hour). + // + // Period is a required field + Period *int64 `min:"1" type:"integer" required:"true"` + + // The statistic to return. It can include any CloudWatch statistic or extended + // statistic. + // + // Stat is a required field + Stat *string `type:"string" required:"true"` + + // When you are using a Put operation, this defines what unit you want to use + // when storing the metric. + // + // In a Get operation, if you omit Unit then all data that was collected with + // any unit is returned, along with the corresponding units that were specified + // when the data was reported to CloudWatch. If you specify a unit, the operation + // returns only data that was collected with that unit specified. If you specify + // a unit that does not match the data collected, the results of the operation + // are null. CloudWatch does not perform unit conversions. + Unit *string `type:"string" enum:"StandardUnit"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricStat) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricStat) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *MetricStat) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricStat"} + if s.Metric == nil { + invalidParams.Add(request.NewErrParamRequired("Metric")) + } + if s.Period == nil { + invalidParams.Add(request.NewErrParamRequired("Period")) + } + if s.Period != nil && *s.Period < 1 { + invalidParams.Add(request.NewErrParamMinValue("Period", 1)) + } + if s.Stat == nil { + invalidParams.Add(request.NewErrParamRequired("Stat")) + } + if s.Metric != nil { + if err := s.Metric.Validate(); err != nil { + invalidParams.AddNested("Metric", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMetric sets the Metric field's value. +func (s *MetricStat) SetMetric(v *Metric) *MetricStat { + s.Metric = v + return s +} + +// SetPeriod sets the Period field's value. +func (s *MetricStat) SetPeriod(v int64) *MetricStat { + s.Period = &v + return s +} + +// SetStat sets the Stat field's value. +func (s *MetricStat) SetStat(v string) *MetricStat { + s.Stat = &v + return s +} + +// SetUnit sets the Unit field's value. +func (s *MetricStat) SetUnit(v string) *MetricStat { + s.Unit = &v + return s +} + +// This structure contains the configuration information about one metric stream. +type MetricStreamEntry struct { + _ struct{} `type:"structure"` + + // The ARN of the metric stream. + Arn *string `min:"1" type:"string"` + + // The date that the metric stream was originally created. + CreationDate *time.Time `type:"timestamp"` + + // The ARN of the Kinesis Firehose devlivery stream that is used for this metric + // stream. + FirehoseArn *string `min:"1" type:"string"` + + // The date that the configuration of this metric stream was most recently updated. + LastUpdateDate *time.Time `type:"timestamp"` + + // The name of the metric stream. + Name *string `min:"1" type:"string"` + + // The output format of this metric stream. 
Valid values are json, opentelemetry1.0, + // and opentelemetry0.7. + OutputFormat *string `min:"1" type:"string"` + + // The current state of this stream. Valid values are running and stopped. + State *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricStreamEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricStreamEntry) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *MetricStreamEntry) SetArn(v string) *MetricStreamEntry { + s.Arn = &v + return s +} + +// SetCreationDate sets the CreationDate field's value. +func (s *MetricStreamEntry) SetCreationDate(v time.Time) *MetricStreamEntry { + s.CreationDate = &v + return s +} + +// SetFirehoseArn sets the FirehoseArn field's value. +func (s *MetricStreamEntry) SetFirehoseArn(v string) *MetricStreamEntry { + s.FirehoseArn = &v + return s +} + +// SetLastUpdateDate sets the LastUpdateDate field's value. +func (s *MetricStreamEntry) SetLastUpdateDate(v time.Time) *MetricStreamEntry { + s.LastUpdateDate = &v + return s +} + +// SetName sets the Name field's value. +func (s *MetricStreamEntry) SetName(v string) *MetricStreamEntry { + s.Name = &v + return s +} + +// SetOutputFormat sets the OutputFormat field's value. +func (s *MetricStreamEntry) SetOutputFormat(v string) *MetricStreamEntry { + s.OutputFormat = &v + return s +} + +// SetState sets the State field's value. 
+func (s *MetricStreamEntry) SetState(v string) *MetricStreamEntry { + s.State = &v + return s +} + +// This structure contains a metric namespace and optionally, a list of metric +// names, to either include in a metric stream or exclude from a metric stream. +// +// A metric stream's filters can include up to 1000 total names. This limit +// applies to the sum of namespace names and metric names in the filters. For +// example, this could include 10 metric namespace filters with 99 metrics each, +// or 20 namespace filters with 49 metrics specified in each filter. +type MetricStreamFilter struct { + _ struct{} `type:"structure"` + + // The names of the metrics to either include or exclude from the metric stream. + // + // If you omit this parameter, all metrics in the namespace are included or + // excluded, depending on whether this filter is specified as an exclude filter + // or an include filter. + // + // Each metric name can contain only ASCII printable characters (ASCII range + // 32 through 126). Each metric name must contain at least one non-whitespace + // character. + MetricNames []*string `type:"list"` + + // The name of the metric namespace for this filter. + // + // The namespace can contain only ASCII printable characters (ASCII range 32 + // through 126). It must contain at least one non-whitespace character. + Namespace *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricStreamFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s MetricStreamFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricStreamFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricStreamFilter"} + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMetricNames sets the MetricNames field's value. +func (s *MetricStreamFilter) SetMetricNames(v []*string) *MetricStreamFilter { + s.MetricNames = v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *MetricStreamFilter) SetNamespace(v string) *MetricStreamFilter { + s.Namespace = &v + return s +} + +// By default, a metric stream always sends the MAX, MIN, SUM, and SAMPLECOUNT +// statistics for each metric that is streamed. This structure contains information +// for one metric that includes additional statistics in the stream. For more +// information about statistics, see CloudWatch, listed in CloudWatch statistics +// definitions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html.html). +type MetricStreamStatisticsConfiguration struct { + _ struct{} `type:"structure"` + + // The list of additional statistics that are to be streamed for the metrics + // listed in the IncludeMetrics array in this structure. This list can include + // as many as 20 statistics. + // + // If the OutputFormat for the stream is opentelemetry1.0 or opentelemetry0.7, + // the only valid values are p?? percentile statistics such as p90, p99 and + // so on. + // + // If the OutputFormat for the stream is json, the valid values include the + // abbreviations for all of the statistics listed in CloudWatch statistics definitions + // (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html.html). 
+	// For example, this includes tm98, wm90, PR(:300), and so on.
+	//
+	// AdditionalStatistics is a required field
+	AdditionalStatistics []*string `type:"list" required:"true"`
+
+	// An array of metric name and namespace pairs that stream the additional statistics
+	// listed in the value of the AdditionalStatistics parameter. There can be as
+	// many as 100 pairs in the array.
+	//
+	// All metrics that match the combination of metric name and namespace will
+	// be streamed with the additional statistics, no matter their dimensions.
+	//
+	// IncludeMetrics is a required field
+	IncludeMetrics []*MetricStreamStatisticsMetric `type:"list" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s MetricStreamStatisticsConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s MetricStreamStatisticsConfiguration) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *MetricStreamStatisticsConfiguration) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "MetricStreamStatisticsConfiguration"}
+	if s.AdditionalStatistics == nil {
+		invalidParams.Add(request.NewErrParamRequired("AdditionalStatistics"))
+	}
+	if s.IncludeMetrics == nil {
+		invalidParams.Add(request.NewErrParamRequired("IncludeMetrics"))
+	}
+	// Validate each nested IncludeMetrics entry, recording its index in the
+	// error context so the failing element can be identified.
+	if s.IncludeMetrics != nil {
+		for i, v := range s.IncludeMetrics {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "IncludeMetrics", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAdditionalStatistics sets the AdditionalStatistics field's value.
+func (s *MetricStreamStatisticsConfiguration) SetAdditionalStatistics(v []*string) *MetricStreamStatisticsConfiguration {
+	s.AdditionalStatistics = v
+	return s
+}
+
+// SetIncludeMetrics sets the IncludeMetrics field's value.
+func (s *MetricStreamStatisticsConfiguration) SetIncludeMetrics(v []*MetricStreamStatisticsMetric) *MetricStreamStatisticsConfiguration {
+	s.IncludeMetrics = v
+	return s
+}
+
+// This object contains the information for one metric that is to be streamed
+// with additional statistics.
+type MetricStreamStatisticsMetric struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the metric.
+	//
+	// MetricName is a required field
+	MetricName *string `min:"1" type:"string" required:"true"`
+
+	// The namespace of the metric.
+	//
+	// Namespace is a required field
+	Namespace *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s MetricStreamStatisticsMetric) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s MetricStreamStatisticsMetric) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *MetricStreamStatisticsMetric) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "MetricStreamStatisticsMetric"}
+	if s.MetricName == nil {
+		invalidParams.Add(request.NewErrParamRequired("MetricName"))
+	}
+	if s.MetricName != nil && len(*s.MetricName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("MetricName", 1))
+	}
+	if s.Namespace == nil {
+		invalidParams.Add(request.NewErrParamRequired("Namespace"))
+	}
+	if s.Namespace != nil && len(*s.Namespace) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Namespace", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetMetricName sets the MetricName field's value.
+func (s *MetricStreamStatisticsMetric) SetMetricName(v string) *MetricStreamStatisticsMetric {
+	s.MetricName = &v
+	return s
+}
+
+// SetNamespace sets the Namespace field's value.
+func (s *MetricStreamStatisticsMetric) SetNamespace(v string) *MetricStreamStatisticsMetric {
+	s.Namespace = &v
+	return s
+}
+
+// This array is empty if the API operation was successful for all the rules
+// specified in the request. If the operation could not process one of the rules,
+// the following data is returned for each of those rules.
+type PartialFailure struct {
+	_ struct{} `type:"structure"`
+
+	// The type of error.
+	ExceptionType *string `type:"string"`
+
+	// The code of the error.
+	FailureCode *string `type:"string"`
+
+	// A description of the error.
+	FailureDescription *string `type:"string"`
+
+	// The specified rule that could not be deleted.
+	FailureResource *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PartialFailure) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PartialFailure) GoString() string {
+	return s.String()
+}
+
+// SetExceptionType sets the ExceptionType field's value.
+func (s *PartialFailure) SetExceptionType(v string) *PartialFailure {
+	s.ExceptionType = &v
+	return s
+}
+
+// SetFailureCode sets the FailureCode field's value.
+func (s *PartialFailure) SetFailureCode(v string) *PartialFailure {
+	s.FailureCode = &v
+	return s
+}
+
+// SetFailureDescription sets the FailureDescription field's value.
+func (s *PartialFailure) SetFailureDescription(v string) *PartialFailure {
+	s.FailureDescription = &v
+	return s
+}
+
+// SetFailureResource sets the FailureResource field's value.
+func (s *PartialFailure) SetFailureResource(v string) *PartialFailure {
+	s.FailureResource = &v
+	return s
+}
+
+// PutAnomalyDetectorInput is the input for the PutAnomalyDetector operation.
+type PutAnomalyDetectorInput struct {
+	_ struct{} `type:"structure"`
+
+	// The configuration specifies details about how the anomaly detection model
+	// is to be trained, including time ranges to exclude when training and updating
+	// the model. You can specify as many as 10 time ranges.
+	//
+	// The configuration can also include the time zone to use for the metric.
+	Configuration *AnomalyDetectorConfiguration `type:"structure"`
+
+	// The metric dimensions to create the anomaly detection model for.
+	//
+	// Deprecated: Use SingleMetricAnomalyDetector.
+	Dimensions []*Dimension `deprecated:"true" type:"list"`
+
+	// Use this object to include parameters to provide information about your metric
+	// to CloudWatch to help it build more accurate anomaly detection models. Currently,
+	// it includes the PeriodicSpikes parameter.
+	MetricCharacteristics *MetricCharacteristics `type:"structure"`
+
+	// The metric math anomaly detector to be created.
+	//
+	// When using MetricMathAnomalyDetector, you cannot include the following parameters
+	// in the same operation:
+	//
+	// * Dimensions
+	//
+	// * MetricName
+	//
+	// * Namespace
+	//
+	// * Stat
+	//
+	// * the SingleMetricAnomalyDetector parameters of PutAnomalyDetectorInput
+	//
+	// Instead, specify the metric math anomaly detector attributes as part of the
+	// property MetricMathAnomalyDetector.
+	MetricMathAnomalyDetector *MetricMathAnomalyDetector `type:"structure"`
+
+	// The name of the metric to create the anomaly detection model for.
+	//
+	// Deprecated: Use SingleMetricAnomalyDetector.
+	MetricName *string `min:"1" deprecated:"true" type:"string"`
+
+	// The namespace of the metric to create the anomaly detection model for.
+	//
+	// Deprecated: Use SingleMetricAnomalyDetector.
+	Namespace *string `min:"1" deprecated:"true" type:"string"`
+
+	// A single metric anomaly detector to be created.
+	//
+	// When using SingleMetricAnomalyDetector, you cannot include the following
+	// parameters in the same operation:
+	//
+	// * Dimensions
+	//
+	// * MetricName
+	//
+	// * Namespace
+	//
+	// * Stat
+	//
+	// * the MetricMathAnomalyDetector parameters of PutAnomalyDetectorInput
+	//
+	// Instead, specify the single metric anomaly detector attributes as part of
+	// the property SingleMetricAnomalyDetector.
+	SingleMetricAnomalyDetector *SingleMetricAnomalyDetector `type:"structure"`
+
+	// The statistic to use for the metric and the anomaly detection model.
+	//
+	// Deprecated: Use SingleMetricAnomalyDetector.
+	Stat *string `deprecated:"true" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutAnomalyDetectorInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutAnomalyDetectorInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutAnomalyDetectorInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutAnomalyDetectorInput"}
+	if s.MetricName != nil && len(*s.MetricName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("MetricName", 1))
+	}
+	if s.Namespace != nil && len(*s.Namespace) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Namespace", 1))
+	}
+	if s.Configuration != nil {
+		if err := s.Configuration.Validate(); err != nil {
+			invalidParams.AddNested("Configuration", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.Dimensions != nil {
+		for i, v := range s.Dimensions {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+	if s.MetricMathAnomalyDetector != nil {
+		if err := s.MetricMathAnomalyDetector.Validate(); err != nil {
+			invalidParams.AddNested("MetricMathAnomalyDetector", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.SingleMetricAnomalyDetector != nil {
+		if err := s.SingleMetricAnomalyDetector.Validate(); err != nil {
+			invalidParams.AddNested("SingleMetricAnomalyDetector", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetConfiguration sets the Configuration field's value.
+func (s *PutAnomalyDetectorInput) SetConfiguration(v *AnomalyDetectorConfiguration) *PutAnomalyDetectorInput {
+	s.Configuration = v
+	return s
+}
+
+// SetDimensions sets the Dimensions field's value.
+func (s *PutAnomalyDetectorInput) SetDimensions(v []*Dimension) *PutAnomalyDetectorInput {
+	s.Dimensions = v
+	return s
+}
+
+// SetMetricCharacteristics sets the MetricCharacteristics field's value.
+func (s *PutAnomalyDetectorInput) SetMetricCharacteristics(v *MetricCharacteristics) *PutAnomalyDetectorInput {
+	s.MetricCharacteristics = v
+	return s
+}
+
+// SetMetricMathAnomalyDetector sets the MetricMathAnomalyDetector field's value.
+func (s *PutAnomalyDetectorInput) SetMetricMathAnomalyDetector(v *MetricMathAnomalyDetector) *PutAnomalyDetectorInput {
+	s.MetricMathAnomalyDetector = v
+	return s
+}
+
+// SetMetricName sets the MetricName field's value.
+func (s *PutAnomalyDetectorInput) SetMetricName(v string) *PutAnomalyDetectorInput {
+	s.MetricName = &v
+	return s
+}
+
+// SetNamespace sets the Namespace field's value.
+func (s *PutAnomalyDetectorInput) SetNamespace(v string) *PutAnomalyDetectorInput {
+	s.Namespace = &v
+	return s
+}
+
+// SetSingleMetricAnomalyDetector sets the SingleMetricAnomalyDetector field's value.
+func (s *PutAnomalyDetectorInput) SetSingleMetricAnomalyDetector(v *SingleMetricAnomalyDetector) *PutAnomalyDetectorInput {
+	s.SingleMetricAnomalyDetector = v
+	return s
+}
+
+// SetStat sets the Stat field's value.
+func (s *PutAnomalyDetectorInput) SetStat(v string) *PutAnomalyDetectorInput {
+	s.Stat = &v
+	return s
+}
+
+// PutAnomalyDetectorOutput is the response from the PutAnomalyDetector
+// operation; it carries no data.
+type PutAnomalyDetectorOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutAnomalyDetectorOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutAnomalyDetectorOutput) GoString() string {
+	return s.String()
+}
+
+// PutCompositeAlarmInput is the input for the PutCompositeAlarm operation.
+type PutCompositeAlarmInput struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates whether actions should be executed during any changes to the alarm
+	// state of the composite alarm. The default is TRUE.
+	ActionsEnabled *bool `type:"boolean"`
+
+	// Actions will be suppressed if the suppressor alarm is in the ALARM state.
+	// ActionsSuppressor can be an AlarmName or an Amazon Resource Name (ARN) from
+	// an existing alarm.
+	ActionsSuppressor *string `min:"1" type:"string"`
+
+	// The maximum time in seconds that the composite alarm waits after suppressor
+	// alarm goes out of the ALARM state. After this time, the composite alarm performs
+	// its actions.
+	//
+	// ExtensionPeriod is required only when ActionsSuppressor is specified.
+	ActionsSuppressorExtensionPeriod *int64 `type:"integer"`
+
+	// The maximum time in seconds that the composite alarm waits for the suppressor
+	// alarm to go into the ALARM state. After this time, the composite alarm performs
+	// its actions.
+	//
+	// WaitPeriod is required only when ActionsSuppressor is specified.
+	ActionsSuppressorWaitPeriod *int64 `type:"integer"`
+
+	// The actions to execute when this alarm transitions to the ALARM state from
+	// any other state. Each action is specified as an Amazon Resource Name (ARN).
+	//
+	// Valid Values:
+	//
+	// Amazon SNS actions:
+	//
+	// arn:aws:sns:region:account-id:sns-topic-name
+	//
+	// Lambda actions:
+	//
+	// * Invoke the latest version of a Lambda function: arn:aws:lambda:region:account-id:function:function-name
+	//
+	// * Invoke a specific version of a Lambda function: arn:aws:lambda:region:account-id:function:function-name:version-number
+	//
+	// * Invoke a function by using an alias Lambda function: arn:aws:lambda:region:account-id:function:function-name:alias-name
+	//
+	// Systems Manager actions:
+	//
+	// arn:aws:ssm:region:account-id:opsitem:severity
+	AlarmActions []*string `type:"list"`
+
+	// The description for the composite alarm.
+	AlarmDescription *string `type:"string"`
+
+	// The name for the composite alarm. This name must be unique within the Region.
+	//
+	// AlarmName is a required field
+	AlarmName *string `min:"1" type:"string" required:"true"`
+
+	// An expression that specifies which other alarms are to be evaluated to determine
+	// this composite alarm's state. For each alarm that you reference, you designate
+	// a function that specifies whether that alarm needs to be in ALARM state,
+	// OK state, or INSUFFICIENT_DATA state. You can use operators (AND, OR and
+	// NOT) to combine multiple functions in a single expression. You can use parenthesis
+	// to logically group the functions in your expression.
+	//
+	// You can use either alarm names or ARNs to reference the other alarms that
+	// are to be evaluated.
+	//
+	// Functions can include the following:
+	//
+	// * ALARM("alarm-name or alarm-ARN") is TRUE if the named alarm is in ALARM
+	// state.
+	//
+	// * OK("alarm-name or alarm-ARN") is TRUE if the named alarm is in OK state.
+	//
+	// * INSUFFICIENT_DATA("alarm-name or alarm-ARN") is TRUE if the named alarm
+	// is in INSUFFICIENT_DATA state.
+	//
+	// * TRUE always evaluates to TRUE.
+	//
+	// * FALSE always evaluates to FALSE.
+	//
+	// TRUE and FALSE are useful for testing a complex AlarmRule structure, and
+	// for testing your alarm actions.
+	//
+	// Alarm names specified in AlarmRule can be surrounded with double-quotes ("),
+	// but do not have to be.
+	//
+	// The following are some examples of AlarmRule:
+	//
+	// * ALARM(CPUUtilizationTooHigh) AND ALARM(DiskReadOpsTooHigh) specifies
+	// that the composite alarm goes into ALARM state only if both CPUUtilizationTooHigh
+	// and DiskReadOpsTooHigh alarms are in ALARM state.
+	//
+	// * ALARM(CPUUtilizationTooHigh) AND NOT ALARM(DeploymentInProgress) specifies
+	// that the alarm goes to ALARM state if CPUUtilizationTooHigh is in ALARM
+	// state and DeploymentInProgress is not in ALARM state. This example reduces
+	// alarm noise during a known deployment window.
+	//
+	// * (ALARM(CPUUtilizationTooHigh) OR ALARM(DiskReadOpsTooHigh)) AND OK(NetworkOutTooHigh)
+	// goes into ALARM state if CPUUtilizationTooHigh OR DiskReadOpsTooHigh is
+	// in ALARM state, and if NetworkOutTooHigh is in OK state. This provides
+	// another example of using a composite alarm to prevent noise. This rule
+	// ensures that you are not notified with an alarm action on high CPU or
+	// disk usage if a known network problem is also occurring.
+	//
+	// The AlarmRule can specify as many as 100 "children" alarms. The AlarmRule
+	// expression can have as many as 500 elements. Elements are child alarms, TRUE
+	// or FALSE statements, and parentheses.
+	//
+	// AlarmRule is a required field
+	AlarmRule *string `min:"1" type:"string" required:"true"`
+
+	// The actions to execute when this alarm transitions to the INSUFFICIENT_DATA
+	// state from any other state. Each action is specified as an Amazon Resource
+	// Name (ARN).
+	//
+	// Valid Values:
+	//
+	// Amazon SNS actions:
+	//
+	// arn:aws:sns:region:account-id:sns-topic-name
+	//
+	// Lambda actions:
+	//
+	// * Invoke the latest version of a Lambda function: arn:aws:lambda:region:account-id:function:function-name
+	//
+	// * Invoke a specific version of a Lambda function: arn:aws:lambda:region:account-id:function:function-name:version-number
+	//
+	// * Invoke a function by using an alias Lambda function: arn:aws:lambda:region:account-id:function:function-name:alias-name
+	InsufficientDataActions []*string `type:"list"`
+
+	// The actions to execute when this alarm transitions to an OK state from any
+	// other state. Each action is specified as an Amazon Resource Name (ARN).
+	//
+	// Valid Values:
+	//
+	// Amazon SNS actions:
+	//
+	// arn:aws:sns:region:account-id:sns-topic-name
+	//
+	// Lambda actions:
+	//
+	// * Invoke the latest version of a Lambda function: arn:aws:lambda:region:account-id:function:function-name
+	//
+	// * Invoke a specific version of a Lambda function: arn:aws:lambda:region:account-id:function:function-name:version-number
+	//
+	// * Invoke a function by using an alias Lambda function: arn:aws:lambda:region:account-id:function:function-name:alias-name
+	OKActions []*string `type:"list"`
+
+	// A list of key-value pairs to associate with the alarm. You can associate
+	// as many as 50 tags with an alarm. To be able to associate tags with the alarm
+	// when you create the alarm, you must have the cloudwatch:TagResource permission.
+	//
+	// Tags can help you organize and categorize your resources. You can also use
+	// them to scope user permissions by granting a user permission to access or
+	// change only resources with certain tag values.
+	//
+	// If you are using this operation to update an existing alarm, any tags you
+	// specify in this parameter are ignored. To change the tags of an existing
+	// alarm, use TagResource (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_TagResource.html)
+	// or UntagResource (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_UntagResource.html).
+	Tags []*Tag `type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutCompositeAlarmInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutCompositeAlarmInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutCompositeAlarmInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutCompositeAlarmInput"}
+	if s.ActionsSuppressor != nil && len(*s.ActionsSuppressor) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ActionsSuppressor", 1))
+	}
+	if s.AlarmName == nil {
+		invalidParams.Add(request.NewErrParamRequired("AlarmName"))
+	}
+	if s.AlarmName != nil && len(*s.AlarmName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("AlarmName", 1))
+	}
+	if s.AlarmRule == nil {
+		invalidParams.Add(request.NewErrParamRequired("AlarmRule"))
+	}
+	if s.AlarmRule != nil && len(*s.AlarmRule) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("AlarmRule", 1))
+	}
+	if s.Tags != nil {
+		for i, v := range s.Tags {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetActionsEnabled sets the ActionsEnabled field's value.
+func (s *PutCompositeAlarmInput) SetActionsEnabled(v bool) *PutCompositeAlarmInput {
+	s.ActionsEnabled = &v
+	return s
+}
+
+// SetActionsSuppressor sets the ActionsSuppressor field's value.
+func (s *PutCompositeAlarmInput) SetActionsSuppressor(v string) *PutCompositeAlarmInput {
+	s.ActionsSuppressor = &v
+	return s
+}
+
+// SetActionsSuppressorExtensionPeriod sets the ActionsSuppressorExtensionPeriod field's value.
+func (s *PutCompositeAlarmInput) SetActionsSuppressorExtensionPeriod(v int64) *PutCompositeAlarmInput {
+	s.ActionsSuppressorExtensionPeriod = &v
+	return s
+}
+
+// SetActionsSuppressorWaitPeriod sets the ActionsSuppressorWaitPeriod field's value.
+func (s *PutCompositeAlarmInput) SetActionsSuppressorWaitPeriod(v int64) *PutCompositeAlarmInput {
+	s.ActionsSuppressorWaitPeriod = &v
+	return s
+}
+
+// SetAlarmActions sets the AlarmActions field's value.
+func (s *PutCompositeAlarmInput) SetAlarmActions(v []*string) *PutCompositeAlarmInput {
+	s.AlarmActions = v
+	return s
+}
+
+// SetAlarmDescription sets the AlarmDescription field's value.
+func (s *PutCompositeAlarmInput) SetAlarmDescription(v string) *PutCompositeAlarmInput {
+	s.AlarmDescription = &v
+	return s
+}
+
+// SetAlarmName sets the AlarmName field's value.
+func (s *PutCompositeAlarmInput) SetAlarmName(v string) *PutCompositeAlarmInput {
+	s.AlarmName = &v
+	return s
+}
+
+// SetAlarmRule sets the AlarmRule field's value.
+func (s *PutCompositeAlarmInput) SetAlarmRule(v string) *PutCompositeAlarmInput {
+	s.AlarmRule = &v
+	return s
+}
+
+// SetInsufficientDataActions sets the InsufficientDataActions field's value.
+func (s *PutCompositeAlarmInput) SetInsufficientDataActions(v []*string) *PutCompositeAlarmInput {
+	s.InsufficientDataActions = v
+	return s
+}
+
+// SetOKActions sets the OKActions field's value.
+func (s *PutCompositeAlarmInput) SetOKActions(v []*string) *PutCompositeAlarmInput {
+	s.OKActions = v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *PutCompositeAlarmInput) SetTags(v []*Tag) *PutCompositeAlarmInput {
+	s.Tags = v
+	return s
+}
+
+// PutCompositeAlarmOutput is the response from the PutCompositeAlarm
+// operation; it carries no data.
+type PutCompositeAlarmOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutCompositeAlarmOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutCompositeAlarmOutput) GoString() string {
+	return s.String()
+}
+
+// PutDashboardInput is the input for the PutDashboard operation.
+type PutDashboardInput struct {
+	_ struct{} `type:"structure"`
+
+	// The detailed information about the dashboard in JSON format, including the
+	// widgets to include and their location on the dashboard. This parameter is
+	// required.
+	//
+	// For more information about the syntax, see Dashboard Body Structure and Syntax
+	// (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CloudWatch-Dashboard-Body-Structure.html).
+	//
+	// DashboardBody is a required field
+	DashboardBody *string `type:"string" required:"true"`
+
+	// The name of the dashboard. If a dashboard with this name already exists,
+	// this call modifies that dashboard, replacing its current contents. Otherwise,
+	// a new dashboard is created. The maximum length is 255, and valid characters
+	// are A-Z, a-z, 0-9, "-", and "_". This parameter is required.
+	//
+	// DashboardName is a required field
+	DashboardName *string `type:"string" required:"true"`
+
+	// A list of key-value pairs (tags) to associate with the dashboard.
+	Tags []*Tag `type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutDashboardInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutDashboardInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutDashboardInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutDashboardInput"}
+	if s.DashboardBody == nil {
+		invalidParams.Add(request.NewErrParamRequired("DashboardBody"))
+	}
+	if s.DashboardName == nil {
+		invalidParams.Add(request.NewErrParamRequired("DashboardName"))
+	}
+	if s.Tags != nil {
+		for i, v := range s.Tags {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDashboardBody sets the DashboardBody field's value.
+func (s *PutDashboardInput) SetDashboardBody(v string) *PutDashboardInput {
+	s.DashboardBody = &v
+	return s
+}
+
+// SetDashboardName sets the DashboardName field's value.
+func (s *PutDashboardInput) SetDashboardName(v string) *PutDashboardInput {
+	s.DashboardName = &v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *PutDashboardInput) SetTags(v []*Tag) *PutDashboardInput {
+	s.Tags = v
+	return s
+}
+
+type PutDashboardOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If the input for PutDashboard was correct and the dashboard was successfully
+	// created or modified, this result is empty.
+	//
+	// If this result includes only warning messages, then the input was valid enough
+	// for the dashboard to be created or modified, but some elements of the dashboard
+	// might not render.
+	//
+	// If this result includes error messages, the input was not valid and the operation
+	// failed.
+	DashboardValidationMessages []*DashboardValidationMessage `type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutDashboardOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutDashboardOutput) GoString() string {
+	return s.String()
+}
+
+// SetDashboardValidationMessages sets the DashboardValidationMessages field's value.
+func (s *PutDashboardOutput) SetDashboardValidationMessages(v []*DashboardValidationMessage) *PutDashboardOutput {
+	s.DashboardValidationMessages = v
+	return s
+}
+
+// PutInsightRuleInput is the input for the PutInsightRule operation.
+type PutInsightRuleInput struct {
+	_ struct{} `type:"structure"`
+
+	// The definition of the rule, as a JSON object. For details on the valid syntax,
+	// see Contributor Insights Rule Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights-RuleSyntax.html).
+	//
+	// RuleDefinition is a required field
+	RuleDefinition *string `min:"1" type:"string" required:"true"`
+
+	// A unique name for the rule.
+	//
+	// RuleName is a required field
+	RuleName *string `min:"1" type:"string" required:"true"`
+
+	// The state of the rule. Valid values are ENABLED and DISABLED.
+	RuleState *string `min:"1" type:"string"`
+
+	// A list of key-value pairs to associate with the Contributor Insights rule.
+	// You can associate as many as 50 tags with a rule.
+	//
+	// Tags can help you organize and categorize your resources. You can also use
+	// them to scope user permissions, by granting a user permission to access or
+	// change only the resources that have certain tag values.
+	//
+	// To be able to associate tags with a rule, you must have the cloudwatch:TagResource
+	// permission in addition to the cloudwatch:PutInsightRule permission.
+	//
+	// If you are using this operation to update an existing Contributor Insights
+	// rule, any tags you specify in this parameter are ignored. To change the tags
+	// of an existing rule, use TagResource (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_TagResource.html).
+	Tags []*Tag `type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutInsightRuleInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutInsightRuleInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutInsightRuleInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutInsightRuleInput"}
+	if s.RuleDefinition == nil {
+		invalidParams.Add(request.NewErrParamRequired("RuleDefinition"))
+	}
+	if s.RuleDefinition != nil && len(*s.RuleDefinition) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("RuleDefinition", 1))
+	}
+	if s.RuleName == nil {
+		invalidParams.Add(request.NewErrParamRequired("RuleName"))
+	}
+	if s.RuleName != nil && len(*s.RuleName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("RuleName", 1))
+	}
+	if s.RuleState != nil && len(*s.RuleState) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("RuleState", 1))
+	}
+	if s.Tags != nil {
+		for i, v := range s.Tags {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetRuleDefinition sets the RuleDefinition field's value.
+func (s *PutInsightRuleInput) SetRuleDefinition(v string) *PutInsightRuleInput {
+	s.RuleDefinition = &v
+	return s
+}
+
+// SetRuleName sets the RuleName field's value.
+func (s *PutInsightRuleInput) SetRuleName(v string) *PutInsightRuleInput {
+	s.RuleName = &v
+	return s
+}
+
+// SetRuleState sets the RuleState field's value.
+func (s *PutInsightRuleInput) SetRuleState(v string) *PutInsightRuleInput {
+	s.RuleState = &v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *PutInsightRuleInput) SetTags(v []*Tag) *PutInsightRuleInput {
+	s.Tags = v
+	return s
+}
+
+// PutInsightRuleOutput is the response from the PutInsightRule operation;
+// it carries no data.
+type PutInsightRuleOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutInsightRuleOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutInsightRuleOutput) GoString() string {
+	return s.String()
+}
+
+// PutMetricAlarmInput is the input for the PutMetricAlarm operation.
+type PutMetricAlarmInput struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates whether actions should be executed during any changes to the alarm
+	// state. The default is TRUE.
+	ActionsEnabled *bool `type:"boolean"`
+
+	// The actions to execute when this alarm transitions to the ALARM state from
+	// any other state. Each action is specified as an Amazon Resource Name (ARN).
+	// Valid values:
+	//
+	// EC2 actions:
+	//
+	// * arn:aws:automate:region:ec2:stop
+	//
+	// * arn:aws:automate:region:ec2:terminate
+	//
+	// * arn:aws:automate:region:ec2:reboot
+	//
+	// * arn:aws:automate:region:ec2:recover
+	//
+	// * arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Stop/1.0
+	//
+	// * arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Terminate/1.0
+	//
+	// * arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Reboot/1.0
+	//
+	// * arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Recover/1.0
+	//
+	// Autoscaling action:
+	//
+	// * arn:aws:autoscaling:region:account-id:scalingPolicy:policy-id:autoScalingGroupName/group-friendly-name:policyName/policy-friendly-name
+	//
+	// Lambda actions:
+	//
+	// * Invoke the latest version of a Lambda function: arn:aws:lambda:region:account-id:function:function-name
+	//
+	// * Invoke a specific version of a Lambda function: arn:aws:lambda:region:account-id:function:function-name:version-number
+	//
+	// * Invoke a function by using an alias Lambda function: arn:aws:lambda:region:account-id:function:function-name:alias-name
+	//
+	// SNS notification action:
+ // + // * arn:aws:sns:region:account-id:sns-topic-name + // + // SSM integration actions: + // + // * arn:aws:ssm:region:account-id:opsitem:severity#CATEGORY=category-name + // + // * arn:aws:ssm-incidents::account-id:responseplan/response-plan-name + AlarmActions []*string `type:"list"` + + // The description for the alarm. + AlarmDescription *string `type:"string"` + + // The name for the alarm. This name must be unique within the Region. + // + // The name must contain only UTF-8 characters, and can't contain ASCII control + // characters + // + // AlarmName is a required field + AlarmName *string `min:"1" type:"string" required:"true"` + + // The arithmetic operation to use when comparing the specified statistic and + // threshold. The specified statistic value is used as the first operand. + // + // The values LessThanLowerOrGreaterThanUpperThreshold, LessThanLowerThreshold, + // and GreaterThanUpperThreshold are used only for alarms based on anomaly detection + // models. + // + // ComparisonOperator is a required field + ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperator"` + + // The number of data points that must be breaching to trigger the alarm. This + // is used only if you are setting an "M out of N" alarm. In that case, this + // value is the M. For more information, see Evaluating an Alarm (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarm-evaluation) + // in the Amazon CloudWatch User Guide. + DatapointsToAlarm *int64 `min:"1" type:"integer"` + + // The dimensions for the metric specified in MetricName. + Dimensions []*Dimension `type:"list"` + + // Used only for alarms based on percentiles. If you specify ignore, the alarm + // state does not change during periods with too few data points to be statistically + // significant. 
If you specify evaluate or omit this parameter, the alarm is + // always evaluated and possibly changes state no matter how many data points + // are available. For more information, see Percentile-Based CloudWatch Alarms + // and Low Data Samples (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#percentiles-with-low-samples). + // + // Valid Values: evaluate | ignore + EvaluateLowSampleCountPercentile *string `min:"1" type:"string"` + + // The number of periods over which data is compared to the specified threshold. + // If you are setting an alarm that requires that a number of consecutive data + // points be breaching to trigger the alarm, this value specifies that number. + // If you are setting an "M out of N" alarm, this value is the N. + // + // An alarm's total current evaluation period can be no longer than one day, + // so this number multiplied by Period cannot be more than 86,400 seconds. + // + // EvaluationPeriods is a required field + EvaluationPeriods *int64 `min:"1" type:"integer" required:"true"` + + // The extended statistic for the metric specified in MetricName. When you call + // PutMetricAlarm and specify a MetricName, you must specify either Statistic + // or ExtendedStatistic but not both. + // + // If you specify ExtendedStatistic, the following are valid values: + // + // * p90 + // + // * tm90 + // + // * tc90 + // + // * ts90 + // + // * wm90 + // + // * IQM + // + // * PR(n:m) where n and m are values of the metric + // + // * TC(X%:X%) where X is between 10 and 90 inclusive. + // + // * TM(X%:X%) where X is between 10 and 90 inclusive. + // + // * TS(X%:X%) where X is between 10 and 90 inclusive. + // + // * WM(X%:X%) where X is between 10 and 90 inclusive. + // + // For more information about these extended statistics, see CloudWatch statistics + // definitions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html). 
+ ExtendedStatistic *string `type:"string"` + + // The actions to execute when this alarm transitions to the INSUFFICIENT_DATA + // state from any other state. Each action is specified as an Amazon Resource + // Name (ARN). Valid values: + // + // EC2 actions: + // + // * arn:aws:automate:region:ec2:stop + // + // * arn:aws:automate:region:ec2:terminate + // + // * arn:aws:automate:region:ec2:reboot + // + // * arn:aws:automate:region:ec2:recover + // + // * arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Stop/1.0 + // + // * arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Terminate/1.0 + // + // * arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Reboot/1.0 + // + // * arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Recover/1.0 + // + // Autoscaling action: + // + // * arn:aws:autoscaling:region:account-id:scalingPolicy:policy-id:autoScalingGroupName/group-friendly-name:policyName/policy-friendly-name + // + // Lambda actions: + // + // * Invoke the latest version of a Lambda function: arn:aws:lambda:region:account-id:function:function-name + // + // * Invoke a specific version of a Lambda function: arn:aws:lambda:region:account-id:function:function-name:version-number + // + // * Invoke a function by using an alias Lambda function: arn:aws:lambda:region:account-id:function:function-name:alias-name + // + // SNS notification action: + // + // * arn:aws:sns:region:account-id:sns-topic-name + // + // SSM integration actions: + // + // * arn:aws:ssm:region:account-id:opsitem:severity#CATEGORY=category-name + // + // * arn:aws:ssm-incidents::account-id:responseplan/response-plan-name + InsufficientDataActions []*string `type:"list"` + + // The name for the metric associated with the alarm. For each PutMetricAlarm + // operation, you must specify either MetricName or a Metrics array. 
+ // + // If you are creating an alarm based on a math expression, you cannot specify + // this parameter, or any of the Namespace, Dimensions, Period, Unit, Statistic, + // or ExtendedStatistic parameters. Instead, you specify all this information + // in the Metrics array. + MetricName *string `min:"1" type:"string"` + + // An array of MetricDataQuery structures that enable you to create an alarm + // based on the result of a metric math expression. For each PutMetricAlarm + // operation, you must specify either MetricName or a Metrics array. + // + // Each item in the Metrics array either retrieves a metric or performs a math + // expression. + // + // One item in the Metrics array is the expression that the alarm watches. You + // designate this expression by setting ReturnData to true for this object in + // the array. For more information, see MetricDataQuery (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDataQuery.html). + // + // If you use the Metrics parameter, you cannot include the Namespace, MetricName, + // Dimensions, Period, Unit, Statistic, or ExtendedStatistic parameters of PutMetricAlarm + // in the same operation. Instead, you retrieve the metrics you are using in + // your math expression as part of the Metrics array. + Metrics []*MetricDataQuery `type:"list"` + + // The namespace for the metric associated specified in MetricName. + Namespace *string `min:"1" type:"string"` + + // The actions to execute when this alarm transitions to an OK state from any + // other state. Each action is specified as an Amazon Resource Name (ARN). 
Valid + // values: + // + // EC2 actions: + // + // * arn:aws:automate:region:ec2:stop + // + // * arn:aws:automate:region:ec2:terminate + // + // * arn:aws:automate:region:ec2:reboot + // + // * arn:aws:automate:region:ec2:recover + // + // * arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Stop/1.0 + // + // * arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Terminate/1.0 + // + // * arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Reboot/1.0 + // + // * arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Recover/1.0 + // + // Autoscaling action: + // + // * arn:aws:autoscaling:region:account-id:scalingPolicy:policy-id:autoScalingGroupName/group-friendly-name:policyName/policy-friendly-name + // + // Lambda actions: + // + // * Invoke the latest version of a Lambda function: arn:aws:lambda:region:account-id:function:function-name + // + // * Invoke a specific version of a Lambda function: arn:aws:lambda:region:account-id:function:function-name:version-number + // + // * Invoke a function by using an alias Lambda function: arn:aws:lambda:region:account-id:function:function-name:alias-name + // + // SNS notification action: + // + // * arn:aws:sns:region:account-id:sns-topic-name + // + // SSM integration actions: + // + // * arn:aws:ssm:region:account-id:opsitem:severity#CATEGORY=category-name + // + // * arn:aws:ssm-incidents::account-id:responseplan/response-plan-name + OKActions []*string `type:"list"` + + // The length, in seconds, used each time the metric specified in MetricName + // is evaluated. Valid values are 10, 30, and any multiple of 60. + // + // Period is required for alarms based on static thresholds. If you are creating + // an alarm based on a metric math expression, you specify the period for each + // metric within the objects in the Metrics array. + // + // Be sure to specify 10 or 30 only for metrics that are stored by a PutMetricData + // call with a StorageResolution of 1. 
If you specify a period of 10 or 30 for + // a metric that does not have sub-minute resolution, the alarm still attempts + // to gather data at the period rate that you specify. In this case, it does + // not receive data for the attempts that do not correspond to a one-minute + // data resolution, and the alarm might often lapse into INSUFFICENT_DATA status. + // Specifying 10 or 30 also sets this alarm as a high-resolution alarm, which + // has a higher charge than other alarms. For more information about pricing, + // see Amazon CloudWatch Pricing (https://aws.amazon.com/cloudwatch/pricing/). + // + // An alarm's total current evaluation period can be no longer than one day, + // so Period multiplied by EvaluationPeriods cannot be more than 86,400 seconds. + Period *int64 `min:"1" type:"integer"` + + // The statistic for the metric specified in MetricName, other than percentile. + // For percentile statistics, use ExtendedStatistic. When you call PutMetricAlarm + // and specify a MetricName, you must specify either Statistic or ExtendedStatistic, + // but not both. + Statistic *string `type:"string" enum:"Statistic"` + + // A list of key-value pairs to associate with the alarm. You can associate + // as many as 50 tags with an alarm. To be able to associate tags with the alarm + // when you create the alarm, you must have the cloudwatch:TagResource permission. + // + // Tags can help you organize and categorize your resources. You can also use + // them to scope user permissions by granting a user permission to access or + // change only resources with certain tag values. + // + // If you are using this operation to update an existing alarm, any tags you + // specify in this parameter are ignored. To change the tags of an existing + // alarm, use TagResource (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_TagResource.html) + // or UntagResource (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_UntagResource.html). 
+ // + // To use this field to set tags for an alarm when you create it, you must be + // signed on with both the cloudwatch:PutMetricAlarm and cloudwatch:TagResource + // permissions. + Tags []*Tag `type:"list"` + + // The value against which the specified statistic is compared. + // + // This parameter is required for alarms based on static thresholds, but should + // not be used for alarms based on anomaly detection models. + Threshold *float64 `type:"double"` + + // If this is an alarm based on an anomaly detection model, make this value + // match the ID of the ANOMALY_DETECTION_BAND function. + // + // For an example of how to use this parameter, see the Anomaly Detection Model + // Alarm example on this page. + // + // If your alarm uses this parameter, it cannot have Auto Scaling actions. + ThresholdMetricId *string `min:"1" type:"string"` + + // Sets how this alarm is to handle missing data points. If TreatMissingData + // is omitted, the default behavior of missing is used. For more information, + // see Configuring How CloudWatch Alarms Treats Missing Data (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data). + // + // Valid Values: breaching | notBreaching | ignore | missing + // + // Alarms that evaluate metrics in the AWS/DynamoDB namespace always ignore + // missing data even if you choose a different option for TreatMissingData. + // When an AWS/DynamoDB metric has missing data, alarms that evaluate that metric + // remain in their current state. + TreatMissingData *string `min:"1" type:"string"` + + // The unit of measure for the statistic. For example, the units for the Amazon + // EC2 NetworkIn metric are Bytes because NetworkIn tracks the number of bytes + // that an instance receives on all network interfaces. You can also specify + // a unit when you create a custom metric. Units help provide conceptual meaning + // to your data. 
Metric data points that specify a unit of measure, such as + // Percent, are aggregated separately. If you are creating an alarm based on + // a metric math expression, you can specify the unit for each metric (if needed) + // within the objects in the Metrics array. + // + // If you don't specify Unit, CloudWatch retrieves all unit types that have + // been published for the metric and attempts to evaluate the alarm. Usually, + // metrics are published with only one unit, so the alarm works as intended. + // + // However, if the metric is published with multiple types of units and you + // don't specify a unit, the alarm's behavior is not defined and it behaves + // unpredictably. + // + // We recommend omitting Unit so that you don't inadvertently specify an incorrect + // unit that is not published for this metric. Doing so causes the alarm to + // be stuck in the INSUFFICIENT DATA state. + Unit *string `type:"string" enum:"StandardUnit"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutMetricAlarmInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutMetricAlarmInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutMetricAlarmInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutMetricAlarmInput"} + if s.AlarmName == nil { + invalidParams.Add(request.NewErrParamRequired("AlarmName")) + } + if s.AlarmName != nil && len(*s.AlarmName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AlarmName", 1)) + } + if s.ComparisonOperator == nil { + invalidParams.Add(request.NewErrParamRequired("ComparisonOperator")) + } + if s.DatapointsToAlarm != nil && *s.DatapointsToAlarm < 1 { + invalidParams.Add(request.NewErrParamMinValue("DatapointsToAlarm", 1)) + } + if s.EvaluateLowSampleCountPercentile != nil && len(*s.EvaluateLowSampleCountPercentile) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EvaluateLowSampleCountPercentile", 1)) + } + if s.EvaluationPeriods == nil { + invalidParams.Add(request.NewErrParamRequired("EvaluationPeriods")) + } + if s.EvaluationPeriods != nil && *s.EvaluationPeriods < 1 { + invalidParams.Add(request.NewErrParamMinValue("EvaluationPeriods", 1)) + } + if s.MetricName != nil && len(*s.MetricName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricName", 1)) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } + if s.Period != nil && *s.Period < 1 { + invalidParams.Add(request.NewErrParamMinValue("Period", 1)) + } + if s.ThresholdMetricId != nil && len(*s.ThresholdMetricId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThresholdMetricId", 1)) + } + if s.TreatMissingData != nil && len(*s.TreatMissingData) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TreatMissingData", 1)) + } + if s.Dimensions != nil { + for i, v := range s.Dimensions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Metrics != nil { + for i, v := range s.Metrics { + if v == nil { + continue + } + if err := v.Validate(); 
err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Metrics", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActionsEnabled sets the ActionsEnabled field's value. +func (s *PutMetricAlarmInput) SetActionsEnabled(v bool) *PutMetricAlarmInput { + s.ActionsEnabled = &v + return s +} + +// SetAlarmActions sets the AlarmActions field's value. +func (s *PutMetricAlarmInput) SetAlarmActions(v []*string) *PutMetricAlarmInput { + s.AlarmActions = v + return s +} + +// SetAlarmDescription sets the AlarmDescription field's value. +func (s *PutMetricAlarmInput) SetAlarmDescription(v string) *PutMetricAlarmInput { + s.AlarmDescription = &v + return s +} + +// SetAlarmName sets the AlarmName field's value. +func (s *PutMetricAlarmInput) SetAlarmName(v string) *PutMetricAlarmInput { + s.AlarmName = &v + return s +} + +// SetComparisonOperator sets the ComparisonOperator field's value. +func (s *PutMetricAlarmInput) SetComparisonOperator(v string) *PutMetricAlarmInput { + s.ComparisonOperator = &v + return s +} + +// SetDatapointsToAlarm sets the DatapointsToAlarm field's value. +func (s *PutMetricAlarmInput) SetDatapointsToAlarm(v int64) *PutMetricAlarmInput { + s.DatapointsToAlarm = &v + return s +} + +// SetDimensions sets the Dimensions field's value. +func (s *PutMetricAlarmInput) SetDimensions(v []*Dimension) *PutMetricAlarmInput { + s.Dimensions = v + return s +} + +// SetEvaluateLowSampleCountPercentile sets the EvaluateLowSampleCountPercentile field's value. 
+func (s *PutMetricAlarmInput) SetEvaluateLowSampleCountPercentile(v string) *PutMetricAlarmInput { + s.EvaluateLowSampleCountPercentile = &v + return s +} + +// SetEvaluationPeriods sets the EvaluationPeriods field's value. +func (s *PutMetricAlarmInput) SetEvaluationPeriods(v int64) *PutMetricAlarmInput { + s.EvaluationPeriods = &v + return s +} + +// SetExtendedStatistic sets the ExtendedStatistic field's value. +func (s *PutMetricAlarmInput) SetExtendedStatistic(v string) *PutMetricAlarmInput { + s.ExtendedStatistic = &v + return s +} + +// SetInsufficientDataActions sets the InsufficientDataActions field's value. +func (s *PutMetricAlarmInput) SetInsufficientDataActions(v []*string) *PutMetricAlarmInput { + s.InsufficientDataActions = v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *PutMetricAlarmInput) SetMetricName(v string) *PutMetricAlarmInput { + s.MetricName = &v + return s +} + +// SetMetrics sets the Metrics field's value. +func (s *PutMetricAlarmInput) SetMetrics(v []*MetricDataQuery) *PutMetricAlarmInput { + s.Metrics = v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *PutMetricAlarmInput) SetNamespace(v string) *PutMetricAlarmInput { + s.Namespace = &v + return s +} + +// SetOKActions sets the OKActions field's value. +func (s *PutMetricAlarmInput) SetOKActions(v []*string) *PutMetricAlarmInput { + s.OKActions = v + return s +} + +// SetPeriod sets the Period field's value. +func (s *PutMetricAlarmInput) SetPeriod(v int64) *PutMetricAlarmInput { + s.Period = &v + return s +} + +// SetStatistic sets the Statistic field's value. +func (s *PutMetricAlarmInput) SetStatistic(v string) *PutMetricAlarmInput { + s.Statistic = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *PutMetricAlarmInput) SetTags(v []*Tag) *PutMetricAlarmInput { + s.Tags = v + return s +} + +// SetThreshold sets the Threshold field's value. 
+func (s *PutMetricAlarmInput) SetThreshold(v float64) *PutMetricAlarmInput { + s.Threshold = &v + return s +} + +// SetThresholdMetricId sets the ThresholdMetricId field's value. +func (s *PutMetricAlarmInput) SetThresholdMetricId(v string) *PutMetricAlarmInput { + s.ThresholdMetricId = &v + return s +} + +// SetTreatMissingData sets the TreatMissingData field's value. +func (s *PutMetricAlarmInput) SetTreatMissingData(v string) *PutMetricAlarmInput { + s.TreatMissingData = &v + return s +} + +// SetUnit sets the Unit field's value. +func (s *PutMetricAlarmInput) SetUnit(v string) *PutMetricAlarmInput { + s.Unit = &v + return s +} + +type PutMetricAlarmOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutMetricAlarmOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutMetricAlarmOutput) GoString() string { + return s.String() +} + +type PutMetricDataInput struct { + _ struct{} `type:"structure"` + + EntityMetricData []*EntityMetricData `type:"list"` + + // The data for the metric. The array can include no more than 1000 metrics + // per call. + // + // MetricData is a required field + MetricData []*MetricDatum `type:"list" required:"true"` + + // The namespace for the metric data. You can use ASCII characters for the namespace, + // except for control characters which are not supported. 
+ // + // To avoid conflicts with Amazon Web Services service namespaces, you should + // not specify a namespace that begins with AWS/ + // + // Namespace is a required field + Namespace *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutMetricDataInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutMetricDataInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutMetricDataInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutMetricDataInput"} + if s.MetricData == nil { + invalidParams.Add(request.NewErrParamRequired("MetricData")) + } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } + if s.EntityMetricData != nil { + for i, v := range s.EntityMetricData { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "EntityMetricData", i), err.(request.ErrInvalidParams)) + } + } + } + if s.MetricData != nil { + for i, v := range s.MetricData { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MetricData", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// 
SetEntityMetricData sets the EntityMetricData field's value. +func (s *PutMetricDataInput) SetEntityMetricData(v []*EntityMetricData) *PutMetricDataInput { + s.EntityMetricData = v + return s +} + +// SetMetricData sets the MetricData field's value. +func (s *PutMetricDataInput) SetMetricData(v []*MetricDatum) *PutMetricDataInput { + s.MetricData = v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *PutMetricDataInput) SetNamespace(v string) *PutMetricDataInput { + s.Namespace = &v + return s +} + +type PutMetricDataOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutMetricDataOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutMetricDataOutput) GoString() string { + return s.String() +} + +type PutMetricStreamInput struct { + _ struct{} `type:"structure"` + + // If you specify this parameter, the stream sends metrics from all metric namespaces + // except for the namespaces that you specify here. + // + // You cannot include ExcludeFilters and IncludeFilters in the same operation. + ExcludeFilters []*MetricStreamFilter `type:"list"` + + // The ARN of the Amazon Kinesis Data Firehose delivery stream to use for this + // metric stream. This Amazon Kinesis Data Firehose delivery stream must already + // exist and must be in the same account as the metric stream. 
+ // + // FirehoseArn is a required field + FirehoseArn *string `min:"1" type:"string" required:"true"` + + // If you specify this parameter, the stream sends only the metrics from the + // metric namespaces that you specify here. + // + // You cannot include IncludeFilters and ExcludeFilters in the same operation. + IncludeFilters []*MetricStreamFilter `type:"list"` + + // If you are creating a metric stream in a monitoring account, specify true + // to include metrics from source accounts in the metric stream. + IncludeLinkedAccountsMetrics *bool `type:"boolean"` + + // If you are creating a new metric stream, this is the name for the new stream. + // The name must be different than the names of other metric streams in this + // account and Region. + // + // If you are updating a metric stream, specify the name of that stream here. + // + // Valid characters are A-Z, a-z, 0-9, "-" and "_". + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The output format for the stream. Valid values are json, opentelemetry1.0, + // and opentelemetry0.7. For more information about metric stream output formats, + // see Metric streams output formats (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-formats.html). + // + // OutputFormat is a required field + OutputFormat *string `min:"1" type:"string" required:"true"` + + // The ARN of an IAM role that this metric stream will use to access Amazon + // Kinesis Data Firehose resources. This IAM role must already exist and must + // be in the same account as the metric stream. This IAM role must include the + // following permissions: + // + // * firehose:PutRecord + // + // * firehose:PutRecordBatch + // + // RoleArn is a required field + RoleArn *string `min:"1" type:"string" required:"true"` + + // By default, a metric stream always sends the MAX, MIN, SUM, and SAMPLECOUNT + // statistics for each metric that is streamed. 
You can use this parameter to + // have the metric stream also send additional statistics in the stream. This + // array can have up to 100 members. + // + // For each entry in this array, you specify one or more metrics and the list + // of additional statistics to stream for those metrics. The additional statistics + // that you can stream depend on the stream's OutputFormat. If the OutputFormat + // is json, you can stream any additional statistic that is supported by CloudWatch, + // listed in CloudWatch statistics definitions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html.html). + // If the OutputFormat is opentelemetry1.0 or opentelemetry0.7, you can stream + // percentile statistics such as p95, p99.9, and so on. + StatisticsConfigurations []*MetricStreamStatisticsConfiguration `type:"list"` + + // A list of key-value pairs to associate with the metric stream. You can associate + // as many as 50 tags with a metric stream. + // + // Tags can help you organize and categorize your resources. You can also use + // them to scope user permissions by granting a user permission to access or + // change only resources with certain tag values. + // + // You can use this parameter only when you are creating a new metric stream. + // If you are using this operation to update an existing metric stream, any + // tags you specify in this parameter are ignored. To change the tags of an + // existing metric stream, use TagResource (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_TagResource.html) + // or UntagResource (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_UntagResource.html). + Tags []*Tag `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s PutMetricStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutMetricStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutMetricStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutMetricStreamInput"} + if s.FirehoseArn == nil { + invalidParams.Add(request.NewErrParamRequired("FirehoseArn")) + } + if s.FirehoseArn != nil && len(*s.FirehoseArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FirehoseArn", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.OutputFormat == nil { + invalidParams.Add(request.NewErrParamRequired("OutputFormat")) + } + if s.OutputFormat != nil && len(*s.OutputFormat) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OutputFormat", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) + } + if s.ExcludeFilters != nil { + for i, v := range s.ExcludeFilters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ExcludeFilters", i), err.(request.ErrInvalidParams)) + } + } + } + if s.IncludeFilters != nil { + for i, v := range s.IncludeFilters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "IncludeFilters", i), err.(request.ErrInvalidParams)) + } + } + } + if s.StatisticsConfigurations != nil { + 
for i, v := range s.StatisticsConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "StatisticsConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExcludeFilters sets the ExcludeFilters field's value. +func (s *PutMetricStreamInput) SetExcludeFilters(v []*MetricStreamFilter) *PutMetricStreamInput { + s.ExcludeFilters = v + return s +} + +// SetFirehoseArn sets the FirehoseArn field's value. +func (s *PutMetricStreamInput) SetFirehoseArn(v string) *PutMetricStreamInput { + s.FirehoseArn = &v + return s +} + +// SetIncludeFilters sets the IncludeFilters field's value. +func (s *PutMetricStreamInput) SetIncludeFilters(v []*MetricStreamFilter) *PutMetricStreamInput { + s.IncludeFilters = v + return s +} + +// SetIncludeLinkedAccountsMetrics sets the IncludeLinkedAccountsMetrics field's value. +func (s *PutMetricStreamInput) SetIncludeLinkedAccountsMetrics(v bool) *PutMetricStreamInput { + s.IncludeLinkedAccountsMetrics = &v + return s +} + +// SetName sets the Name field's value. +func (s *PutMetricStreamInput) SetName(v string) *PutMetricStreamInput { + s.Name = &v + return s +} + +// SetOutputFormat sets the OutputFormat field's value. +func (s *PutMetricStreamInput) SetOutputFormat(v string) *PutMetricStreamInput { + s.OutputFormat = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *PutMetricStreamInput) SetRoleArn(v string) *PutMetricStreamInput { + s.RoleArn = &v + return s +} + +// SetStatisticsConfigurations sets the StatisticsConfigurations field's value. 
+func (s *PutMetricStreamInput) SetStatisticsConfigurations(v []*MetricStreamStatisticsConfiguration) *PutMetricStreamInput { + s.StatisticsConfigurations = v + return s +} + +// SetTags sets the Tags field's value. +func (s *PutMetricStreamInput) SetTags(v []*Tag) *PutMetricStreamInput { + s.Tags = v + return s +} + +type PutMetricStreamOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the metric stream. + Arn *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutMetricStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutMetricStreamOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *PutMetricStreamOutput) SetArn(v string) *PutMetricStreamOutput { + s.Arn = &v + return s +} + +// Specifies one range of days or times to exclude from use for training an +// anomaly detection model. +type Range struct { + _ struct{} `type:"structure"` + + // The end time of the range to exclude. The format is yyyy-MM-dd'T'HH:mm:ss. + // For example, 2019-07-01T23:59:59. + // + // EndTime is a required field + EndTime *time.Time `type:"timestamp" required:"true"` + + // The start time of the range to exclude. The format is yyyy-MM-dd'T'HH:mm:ss. + // For example, 2019-07-01T23:59:59. + // + // StartTime is a required field + StartTime *time.Time `type:"timestamp" required:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Range) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Range) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Range) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Range"} + if s.EndTime == nil { + invalidParams.Add(request.NewErrParamRequired("EndTime")) + } + if s.StartTime == nil { + invalidParams.Add(request.NewErrParamRequired("StartTime")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndTime sets the EndTime field's value. +func (s *Range) SetEndTime(v time.Time) *Range { + s.EndTime = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *Range) SetStartTime(v time.Time) *Range { + s.StartTime = &v + return s +} + +type SetAlarmStateInput struct { + _ struct{} `type:"structure"` + + // The name of the alarm. + // + // AlarmName is a required field + AlarmName *string `min:"1" type:"string" required:"true"` + + // The reason that this alarm is set to this specific state, in text format. + // + // StateReason is a required field + StateReason *string `type:"string" required:"true"` + + // The reason that this alarm is set to this specific state, in JSON format. + // + // For SNS or EC2 alarm actions, this is just informational. But for EC2 Auto + // Scaling or application Auto Scaling alarm actions, the Auto Scaling policy + // uses the information in this field to take the correct action. 
+ StateReasonData *string `type:"string"` + + // The value of the state. + // + // StateValue is a required field + StateValue *string `type:"string" required:"true" enum:"StateValue"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SetAlarmStateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SetAlarmStateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetAlarmStateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetAlarmStateInput"} + if s.AlarmName == nil { + invalidParams.Add(request.NewErrParamRequired("AlarmName")) + } + if s.AlarmName != nil && len(*s.AlarmName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AlarmName", 1)) + } + if s.StateReason == nil { + invalidParams.Add(request.NewErrParamRequired("StateReason")) + } + if s.StateValue == nil { + invalidParams.Add(request.NewErrParamRequired("StateValue")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAlarmName sets the AlarmName field's value. +func (s *SetAlarmStateInput) SetAlarmName(v string) *SetAlarmStateInput { + s.AlarmName = &v + return s +} + +// SetStateReason sets the StateReason field's value. +func (s *SetAlarmStateInput) SetStateReason(v string) *SetAlarmStateInput { + s.StateReason = &v + return s +} + +// SetStateReasonData sets the StateReasonData field's value. 
+func (s *SetAlarmStateInput) SetStateReasonData(v string) *SetAlarmStateInput { + s.StateReasonData = &v + return s +} + +// SetStateValue sets the StateValue field's value. +func (s *SetAlarmStateInput) SetStateValue(v string) *SetAlarmStateInput { + s.StateValue = &v + return s +} + +type SetAlarmStateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SetAlarmStateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SetAlarmStateOutput) GoString() string { + return s.String() +} + +// Designates the CloudWatch metric and statistic that provides the time series +// the anomaly detector uses as input. If you have enabled unified cross-account +// observability, and this account is a monitoring account, the metric can be +// in the same account or a source account. +type SingleMetricAnomalyDetector struct { + _ struct{} `type:"structure"` + + // If the CloudWatch metric that provides the time series that the anomaly detector + // uses as input is in another account, specify that account ID here. If you + // omit this parameter, the current account is used. + AccountId *string `min:"1" type:"string"` + + // The metric dimensions to create the anomaly detection model for. + Dimensions []*Dimension `type:"list"` + + // The name of the metric to create the anomaly detection model for. + MetricName *string `min:"1" type:"string"` + + // The namespace of the metric to create the anomaly detection model for. 
+ Namespace *string `min:"1" type:"string"` + + // The statistic to use for the metric and anomaly detection model. + Stat *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SingleMetricAnomalyDetector) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SingleMetricAnomalyDetector) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SingleMetricAnomalyDetector) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SingleMetricAnomalyDetector"} + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + if s.MetricName != nil && len(*s.MetricName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricName", 1)) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } + if s.Dimensions != nil { + for i, v := range s.Dimensions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *SingleMetricAnomalyDetector) SetAccountId(v string) *SingleMetricAnomalyDetector { + s.AccountId = &v + return s +} + +// SetDimensions sets the Dimensions field's value. 
+func (s *SingleMetricAnomalyDetector) SetDimensions(v []*Dimension) *SingleMetricAnomalyDetector { + s.Dimensions = v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *SingleMetricAnomalyDetector) SetMetricName(v string) *SingleMetricAnomalyDetector { + s.MetricName = &v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *SingleMetricAnomalyDetector) SetNamespace(v string) *SingleMetricAnomalyDetector { + s.Namespace = &v + return s +} + +// SetStat sets the Stat field's value. +func (s *SingleMetricAnomalyDetector) SetStat(v string) *SingleMetricAnomalyDetector { + s.Stat = &v + return s +} + +type StartMetricStreamsInput struct { + _ struct{} `type:"structure"` + + // The array of the names of metric streams to start streaming. + // + // This is an "all or nothing" operation. If you do not have permission to access + // all of the metric streams that you list here, then none of the streams that + // you list in the operation will start streaming. + // + // Names is a required field + Names []*string `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartMetricStreamsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartMetricStreamsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *StartMetricStreamsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartMetricStreamsInput"} + if s.Names == nil { + invalidParams.Add(request.NewErrParamRequired("Names")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNames sets the Names field's value. +func (s *StartMetricStreamsInput) SetNames(v []*string) *StartMetricStreamsInput { + s.Names = v + return s +} + +type StartMetricStreamsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartMetricStreamsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartMetricStreamsOutput) GoString() string { + return s.String() +} + +// Represents a set of statistics that describes a specific metric. +type StatisticSet struct { + _ struct{} `type:"structure"` + + // The maximum value of the sample set. + // + // Maximum is a required field + Maximum *float64 `type:"double" required:"true"` + + // The minimum value of the sample set. + // + // Minimum is a required field + Minimum *float64 `type:"double" required:"true"` + + // The number of samples used for the statistic set. + // + // SampleCount is a required field + SampleCount *float64 `type:"double" required:"true"` + + // The sum of values for the sample set. + // + // Sum is a required field + Sum *float64 `type:"double" required:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StatisticSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StatisticSet) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StatisticSet) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StatisticSet"} + if s.Maximum == nil { + invalidParams.Add(request.NewErrParamRequired("Maximum")) + } + if s.Minimum == nil { + invalidParams.Add(request.NewErrParamRequired("Minimum")) + } + if s.SampleCount == nil { + invalidParams.Add(request.NewErrParamRequired("SampleCount")) + } + if s.Sum == nil { + invalidParams.Add(request.NewErrParamRequired("Sum")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaximum sets the Maximum field's value. +func (s *StatisticSet) SetMaximum(v float64) *StatisticSet { + s.Maximum = &v + return s +} + +// SetMinimum sets the Minimum field's value. +func (s *StatisticSet) SetMinimum(v float64) *StatisticSet { + s.Minimum = &v + return s +} + +// SetSampleCount sets the SampleCount field's value. +func (s *StatisticSet) SetSampleCount(v float64) *StatisticSet { + s.SampleCount = &v + return s +} + +// SetSum sets the Sum field's value. +func (s *StatisticSet) SetSum(v float64) *StatisticSet { + s.Sum = &v + return s +} + +type StopMetricStreamsInput struct { + _ struct{} `type:"structure"` + + // The array of the names of metric streams to stop streaming. + // + // This is an "all or nothing" operation. 
If you do not have permission to access + // all of the metric streams that you list here, then none of the streams that + // you list in the operation will stop streaming. + // + // Names is a required field + Names []*string `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StopMetricStreamsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StopMetricStreamsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopMetricStreamsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopMetricStreamsInput"} + if s.Names == nil { + invalidParams.Add(request.NewErrParamRequired("Names")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNames sets the Names field's value. +func (s *StopMetricStreamsInput) SetNames(v []*string) *StopMetricStreamsInput { + s.Names = v + return s +} + +type StopMetricStreamsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StopMetricStreamsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StopMetricStreamsOutput) GoString() string { + return s.String() +} + +// A key-value pair associated with a CloudWatch resource. +type Tag struct { + _ struct{} `type:"structure"` + + // A string that you can use to assign a value. The combination of tag keys + // and values can help you organize and categorize your resources. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value for the specified tag key. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. 
+func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the CloudWatch resource that you're adding tags to. + // + // The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name + // + // The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule/insight-rule-name + // + // For more information about ARN format, see Resource Types Defined by Amazon + // CloudWatch (https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatch.html#amazoncloudwatch-resources-for-iam-policies) + // in the Amazon Web Services General Reference. + // + // ResourceARN is a required field + ResourceARN *string `min:"1" type:"string" required:"true"` + + // The list of key-value pairs to associate with the alarm. + // + // Tags is a required field + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *TagResourceInput) SetResourceARN(v string) *TagResourceInput { + s.ResourceARN = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TagResourceOutput) GoString() string { + return s.String() +} + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the CloudWatch resource that you're removing tags from. 
+ // + // The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name + // + // The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule/insight-rule-name + // + // For more information about ARN format, see Resource Types Defined by Amazon + // CloudWatch (https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatch.html#amazoncloudwatch-resources-for-iam-policies) + // in the Amazon Web Services General Reference. + // + // ResourceARN is a required field + ResourceARN *string `min:"1" type:"string" required:"true"` + + // The list of tag keys to remove from the resource. + // + // TagKeys is a required field + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *UntagResourceInput) SetResourceARN(v string) *UntagResourceInput { + s.ResourceARN = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s UntagResourceOutput) GoString() string { + return s.String() +} + +const ( + // ActionsSuppressedByWaitPeriod is a ActionsSuppressedBy enum value + ActionsSuppressedByWaitPeriod = "WaitPeriod" + + // ActionsSuppressedByExtensionPeriod is a ActionsSuppressedBy enum value + ActionsSuppressedByExtensionPeriod = "ExtensionPeriod" + + // ActionsSuppressedByAlarm is a ActionsSuppressedBy enum value + ActionsSuppressedByAlarm = "Alarm" +) + +// ActionsSuppressedBy_Values returns all elements of the ActionsSuppressedBy enum +func ActionsSuppressedBy_Values() []string { + return []string{ + ActionsSuppressedByWaitPeriod, + ActionsSuppressedByExtensionPeriod, + ActionsSuppressedByAlarm, + } +} + +const ( + // AlarmTypeCompositeAlarm is a AlarmType enum value + AlarmTypeCompositeAlarm = "CompositeAlarm" + + // AlarmTypeMetricAlarm is a AlarmType enum value + AlarmTypeMetricAlarm = "MetricAlarm" +) + +// AlarmType_Values returns all elements of the AlarmType enum +func AlarmType_Values() []string { + return []string{ + AlarmTypeCompositeAlarm, + AlarmTypeMetricAlarm, + } +} + +const ( + // AnomalyDetectorStateValuePendingTraining is a AnomalyDetectorStateValue enum value + AnomalyDetectorStateValuePendingTraining = "PENDING_TRAINING" + + // AnomalyDetectorStateValueTrainedInsufficientData is a AnomalyDetectorStateValue enum value + AnomalyDetectorStateValueTrainedInsufficientData = "TRAINED_INSUFFICIENT_DATA" + + // AnomalyDetectorStateValueTrained is a AnomalyDetectorStateValue enum value + AnomalyDetectorStateValueTrained = "TRAINED" +) + +// AnomalyDetectorStateValue_Values returns all elements of the AnomalyDetectorStateValue enum +func AnomalyDetectorStateValue_Values() []string { + return []string{ + AnomalyDetectorStateValuePendingTraining, + AnomalyDetectorStateValueTrainedInsufficientData, + AnomalyDetectorStateValueTrained, + } +} + +const ( + // AnomalyDetectorTypeSingleMetric is a AnomalyDetectorType enum value + AnomalyDetectorTypeSingleMetric = 
"SINGLE_METRIC" + + // AnomalyDetectorTypeMetricMath is a AnomalyDetectorType enum value + AnomalyDetectorTypeMetricMath = "METRIC_MATH" +) + +// AnomalyDetectorType_Values returns all elements of the AnomalyDetectorType enum +func AnomalyDetectorType_Values() []string { + return []string{ + AnomalyDetectorTypeSingleMetric, + AnomalyDetectorTypeMetricMath, + } +} + +const ( + // ComparisonOperatorGreaterThanOrEqualToThreshold is a ComparisonOperator enum value + ComparisonOperatorGreaterThanOrEqualToThreshold = "GreaterThanOrEqualToThreshold" + + // ComparisonOperatorGreaterThanThreshold is a ComparisonOperator enum value + ComparisonOperatorGreaterThanThreshold = "GreaterThanThreshold" + + // ComparisonOperatorLessThanThreshold is a ComparisonOperator enum value + ComparisonOperatorLessThanThreshold = "LessThanThreshold" + + // ComparisonOperatorLessThanOrEqualToThreshold is a ComparisonOperator enum value + ComparisonOperatorLessThanOrEqualToThreshold = "LessThanOrEqualToThreshold" + + // ComparisonOperatorLessThanLowerOrGreaterThanUpperThreshold is a ComparisonOperator enum value + ComparisonOperatorLessThanLowerOrGreaterThanUpperThreshold = "LessThanLowerOrGreaterThanUpperThreshold" + + // ComparisonOperatorLessThanLowerThreshold is a ComparisonOperator enum value + ComparisonOperatorLessThanLowerThreshold = "LessThanLowerThreshold" + + // ComparisonOperatorGreaterThanUpperThreshold is a ComparisonOperator enum value + ComparisonOperatorGreaterThanUpperThreshold = "GreaterThanUpperThreshold" +) + +// ComparisonOperator_Values returns all elements of the ComparisonOperator enum +func ComparisonOperator_Values() []string { + return []string{ + ComparisonOperatorGreaterThanOrEqualToThreshold, + ComparisonOperatorGreaterThanThreshold, + ComparisonOperatorLessThanThreshold, + ComparisonOperatorLessThanOrEqualToThreshold, + ComparisonOperatorLessThanLowerOrGreaterThanUpperThreshold, + ComparisonOperatorLessThanLowerThreshold, + 
ComparisonOperatorGreaterThanUpperThreshold, + } +} + +const ( + // HistoryItemTypeConfigurationUpdate is a HistoryItemType enum value + HistoryItemTypeConfigurationUpdate = "ConfigurationUpdate" + + // HistoryItemTypeStateUpdate is a HistoryItemType enum value + HistoryItemTypeStateUpdate = "StateUpdate" + + // HistoryItemTypeAction is a HistoryItemType enum value + HistoryItemTypeAction = "Action" +) + +// HistoryItemType_Values returns all elements of the HistoryItemType enum +func HistoryItemType_Values() []string { + return []string{ + HistoryItemTypeConfigurationUpdate, + HistoryItemTypeStateUpdate, + HistoryItemTypeAction, + } +} + +const ( + // RecentlyActivePt3h is a RecentlyActive enum value + RecentlyActivePt3h = "PT3H" +) + +// RecentlyActive_Values returns all elements of the RecentlyActive enum +func RecentlyActive_Values() []string { + return []string{ + RecentlyActivePt3h, + } +} + +const ( + // ScanByTimestampDescending is a ScanBy enum value + ScanByTimestampDescending = "TimestampDescending" + + // ScanByTimestampAscending is a ScanBy enum value + ScanByTimestampAscending = "TimestampAscending" +) + +// ScanBy_Values returns all elements of the ScanBy enum +func ScanBy_Values() []string { + return []string{ + ScanByTimestampDescending, + ScanByTimestampAscending, + } +} + +const ( + // StandardUnitSeconds is a StandardUnit enum value + StandardUnitSeconds = "Seconds" + + // StandardUnitMicroseconds is a StandardUnit enum value + StandardUnitMicroseconds = "Microseconds" + + // StandardUnitMilliseconds is a StandardUnit enum value + StandardUnitMilliseconds = "Milliseconds" + + // StandardUnitBytes is a StandardUnit enum value + StandardUnitBytes = "Bytes" + + // StandardUnitKilobytes is a StandardUnit enum value + StandardUnitKilobytes = "Kilobytes" + + // StandardUnitMegabytes is a StandardUnit enum value + StandardUnitMegabytes = "Megabytes" + + // StandardUnitGigabytes is a StandardUnit enum value + StandardUnitGigabytes = "Gigabytes" + + // 
StandardUnitTerabytes is a StandardUnit enum value + StandardUnitTerabytes = "Terabytes" + + // StandardUnitBits is a StandardUnit enum value + StandardUnitBits = "Bits" + + // StandardUnitKilobits is a StandardUnit enum value + StandardUnitKilobits = "Kilobits" + + // StandardUnitMegabits is a StandardUnit enum value + StandardUnitMegabits = "Megabits" + + // StandardUnitGigabits is a StandardUnit enum value + StandardUnitGigabits = "Gigabits" + + // StandardUnitTerabits is a StandardUnit enum value + StandardUnitTerabits = "Terabits" + + // StandardUnitPercent is a StandardUnit enum value + StandardUnitPercent = "Percent" + + // StandardUnitCount is a StandardUnit enum value + StandardUnitCount = "Count" + + // StandardUnitBytesSecond is a StandardUnit enum value + StandardUnitBytesSecond = "Bytes/Second" + + // StandardUnitKilobytesSecond is a StandardUnit enum value + StandardUnitKilobytesSecond = "Kilobytes/Second" + + // StandardUnitMegabytesSecond is a StandardUnit enum value + StandardUnitMegabytesSecond = "Megabytes/Second" + + // StandardUnitGigabytesSecond is a StandardUnit enum value + StandardUnitGigabytesSecond = "Gigabytes/Second" + + // StandardUnitTerabytesSecond is a StandardUnit enum value + StandardUnitTerabytesSecond = "Terabytes/Second" + + // StandardUnitBitsSecond is a StandardUnit enum value + StandardUnitBitsSecond = "Bits/Second" + + // StandardUnitKilobitsSecond is a StandardUnit enum value + StandardUnitKilobitsSecond = "Kilobits/Second" + + // StandardUnitMegabitsSecond is a StandardUnit enum value + StandardUnitMegabitsSecond = "Megabits/Second" + + // StandardUnitGigabitsSecond is a StandardUnit enum value + StandardUnitGigabitsSecond = "Gigabits/Second" + + // StandardUnitTerabitsSecond is a StandardUnit enum value + StandardUnitTerabitsSecond = "Terabits/Second" + + // StandardUnitCountSecond is a StandardUnit enum value + StandardUnitCountSecond = "Count/Second" + + // StandardUnitNone is a StandardUnit enum value + 
StandardUnitNone = "None" +) + +// StandardUnit_Values returns all elements of the StandardUnit enum +func StandardUnit_Values() []string { + return []string{ + StandardUnitSeconds, + StandardUnitMicroseconds, + StandardUnitMilliseconds, + StandardUnitBytes, + StandardUnitKilobytes, + StandardUnitMegabytes, + StandardUnitGigabytes, + StandardUnitTerabytes, + StandardUnitBits, + StandardUnitKilobits, + StandardUnitMegabits, + StandardUnitGigabits, + StandardUnitTerabits, + StandardUnitPercent, + StandardUnitCount, + StandardUnitBytesSecond, + StandardUnitKilobytesSecond, + StandardUnitMegabytesSecond, + StandardUnitGigabytesSecond, + StandardUnitTerabytesSecond, + StandardUnitBitsSecond, + StandardUnitKilobitsSecond, + StandardUnitMegabitsSecond, + StandardUnitGigabitsSecond, + StandardUnitTerabitsSecond, + StandardUnitCountSecond, + StandardUnitNone, + } +} + +const ( + // StateValueOk is a StateValue enum value + StateValueOk = "OK" + + // StateValueAlarm is a StateValue enum value + StateValueAlarm = "ALARM" + + // StateValueInsufficientData is a StateValue enum value + StateValueInsufficientData = "INSUFFICIENT_DATA" +) + +// StateValue_Values returns all elements of the StateValue enum +func StateValue_Values() []string { + return []string{ + StateValueOk, + StateValueAlarm, + StateValueInsufficientData, + } +} + +const ( + // StatisticSampleCount is a Statistic enum value + StatisticSampleCount = "SampleCount" + + // StatisticAverage is a Statistic enum value + StatisticAverage = "Average" + + // StatisticSum is a Statistic enum value + StatisticSum = "Sum" + + // StatisticMinimum is a Statistic enum value + StatisticMinimum = "Minimum" + + // StatisticMaximum is a Statistic enum value + StatisticMaximum = "Maximum" +) + +// Statistic_Values returns all elements of the Statistic enum +func Statistic_Values() []string { + return []string{ + StatisticSampleCount, + StatisticAverage, + StatisticSum, + StatisticMinimum, + StatisticMaximum, + } +} + +const ( + // 
StatusCodeComplete is a StatusCode enum value + StatusCodeComplete = "Complete" + + // StatusCodeInternalError is a StatusCode enum value + StatusCodeInternalError = "InternalError" + + // StatusCodePartialData is a StatusCode enum value + StatusCodePartialData = "PartialData" + + // StatusCodeForbidden is a StatusCode enum value + StatusCodeForbidden = "Forbidden" +) + +// StatusCode_Values returns all elements of the StatusCode enum +func StatusCode_Values() []string { + return []string{ + StatusCodeComplete, + StatusCodeInternalError, + StatusCodePartialData, + StatusCodeForbidden, + } +} diff --git a/sdk/service/cloudwatch/cloudwatchiface/interface.go b/sdk/service/cloudwatch/cloudwatchiface/interface.go new file mode 100644 index 0000000000..267394511e --- /dev/null +++ b/sdk/service/cloudwatch/cloudwatchiface/interface.go @@ -0,0 +1,239 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package cloudwatchiface provides an interface to enable mocking the Amazon CloudWatch service client +// for testing your code. +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. +package cloudwatchiface + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + + "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatch" +) + +// CloudWatchAPI provides an interface to enable mocking the +// cloudwatch.CloudWatch service client's API operation, +// paginators, and waiters. This make unit testing your code that calls out +// to the SDK's service client's calls easier. +// +// The best way to use this interface is so the SDK's service client's calls +// can be stubbed out for unit testing your code with the SDK without needing +// to inject custom request handlers into the SDK's request pipeline. +// +// // myFunc uses an SDK service client to make a request to +// // Amazon CloudWatch. 
+// func myFunc(svc cloudwatchiface.CloudWatchAPI) bool { +// // Make svc.DeleteAlarms request +// } +// +// func main() { +// sess := session.New() +// svc := cloudwatch.New(sess) +// +// myFunc(svc) +// } +// +// In your _test.go file: +// +// // Define a mock struct to be used in your unit tests of myFunc. +// type mockCloudWatchClient struct { +// cloudwatchiface.CloudWatchAPI +// } +// func (m *mockCloudWatchClient) DeleteAlarms(input *cloudwatch.DeleteAlarmsInput) (*cloudwatch.DeleteAlarmsOutput, error) { +// // mock response/functionality +// } +// +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockCloudWatchClient{} +// +// myfunc(mockSvc) +// +// // Verify myFunc's functionality +// } +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. Its suggested to use the pattern above for testing, or using +// tooling to generate mocks to satisfy the interfaces. 
+type CloudWatchAPI interface { + DeleteAlarms(*cloudwatch.DeleteAlarmsInput) (*cloudwatch.DeleteAlarmsOutput, error) + DeleteAlarmsWithContext(aws.Context, *cloudwatch.DeleteAlarmsInput, ...request.Option) (*cloudwatch.DeleteAlarmsOutput, error) + DeleteAlarmsRequest(*cloudwatch.DeleteAlarmsInput) (*request.Request, *cloudwatch.DeleteAlarmsOutput) + + DeleteAnomalyDetector(*cloudwatch.DeleteAnomalyDetectorInput) (*cloudwatch.DeleteAnomalyDetectorOutput, error) + DeleteAnomalyDetectorWithContext(aws.Context, *cloudwatch.DeleteAnomalyDetectorInput, ...request.Option) (*cloudwatch.DeleteAnomalyDetectorOutput, error) + DeleteAnomalyDetectorRequest(*cloudwatch.DeleteAnomalyDetectorInput) (*request.Request, *cloudwatch.DeleteAnomalyDetectorOutput) + + DeleteDashboards(*cloudwatch.DeleteDashboardsInput) (*cloudwatch.DeleteDashboardsOutput, error) + DeleteDashboardsWithContext(aws.Context, *cloudwatch.DeleteDashboardsInput, ...request.Option) (*cloudwatch.DeleteDashboardsOutput, error) + DeleteDashboardsRequest(*cloudwatch.DeleteDashboardsInput) (*request.Request, *cloudwatch.DeleteDashboardsOutput) + + DeleteInsightRules(*cloudwatch.DeleteInsightRulesInput) (*cloudwatch.DeleteInsightRulesOutput, error) + DeleteInsightRulesWithContext(aws.Context, *cloudwatch.DeleteInsightRulesInput, ...request.Option) (*cloudwatch.DeleteInsightRulesOutput, error) + DeleteInsightRulesRequest(*cloudwatch.DeleteInsightRulesInput) (*request.Request, *cloudwatch.DeleteInsightRulesOutput) + + DeleteMetricStream(*cloudwatch.DeleteMetricStreamInput) (*cloudwatch.DeleteMetricStreamOutput, error) + DeleteMetricStreamWithContext(aws.Context, *cloudwatch.DeleteMetricStreamInput, ...request.Option) (*cloudwatch.DeleteMetricStreamOutput, error) + DeleteMetricStreamRequest(*cloudwatch.DeleteMetricStreamInput) (*request.Request, *cloudwatch.DeleteMetricStreamOutput) + + DescribeAlarmHistory(*cloudwatch.DescribeAlarmHistoryInput) (*cloudwatch.DescribeAlarmHistoryOutput, error) + 
DescribeAlarmHistoryWithContext(aws.Context, *cloudwatch.DescribeAlarmHistoryInput, ...request.Option) (*cloudwatch.DescribeAlarmHistoryOutput, error) + DescribeAlarmHistoryRequest(*cloudwatch.DescribeAlarmHistoryInput) (*request.Request, *cloudwatch.DescribeAlarmHistoryOutput) + + DescribeAlarmHistoryPages(*cloudwatch.DescribeAlarmHistoryInput, func(*cloudwatch.DescribeAlarmHistoryOutput, bool) bool) error + DescribeAlarmHistoryPagesWithContext(aws.Context, *cloudwatch.DescribeAlarmHistoryInput, func(*cloudwatch.DescribeAlarmHistoryOutput, bool) bool, ...request.Option) error + + DescribeAlarms(*cloudwatch.DescribeAlarmsInput) (*cloudwatch.DescribeAlarmsOutput, error) + DescribeAlarmsWithContext(aws.Context, *cloudwatch.DescribeAlarmsInput, ...request.Option) (*cloudwatch.DescribeAlarmsOutput, error) + DescribeAlarmsRequest(*cloudwatch.DescribeAlarmsInput) (*request.Request, *cloudwatch.DescribeAlarmsOutput) + + DescribeAlarmsPages(*cloudwatch.DescribeAlarmsInput, func(*cloudwatch.DescribeAlarmsOutput, bool) bool) error + DescribeAlarmsPagesWithContext(aws.Context, *cloudwatch.DescribeAlarmsInput, func(*cloudwatch.DescribeAlarmsOutput, bool) bool, ...request.Option) error + + DescribeAlarmsForMetric(*cloudwatch.DescribeAlarmsForMetricInput) (*cloudwatch.DescribeAlarmsForMetricOutput, error) + DescribeAlarmsForMetricWithContext(aws.Context, *cloudwatch.DescribeAlarmsForMetricInput, ...request.Option) (*cloudwatch.DescribeAlarmsForMetricOutput, error) + DescribeAlarmsForMetricRequest(*cloudwatch.DescribeAlarmsForMetricInput) (*request.Request, *cloudwatch.DescribeAlarmsForMetricOutput) + + DescribeAnomalyDetectors(*cloudwatch.DescribeAnomalyDetectorsInput) (*cloudwatch.DescribeAnomalyDetectorsOutput, error) + DescribeAnomalyDetectorsWithContext(aws.Context, *cloudwatch.DescribeAnomalyDetectorsInput, ...request.Option) (*cloudwatch.DescribeAnomalyDetectorsOutput, error) + DescribeAnomalyDetectorsRequest(*cloudwatch.DescribeAnomalyDetectorsInput) (*request.Request, 
*cloudwatch.DescribeAnomalyDetectorsOutput) + + DescribeAnomalyDetectorsPages(*cloudwatch.DescribeAnomalyDetectorsInput, func(*cloudwatch.DescribeAnomalyDetectorsOutput, bool) bool) error + DescribeAnomalyDetectorsPagesWithContext(aws.Context, *cloudwatch.DescribeAnomalyDetectorsInput, func(*cloudwatch.DescribeAnomalyDetectorsOutput, bool) bool, ...request.Option) error + + DescribeInsightRules(*cloudwatch.DescribeInsightRulesInput) (*cloudwatch.DescribeInsightRulesOutput, error) + DescribeInsightRulesWithContext(aws.Context, *cloudwatch.DescribeInsightRulesInput, ...request.Option) (*cloudwatch.DescribeInsightRulesOutput, error) + DescribeInsightRulesRequest(*cloudwatch.DescribeInsightRulesInput) (*request.Request, *cloudwatch.DescribeInsightRulesOutput) + + DescribeInsightRulesPages(*cloudwatch.DescribeInsightRulesInput, func(*cloudwatch.DescribeInsightRulesOutput, bool) bool) error + DescribeInsightRulesPagesWithContext(aws.Context, *cloudwatch.DescribeInsightRulesInput, func(*cloudwatch.DescribeInsightRulesOutput, bool) bool, ...request.Option) error + + DisableAlarmActions(*cloudwatch.DisableAlarmActionsInput) (*cloudwatch.DisableAlarmActionsOutput, error) + DisableAlarmActionsWithContext(aws.Context, *cloudwatch.DisableAlarmActionsInput, ...request.Option) (*cloudwatch.DisableAlarmActionsOutput, error) + DisableAlarmActionsRequest(*cloudwatch.DisableAlarmActionsInput) (*request.Request, *cloudwatch.DisableAlarmActionsOutput) + + DisableInsightRules(*cloudwatch.DisableInsightRulesInput) (*cloudwatch.DisableInsightRulesOutput, error) + DisableInsightRulesWithContext(aws.Context, *cloudwatch.DisableInsightRulesInput, ...request.Option) (*cloudwatch.DisableInsightRulesOutput, error) + DisableInsightRulesRequest(*cloudwatch.DisableInsightRulesInput) (*request.Request, *cloudwatch.DisableInsightRulesOutput) + + EnableAlarmActions(*cloudwatch.EnableAlarmActionsInput) (*cloudwatch.EnableAlarmActionsOutput, error) + EnableAlarmActionsWithContext(aws.Context, 
*cloudwatch.EnableAlarmActionsInput, ...request.Option) (*cloudwatch.EnableAlarmActionsOutput, error) + EnableAlarmActionsRequest(*cloudwatch.EnableAlarmActionsInput) (*request.Request, *cloudwatch.EnableAlarmActionsOutput) + + EnableInsightRules(*cloudwatch.EnableInsightRulesInput) (*cloudwatch.EnableInsightRulesOutput, error) + EnableInsightRulesWithContext(aws.Context, *cloudwatch.EnableInsightRulesInput, ...request.Option) (*cloudwatch.EnableInsightRulesOutput, error) + EnableInsightRulesRequest(*cloudwatch.EnableInsightRulesInput) (*request.Request, *cloudwatch.EnableInsightRulesOutput) + + GetDashboard(*cloudwatch.GetDashboardInput) (*cloudwatch.GetDashboardOutput, error) + GetDashboardWithContext(aws.Context, *cloudwatch.GetDashboardInput, ...request.Option) (*cloudwatch.GetDashboardOutput, error) + GetDashboardRequest(*cloudwatch.GetDashboardInput) (*request.Request, *cloudwatch.GetDashboardOutput) + + GetInsightRuleReport(*cloudwatch.GetInsightRuleReportInput) (*cloudwatch.GetInsightRuleReportOutput, error) + GetInsightRuleReportWithContext(aws.Context, *cloudwatch.GetInsightRuleReportInput, ...request.Option) (*cloudwatch.GetInsightRuleReportOutput, error) + GetInsightRuleReportRequest(*cloudwatch.GetInsightRuleReportInput) (*request.Request, *cloudwatch.GetInsightRuleReportOutput) + + GetMetricData(*cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error) + GetMetricDataWithContext(aws.Context, *cloudwatch.GetMetricDataInput, ...request.Option) (*cloudwatch.GetMetricDataOutput, error) + GetMetricDataRequest(*cloudwatch.GetMetricDataInput) (*request.Request, *cloudwatch.GetMetricDataOutput) + + GetMetricDataPages(*cloudwatch.GetMetricDataInput, func(*cloudwatch.GetMetricDataOutput, bool) bool) error + GetMetricDataPagesWithContext(aws.Context, *cloudwatch.GetMetricDataInput, func(*cloudwatch.GetMetricDataOutput, bool) bool, ...request.Option) error + + GetMetricStatistics(*cloudwatch.GetMetricStatisticsInput) 
(*cloudwatch.GetMetricStatisticsOutput, error) + GetMetricStatisticsWithContext(aws.Context, *cloudwatch.GetMetricStatisticsInput, ...request.Option) (*cloudwatch.GetMetricStatisticsOutput, error) + GetMetricStatisticsRequest(*cloudwatch.GetMetricStatisticsInput) (*request.Request, *cloudwatch.GetMetricStatisticsOutput) + + GetMetricStream(*cloudwatch.GetMetricStreamInput) (*cloudwatch.GetMetricStreamOutput, error) + GetMetricStreamWithContext(aws.Context, *cloudwatch.GetMetricStreamInput, ...request.Option) (*cloudwatch.GetMetricStreamOutput, error) + GetMetricStreamRequest(*cloudwatch.GetMetricStreamInput) (*request.Request, *cloudwatch.GetMetricStreamOutput) + + GetMetricWidgetImage(*cloudwatch.GetMetricWidgetImageInput) (*cloudwatch.GetMetricWidgetImageOutput, error) + GetMetricWidgetImageWithContext(aws.Context, *cloudwatch.GetMetricWidgetImageInput, ...request.Option) (*cloudwatch.GetMetricWidgetImageOutput, error) + GetMetricWidgetImageRequest(*cloudwatch.GetMetricWidgetImageInput) (*request.Request, *cloudwatch.GetMetricWidgetImageOutput) + + ListDashboards(*cloudwatch.ListDashboardsInput) (*cloudwatch.ListDashboardsOutput, error) + ListDashboardsWithContext(aws.Context, *cloudwatch.ListDashboardsInput, ...request.Option) (*cloudwatch.ListDashboardsOutput, error) + ListDashboardsRequest(*cloudwatch.ListDashboardsInput) (*request.Request, *cloudwatch.ListDashboardsOutput) + + ListDashboardsPages(*cloudwatch.ListDashboardsInput, func(*cloudwatch.ListDashboardsOutput, bool) bool) error + ListDashboardsPagesWithContext(aws.Context, *cloudwatch.ListDashboardsInput, func(*cloudwatch.ListDashboardsOutput, bool) bool, ...request.Option) error + + ListMetricStreams(*cloudwatch.ListMetricStreamsInput) (*cloudwatch.ListMetricStreamsOutput, error) + ListMetricStreamsWithContext(aws.Context, *cloudwatch.ListMetricStreamsInput, ...request.Option) (*cloudwatch.ListMetricStreamsOutput, error) + ListMetricStreamsRequest(*cloudwatch.ListMetricStreamsInput) (*request.Request, 
*cloudwatch.ListMetricStreamsOutput) + + ListMetricStreamsPages(*cloudwatch.ListMetricStreamsInput, func(*cloudwatch.ListMetricStreamsOutput, bool) bool) error + ListMetricStreamsPagesWithContext(aws.Context, *cloudwatch.ListMetricStreamsInput, func(*cloudwatch.ListMetricStreamsOutput, bool) bool, ...request.Option) error + + ListMetrics(*cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) + ListMetricsWithContext(aws.Context, *cloudwatch.ListMetricsInput, ...request.Option) (*cloudwatch.ListMetricsOutput, error) + ListMetricsRequest(*cloudwatch.ListMetricsInput) (*request.Request, *cloudwatch.ListMetricsOutput) + + ListMetricsPages(*cloudwatch.ListMetricsInput, func(*cloudwatch.ListMetricsOutput, bool) bool) error + ListMetricsPagesWithContext(aws.Context, *cloudwatch.ListMetricsInput, func(*cloudwatch.ListMetricsOutput, bool) bool, ...request.Option) error + + ListTagsForResource(*cloudwatch.ListTagsForResourceInput) (*cloudwatch.ListTagsForResourceOutput, error) + ListTagsForResourceWithContext(aws.Context, *cloudwatch.ListTagsForResourceInput, ...request.Option) (*cloudwatch.ListTagsForResourceOutput, error) + ListTagsForResourceRequest(*cloudwatch.ListTagsForResourceInput) (*request.Request, *cloudwatch.ListTagsForResourceOutput) + + PutAnomalyDetector(*cloudwatch.PutAnomalyDetectorInput) (*cloudwatch.PutAnomalyDetectorOutput, error) + PutAnomalyDetectorWithContext(aws.Context, *cloudwatch.PutAnomalyDetectorInput, ...request.Option) (*cloudwatch.PutAnomalyDetectorOutput, error) + PutAnomalyDetectorRequest(*cloudwatch.PutAnomalyDetectorInput) (*request.Request, *cloudwatch.PutAnomalyDetectorOutput) + + PutCompositeAlarm(*cloudwatch.PutCompositeAlarmInput) (*cloudwatch.PutCompositeAlarmOutput, error) + PutCompositeAlarmWithContext(aws.Context, *cloudwatch.PutCompositeAlarmInput, ...request.Option) (*cloudwatch.PutCompositeAlarmOutput, error) + PutCompositeAlarmRequest(*cloudwatch.PutCompositeAlarmInput) (*request.Request, 
*cloudwatch.PutCompositeAlarmOutput) + + PutDashboard(*cloudwatch.PutDashboardInput) (*cloudwatch.PutDashboardOutput, error) + PutDashboardWithContext(aws.Context, *cloudwatch.PutDashboardInput, ...request.Option) (*cloudwatch.PutDashboardOutput, error) + PutDashboardRequest(*cloudwatch.PutDashboardInput) (*request.Request, *cloudwatch.PutDashboardOutput) + + PutInsightRule(*cloudwatch.PutInsightRuleInput) (*cloudwatch.PutInsightRuleOutput, error) + PutInsightRuleWithContext(aws.Context, *cloudwatch.PutInsightRuleInput, ...request.Option) (*cloudwatch.PutInsightRuleOutput, error) + PutInsightRuleRequest(*cloudwatch.PutInsightRuleInput) (*request.Request, *cloudwatch.PutInsightRuleOutput) + + PutMetricAlarm(*cloudwatch.PutMetricAlarmInput) (*cloudwatch.PutMetricAlarmOutput, error) + PutMetricAlarmWithContext(aws.Context, *cloudwatch.PutMetricAlarmInput, ...request.Option) (*cloudwatch.PutMetricAlarmOutput, error) + PutMetricAlarmRequest(*cloudwatch.PutMetricAlarmInput) (*request.Request, *cloudwatch.PutMetricAlarmOutput) + + PutMetricData(*cloudwatch.PutMetricDataInput) (*cloudwatch.PutMetricDataOutput, error) + PutMetricDataWithContext(aws.Context, *cloudwatch.PutMetricDataInput, ...request.Option) (*cloudwatch.PutMetricDataOutput, error) + PutMetricDataRequest(*cloudwatch.PutMetricDataInput) (*request.Request, *cloudwatch.PutMetricDataOutput) + + PutMetricStream(*cloudwatch.PutMetricStreamInput) (*cloudwatch.PutMetricStreamOutput, error) + PutMetricStreamWithContext(aws.Context, *cloudwatch.PutMetricStreamInput, ...request.Option) (*cloudwatch.PutMetricStreamOutput, error) + PutMetricStreamRequest(*cloudwatch.PutMetricStreamInput) (*request.Request, *cloudwatch.PutMetricStreamOutput) + + SetAlarmState(*cloudwatch.SetAlarmStateInput) (*cloudwatch.SetAlarmStateOutput, error) + SetAlarmStateWithContext(aws.Context, *cloudwatch.SetAlarmStateInput, ...request.Option) (*cloudwatch.SetAlarmStateOutput, error) + SetAlarmStateRequest(*cloudwatch.SetAlarmStateInput) 
(*request.Request, *cloudwatch.SetAlarmStateOutput) + + StartMetricStreams(*cloudwatch.StartMetricStreamsInput) (*cloudwatch.StartMetricStreamsOutput, error) + StartMetricStreamsWithContext(aws.Context, *cloudwatch.StartMetricStreamsInput, ...request.Option) (*cloudwatch.StartMetricStreamsOutput, error) + StartMetricStreamsRequest(*cloudwatch.StartMetricStreamsInput) (*request.Request, *cloudwatch.StartMetricStreamsOutput) + + StopMetricStreams(*cloudwatch.StopMetricStreamsInput) (*cloudwatch.StopMetricStreamsOutput, error) + StopMetricStreamsWithContext(aws.Context, *cloudwatch.StopMetricStreamsInput, ...request.Option) (*cloudwatch.StopMetricStreamsOutput, error) + StopMetricStreamsRequest(*cloudwatch.StopMetricStreamsInput) (*request.Request, *cloudwatch.StopMetricStreamsOutput) + + TagResource(*cloudwatch.TagResourceInput) (*cloudwatch.TagResourceOutput, error) + TagResourceWithContext(aws.Context, *cloudwatch.TagResourceInput, ...request.Option) (*cloudwatch.TagResourceOutput, error) + TagResourceRequest(*cloudwatch.TagResourceInput) (*request.Request, *cloudwatch.TagResourceOutput) + + UntagResource(*cloudwatch.UntagResourceInput) (*cloudwatch.UntagResourceOutput, error) + UntagResourceWithContext(aws.Context, *cloudwatch.UntagResourceInput, ...request.Option) (*cloudwatch.UntagResourceOutput, error) + UntagResourceRequest(*cloudwatch.UntagResourceInput) (*request.Request, *cloudwatch.UntagResourceOutput) + + WaitUntilAlarmExists(*cloudwatch.DescribeAlarmsInput) error + WaitUntilAlarmExistsWithContext(aws.Context, *cloudwatch.DescribeAlarmsInput, ...request.WaiterOption) error + + WaitUntilCompositeAlarmExists(*cloudwatch.DescribeAlarmsInput) error + WaitUntilCompositeAlarmExistsWithContext(aws.Context, *cloudwatch.DescribeAlarmsInput, ...request.WaiterOption) error +} + +var _ CloudWatchAPI = (*cloudwatch.CloudWatch)(nil) diff --git a/sdk/service/cloudwatch/doc.go b/sdk/service/cloudwatch/doc.go new file mode 100644 index 0000000000..deffff1b0a --- /dev/null 
+++ b/sdk/service/cloudwatch/doc.go @@ -0,0 +1,43 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package cloudwatch provides the client and types for making API +// requests to Amazon CloudWatch. +// +// Amazon CloudWatch monitors your Amazon Web Services (Amazon Web Services) +// resources and the applications you run on Amazon Web Services in real time. +// You can use CloudWatch to collect and track metrics, which are the variables +// you want to measure for your resources and applications. +// +// CloudWatch alarms send notifications or automatically change the resources +// you are monitoring based on rules that you define. For example, you can monitor +// the CPU usage and disk reads and writes of your Amazon EC2 instances. Then, +// use this data to determine whether you should launch additional instances +// to handle increased load. You can also use this data to stop under-used instances +// to save money. +// +// In addition to monitoring the built-in metrics that come with Amazon Web +// Services, you can monitor your own custom metrics. With CloudWatch, you gain +// system-wide visibility into resource utilization, application performance, +// and operational health. +// +// See https://docs.aws.amazon.com/goto/WebAPI/monitoring-2010-08-01 for more information on this service. +// +// See cloudwatch package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/cloudwatch/ +// +// # Using the Client +// +// To contact Amazon CloudWatch with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. 
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon CloudWatch client CloudWatch for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/cloudwatch/#New +package cloudwatch diff --git a/sdk/service/cloudwatch/errors.go b/sdk/service/cloudwatch/errors.go new file mode 100644 index 0000000000..77d0ded200 --- /dev/null +++ b/sdk/service/cloudwatch/errors.go @@ -0,0 +1,84 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudwatch + +const ( + + // ErrCodeConcurrentModificationException for service response error code + // "ConcurrentModificationException". + // + // More than one process tried to modify a resource at the same time. + ErrCodeConcurrentModificationException = "ConcurrentModificationException" + + // ErrCodeDashboardInvalidInputError for service response error code + // "InvalidParameterInput". + // + // Some part of the dashboard data is invalid. + ErrCodeDashboardInvalidInputError = "InvalidParameterInput" + + // ErrCodeDashboardNotFoundError for service response error code + // "ResourceNotFound". + // + // The specified dashboard does not exist. + ErrCodeDashboardNotFoundError = "ResourceNotFound" + + // ErrCodeInternalServiceFault for service response error code + // "InternalServiceError". + // + // Request processing has failed due to some unknown error, exception, or failure. + ErrCodeInternalServiceFault = "InternalServiceError" + + // ErrCodeInvalidFormatFault for service response error code + // "InvalidFormat". + // + // Data was not syntactically valid JSON. + ErrCodeInvalidFormatFault = "InvalidFormat" + + // ErrCodeInvalidNextToken for service response error code + // "InvalidNextToken". + // + // The next token specified is invalid. + ErrCodeInvalidNextToken = "InvalidNextToken" + + // ErrCodeInvalidParameterCombinationException for service response error code + // "InvalidParameterCombination". 
+ // + // Parameters were used together that cannot be used together. + ErrCodeInvalidParameterCombinationException = "InvalidParameterCombination" + + // ErrCodeInvalidParameterValueException for service response error code + // "InvalidParameterValue". + // + // The value of an input parameter is bad or out-of-range. + ErrCodeInvalidParameterValueException = "InvalidParameterValue" + + // ErrCodeLimitExceededException for service response error code + // "LimitExceededException". + // + // The operation exceeded one or more limits. + ErrCodeLimitExceededException = "LimitExceededException" + + // ErrCodeLimitExceededFault for service response error code + // "LimitExceeded". + // + // The quota for alarms for this customer has already been reached. + ErrCodeLimitExceededFault = "LimitExceeded" + + // ErrCodeMissingRequiredParameterException for service response error code + // "MissingParameter". + // + // An input parameter that is required is missing. + ErrCodeMissingRequiredParameterException = "MissingParameter" + + // ErrCodeResourceNotFound for service response error code + // "ResourceNotFound". + // + // The named resource does not exist. + ErrCodeResourceNotFound = "ResourceNotFound" + + // ErrCodeResourceNotFoundException for service response error code + // "ResourceNotFoundException". + // + // The named resource does not exist. + ErrCodeResourceNotFoundException = "ResourceNotFoundException" +) diff --git a/sdk/service/cloudwatch/integ_test.go b/sdk/service/cloudwatch/integ_test.go new file mode 100644 index 0000000000..0e3952c1fe --- /dev/null +++ b/sdk/service/cloudwatch/integ_test.go @@ -0,0 +1,70 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +//go:build go1.16 && integration +// +build go1.16,integration + +package cloudwatch_test + +import ( + "context" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/awstesting/integration" + "github.com/aws/aws-sdk-go/service/cloudwatch" +) + +var _ aws.Config +var _ awserr.Error +var _ request.Request + +func TestInteg_00_ListMetrics(t *testing.T) { + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + sess := integration.SessionWithDefaultRegion("us-west-2") + svc := cloudwatch.New(sess) + params := &cloudwatch.ListMetricsInput{ + Namespace: aws.String("AWS/EC2"), + } + _, err := svc.ListMetricsWithContext(ctx, params, func(r *request.Request) { + r.Handlers.Validate.RemoveByName("core.ValidateParametersHandler") + }) + if err != nil { + t.Errorf("expect no error, got %v", err) + } +} +func TestInteg_01_SetAlarmState(t *testing.T) { + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + sess := integration.SessionWithDefaultRegion("us-west-2") + svc := cloudwatch.New(sess) + params := &cloudwatch.SetAlarmStateInput{ + AlarmName: aws.String("abc"), + StateReason: aws.String("xyz"), + StateValue: aws.String("mno"), + } + _, err := svc.SetAlarmStateWithContext(ctx, params, func(r *request.Request) { + r.Handlers.Validate.RemoveByName("core.ValidateParametersHandler") + }) + if err == nil { + t.Fatalf("expect request to fail") + } + aerr, ok := err.(awserr.RequestFailure) + if !ok { + t.Fatalf("expect awserr, was %T", err) + } + if len(aerr.Code()) == 0 { + t.Errorf("expect non-empty error code") + } + if len(aerr.Message()) == 0 { + t.Errorf("expect non-empty error message") + } + if v := aerr.Code(); v == request.ErrCodeSerialization { + t.Errorf("expect API error code got serialization failure") + } +} diff --git a/sdk/service/cloudwatch/service.go 
b/sdk/service/cloudwatch/service.go new file mode 100644 index 0000000000..a581f22a33 --- /dev/null +++ b/sdk/service/cloudwatch/service.go @@ -0,0 +1,104 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudwatch + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +// CloudWatch provides the API operation methods for making requests to +// Amazon CloudWatch. See this package's package overview docs +// for details on the service. +// +// CloudWatch methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. +type CloudWatch struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "monitoring" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "CloudWatch" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the CloudWatch client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// +// mySession := session.Must(session.NewSession()) +// +// // Create a CloudWatch client from just a session. +// svc := cloudwatch.New(mySession) +// +// // Create a CloudWatch client with additional configuration +// svc := cloudwatch.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatch { + c := p.ClientConfig(EndpointsID, cfgs...) 
+ if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = EndpointsID + // No Fallback + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *CloudWatch { + svc := &CloudWatch{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2010-08-01", + ResolvedRegion: resolvedRegion, + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a CloudWatch operation and runs any +// custom request initialization. +func (c *CloudWatch) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/sdk/service/cloudwatch/waiters.go b/sdk/service/cloudwatch/waiters.go new file mode 100644 index 0000000000..164d306c45 --- /dev/null +++ b/sdk/service/cloudwatch/waiters.go @@ -0,0 +1,102 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package cloudwatch + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// WaitUntilAlarmExists uses the CloudWatch API operation +// DescribeAlarms to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *CloudWatch) WaitUntilAlarmExists(input *DescribeAlarmsInput) error { + return c.WaitUntilAlarmExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilAlarmExistsWithContext is an extended version of WaitUntilAlarmExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) WaitUntilAlarmExistsWithContext(ctx aws.Context, input *DescribeAlarmsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilAlarmExists", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "length(MetricAlarms[]) > `0`", + Expected: true, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeAlarmsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeAlarmsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilCompositeAlarmExists uses the CloudWatch API operation +// DescribeAlarms to wait for a condition to be met before returning. 
+// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *CloudWatch) WaitUntilCompositeAlarmExists(input *DescribeAlarmsInput) error { + return c.WaitUntilCompositeAlarmExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilCompositeAlarmExistsWithContext is an extended version of WaitUntilCompositeAlarmExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatch) WaitUntilCompositeAlarmExistsWithContext(ctx aws.Context, input *DescribeAlarmsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilCompositeAlarmExists", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "length(CompositeAlarms[]) > `0`", + Expected: true, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeAlarmsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeAlarmsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) 
+ + return w.WaitWithContext(ctx) +} From 4cc51c026172beb35fb1c9fdd7fbebfc203d806e Mon Sep 17 00:00:00 2001 From: Jason Polanco Date: Mon, 23 Sep 2024 14:40:29 -0400 Subject: [PATCH 09/47] Convert collectd and statsd service and environment attributes to telegraf tags (#805) --- translator/processRuleToApplyAndMerge.go | 20 ++++++++++ .../sampleConfig/collectd_config_linux.conf | 2 + .../sampleConfig/compass_linux_config.conf | 8 ++-- .../sampleConfig/complete_darwin_config.conf | 4 ++ .../sampleConfig/complete_linux_config.conf | 4 ++ .../sampleConfig/complete_windows_config.conf | 2 + .../sampleConfig/statsd_config_linux.conf | 2 + .../sampleConfig/statsd_config_windows.conf | 2 + .../totomlconfig/testdata/agentToml.conf | 4 ++ .../tomlConfigTemplate/tomlConfig.go | 10 ----- .../metrics_collect/collectd/collectd.go | 2 +- .../metrics_collect/collectd/collectd_test.go | 10 ++--- .../collectd/ruleDeploymentEnvironment.go | 8 ++-- .../collectd/ruleServiceName.go | 8 ++-- .../statsd/ruleDeploymentEnvironment.go | 8 ++-- .../metrics_collect/statsd/ruleServiceName.go | 8 ++-- .../metrics/metrics_collect/statsd/statsd.go | 2 +- .../metrics_collect/statsd/statsd_test.go | 40 +++++++++---------- 18 files changed, 84 insertions(+), 60 deletions(-) create mode 100644 translator/processRuleToApplyAndMerge.go diff --git a/translator/processRuleToApplyAndMerge.go b/translator/processRuleToApplyAndMerge.go new file mode 100644 index 0000000000..267f8de1dd --- /dev/null +++ b/translator/processRuleToApplyAndMerge.go @@ -0,0 +1,20 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package translator + +import ( + "golang.org/x/exp/maps" +) + +func ProcessRuleToMergeAndApply(input interface{}, childRule map[string]Rule, result map[string]interface{}) map[string]interface{} { + for _, rule := range childRule { + key, val := rule.ApplyRule(input) + if _, ok := result[key]; ok { + maps.Copy(result[key].(map[string]interface{}), val.(map[string]interface{})) + } else if key != "" { + result[key] = val + } + } + return result +} diff --git a/translator/tocwconfig/sampleConfig/collectd_config_linux.conf b/translator/tocwconfig/sampleConfig/collectd_config_linux.conf index 371c03de4f..84caa16782 100644 --- a/translator/tocwconfig/sampleConfig/collectd_config_linux.conf +++ b/translator/tocwconfig/sampleConfig/collectd_config_linux.conf @@ -25,6 +25,8 @@ service_address = "udp://127.0.0.1:25826" [inputs.socket_listener.tags] "aws:AggregationInterval" = "60s" + "deployment.environment" = "" + "service.name" = "" [outputs] diff --git a/translator/tocwconfig/sampleConfig/compass_linux_config.conf b/translator/tocwconfig/sampleConfig/compass_linux_config.conf index 01bca78370..f9c82ac5f3 100644 --- a/translator/tocwconfig/sampleConfig/compass_linux_config.conf +++ b/translator/tocwconfig/sampleConfig/compass_linux_config.conf @@ -50,22 +50,22 @@ collectd_security_level = "encrypt" collectd_typesdb = ["/usr/share/collectd/types.db"] data_format = "collectd" - deployment_environment = "plugin-level-environment" name_prefix = "collectd_" service_address = "udp://127.0.0.1:25826" - service_name = "plugin-level-service" [inputs.socket_listener.tags] "aws:AggregationInterval" = "60s" + "deployment.environment" = "plugin-level-environment" + "service.name" = "plugin-level-service" [[inputs.statsd]] - deployment_environment = "agent-level-environment" interval = "10s" metric_separator = "_" parse_data_dog_tags = true service_address = ":8125" - service_name = "metric-level-service" [inputs.statsd.tags] 
"aws:AggregationInterval" = "60s" + "deployment.environment" = "agent-level-environment" + "service.name" = "metric-level-service" [outputs] diff --git a/translator/tocwconfig/sampleConfig/complete_darwin_config.conf b/translator/tocwconfig/sampleConfig/complete_darwin_config.conf index dd69467e71..db97be5d77 100644 --- a/translator/tocwconfig/sampleConfig/complete_darwin_config.conf +++ b/translator/tocwconfig/sampleConfig/complete_darwin_config.conf @@ -103,6 +103,8 @@ service_address = "udp://127.0.0.1:25826" [inputs.socket_listener.tags] "aws:AggregationInterval" = "60s" + "deployment.environment" = "" + "service.name" = "" [[inputs.statsd]] interval = "10s" @@ -111,6 +113,8 @@ service_address = ":8125" [inputs.statsd.tags] "aws:AggregationInterval" = "60s" + "deployment.environment" = "" + "service.name" = "" [[inputs.swap]] fieldpass = ["used", "free", "used_percent"] diff --git a/translator/tocwconfig/sampleConfig/complete_linux_config.conf b/translator/tocwconfig/sampleConfig/complete_linux_config.conf index aef5f442db..5702fcd8e7 100644 --- a/translator/tocwconfig/sampleConfig/complete_linux_config.conf +++ b/translator/tocwconfig/sampleConfig/complete_linux_config.conf @@ -103,6 +103,8 @@ service_address = "udp://127.0.0.1:25826" [inputs.socket_listener.tags] "aws:AggregationInterval" = "60s" + "deployment.environment" = "" + "service.name" = "" [[inputs.statsd]] interval = "10s" @@ -111,6 +113,8 @@ service_address = ":8125" [inputs.statsd.tags] "aws:AggregationInterval" = "60s" + "deployment.environment" = "" + "service.name" = "" [[inputs.swap]] fieldpass = ["used", "free", "used_percent"] diff --git a/translator/tocwconfig/sampleConfig/complete_windows_config.conf b/translator/tocwconfig/sampleConfig/complete_windows_config.conf index 9d06e7e62b..5a4add2254 100644 --- a/translator/tocwconfig/sampleConfig/complete_windows_config.conf +++ b/translator/tocwconfig/sampleConfig/complete_windows_config.conf @@ -50,6 +50,8 @@ service_address = ":8125" 
[inputs.statsd.tags] "aws:AggregationInterval" = "60s" + "deployment.environment" = "" + "service.name" = "" [[inputs.win_perf_counters]] DisableReplacer = true diff --git a/translator/tocwconfig/sampleConfig/statsd_config_linux.conf b/translator/tocwconfig/sampleConfig/statsd_config_linux.conf index cbf53c502d..ab7867a003 100644 --- a/translator/tocwconfig/sampleConfig/statsd_config_linux.conf +++ b/translator/tocwconfig/sampleConfig/statsd_config_linux.conf @@ -23,6 +23,8 @@ service_address = ":8125" [inputs.statsd.tags] "aws:StorageResolution" = "true" + "deployment.environment" = "" + "service.name" = "" [outputs] diff --git a/translator/tocwconfig/sampleConfig/statsd_config_windows.conf b/translator/tocwconfig/sampleConfig/statsd_config_windows.conf index 7db7ad9c19..bb3b7fd983 100644 --- a/translator/tocwconfig/sampleConfig/statsd_config_windows.conf +++ b/translator/tocwconfig/sampleConfig/statsd_config_windows.conf @@ -23,6 +23,8 @@ service_address = ":8125" [inputs.statsd.tags] "aws:StorageResolution" = "true" + "deployment.environment" = "" + "service.name" = "" [outputs] diff --git a/translator/tocwconfig/totomlconfig/testdata/agentToml.conf b/translator/tocwconfig/totomlconfig/testdata/agentToml.conf index 1bbd040d06..82fa874fce 100644 --- a/translator/tocwconfig/totomlconfig/testdata/agentToml.conf +++ b/translator/tocwconfig/totomlconfig/testdata/agentToml.conf @@ -103,6 +103,8 @@ service_address = "udp://127.0.0.1:25826" [inputs.socket_listener.tags] "aws:AggregationInterval" = "60s" + "deployment.environment" = "" + "service.name" = "" [[inputs.statsd]] interval = "10s" @@ -111,6 +113,8 @@ service_address = ":8125" [inputs.statsd.tags] "aws:AggregationInterval" = "60s" + "deployment.environment" = "" + "service.name" = "" [[inputs.swap]] fieldpass = ["used", "free", "used_percent"] diff --git a/translator/tocwconfig/totomlconfig/tomlConfigTemplate/tomlConfig.go b/translator/tocwconfig/totomlconfig/tomlConfigTemplate/tomlConfig.go index 
e0eee0978f..2a26ad9bb2 100644 --- a/translator/tocwconfig/totomlconfig/tomlConfigTemplate/tomlConfig.go +++ b/translator/tocwconfig/totomlconfig/tomlConfigTemplate/tomlConfig.go @@ -219,11 +219,6 @@ type ( NameOverride string `toml:"name_override"` ServiceAddress string `toml:"service_address"` Tags map[string]string - - //Customer specified service.name - ServiceName string `toml:"service_name"` - //Customer specified deployment.environment - DeploymentEnvironment string `toml:"deployment_environment"` } statsdConfig struct { @@ -233,11 +228,6 @@ type ( ParseDataDogTags bool `toml:"parse_data_dog_tags"` ServiceAddress string `toml:"service_address"` Tags map[string]string - - //Customer specified service.name - ServiceName string `toml:"service_name"` - //Customer specified deployment.environment - DeploymentEnvironment string `toml:"deployment_environment"` } swapConfig struct { diff --git a/translator/translate/metrics/metrics_collect/collectd/collectd.go b/translator/translate/metrics/metrics_collect/collectd/collectd.go index 3ebb6496c7..d59ae70756 100644 --- a/translator/translate/metrics/metrics_collect/collectd/collectd.go +++ b/translator/translate/metrics/metrics_collect/collectd/collectd.go @@ -51,7 +51,7 @@ func (obj *CollectD) ApplyRule(input interface{}) (returnKey string, returnVal i } else { //If exists, process it //Check if there are some config entry with rules applied - result = translator.ProcessRuleToApply(m[SectionKey], ChildRule, result) + result = translator.ProcessRuleToMergeAndApply(m[SectionKey], ChildRule, result) resArray = append(resArray, result) returnKey = SectionMappedKey returnVal = resArray diff --git a/translator/translate/metrics/metrics_collect/collectd/collectd_test.go b/translator/translate/metrics/metrics_collect/collectd/collectd_test.go index 2407eabac1..ed11e36471 100644 --- a/translator/translate/metrics/metrics_collect/collectd/collectd_test.go +++ 
b/translator/translate/metrics/metrics_collect/collectd/collectd_test.go @@ -33,9 +33,8 @@ func TestCollectD_HappyCase(t *testing.T) { "collectd_auth_file": "/etc/collectd/_auth_file", "collectd_security_level": "none", "collectd_typesdb": []interface{}{"/usr/share/collectd/types.db", "/custom_location/types.db"}, - "tags": map[string]interface{}{"aws:AggregationInterval": "30s"}, - "service_name": "", - "deployment_environment": "", + "tags": map[string]interface{}{"aws:AggregationInterval": "30s", + "deployment.environment": "", "service.name": ""}, }, } @@ -58,9 +57,8 @@ func TestCollectD_MinimumConfig(t *testing.T) { "collectd_auth_file": "/etc/collectd/auth_file", "collectd_security_level": "encrypt", "collectd_typesdb": []interface{}{"/usr/share/collectd/types.db"}, - "tags": map[string]interface{}{"aws:AggregationInterval": "60s"}, - "service_name": "", - "deployment_environment": "", + "tags": map[string]interface{}{"aws:AggregationInterval": "60s", + "deployment.environment": "", "service.name": ""}, }, } diff --git a/translator/translate/metrics/metrics_collect/collectd/ruleDeploymentEnvironment.go b/translator/translate/metrics/metrics_collect/collectd/ruleDeploymentEnvironment.go index ab627fe041..dc674fc1af 100644 --- a/translator/translate/metrics/metrics_collect/collectd/ruleDeploymentEnvironment.go +++ b/translator/translate/metrics/metrics_collect/collectd/ruleDeploymentEnvironment.go @@ -13,14 +13,14 @@ type DeploymentEnvironment struct { const SectionkeyDeploymentEnvironment = "deployment.environment" -func (obj *DeploymentEnvironment) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { - _, returnVal = translator.DefaultCase(SectionkeyDeploymentEnvironment, "", input) - returnKey = "deployment_environment" +func (obj *DeploymentEnvironment) ApplyRule(input interface{}) (string, interface{}) { + _, returnVal := translator.DefaultCase(SectionkeyDeploymentEnvironment, "", input) + returnKey := "deployment.environment" if 
returnVal == "" { returnVal = metrics.GlobalMetricConfig.DeploymentEnvironment } - return + return "tags", map[string]interface{}{returnKey: returnVal} } func init() { diff --git a/translator/translate/metrics/metrics_collect/collectd/ruleServiceName.go b/translator/translate/metrics/metrics_collect/collectd/ruleServiceName.go index 412cc920d1..de083dcbf1 100644 --- a/translator/translate/metrics/metrics_collect/collectd/ruleServiceName.go +++ b/translator/translate/metrics/metrics_collect/collectd/ruleServiceName.go @@ -13,14 +13,14 @@ type ServiceName struct { const SectionkeyServicename = "service.name" -func (obj *ServiceName) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { - _, returnVal = translator.DefaultCase(SectionkeyServicename, "", input) - returnKey = "service_name" +func (obj *ServiceName) ApplyRule(input interface{}) (string, interface{}) { + _, returnVal := translator.DefaultCase(SectionkeyServicename, "", input) + returnKey := "service.name" if returnVal == "" { returnVal = metrics.GlobalMetricConfig.ServiceName } - return + return "tags", map[string]interface{}{returnKey: returnVal} } func init() { diff --git a/translator/translate/metrics/metrics_collect/statsd/ruleDeploymentEnvironment.go b/translator/translate/metrics/metrics_collect/statsd/ruleDeploymentEnvironment.go index 48b3b38fc8..d1b58ee5ab 100644 --- a/translator/translate/metrics/metrics_collect/statsd/ruleDeploymentEnvironment.go +++ b/translator/translate/metrics/metrics_collect/statsd/ruleDeploymentEnvironment.go @@ -13,14 +13,14 @@ type DeploymentEnvironment struct { const SectionkeyDeploymentEnvironment = "deployment.environment" -func (obj *DeploymentEnvironment) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { - _, returnVal = translator.DefaultCase(SectionkeyDeploymentEnvironment, "", input) - returnKey = "deployment_environment" +func (obj *DeploymentEnvironment) ApplyRule(input interface{}) (string, interface{}) { + _, 
returnVal := translator.DefaultCase(SectionkeyDeploymentEnvironment, "", input) + returnKey := "deployment.environment" if returnVal == "" { returnVal = metrics.GlobalMetricConfig.DeploymentEnvironment } - return + return "tags", map[string]interface{}{returnKey: returnVal} } func init() { diff --git a/translator/translate/metrics/metrics_collect/statsd/ruleServiceName.go b/translator/translate/metrics/metrics_collect/statsd/ruleServiceName.go index d6f1b88eb5..d45214da7e 100644 --- a/translator/translate/metrics/metrics_collect/statsd/ruleServiceName.go +++ b/translator/translate/metrics/metrics_collect/statsd/ruleServiceName.go @@ -13,14 +13,14 @@ type ServiceName struct { const SectionkeyServicename = "service.name" -func (obj *ServiceName) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { - _, returnVal = translator.DefaultCase(SectionkeyServicename, "", input) - returnKey = "service_name" +func (obj *ServiceName) ApplyRule(input interface{}) (string, interface{}) { + _, returnVal := translator.DefaultCase(SectionkeyServicename, "", input) + returnKey := "service.name" if returnVal == "" { returnVal = metrics.GlobalMetricConfig.ServiceName } - return + return "tags", map[string]interface{}{returnKey: returnVal} } func init() { diff --git a/translator/translate/metrics/metrics_collect/statsd/statsd.go b/translator/translate/metrics/metrics_collect/statsd/statsd.go index d979692832..6f3e3cdde6 100644 --- a/translator/translate/metrics/metrics_collect/statsd/statsd.go +++ b/translator/translate/metrics/metrics_collect/statsd/statsd.go @@ -47,7 +47,7 @@ func (obj *StatsD) ApplyRule(input interface{}) (returnKey string, returnVal int } else { //If exists, process it //Check if there are some config entry with rules applied - result = translator.ProcessRuleToApply(m[SectionKey], ChildRule, result) + result = translator.ProcessRuleToMergeAndApply(m[SectionKey], ChildRule, result) resArray = append(resArray, result) returnKey = SectionKey 
returnVal = resArray diff --git a/translator/translate/metrics/metrics_collect/statsd/statsd_test.go b/translator/translate/metrics/metrics_collect/statsd/statsd_test.go index 9d5705a1ee..d4914b8e43 100644 --- a/translator/translate/metrics/metrics_collect/statsd/statsd_test.go +++ b/translator/translate/metrics/metrics_collect/statsd/statsd_test.go @@ -29,9 +29,8 @@ func TestStatsD_HappyCase(t *testing.T) { "service_address": ":12345", "interval": "5s", "parse_data_dog_tags": true, - "tags": map[string]interface{}{"aws:AggregationInterval": "30s"}, - "service_name": "", - "deployment_environment": "", + "tags": map[string]interface{}{"aws:AggregationInterval": "30s", + "deployment.environment": "", "service.name": ""}, }, } @@ -48,12 +47,11 @@ func TestStatsD_MinimumConfig(t *testing.T) { expect := []interface{}{ map[string]interface{}{ - "service_address": ":8125", - "interval": "10s", - "parse_data_dog_tags": true, - "tags": map[string]interface{}{"aws:AggregationInterval": "60s"}, - "service_name": "", - "deployment_environment": "", + "service_address": ":8125", + "interval": "10s", + "parse_data_dog_tags": true, + "tags": map[string]interface{}{"aws:AggregationInterval": "60s", + "deployment.environment": "", "service.name": ""}, }, } @@ -72,12 +70,11 @@ func TestStatsD_DisableAggregation(t *testing.T) { expect := []interface{}{ map[string]interface{}{ - "service_address": ":8125", - "interval": "10s", - "parse_data_dog_tags": true, - "tags": map[string]interface{}{"aws:StorageResolution": "true"}, - "service_name": "", - "deployment_environment": "", + "service_address": ":8125", + "interval": "10s", + "parse_data_dog_tags": true, + "tags": map[string]interface{}{"aws:StorageResolution": "true", + "deployment.environment": "", "service.name": ""}, }, } @@ -96,13 +93,12 @@ func TestStatsD_MetricSeparator(t *testing.T) { expect := []interface{}{ map[string]interface{}{ - "service_address": ":8125", - "interval": "10s", - "parse_data_dog_tags": true, - "tags": 
map[string]interface{}{"aws:AggregationInterval": "60s"}, - "metric_separator": ".", - "service_name": "", - "deployment_environment": "", + "service_address": ":8125", + "interval": "10s", + "parse_data_dog_tags": true, + "tags": map[string]interface{}{"aws:AggregationInterval": "60s", + "deployment.environment": "", "service.name": ""}, + "metric_separator": ".", }, } From 9751e88fe592f4454f3b8e571de1509ab02a7bda Mon Sep 17 00:00:00 2001 From: POOJA REDDY NATHALA Date: Tue, 24 Sep 2024 16:29:43 -0400 Subject: [PATCH 10/47] Implemented mtls to the agent server (#803) --- extension/server/extension.go | 59 ++++++++++---- extension/server/extension_test.go | 124 +++++++++++++++++++++++------ internal/tls/config.go | 1 + 3 files changed, 145 insertions(+), 39 deletions(-) diff --git a/extension/server/extension.go b/extension/server/extension.go index bc90a44546..514db40641 100644 --- a/extension/server/extension.go +++ b/extension/server/extension.go @@ -5,7 +5,9 @@ package server import ( "context" + "crypto/tls" "net/http" + "time" "github.com/gin-gonic/gin" jsoniter "github.com/json-iterator/go" @@ -14,13 +16,20 @@ import ( "go.uber.org/zap" "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" + tlsInternal "github.com/aws/amazon-cloudwatch-agent/internal/tls" +) + +const ( + tlsServerCertFilePath = "/etc/amazon-cloudwatch-observability-agent-server-cert/server.crt" + tlsServerKeyFilePath = "/etc/amazon-cloudwatch-observability-agent-server-cert/server.key" + caFilePath = "/etc/amazon-cloudwatch-observability-agent-client-cert/tls-ca.crt" ) type Server struct { logger *zap.Logger config *Config - server *http.Server jsonMarshaller jsoniter.API + httpsServer *http.Server } var _ extension.Extension = (*Server)(nil) @@ -40,29 +49,49 @@ func NewServer(logger *zap.Logger, config *Config) *Server { jsonMarshaller: jsoniter.ConfigCompatibleWithStandardLibrary, } gin.SetMode(gin.ReleaseMode) - router := gin.New() - s.setRouter(router) - s.server = 
&http.Server{ - Addr: config.ListenAddress, - Handler: router, + + tlsConfig, err := getTlsConfig() + if tlsConfig == nil { + s.logger.Error("failed to create TLS config", zap.Error(err)) + return s } + + httpsRouter := gin.New() + s.setRouter(httpsRouter) + + s.httpsServer = &http.Server{Addr: config.ListenAddress, Handler: httpsRouter, ReadHeaderTimeout: 90 * time.Second, TLSConfig: tlsConfig} + return s } +var getTlsConfig = func() (*tls.Config, error) { + serverConfig := &tlsInternal.ServerConfig{ + TLSCert: tlsServerCertFilePath, + TLSKey: tlsServerKeyFilePath, + TLSAllowedCACerts: []string{caFilePath}, + } + return serverConfig.TLSConfig() +} + func (s *Server) Start(context.Context, component.Host) error { - s.logger.Info("Starting server ...") - go func() { - err := s.server.ListenAndServe() - if err != nil { - s.logger.Error("failed to serve and listen", zap.Error(err)) - } - }() + if s.httpsServer != nil { + s.logger.Info("Starting HTTPS server...") + go func() { + err := s.httpsServer.ListenAndServeTLS("", "") + if err != nil { + s.logger.Error("failed to serve and listen", zap.Error(err)) + } + }() + } return nil } func (s *Server) Shutdown(ctx context.Context) error { - s.logger.Info("Shutting down server...") - return s.server.Shutdown(ctx) + if s.httpsServer != nil { + s.logger.Info("Shutting down HTTPS server...") + return s.httpsServer.Shutdown(ctx) + } + return nil } func (s *Server) k8sPodToServiceMapHandler(c *gin.Context) { diff --git a/extension/server/extension_test.go b/extension/server/extension_test.go index 02a6f6acf6..3b14347d29 100644 --- a/extension/server/extension_test.go +++ b/extension/server/extension_test.go @@ -5,6 +5,7 @@ package server import ( "context" + "crypto/tls" "encoding/json" "net/http" "net/http/httptest" @@ -40,17 +41,83 @@ func newMockGetPodServiceEnvironmentMapping(es *mockEntityStore) func() map[stri return es.podToServiceEnvironmentMap } } + +type mockServerConfig struct { + TLSCert string + TLSKey string + 
TLSAllowedCACerts []string +} + +func newMockTLSConfig(c *mockServerConfig) func() (*tls.Config, error) { + return func() (*tls.Config, error) { + if c.TLSCert == "" && c.TLSKey == "" && len(c.TLSAllowedCACerts) == 0 { + return nil, nil + } + // Mock implementation for testing purposes + return &tls.Config{ + ClientAuth: tls.RequireAndVerifyClientCert, + MinVersion: tls.VersionTLS12, + }, nil + } +} + func TestNewServer(t *testing.T) { logger, _ := zap.NewProduction() config := &Config{ ListenAddress: ":8080", } - server := NewServer(logger, config) + tests := []struct { + name string + want *Server + mockSvrCfg *mockServerConfig + isTLS bool + }{ + { + name: "HTTPSServer", + want: &Server{ + config: config, + logger: logger, + }, + mockSvrCfg: &mockServerConfig{ + TLSCert: "cert", + TLSKey: "key", + TLSAllowedCACerts: []string{"ca"}, + }, + isTLS: true, + }, + { + name: "EmptyHTTPSServer", + want: &Server{ + config: config, + logger: logger, + }, + mockSvrCfg: &mockServerConfig{}, + isTLS: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + getTlsConfig = newMockTLSConfig(tt.mockSvrCfg) + + server := NewServer(logger, config) + assert.NotNil(t, server) + assert.Equal(t, config, server.config) + assert.NotNil(t, server.logger) + if tt.isTLS { + assert.NotNil(t, server.httpsServer) + assert.Equal(t, ":8080", server.httpsServer.Addr) + assert.NotNil(t, server.httpsServer.TLSConfig) + assert.Equal(t, uint16(tls.VersionTLS12), server.httpsServer.TLSConfig.MinVersion) + assert.Equal(t, tls.RequireAndVerifyClientCert, server.httpsServer.TLSConfig.ClientAuth) + assert.NotNil(t, server.httpsServer.Handler) + assert.Equal(t, 90*time.Second, server.httpsServer.ReadHeaderTimeout) + } else { + assert.Nil(t, server.httpsServer) + } + }) + } - assert.NotNil(t, server) - assert.Equal(t, config, server.config) - assert.NotNil(t, server.logger) - assert.NotNil(t, server.server) } func TestK8sPodToServiceMapHandler(t *testing.T) { @@ -148,31 
+215,40 @@ func TestServerStartAndShutdown(t *testing.T) { config := &Config{ ListenAddress: ":8080", } - server := NewServer(logger, config) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - err := server.Start(ctx, nil) - assert.NoError(t, err) - - time.Sleep(1 * time.Second) - - // Make a request to the server to check if it's running - resp, err := http.Get("http://localhost:8080") - assert.NoError(t, err) - defer resp.Body.Close() + tests := []struct { + name string + mockSvrCfg *mockServerConfig + }{ + { + name: "HTTPSServer", + mockSvrCfg: &mockServerConfig{ + TLSCert: "cert", + TLSKey: "key", + TLSAllowedCACerts: []string{"ca"}, + }, + }, + { + name: "EmptyHTTPSServer", + mockSvrCfg: &mockServerConfig{}, + }, + } - // Check if the response status code is 404 (default route) - assert.Equal(t, http.StatusNotFound, resp.StatusCode) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + getTlsConfig = newMockTLSConfig(tt.mockSvrCfg) + server := NewServer(logger, config) - err = server.Shutdown(ctx) - assert.NoError(t, err) + err := server.Start(ctx, nil) + assert.NoError(t, err) - // Wait for the server to shut down - time.Sleep(1 * time.Second) + time.Sleep(1 * time.Second) - // Make a request to the server to check if it's shutdown - _, err = http.Get("http://localhost:8080") - assert.Error(t, err) + err = server.Shutdown(ctx) + assert.NoError(t, err) + }) + } } diff --git a/internal/tls/config.go b/internal/tls/config.go index 8d4247c0a4..627d75c3f4 100644 --- a/internal/tls/config.go +++ b/internal/tls/config.go @@ -91,6 +91,7 @@ func (c *ServerConfig) TLSConfig() (*tls.Config, error) { } tlsConfig.ClientCAs = pool tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + tlsConfig.MinVersion = tls.VersionTLS12 } if c.TLSCert != "" && c.TLSKey != "" { From cedf98626493b5056c6bffc3950d5bada922413f Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Tue, 24 Sep 2024 
18:32:07 -0400 Subject: [PATCH 11/47] Add service name source for AppSignal in EKS scenario (#811) --- extension/entitystore/eksInfo.go | 12 ++-- extension/entitystore/eksInfo_test.go | 36 ++++++++---- extension/entitystore/extension.go | 4 +- extension/entitystore/extension_test.go | 7 ++- extension/entitystore/serviceprovider.go | 1 + .../entityattributes/entityattributes.go | 14 +++-- .../k8sattributescraper.go} | 23 ++++---- .../k8sattributescraper_test.go} | 40 ++++++------- plugins/processors/awsentity/processor.go | 34 +++++++---- .../processors/awsentity/processor_test.go | 56 +++++++++++++++---- 10 files changed, 149 insertions(+), 78 deletions(-) rename plugins/processors/awsentity/internal/{eksattributescraper/eksattributescraper.go => k8sattributescraper/k8sattributescraper.go} (78%) rename plugins/processors/awsentity/internal/{eksattributescraper/eksattributescraper_test.go => k8sattributescraper/k8sattributescraper_test.go} (91%) diff --git a/extension/entitystore/eksInfo.go b/extension/entitystore/eksInfo.go index 371f07f765..b8929b771d 100644 --- a/extension/entitystore/eksInfo.go +++ b/extension/entitystore/eksInfo.go @@ -6,8 +6,9 @@ package entitystore import "go.uber.org/zap" type ServiceEnvironment struct { - ServiceName string - Environment string + ServiceName string + Environment string + ServiceNameSource string } type eksInfo struct { @@ -23,11 +24,12 @@ func newEKSInfo(logger *zap.Logger) *eksInfo { } } -func (eks *eksInfo) AddPodServiceEnvironmentMapping(podName string, serviceName string, environmentName string) { +func (eks *eksInfo) AddPodServiceEnvironmentMapping(podName string, serviceName string, environmentName string, serviceNameSource string) { if eks.podToServiceEnvMap != nil { eks.podToServiceEnvMap[podName] = ServiceEnvironment{ - ServiceName: serviceName, - Environment: environmentName, + ServiceName: serviceName, + Environment: environmentName, + ServiceNameSource: serviceNameSource, } } } diff --git 
a/extension/entitystore/eksInfo_test.go b/extension/entitystore/eksInfo_test.go index 11804efef6..75e0924cb7 100644 --- a/extension/entitystore/eksInfo_test.go +++ b/extension/entitystore/eksInfo_test.go @@ -12,12 +12,13 @@ import ( func TestAddPodServiceEnvironmentMapping(t *testing.T) { tests := []struct { - name string - want map[string]ServiceEnvironment - podName string - service string - env string - mapNil bool + name string + want map[string]ServiceEnvironment + podName string + service string + env string + serviceNameSource string + mapNil bool }{ { name: "AddPodWithServiceMapping", @@ -41,6 +42,20 @@ func TestAddPodServiceEnvironmentMapping(t *testing.T) { service: "test-service", env: "test-env", }, + { + name: "AddPodWithServiceEnvMapping", + want: map[string]ServiceEnvironment{ + "test-pod": { + ServiceName: "test-service", + Environment: "test-env", + ServiceNameSource: ServiceNameSourceInstrumentation, + }, + }, + podName: "test-pod", + service: "test-service", + env: "test-env", + serviceNameSource: "Instrumentation", + }, { name: "AddWhenPodToServiceMapIsNil", mapNil: true, @@ -53,7 +68,7 @@ func TestAddPodServiceEnvironmentMapping(t *testing.T) { if tt.mapNil { ei.podToServiceEnvMap = nil } - ei.AddPodServiceEnvironmentMapping(tt.podName, tt.service, tt.env) + ei.AddPodServiceEnvironmentMapping(tt.podName, tt.service, tt.env, tt.serviceNameSource) assert.Equal(t, tt.want, ei.podToServiceEnvMap) }) } @@ -69,8 +84,9 @@ func TestGetPodServiceEnvironmentMapping(t *testing.T) { name: "GetPodWithServiceEnvMapping", want: map[string]ServiceEnvironment{ "test-pod": { - ServiceName: "test-service", - Environment: "test-env", + ServiceName: "test-service", + Environment: "test-env", + ServiceNameSource: "test-service-name-source", }, }, addMap: true, @@ -85,7 +101,7 @@ func TestGetPodServiceEnvironmentMapping(t *testing.T) { logger, _ := zap.NewDevelopment() ei := newEKSInfo(logger) if tt.addMap { - ei.AddPodServiceEnvironmentMapping("test-pod", 
"test-service", "test-env") + ei.AddPodServiceEnvironmentMapping("test-pod", "test-service", "test-env", "test-service-name-source") } assert.Equal(t, tt.want, ei.GetPodServiceEnvironmentMapping()) }) diff --git a/extension/entitystore/extension.go b/extension/entitystore/extension.go index fe296d9be6..6122ff7175 100644 --- a/extension/entitystore/extension.go +++ b/extension/entitystore/extension.go @@ -163,9 +163,9 @@ func (e *EntityStore) AddServiceAttrEntryForLogGroup(logGroupName LogGroupName, }) } -func (e *EntityStore) AddPodServiceEnvironmentMapping(podName string, serviceName string, environmentName string) { +func (e *EntityStore) AddPodServiceEnvironmentMapping(podName string, serviceName string, environmentName string, serviceNameSource string) { if e.eksInfo != nil { - e.eksInfo.AddPodServiceEnvironmentMapping(podName, serviceName, environmentName) + e.eksInfo.AddPodServiceEnvironmentMapping(podName, serviceName, environmentName, serviceNameSource) } } diff --git a/extension/entitystore/extension_test.go b/extension/entitystore/extension_test.go index 30179b6b6f..a79d1ced61 100644 --- a/extension/entitystore/extension_test.go +++ b/extension/entitystore/extension_test.go @@ -371,8 +371,9 @@ func TestEntityStore_AddAndGetPodServiceEnvironmentMapping(t *testing.T) { name: "HappyPath", want: map[string]ServiceEnvironment{ "pod1": { - ServiceName: "service1", - Environment: "env1", + ServiceName: "service1", + Environment: "env1", + ServiceNameSource: ServiceNameSourceK8sWorkload, }, }, eks: newEKSInfo(logger), @@ -386,7 +387,7 @@ func TestEntityStore_AddAndGetPodServiceEnvironmentMapping(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { e := EntityStore{eksInfo: tt.eks} - e.AddPodServiceEnvironmentMapping("pod1", "service1", "env1") + e.AddPodServiceEnvironmentMapping("pod1", "service1", "env1", ServiceNameSourceK8sWorkload) assert.Equal(t, tt.want, e.GetPodServiceEnvironmentMapping()) }) } diff --git 
a/extension/entitystore/serviceprovider.go b/extension/entitystore/serviceprovider.go index ee962f21a8..40eaffca24 100644 --- a/extension/entitystore/serviceprovider.go +++ b/extension/entitystore/serviceprovider.go @@ -38,6 +38,7 @@ const ( ServiceNameSourceResourceTags = "ResourceTags" ServiceNameSourceUnknown = "Unknown" ServiceNameSourceUserConfiguration = "UserConfiguration" + ServiceNameSourceK8sWorkload = "K8sWorkload" jitterMax = 180 jitterMin = 60 diff --git a/plugins/processors/awsentity/internal/entityattributes/entityattributes.go b/plugins/processors/awsentity/internal/entityattributes/entityattributes.go index a12bd28027..0d95595905 100644 --- a/plugins/processors/awsentity/internal/entityattributes/entityattributes.go +++ b/plugins/processors/awsentity/internal/entityattributes/entityattributes.go @@ -4,12 +4,14 @@ package entityattributes const ( - AttributeEntityServiceName = "aws.entity.service.name" - AttributeEntityDeploymentEnvironment = "aws.entity.deployment.environment" - AttributeEntityCluster = "aws.entity.k8s.cluster.name" - AttributeEntityNamespace = "aws.entity.k8s.namespace.name" - AttributeEntityWorkload = "aws.entity.k8s.workload.name" - AttributeEntityNode = "aws.entity.k8s.node.name" + AWSEntityPrefix = "com.amazonaws.cloudwatch.entity.internal." 
+ AttributeEntityServiceName = AWSEntityPrefix + "service.name" + AttributeEntityDeploymentEnvironment = AWSEntityPrefix + "deployment.environment" + AttributeEntityCluster = AWSEntityPrefix + "k8s.cluster.name" + AttributeEntityNamespace = AWSEntityPrefix + "k8s.namespace.name" + AttributeEntityWorkload = AWSEntityPrefix + "k8s.workload.name" + AttributeEntityNode = AWSEntityPrefix + "k8s.node.name" + AttributeEntityServiceNameSource = AWSEntityPrefix + "service.name.source" ) // Container Insights attributes used for scraping EKS related information diff --git a/plugins/processors/awsentity/internal/eksattributescraper/eksattributescraper.go b/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper.go similarity index 78% rename from plugins/processors/awsentity/internal/eksattributescraper/eksattributescraper.go rename to plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper.go index 87eeb65528..151332024a 100644 --- a/plugins/processors/awsentity/internal/eksattributescraper/eksattributescraper.go +++ b/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper.go @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// SPDX-License-Identifier: MIT -package eksattributescraper +package k8sattributescraper import ( "go.opentelemetry.io/collector/pdata/pcommon" @@ -30,29 +30,28 @@ var ( } ) -type eksattributescraper struct { +type K8sAttributeScraper struct { Cluster string Namespace string Workload string Node string } -func NewEKSAttributeScraper(clusterName string) *eksattributescraper { - return &eksattributescraper{ +func NewK8sAttributeScraper(clusterName string) *K8sAttributeScraper { + return &K8sAttributeScraper{ Cluster: clusterName, } } -func (e *eksattributescraper) Scrape(rm pcommon.Resource) { +func (e *K8sAttributeScraper) Scrape(rm pcommon.Resource) { resourceAttrs := rm.Attributes() e.scrapeNamespace(resourceAttrs) e.scrapeWorkload(resourceAttrs) e.scrapeNode(resourceAttrs) e.decorateEntityAttributes(resourceAttrs) - e.reset() } -func (e *eksattributescraper) scrapeNamespace(p pcommon.Map) { +func (e *K8sAttributeScraper) scrapeNamespace(p pcommon.Map) { for _, namespace := range namespaceAllowlist { if namespaceAttr, ok := p.Get(namespace); ok { e.Namespace = namespaceAttr.Str() @@ -61,7 +60,7 @@ func (e *eksattributescraper) scrapeNamespace(p pcommon.Map) { } } -func (e *eksattributescraper) scrapeWorkload(p pcommon.Map) { +func (e *K8sAttributeScraper) scrapeWorkload(p pcommon.Map) { for _, workload := range workloadAllowlist { if workloadAttr, ok := p.Get(workload); ok { e.Workload = workloadAttr.Str() @@ -71,7 +70,7 @@ func (e *eksattributescraper) scrapeWorkload(p pcommon.Map) { } -func (e *eksattributescraper) scrapeNode(p pcommon.Map) { +func (e *K8sAttributeScraper) scrapeNode(p pcommon.Map) { for _, node := range nodeAllowlist { if nodeAttr, ok := p.Get(node); ok { e.Node = nodeAttr.Str() @@ -80,15 +79,15 @@ func (e *eksattributescraper) scrapeNode(p pcommon.Map) { } } -func (e *eksattributescraper) decorateEntityAttributes(p pcommon.Map) { +func (e *K8sAttributeScraper) decorateEntityAttributes(p pcommon.Map) { addAttributeIfNonEmpty(p, 
entityattributes.AttributeEntityCluster, e.Cluster) addAttributeIfNonEmpty(p, entityattributes.AttributeEntityNamespace, e.Namespace) addAttributeIfNonEmpty(p, entityattributes.AttributeEntityWorkload, e.Workload) addAttributeIfNonEmpty(p, entityattributes.AttributeEntityNode, e.Node) } -func (e *eksattributescraper) reset() { - *e = eksattributescraper{ +func (e *K8sAttributeScraper) Reset() { + *e = K8sAttributeScraper{ Cluster: e.Cluster, } } diff --git a/plugins/processors/awsentity/internal/eksattributescraper/eksattributescraper_test.go b/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper_test.go similarity index 91% rename from plugins/processors/awsentity/internal/eksattributescraper/eksattributescraper_test.go rename to plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper_test.go index 809d96cddd..2e69862b0a 100644 --- a/plugins/processors/awsentity/internal/eksattributescraper/eksattributescraper_test.go +++ b/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper_test.go @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// SPDX-License-Identifier: MIT -package eksattributescraper +package k8sattributescraper import ( "testing" @@ -14,12 +14,12 @@ import ( "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/internal/entityattributes" ) -func TestNewEKSAttributeScraper(t *testing.T) { - scraper := NewEKSAttributeScraper("test") +func TestNewK8sAttributeScraper(t *testing.T) { + scraper := NewK8sAttributeScraper("test") assert.Equal(t, "test", scraper.Cluster) } -func Test_eksattributescraper_Scrape(t *testing.T) { +func Test_k8sattributescraper_Scrape(t *testing.T) { tests := []struct { name string @@ -72,14 +72,14 @@ func Test_eksattributescraper_Scrape(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - e := NewEKSAttributeScraper(tt.clusterName) + e := NewK8sAttributeScraper(tt.clusterName) e.Scrape(tt.args) assert.Equal(t, tt.want.AsRaw(), tt.args.Attributes().AsRaw()) }) } } -func Test_eksattributescraper_decorateEntityAttributes(t *testing.T) { +func Test_k8sattributescraper_decorateEntityAttributes(t *testing.T) { type fields struct { Cluster string Namespace string @@ -124,7 +124,7 @@ func Test_eksattributescraper_decorateEntityAttributes(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { p := pcommon.NewMap() - e := &eksattributescraper{ + e := &K8sAttributeScraper{ Cluster: tt.fields.Cluster, Namespace: tt.fields.Namespace, Workload: tt.fields.Workload, @@ -136,7 +136,7 @@ func Test_eksattributescraper_decorateEntityAttributes(t *testing.T) { } } -func Test_eksattributescraper_reset(t *testing.T) { +func Test_k8sattributescraper_reset(t *testing.T) { type fields struct { Cluster string Namespace string @@ -146,19 +146,19 @@ func Test_eksattributescraper_reset(t *testing.T) { tests := []struct { name string fields fields - want *eksattributescraper + want *K8sAttributeScraper }{ { name: "Empty", fields: fields{}, - want: &eksattributescraper{}, + want: &K8sAttributeScraper{}, }, { name: 
"ClusterExists", fields: fields{ Cluster: "test-cluster", }, - want: &eksattributescraper{ + want: &K8sAttributeScraper{ Cluster: "test-cluster", }, }, @@ -170,26 +170,26 @@ func Test_eksattributescraper_reset(t *testing.T) { Workload: "test-workload", Node: "test-node", }, - want: &eksattributescraper{ + want: &K8sAttributeScraper{ Cluster: "test-cluster", }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - e := &eksattributescraper{ + e := &K8sAttributeScraper{ Cluster: tt.fields.Cluster, Namespace: tt.fields.Namespace, Workload: tt.fields.Workload, Node: tt.fields.Node, } - e.reset() + e.Reset() assert.Equal(t, tt.want, e) }) } } -func Test_eksattributescraper_scrapeNamespace(t *testing.T) { +func Test_k8sattributescraper_scrapeNamespace(t *testing.T) { tests := []struct { name string args pcommon.Map @@ -218,14 +218,14 @@ func Test_eksattributescraper_scrapeNamespace(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - e := &eksattributescraper{} + e := &K8sAttributeScraper{} e.scrapeNamespace(tt.args) assert.Equal(t, tt.want, e.Namespace) }) } } -func Test_eksattributescraper_scrapeNode(t *testing.T) { +func Test_k8sattributescraper_scrapeNode(t *testing.T) { tests := []struct { name string args pcommon.Map @@ -254,14 +254,14 @@ func Test_eksattributescraper_scrapeNode(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - e := &eksattributescraper{} + e := &K8sAttributeScraper{} e.scrapeNode(tt.args) assert.Equal(t, tt.want, e.Node) }) } } -func Test_eksattributescraper_scrapeWorkload(t *testing.T) { +func Test_k8sattributescraper_scrapeWorkload(t *testing.T) { tests := []struct { name string args pcommon.Map @@ -312,7 +312,7 @@ func Test_eksattributescraper_scrapeWorkload(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - e := &eksattributescraper{} + e := &K8sAttributeScraper{} e.scrapeWorkload(tt.args) assert.Equal(t, tt.want, e.Workload) }) diff 
--git a/plugins/processors/awsentity/processor.go b/plugins/processors/awsentity/processor.go index d4cdec45f8..42888a44e7 100644 --- a/plugins/processors/awsentity/processor.go +++ b/plugins/processors/awsentity/processor.go @@ -13,8 +13,8 @@ import ( "go.uber.org/zap" "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" - "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/internal/eksattributescraper" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/internal/entityattributes" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/internal/k8sattributescraper" "github.com/aws/amazon-cloudwatch-agent/translator/config" ) @@ -28,6 +28,7 @@ const ( type scraper interface { Scrape(rm pcommon.Resource) + Reset() } // exposed as a variable for unit testing @@ -39,12 +40,12 @@ var addToEntityStore = func(logGroupName entitystore.LogGroupName, serviceName s es.AddServiceAttrEntryForLogGroup(logGroupName, serviceName, environmentName) } -var addPodToServiceEnvironmentMap = func(podName string, serviceName string, environmentName string) { +var addPodToServiceEnvironmentMap = func(podName string, serviceName string, environmentName string, serviceNameSource string) { es := entitystore.GetEntityStore() if es == nil { return } - es.AddPodServiceEnvironmentMapping(podName, serviceName, environmentName) + es.AddPodServiceEnvironmentMapping(podName, serviceName, environmentName, serviceNameSource) } // awsEntityProcessor looks for metrics that have the aws.log.group.names and either the service.name or @@ -52,28 +53,32 @@ var addPodToServiceEnvironmentMap = func(podName string, serviceName string, env // service/environment names to the entitystore extension. 
type awsEntityProcessor struct { config *Config - eksscraper scraper + k8sscraper scraper logger *zap.Logger } func newAwsEntityProcessor(config *Config, logger *zap.Logger) *awsEntityProcessor { return &awsEntityProcessor{ config: config, - eksscraper: eksattributescraper.NewEKSAttributeScraper(config.ClusterName), + k8sscraper: k8sattributescraper.NewK8sAttributeScraper(config.ClusterName), logger: logger, } } func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { + var entityServiceNameSource string rm := md.ResourceMetrics() for i := 0; i < rm.Len(); i++ { - if p.config.KubernetesMode == config.ModeEKS { - p.eksscraper.Scrape(rm.At(i).Resource()) + if p.config.KubernetesMode != "" { + p.k8sscraper.Scrape(rm.At(i).Resource()) } resourceAttrs := rm.At(i).Resource().Attributes() logGroupNames, _ := resourceAttrs.Get(attributeAwsLogGroupNames) serviceName, _ := resourceAttrs.Get(attributeServiceName) environmentName, _ := resourceAttrs.Get(attributeDeploymentEnvironment) + if serviceNameSource, sourceExists := resourceAttrs.Get(entityattributes.AttributeEntityServiceNameSource); sourceExists { + entityServiceNameSource = serviceNameSource.Str() + } entityServiceName := getServiceAttributes(resourceAttrs) entityEnvironmentName := environmentName.Str() @@ -87,11 +92,21 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric resourceAttrs.PutStr(entityattributes.AttributeEntityDeploymentEnvironment, entityEnvironmentName) } if p.config.KubernetesMode != "" { + fallbackEnvironment := entityEnvironmentName + podInfo, ok := p.k8sscraper.(*k8sattributescraper.K8sAttributeScraper) + if fallbackEnvironment == EMPTY && p.config.KubernetesMode == config.ModeEKS && ok && podInfo.Cluster != EMPTY && podInfo.Namespace != EMPTY { + fallbackEnvironment = "eks:" + p.config.ClusterName + "/" + podInfo.Namespace + } else if fallbackEnvironment == EMPTY && (p.config.KubernetesMode == config.ModeK8sEC2 
|| p.config.KubernetesMode == config.ModeK8sOnPrem) && ok && podInfo.Cluster != EMPTY && podInfo.Namespace != EMPTY { + fallbackEnvironment = "k8s:" + p.config.ClusterName + "/" + podInfo.Namespace + } fullPodName := scrapeK8sPodName(resourceAttrs) - if fullPodName != EMPTY && (entityServiceName != EMPTY || entityEnvironmentName != EMPTY) { - addPodToServiceEnvironmentMap(fullPodName, entityServiceName, entityEnvironmentName) + if fullPodName != EMPTY && entityServiceName != EMPTY && entityServiceNameSource != EMPTY { + addPodToServiceEnvironmentMap(fullPodName, entityServiceName, fallbackEnvironment, entityServiceNameSource) + } else if fullPodName != EMPTY && entityServiceName != EMPTY && entityServiceNameSource == EMPTY { + addPodToServiceEnvironmentMap(fullPodName, entityServiceName, fallbackEnvironment, entitystore.ServiceNameSourceUnknown) } } + p.k8sscraper.Reset() if logGroupNames.Str() == EMPTY || (serviceName.Str() == EMPTY && environmentName.Str() == EMPTY) { continue } @@ -104,7 +119,6 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric addToEntityStore(entitystore.LogGroupName(logGroupName), serviceName.Str(), environmentName.Str()) } } - return md, nil } diff --git a/plugins/processors/awsentity/processor_test.go b/plugins/processors/awsentity/processor_test.go index 475766389a..2800473c5c 100644 --- a/plugins/processors/awsentity/processor_test.go +++ b/plugins/processors/awsentity/processor_test.go @@ -35,9 +35,9 @@ func newMockEntityStore() *mockEntityStore { } } -func newMockAddPodServiceEnvironmentMapping(es *mockEntityStore) func(string, string, string) { - return func(podName string, serviceName string, deploymentName string) { - es.podToServiceEnvironmentMap[podName] = entitystore.ServiceEnvironment{ServiceName: serviceName, Environment: deploymentName} +func newMockAddPodServiceEnvironmentMapping(es *mockEntityStore) func(string, string, string, string) { + return func(podName string, serviceName string, 
deploymentName string, serviceNameSource string) { + es.podToServiceEnvironmentMap[podName] = entitystore.ServiceEnvironment{ServiceName: serviceName, Environment: deploymentName, ServiceNameSource: serviceNameSource} } } @@ -138,7 +138,7 @@ func TestProcessMetricsLogGroupAssociation(t *testing.T) { func TestProcessMetricsForAddingPodToServiceMap(t *testing.T) { logger, _ := zap.NewDevelopment() - p := newAwsEntityProcessor(&Config{}, logger) + p := newAwsEntityProcessor(&Config{ClusterName: "test-cluster"}, logger) ctx := context.Background() tests := []struct { name string @@ -147,23 +147,59 @@ func TestProcessMetricsForAddingPodToServiceMap(t *testing.T) { want map[string]entitystore.ServiceEnvironment }{ { - name: "WithPodNameAndServiceName", + name: "WithPodNameAndServiceNameNoSource", metrics: generateMetrics(attributeServiceName, "test-service", semconv.AttributeK8SPodName, "cloudwatch-agent-adhgaf"), - want: map[string]entitystore.ServiceEnvironment{"cloudwatch-agent-adhgaf": {ServiceName: "test-service"}}, + want: map[string]entitystore.ServiceEnvironment{"cloudwatch-agent-adhgaf": {ServiceName: "test-service", ServiceNameSource: entitystore.ServiceNameSourceUnknown}}, + k8sMode: config.ModeEKS, + }, + { + name: "WithPodNameAndServiceNameHasSource", + metrics: generateMetrics(attributeServiceName, "test-service", semconv.AttributeK8SPodName, "cloudwatch-agent-adhgaf", entityattributes.AttributeEntityServiceNameSource, "Instrumentation"), + want: map[string]entitystore.ServiceEnvironment{"cloudwatch-agent-adhgaf": {ServiceName: "test-service", ServiceNameSource: entitystore.ServiceNameSourceInstrumentation}}, + k8sMode: config.ModeEKS, + }, + { + name: "WithPodNameAndServiceNameHasSourceDefaultEnvironmentEKS", + metrics: generateMetrics(attributeServiceName, "test-service", semconv.AttributeK8SPodName, "cloudwatch-agent-adhgaf", semconv.AttributeK8SNamespaceName, "test-namespace", entityattributes.AttributeEntityServiceNameSource, "Instrumentation"), + 
want: map[string]entitystore.ServiceEnvironment{"cloudwatch-agent-adhgaf": {ServiceName: "test-service", Environment: "eks:test-cluster/test-namespace", ServiceNameSource: entitystore.ServiceNameSourceInstrumentation}}, k8sMode: config.ModeEKS, }, { - name: "WithPodNameAndServiceEnvironmentName", + name: "WithPodNameAndServiceNameHasSourceDefaultEnvironmentK8SEC2", + metrics: generateMetrics(attributeServiceName, "test-service", semconv.AttributeK8SPodName, "cloudwatch-agent-adhgaf", semconv.AttributeK8SNamespaceName, "test-namespace", entityattributes.AttributeEntityServiceNameSource, "Instrumentation"), + want: map[string]entitystore.ServiceEnvironment{"cloudwatch-agent-adhgaf": {ServiceName: "test-service", Environment: "k8s:test-cluster/test-namespace", ServiceNameSource: entitystore.ServiceNameSourceInstrumentation}}, + k8sMode: config.ModeK8sEC2, + }, + { + name: "WithPodNameAndServiceNameHasSourceDefaultEnvironmentK8SOnPrem", + metrics: generateMetrics(attributeServiceName, "test-service", semconv.AttributeK8SPodName, "cloudwatch-agent-adhgaf", semconv.AttributeK8SNamespaceName, "test-namespace", entityattributes.AttributeEntityServiceNameSource, "Instrumentation"), + want: map[string]entitystore.ServiceEnvironment{"cloudwatch-agent-adhgaf": {ServiceName: "test-service", Environment: "k8s:test-cluster/test-namespace", ServiceNameSource: entitystore.ServiceNameSourceInstrumentation}}, + k8sMode: config.ModeK8sOnPrem, + }, + { + name: "WithPodNameAndServiceEnvironmentNameNoSource", metrics: generateMetrics(attributeServiceName, "test-service", semconv.AttributeK8SPodName, "cloudwatch-agent-adhgaf", attributeDeploymentEnvironment, "test-deployment"), - want: map[string]entitystore.ServiceEnvironment{"cloudwatch-agent-adhgaf": {ServiceName: "test-service", Environment: "test-deployment"}}, + want: map[string]entitystore.ServiceEnvironment{"cloudwatch-agent-adhgaf": {ServiceName: "test-service", Environment: "test-deployment", ServiceNameSource: 
entitystore.ServiceNameSourceUnknown}}, + k8sMode: config.ModeK8sEC2, + }, + { + name: "WithPodNameAndServiceEnvironmentNameHasSource", + metrics: generateMetrics(attributeServiceName, "test-service", semconv.AttributeK8SPodName, "cloudwatch-agent-adhgaf", attributeDeploymentEnvironment, "test-deployment", entityattributes.AttributeEntityServiceNameSource, "Instrumentation"), + want: map[string]entitystore.ServiceEnvironment{"cloudwatch-agent-adhgaf": {ServiceName: "test-service", Environment: "test-deployment", ServiceNameSource: entitystore.ServiceNameSourceInstrumentation}}, k8sMode: config.ModeK8sEC2, }, { name: "WithPodNameAndAttributeService", - metrics: generateMetrics(attributeService, "test-service", semconv.AttributeK8SPodName, "cloudwatch-agent-adhgaf"), - want: map[string]entitystore.ServiceEnvironment{"cloudwatch-agent-adhgaf": {ServiceName: "test-service"}}, + metrics: generateMetrics(attributeService, "test-service", semconv.AttributeK8SPodName, "cloudwatch-agent-adhgaf", entityattributes.AttributeEntityServiceNameSource, "Instrumentation"), + want: map[string]entitystore.ServiceEnvironment{"cloudwatch-agent-adhgaf": {ServiceName: "test-service", ServiceNameSource: entitystore.ServiceNameSourceInstrumentation}}, k8sMode: config.ModeK8sOnPrem, }, + { + name: "WithPodNameAndWorkload", + metrics: generateMetrics(attributeServiceName, "cloudwatch-agent-adhgaf", semconv.AttributeK8SPodName, "cloudwatch-agent-adhgaf", entityattributes.AttributeEntityServiceNameSource, "K8sWorkload"), + want: map[string]entitystore.ServiceEnvironment{"cloudwatch-agent-adhgaf": {ServiceName: "cloudwatch-agent-adhgaf", ServiceNameSource: entitystore.ServiceNameSourceK8sWorkload}}, + k8sMode: config.ModeEKS, + }, { name: "WithPodNameAndEmptyServiceAndEnvironmentName", metrics: generateMetrics(semconv.AttributeK8SPodName, "cloudwatch-agent-adhgaf"), From 3cdbeca5eaa725a151078c264d76ecb9a130d997 Mon Sep 17 00:00:00 2001 From: Jason Polanco Date: Wed, 25 Sep 2024 10:08:07 -0400 
Subject: [PATCH 12/47] [Compass] Add entity data to service metrics by adding entity process to metrics pipeline (#809) --- extension/entitystore/extension.go | 6 +++ plugins/processors/awsentity/config.go | 2 + .../entityattributes/entityattributes.go | 3 ++ plugins/processors/awsentity/processor.go | 34 +++++++++++++ .../processors/awsentity/processor_test.go | 48 +++++++++++++++++-- .../sampleConfig/advanced_config_darwin.yaml | 7 ++- .../sampleConfig/advanced_config_linux.yaml | 7 ++- .../sampleConfig/advanced_config_windows.yaml | 12 +++-- .../appsignals_and_eks_config.yaml | 1 + .../appsignals_and_k8s_config.yaml | 2 + .../appsignals_fallback_and_eks_config.yaml | 1 + .../appsignals_over_fallback_config.yaml | 1 + .../sampleConfig/basic_config_linux.yaml | 6 ++- .../sampleConfig/basic_config_windows.yaml | 4 ++ .../sampleConfig/collectd_config_linux.conf | 2 - .../sampleConfig/collectd_config_linux.yaml | 7 ++- .../sampleConfig/compass_linux_config.yaml | 6 ++- .../sampleConfig/complete_darwin_config.conf | 4 -- .../sampleConfig/complete_darwin_config.yaml | 21 ++++---- .../sampleConfig/complete_linux_config.conf | 4 -- .../sampleConfig/complete_linux_config.yaml | 37 +++++++------- .../sampleConfig/complete_windows_config.conf | 2 - .../sampleConfig/complete_windows_config.yaml | 4 ++ .../sampleConfig/delta_config_linux.yaml | 16 ++++--- .../sampleConfig/delta_net_config_linux.yaml | 6 ++- .../sampleConfig/drop_origin_linux.yaml | 14 ++++-- .../ignore_append_dimensions.yaml | 6 ++- .../sampleConfig/invalid_input_linux.yaml | 6 ++- .../sampleConfig/jmx_config_linux.yaml | 22 +++++---- .../sampleConfig/standard_config_linux.yaml | 9 +++- ...ndard_config_linux_with_common_config.yaml | 7 ++- .../sampleConfig/standard_config_windows.yaml | 8 +++- ...ard_config_windows_with_common_config.yaml | 10 ++-- .../sampleConfig/statsd_config_linux.conf | 2 - .../sampleConfig/statsd_config_linux.yaml | 7 ++- .../sampleConfig/statsd_config_windows.conf | 2 - 
.../sampleConfig/statsd_config_windows.yaml | 7 ++- .../totomlconfig/testdata/agentToml.conf | 6 +-- .../metrics_collect/collectd/collectd_test.go | 6 +-- .../collectd/ruleDeploymentEnvironment.go | 13 +++-- .../collectd/ruleServiceName.go | 13 +++-- .../statsd/ruleDeploymentEnvironment.go | 13 +++-- .../metrics_collect/statsd/ruleServiceName.go | 13 +++-- .../metrics_collect/statsd/statsd_test.go | 14 ++---- translator/translate/otel/common/common.go | 44 +++++++++++++++++ .../otel/pipeline/host/translator.go | 3 +- .../otel/pipeline/host/translator_test.go | 8 ++-- .../otel/processor/awsentity/translator.go | 31 +++++++++++- .../processor/awsentity/translator_test.go | 1 + 49 files changed, 377 insertions(+), 131 deletions(-) diff --git a/extension/entitystore/extension.go b/extension/entitystore/extension.go index 6122ff7175..99297a48d9 100644 --- a/extension/entitystore/extension.go +++ b/extension/entitystore/extension.go @@ -143,6 +143,12 @@ func (e *EntityStore) CreateLogFileEntity(logFileGlob LogFileGlob, logGroupName } } +// GetServiceMetricAttributesMap creates the attribute map for service metrics. This will be expanded upon in a later PR'S, +// but for now is just covering the EC2 attributes for service metrics. 
+func (e *EntityStore) GetServiceMetricAttributesMap() map[string]*string { + return e.createAttributeMap() +} + // AddServiceAttrEntryForLogFile adds an entry to the entity store for the provided file glob -> (serviceName, environmentName) key-value pair func (e *EntityStore) AddServiceAttrEntryForLogFile(fileGlob LogFileGlob, serviceName string, environmentName string) { if e.serviceprovider != nil { diff --git a/plugins/processors/awsentity/config.go b/plugins/processors/awsentity/config.go index 8916376bda..fdf9966df6 100644 --- a/plugins/processors/awsentity/config.go +++ b/plugins/processors/awsentity/config.go @@ -18,6 +18,8 @@ type Config struct { ClusterName string `mapstructure:"cluster_name,omitempty"` // KubernetesMode KubernetesMode string `mapstructure:"kubernetes_mode,omitempty"` + // Specific Mode agent is running on (i.e. EC2, EKS, ECS etc) + Platform string `mapstructure:"platform,omitempty"` } // Verify Config implements Processor interface. diff --git a/plugins/processors/awsentity/internal/entityattributes/entityattributes.go b/plugins/processors/awsentity/internal/entityattributes/entityattributes.go index 0d95595905..b92d8c72c1 100644 --- a/plugins/processors/awsentity/internal/entityattributes/entityattributes.go +++ b/plugins/processors/awsentity/internal/entityattributes/entityattributes.go @@ -12,6 +12,9 @@ const ( AttributeEntityWorkload = AWSEntityPrefix + "k8s.workload.name" AttributeEntityNode = AWSEntityPrefix + "k8s.node.name" AttributeEntityServiceNameSource = AWSEntityPrefix + "service.name.source" + AttributeEntityPlatformType = AWSEntityPrefix + "platform.type" + AttributeEntityInstanceID = AWSEntityPrefix + "instance.id" + AttributeEntityAutoScalingGroup = AWSEntityPrefix + "auto.scaling.group" ) // Container Insights attributes used for scraping EKS related information diff --git a/plugins/processors/awsentity/processor.go b/plugins/processors/awsentity/processor.go index 42888a44e7..9c05937e58 100644 --- 
a/plugins/processors/awsentity/processor.go +++ b/plugins/processors/awsentity/processor.go @@ -48,6 +48,15 @@ var addPodToServiceEnvironmentMap = func(podName string, serviceName string, env es.AddPodServiceEnvironmentMapping(podName, serviceName, environmentName, serviceNameSource) } +var getMetricAttributesFromEntityStore = func() map[string]*string { + es := entitystore.GetEntityStore() + if es == nil { + return map[string]*string{} + } + + return es.GetServiceMetricAttributesMap() +} + // awsEntityProcessor looks for metrics that have the aws.log.group.names and either the service.name or // deployment.environment resource attributes set, then adds the association between the log group(s) and the // service/environment names to the entitystore extension. @@ -67,6 +76,20 @@ func newAwsEntityProcessor(config *Config, logger *zap.Logger) *awsEntityProcess func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { var entityServiceNameSource string + // Get the following metric attributes from the EntityStore: PlatformType, EC2.InstanceId, EC2.AutoScalingGroup + metricAttributes := getMetricAttributesFromEntityStore() + + var platformType, instanceID, autoScalingGroup string + if metricAttributes[entitystore.PlatformType] != nil { + platformType = *metricAttributes[entitystore.PlatformType] + } + if metricAttributes[entitystore.InstanceIDKey] != nil { + instanceID = *metricAttributes[entitystore.InstanceIDKey] + } + if metricAttributes[entitystore.ASGKey] != nil { + autoScalingGroup = *metricAttributes[entitystore.ASGKey] + } + rm := md.ResourceMetrics() for i := 0; i < rm.Len(); i++ { if p.config.KubernetesMode != "" { @@ -91,6 +114,17 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric if entityEnvironmentName != EMPTY { resourceAttrs.PutStr(entityattributes.AttributeEntityDeploymentEnvironment, entityEnvironmentName) } + if p.config.Platform == config.ModeEC2 { + if platformType 
!= EMPTY { + resourceAttrs.PutStr(entityattributes.AttributeEntityPlatformType, platformType) + } + if instanceID != EMPTY { + resourceAttrs.PutStr(entityattributes.AttributeEntityInstanceID, instanceID) + } + if autoScalingGroup != EMPTY { + resourceAttrs.PutStr(entityattributes.AttributeEntityAutoScalingGroup, autoScalingGroup) + } + } if p.config.KubernetesMode != "" { fallbackEnvironment := entityEnvironmentName podInfo, ok := p.k8sscraper.(*k8sattributescraper.K8sAttributeScraper) diff --git a/plugins/processors/awsentity/processor_test.go b/plugins/processors/awsentity/processor_test.go index 2800473c5c..f5ccdb633f 100644 --- a/plugins/processors/awsentity/processor_test.go +++ b/plugins/processors/awsentity/processor_test.go @@ -51,6 +51,25 @@ func newAddToMockEntityStore(rs *mockEntityStore) func(entitystore.LogGroupName, } } +func newMockGetMetricAttributesFromEntityStore() func() map[string]*string { + mockPlatform := "AWS::EC2" + mockInstanceID := "i-123456789" + mockAutoScalingGroup := "auto-scaling" + return func() map[string]*string { + return map[string]*string{ + entitystore.PlatformType: &mockPlatform, + entitystore.InstanceIDKey: &mockInstanceID, + entitystore.ASGKey: &mockAutoScalingGroup, + } + } +} + +func newMockGetMetricAttributesFromEntityStoreReset() func() map[string]*string { + return func() map[string]*string { + return map[string]*string{} + } +} + func TestProcessMetricsLogGroupAssociation(t *testing.T) { logger, _ := zap.NewDevelopment() p := newAwsEntityProcessor(&Config{}, logger) @@ -235,9 +254,10 @@ func TestProcessMetricsResourceAttributeScraping(t *testing.T) { logger, _ := zap.NewDevelopment() ctx := context.Background() tests := []struct { - name string - metrics pmetric.Metrics - want map[string]any + name string + metrics pmetric.Metrics + want map[string]any + containsMetrics bool }{ { name: "EmptyMetrics", @@ -248,9 +268,13 @@ func TestProcessMetricsResourceAttributeScraping(t *testing.T) { name: 
"ResourceAttributeServiceNameOnly", metrics: generateMetrics(attributeServiceName, "test-service"), want: map[string]any{ - entityattributes.AttributeEntityServiceName: "test-service", - attributeServiceName: "test-service", + entityattributes.AttributeEntityServiceName: "test-service", + attributeServiceName: "test-service", + entityattributes.AttributeEntityPlatformType: "AWS::EC2", + entityattributes.AttributeEntityInstanceID: "i-123456789", + entityattributes.AttributeEntityAutoScalingGroup: "auto-scaling", }, + containsMetrics: true, }, { name: "ResourceAttributeEnvironmentOnly", @@ -258,7 +282,11 @@ func TestProcessMetricsResourceAttributeScraping(t *testing.T) { want: map[string]any{ entityattributes.AttributeEntityDeploymentEnvironment: "test-environment", attributeDeploymentEnvironment: "test-environment", + entityattributes.AttributeEntityPlatformType: "AWS::EC2", + entityattributes.AttributeEntityInstanceID: "i-123456789", + entityattributes.AttributeEntityAutoScalingGroup: "auto-scaling", }, + containsMetrics: true, }, { name: "ResourceAttributeServiceNameAndEnvironment", @@ -268,19 +296,28 @@ func TestProcessMetricsResourceAttributeScraping(t *testing.T) { entityattributes.AttributeEntityDeploymentEnvironment: "test-environment", attributeServiceName: "test-service", attributeDeploymentEnvironment: "test-environment", + entityattributes.AttributeEntityPlatformType: "AWS::EC2", + entityattributes.AttributeEntityInstanceID: "i-123456789", + entityattributes.AttributeEntityAutoScalingGroup: "auto-scaling", }, + containsMetrics: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + if tt.containsMetrics { + getMetricAttributesFromEntityStore = newMockGetMetricAttributesFromEntityStore() + } p := newAwsEntityProcessor(&Config{}, logger) + p.config.Platform = config.ModeEC2 _, err := p.processMetrics(ctx, tt.metrics) assert.NoError(t, err) rm := tt.metrics.ResourceMetrics() if rm.Len() > 0 { assert.Equal(t, tt.want, 
rm.At(0).Resource().Attributes().AsRaw()) } + getMetricAttributesFromEntityStore = newMockGetMetricAttributesFromEntityStoreReset() }) } } @@ -325,6 +362,7 @@ func TestProcessMetricsDatapointAttributeScraping(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { p := newAwsEntityProcessor(&Config{ScrapeDatapointAttribute: true}, logger) + p.config.Platform = config.ModeEC2 _, err := p.processMetrics(ctx, tt.metrics) assert.NoError(t, err) rm := tt.metrics.ResourceMetrics() diff --git a/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml b/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml index 34513b31a1..30359e7c85 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml @@ -21,6 +21,9 @@ extensions: mode: ec2 region: us-west-2 processors: + awsentity: + scrape_datapoint_attribute: true + platform: ec2 cumulativetodelta/hostDeltaMetrics: exclude: match_type: strict @@ -74,17 +77,19 @@ service: exporters: - awscloudwatch processors: + - awsentity - ec2tagger receivers: + - telegraf_disk - telegraf_mem - telegraf_netstat - telegraf_swap - telegraf_cpu - - telegraf_disk metrics/hostDeltaMetrics: exporters: - awscloudwatch processors: + - awsentity - cumulativetodelta/hostDeltaMetrics - ec2tagger receivers: diff --git a/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml b/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml index 52e6b7a2f3..bd8d7e88ae 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml @@ -21,6 +21,9 @@ extensions: mode: ec2 region: us-west-2 processors: + awsentity: + scrape_datapoint_attribute: true + platform: ec2 cumulativetodelta/hostDeltaMetrics: exclude: match_type: strict @@ -35,9 +38,9 @@ processors: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: + - InstanceType - ImageId - 
InstanceId - - InstanceType imds_retries: 1 refresh_interval_seconds: 0s receivers: @@ -82,6 +85,7 @@ service: exporters: - awscloudwatch processors: + - awsentity - ec2tagger receivers: - telegraf_cpu @@ -95,6 +99,7 @@ service: exporters: - awscloudwatch processors: + - awsentity - cumulativetodelta/hostDeltaMetrics - ec2tagger receivers: diff --git a/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml b/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml index b5ba1b9e1a..ac7fbc286f 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml @@ -21,13 +21,16 @@ extensions: mode: ec2 region: us-west-2 processors: + awsentity: + scrape_datapoint_attribute: true + platform: ec2 ec2tagger: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: + - ImageId - InstanceId - InstanceType - - ImageId imds_retries: 1 refresh_interval_seconds: 0s receivers: @@ -75,15 +78,16 @@ service: exporters: - awscloudwatch processors: + - awsentity - ec2tagger receivers: - - telegraf_win_perf_counters/3446270237 - - telegraf_win_perf_counters/3762679655 - - telegraf_win_perf_counters/2073218482 - telegraf_win_perf_counters/2039663244 - telegraf_win_perf_counters/4283769065 - telegraf_win_perf_counters/1492679118 - telegraf_win_perf_counters/3610923661 + - telegraf_win_perf_counters/3446270237 + - telegraf_win_perf_counters/3762679655 + - telegraf_win_perf_counters/2073218482 telemetry: logs: development: false diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml index 1af07fe757..8ebde3435c 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml @@ -305,6 +305,7 @@ processors: awsentity: cluster_name: TestCluster kubernetes_mode: EKS + platform: EKS batch/containerinsights: 
metadata_cardinality_limit: 1000 send_batch_max_size: 0 diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml index fa9ce39e16..71aa5cde75 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml @@ -303,7 +303,9 @@ processors: - name: TestCluster platform: k8s awsentity: + cluster_name: TestCluster kubernetes_mode: K8sEC2 + platform: K8sEC2 batch/containerinsights: metadata_cardinality_limit: 1000 send_batch_max_size: 0 diff --git a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml index acde2a898e..dba8a125b2 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml @@ -305,6 +305,7 @@ processors: awsentity: cluster_name: TestCluster kubernetes_mode: EKS + platform: EKS batch/containerinsights: metadata_cardinality_limit: 1000 send_batch_max_size: 0 diff --git a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml index d386b7f6ce..32ca642c27 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml @@ -305,6 +305,7 @@ processors: awsentity: cluster_name: TestCluster kubernetes_mode: EKS + platform: EKS batch/containerinsights: metadata_cardinality_limit: 1000 send_batch_max_size: 0 diff --git a/translator/tocwconfig/sampleConfig/basic_config_linux.yaml b/translator/tocwconfig/sampleConfig/basic_config_linux.yaml index f6293f8035..c8ba47c880 100644 --- a/translator/tocwconfig/sampleConfig/basic_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/basic_config_linux.yaml 
@@ -21,6 +21,9 @@ extensions: mode: ec2 region: us-east-1 processors: + awsentity: + scrape_datapoint_attribute: true + platform: ec2 ec2tagger: ec2_instance_tag_keys: - AutoScalingGroupName @@ -48,10 +51,11 @@ service: exporters: - awscloudwatch processors: + - awsentity - ec2tagger receivers: - - telegraf_disk - telegraf_mem + - telegraf_disk telemetry: logs: development: false diff --git a/translator/tocwconfig/sampleConfig/basic_config_windows.yaml b/translator/tocwconfig/sampleConfig/basic_config_windows.yaml index 9e7e5a8ca6..2ca73c3375 100644 --- a/translator/tocwconfig/sampleConfig/basic_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/basic_config_windows.yaml @@ -21,6 +21,9 @@ extensions: mode: ec2 region: us-west-2 processors: + awsentity: + scrape_datapoint_attribute: true + platform: ec2 ec2tagger: ec2_instance_tag_keys: - AutoScalingGroupName @@ -50,6 +53,7 @@ service: exporters: - awscloudwatch processors: + - awsentity - ec2tagger receivers: - telegraf_win_perf_counters/1492679118 diff --git a/translator/tocwconfig/sampleConfig/collectd_config_linux.conf b/translator/tocwconfig/sampleConfig/collectd_config_linux.conf index 84caa16782..371c03de4f 100644 --- a/translator/tocwconfig/sampleConfig/collectd_config_linux.conf +++ b/translator/tocwconfig/sampleConfig/collectd_config_linux.conf @@ -25,8 +25,6 @@ service_address = "udp://127.0.0.1:25826" [inputs.socket_listener.tags] "aws:AggregationInterval" = "60s" - "deployment.environment" = "" - "service.name" = "" [outputs] diff --git a/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml b/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml index 102cabaf4c..ecfcdceca0 100644 --- a/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml @@ -20,6 +20,10 @@ extensions: entitystore: mode: ec2 region: us-west-2 +processors: + awsentity: + scrape_datapoint_attribute: true + platform: ec2 receivers: 
telegraf_socket_listener: collection_interval: 1m0s @@ -33,7 +37,8 @@ service: metrics/host: exporters: - awscloudwatch - processors: [] + processors: + - awsentity receivers: - telegraf_socket_listener telemetry: diff --git a/translator/tocwconfig/sampleConfig/compass_linux_config.yaml b/translator/tocwconfig/sampleConfig/compass_linux_config.yaml index 1f62ed41bd..5a2eccf32a 100644 --- a/translator/tocwconfig/sampleConfig/compass_linux_config.yaml +++ b/translator/tocwconfig/sampleConfig/compass_linux_config.yaml @@ -32,13 +32,16 @@ extensions: mode: ec2 region: us-west-2 processors: + awsentity: + scrape_datapoint_attribute: true + platform: ec2 ec2tagger: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: - - ImageId - InstanceId - InstanceType + - ImageId imds_retries: 1 refresh_interval_seconds: 0s receivers: @@ -59,6 +62,7 @@ service: exporters: - awscloudwatch processors: + - awsentity - ec2tagger receivers: - telegraf_socket_listener diff --git a/translator/tocwconfig/sampleConfig/complete_darwin_config.conf b/translator/tocwconfig/sampleConfig/complete_darwin_config.conf index db97be5d77..dd69467e71 100644 --- a/translator/tocwconfig/sampleConfig/complete_darwin_config.conf +++ b/translator/tocwconfig/sampleConfig/complete_darwin_config.conf @@ -103,8 +103,6 @@ service_address = "udp://127.0.0.1:25826" [inputs.socket_listener.tags] "aws:AggregationInterval" = "60s" - "deployment.environment" = "" - "service.name" = "" [[inputs.statsd]] interval = "10s" @@ -113,8 +111,6 @@ service_address = ":8125" [inputs.statsd.tags] "aws:AggregationInterval" = "60s" - "deployment.environment" = "" - "service.name" = "" [[inputs.swap]] fieldpass = ["used", "free", "used_percent"] diff --git a/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml b/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml index b169f90b0d..4f032c0c92 100644 --- a/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml +++ 
b/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml @@ -95,6 +95,9 @@ extensions: mode: ec2 region: us-west-2 processors: + awsentity: + scrape_datapoint_attribute: true + platform: ec2 batch/emf_logs: metadata_cardinality_limit: 1000 send_batch_max_size: 0 @@ -131,11 +134,11 @@ processors: metric_statements: - context: metric statements: + - set(unit, "unit") where name == "disk_free" + - set(name, "DISK_FREE") where name == "disk_free" - set(unit, "unit") where name == "cpu_usage_idle" - set(name, "CPU_USAGE_IDLE") where name == "cpu_usage_idle" - set(unit, "unit") where name == "cpu_usage_nice" - - set(unit, "unit") where name == "disk_free" - - set(name, "DISK_FREE") where name == "disk_free" trace_statements: [] receivers: awsxray: @@ -264,28 +267,30 @@ service: exporters: - awscloudwatch processors: + - awsentity - ec2tagger - transform receivers: - - telegraf_statsd - - telegraf_netstat - - telegraf_disk - - telegraf_socket_listener - - telegraf_swap - telegraf_procstat/1917393364 + - telegraf_socket_listener - telegraf_cpu - telegraf_mem + - telegraf_netstat - telegraf_processes + - telegraf_statsd + - telegraf_swap + - telegraf_disk metrics/hostDeltaMetrics: exporters: - awscloudwatch processors: + - awsentity - cumulativetodelta/hostDeltaMetrics - ec2tagger - transform receivers: - - telegraf_net - telegraf_diskio + - telegraf_net traces/xray: exporters: - awsxray diff --git a/translator/tocwconfig/sampleConfig/complete_linux_config.conf b/translator/tocwconfig/sampleConfig/complete_linux_config.conf index 5702fcd8e7..aef5f442db 100644 --- a/translator/tocwconfig/sampleConfig/complete_linux_config.conf +++ b/translator/tocwconfig/sampleConfig/complete_linux_config.conf @@ -103,8 +103,6 @@ service_address = "udp://127.0.0.1:25826" [inputs.socket_listener.tags] "aws:AggregationInterval" = "60s" - "deployment.environment" = "" - "service.name" = "" [[inputs.statsd]] interval = "10s" @@ -113,8 +111,6 @@ service_address = ":8125" 
[inputs.statsd.tags] "aws:AggregationInterval" = "60s" - "deployment.environment" = "" - "service.name" = "" [[inputs.swap]] fieldpass = ["used", "free", "used_percent"] diff --git a/translator/tocwconfig/sampleConfig/complete_linux_config.yaml b/translator/tocwconfig/sampleConfig/complete_linux_config.yaml index b4d9e42a32..41456c9b94 100644 --- a/translator/tocwconfig/sampleConfig/complete_linux_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_linux_config.yaml @@ -100,6 +100,9 @@ extensions: mode: ec2 region: us-west-2 processors: + awsentity: + scrape_datapoint_attribute: true + platform: ec2 batch/emf_logs: metadata_cardinality_limit: 1000 send_batch_max_size: 0 @@ -131,9 +134,9 @@ processors: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: - - InstanceType - ImageId - InstanceId + - InstanceType imds_retries: 1 refresh_interval_seconds: 0s filter/jmx/0: @@ -182,11 +185,11 @@ processors: metric_statements: - context: metric statements: - - set(unit, "unit") where name == "disk_free" - - set(name, "DISK_FREE") where name == "disk_free" - - set(unit, "unit") where name == "cpu_usage_idle" - - set(name, "CPU_USAGE_IDLE") where name == "cpu_usage_idle" - - set(unit, "unit") where name == "cpu_usage_nice" + - set(unit, "unit") where name == "cpu_usage_idle" + - set(name, "CPU_USAGE_IDLE") where name == "cpu_usage_idle" + - set(unit, "unit") where name == "cpu_usage_nice" + - set(unit, "unit") where name == "disk_free" + - set(name, "DISK_FREE") where name == "disk_free" trace_statements: [] transform/jmx/0: error_mode: propagate @@ -195,9 +198,9 @@ processors: metric_statements: - context: metric statements: - - set(unit, "unit") where name == "jvm.memory.heap.used" - - set(name, "JVM_MEM_HEAP_USED") where name == "jvm.memory.heap.used" - - set(name, "kafka.fetch-rate") where name == "kafka.consumer.fetch-rate" + - set(name, "kafka.fetch-rate") where name == "kafka.consumer.fetch-rate" + - set(unit, "unit") where name == 
"jvm.memory.heap.used" + - set(name, "JVM_MEM_HEAP_USED") where name == "jvm.memory.heap.used" trace_statements: [] transform/jmx/1: error_mode: propagate @@ -206,7 +209,7 @@ processors: metric_statements: - context: metric statements: - - set(name, "TC_ERR") where name == "tomcat.errors" + - set(name, "TC_ERR") where name == "tomcat.errors" trace_statements: [] receivers: awsxray: @@ -370,28 +373,30 @@ service: exporters: - awscloudwatch processors: + - awsentity - ec2tagger - transform receivers: - - telegraf_cpu + - telegraf_mem + - telegraf_netstat - telegraf_socket_listener - - telegraf_procstat/1917393364 - - telegraf_disk - telegraf_statsd + - telegraf_procstat/1917393364 - telegraf_swap - - telegraf_mem - - telegraf_netstat + - telegraf_cpu + - telegraf_disk - telegraf_processes metrics/hostDeltaMetrics: exporters: - awscloudwatch processors: + - awsentity - cumulativetodelta/hostDeltaMetrics - ec2tagger - transform receivers: - - telegraf_net - telegraf_diskio + - telegraf_net metrics/jmx/0: exporters: - awscloudwatch diff --git a/translator/tocwconfig/sampleConfig/complete_windows_config.conf b/translator/tocwconfig/sampleConfig/complete_windows_config.conf index 5a4add2254..9d06e7e62b 100644 --- a/translator/tocwconfig/sampleConfig/complete_windows_config.conf +++ b/translator/tocwconfig/sampleConfig/complete_windows_config.conf @@ -50,8 +50,6 @@ service_address = ":8125" [inputs.statsd.tags] "aws:AggregationInterval" = "60s" - "deployment.environment" = "" - "service.name" = "" [[inputs.win_perf_counters]] DisableReplacer = true diff --git a/translator/tocwconfig/sampleConfig/complete_windows_config.yaml b/translator/tocwconfig/sampleConfig/complete_windows_config.yaml index 74a98804b2..9b4bc107d7 100644 --- a/translator/tocwconfig/sampleConfig/complete_windows_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_windows_config.yaml @@ -95,6 +95,9 @@ extensions: mode: ec2 region: us-west-2 processors: + awsentity: + scrape_datapoint_attribute: 
true + platform: ec2 batch/emf_logs: metadata_cardinality_limit: 1000 send_batch_max_size: 0 @@ -251,6 +254,7 @@ service: exporters: - awscloudwatch processors: + - awsentity - ec2tagger - transform receivers: diff --git a/translator/tocwconfig/sampleConfig/delta_config_linux.yaml b/translator/tocwconfig/sampleConfig/delta_config_linux.yaml index cfdab94ac0..53498d833c 100644 --- a/translator/tocwconfig/sampleConfig/delta_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/delta_config_linux.yaml @@ -21,6 +21,9 @@ extensions: mode: ec2 region: us-east-1 processors: + awsentity: + scrape_datapoint_attribute: true + platform: ec2 cumulativetodelta/hostDeltaMetrics: exclude: match_type: strict @@ -47,12 +50,12 @@ processors: metric_statements: - context: metric statements: - - set(unit, "Count") where name == "diskio_iops_in_progress" - - set(name, "DRIVER_DISKIO_IOPS_IN_PROGRESS") where name == "diskio_iops_in_progress" - - set(unit, "Milliseconds") where name == "diskio_read_time" - - set(name, "DRIVER_DISKIO_READ_TIME") where name == "diskio_read_time" - - set(unit, "Milliseconds") where name == "diskio_write_time" - - set(name, "DRIVER_DISKIO_WRITE_TIME") where name == "diskio_write_time" + - set(unit, "Count") where name == "diskio_iops_in_progress" + - set(name, "DRIVER_DISKIO_IOPS_IN_PROGRESS") where name == "diskio_iops_in_progress" + - set(unit, "Milliseconds") where name == "diskio_read_time" + - set(name, "DRIVER_DISKIO_READ_TIME") where name == "diskio_read_time" + - set(unit, "Milliseconds") where name == "diskio_write_time" + - set(name, "DRIVER_DISKIO_WRITE_TIME") where name == "diskio_write_time" trace_statements: [] receivers: telegraf_diskio: @@ -68,6 +71,7 @@ service: exporters: - awscloudwatch processors: + - awsentity - cumulativetodelta/hostDeltaMetrics - ec2tagger - transform diff --git a/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml b/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml index 
04a0fb0a7e..b85e0e47a1 100644 --- a/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml @@ -21,6 +21,9 @@ extensions: mode: ec2 region: us-east-1 processors: + awsentity: + scrape_datapoint_attribute: true + platform: ec2 cumulativetodelta/hostDeltaMetrics: exclude: match_type: "" @@ -32,9 +35,9 @@ processors: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: + - InstanceId - InstanceType - ImageId - - InstanceId imds_retries: 1 refresh_interval_seconds: 0s receivers: @@ -51,6 +54,7 @@ service: exporters: - awscloudwatch processors: + - awsentity - cumulativetodelta/hostDeltaMetrics - ec2tagger receivers: diff --git a/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml b/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml index 7561138dc2..219fd3d93a 100644 --- a/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml +++ b/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml @@ -26,13 +26,16 @@ extensions: mode: ec2 region: us-west-2 processors: + awsentity: + scrape_datapoint_attribute: true + platform: ec2 ec2tagger: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: - - InstanceId - InstanceType - ImageId + - InstanceId imds_retries: 1 refresh_interval_seconds: 0s transform: @@ -42,9 +45,9 @@ processors: metric_statements: - context: metric statements: - - set(unit, "unit") where name == "cpu_usage_idle" - - set(name, "CPU_USAGE_IDLE") where name == "cpu_usage_idle" - - set(unit, "unit") where name == "cpu_usage_nice" + - set(unit, "unit") where name == "cpu_usage_idle" + - set(name, "CPU_USAGE_IDLE") where name == "cpu_usage_idle" + - set(unit, "unit") where name == "cpu_usage_nice" trace_statements: [] receivers: telegraf_cpu: @@ -68,12 +71,13 @@ service: exporters: - awscloudwatch processors: + - awsentity - ec2tagger - transform receivers: + - telegraf_nvidia_smi - telegraf_cpu - telegraf_disk - - telegraf_nvidia_smi telemetry: 
logs: development: false diff --git a/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml b/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml index df878013c5..96989cc17e 100644 --- a/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml +++ b/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml @@ -21,6 +21,9 @@ extensions: mode: ec2 region: us-east-1 processors: + awsentity: + scrape_datapoint_attribute: true + platform: ec2 ec2tagger: imds_retries: 1 refresh_interval_seconds: 0s @@ -42,10 +45,11 @@ service: exporters: - awscloudwatch processors: + - awsentity - ec2tagger receivers: - - telegraf_mem - telegraf_disk + - telegraf_mem telemetry: logs: development: false diff --git a/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml b/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml index 5c3f09a94a..c8ba47c880 100644 --- a/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml +++ b/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml @@ -21,13 +21,16 @@ extensions: mode: ec2 region: us-east-1 processors: + awsentity: + scrape_datapoint_attribute: true + platform: ec2 ec2tagger: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: + - ImageId - InstanceId - InstanceType - - ImageId imds_retries: 1 refresh_interval_seconds: 0s receivers: @@ -48,6 +51,7 @@ service: exporters: - awscloudwatch processors: + - awsentity - ec2tagger receivers: - telegraf_mem diff --git a/translator/tocwconfig/sampleConfig/jmx_config_linux.yaml b/translator/tocwconfig/sampleConfig/jmx_config_linux.yaml index 1177a5de08..e2c318a2b4 100644 --- a/translator/tocwconfig/sampleConfig/jmx_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/jmx_config_linux.yaml @@ -24,6 +24,9 @@ extensions: mode: ec2 region: us-west-2 processors: + awsentity: + scrape_datapoint_attribute: true + platform: ec2 cumulativetodelta/jmx: exclude: match_type: "" @@ -66,11 +69,11 @@ processors: metric_statements: - 
context: metric statements: - - set(unit, "unit") where name == "cpu_usage_idle" - - set(name, "CPU_USAGE_IDLE") where name == "cpu_usage_idle" - - set(unit, "unit") where name == "cpu_usage_nice" - - set(unit, "unit") where name == "disk_free" - - set(name, "DISK_FREE") where name == "disk_free" + - set(unit, "unit") where name == "cpu_usage_idle" + - set(name, "CPU_USAGE_IDLE") where name == "cpu_usage_idle" + - set(unit, "unit") where name == "cpu_usage_nice" + - set(unit, "unit") where name == "disk_free" + - set(name, "DISK_FREE") where name == "disk_free" trace_statements: [] transform/jmx: error_mode: propagate @@ -79,9 +82,9 @@ processors: metric_statements: - context: metric statements: - - set(unit, "unit") where name == "jvm.memory.heap.used" - - set(name, "JVM_MEM_HEAP_USED") where name == "jvm.memory.heap.used" - - set(name, "kafka.fetch-rate") where name == "kafka.consumer.fetch-rate" + - set(unit, "unit") where name == "jvm.memory.heap.used" + - set(name, "JVM_MEM_HEAP_USED") where name == "jvm.memory.heap.used" + - set(name, "kafka.fetch-rate") where name == "kafka.consumer.fetch-rate" trace_statements: [] receivers: jmx: @@ -109,10 +112,11 @@ service: exporters: - awscloudwatch processors: + - awsentity - transform receivers: - - telegraf_cpu - telegraf_disk + - telegraf_cpu metrics/jmx: exporters: - awscloudwatch diff --git a/translator/tocwconfig/sampleConfig/standard_config_linux.yaml b/translator/tocwconfig/sampleConfig/standard_config_linux.yaml index e6d6d9b989..68eea45ead 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_linux.yaml @@ -21,6 +21,9 @@ extensions: mode: ec2 region: us-west-2 processors: + awsentity: + scrape_datapoint_attribute: true + platform: ec2 cumulativetodelta/hostDeltaMetrics: exclude: match_type: strict @@ -35,9 +38,9 @@ processors: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: - - InstanceType - ImageId - InstanceId + 
- InstanceType refresh_interval_seconds: 0s receivers: telegraf_cpu: @@ -69,16 +72,18 @@ service: exporters: - awscloudwatch processors: + - awsentity - ec2tagger receivers: + - telegraf_cpu - telegraf_disk - telegraf_mem - telegraf_swap - - telegraf_cpu metrics/hostDeltaMetrics: exporters: - awscloudwatch processors: + - awsentity - cumulativetodelta/hostDeltaMetrics - ec2tagger receivers: diff --git a/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml b/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml index 7d9a81a244..8c82bf9553 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml @@ -25,6 +25,9 @@ extensions: region: us-west-2 shared_credential_file: fake-path processors: + awsentity: + scrape_datapoint_attribute: true + platform: ec2 cumulativetodelta/hostDeltaMetrics: exclude: match_type: strict @@ -76,16 +79,18 @@ service: exporters: - awscloudwatch processors: + - awsentity - ec2tagger receivers: + - telegraf_cpu - telegraf_disk - telegraf_mem - telegraf_swap - - telegraf_cpu metrics/hostDeltaMetrics: exporters: - awscloudwatch processors: + - awsentity - cumulativetodelta/hostDeltaMetrics - ec2tagger receivers: diff --git a/translator/tocwconfig/sampleConfig/standard_config_windows.yaml b/translator/tocwconfig/sampleConfig/standard_config_windows.yaml index 47fab061ef..966aa73064 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_windows.yaml @@ -21,6 +21,9 @@ extensions: mode: ec2 region: us-west-2 processors: + awsentity: + scrape_datapoint_attribute: true + platform: ec2 ec2tagger: ec2_instance_tag_keys: - AutoScalingGroupName @@ -64,13 +67,14 @@ service: exporters: - awscloudwatch processors: + - awsentity - ec2tagger receivers: - - telegraf_win_perf_counters/3446270237 - - 
telegraf_win_perf_counters/3762679655 - telegraf_win_perf_counters/4283769065 - telegraf_win_perf_counters/1492679118 - telegraf_win_perf_counters/3610923661 + - telegraf_win_perf_counters/3446270237 + - telegraf_win_perf_counters/3762679655 telemetry: logs: development: false diff --git a/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml b/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml index 901e719956..0b99767a19 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml @@ -25,13 +25,16 @@ extensions: region: us-west-2 shared_credential_file: fake-path processors: + awsentity: + scrape_datapoint_attribute: true + platform: ec2 ec2tagger: ec2_instance_tag_keys: - AutoScalingGroupName ec2_metadata_tags: + - InstanceType - ImageId - InstanceId - - InstanceType imds_retries: 2 profile: AmazonCloudWatchAgent refresh_interval_seconds: 0s @@ -71,13 +74,14 @@ service: exporters: - awscloudwatch processors: + - awsentity - ec2tagger receivers: - - telegraf_win_perf_counters/3610923661 - - telegraf_win_perf_counters/3446270237 - telegraf_win_perf_counters/3762679655 - telegraf_win_perf_counters/4283769065 - telegraf_win_perf_counters/1492679118 + - telegraf_win_perf_counters/3610923661 + - telegraf_win_perf_counters/3446270237 telemetry: logs: development: false diff --git a/translator/tocwconfig/sampleConfig/statsd_config_linux.conf b/translator/tocwconfig/sampleConfig/statsd_config_linux.conf index ab7867a003..cbf53c502d 100644 --- a/translator/tocwconfig/sampleConfig/statsd_config_linux.conf +++ b/translator/tocwconfig/sampleConfig/statsd_config_linux.conf @@ -23,8 +23,6 @@ service_address = ":8125" [inputs.statsd.tags] "aws:StorageResolution" = "true" - "deployment.environment" = "" - "service.name" = "" [outputs] diff --git 
a/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml b/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml index fe837f5718..a45eb107ee 100644 --- a/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml @@ -20,6 +20,10 @@ extensions: entitystore: mode: ec2 region: us-west-2 +processors: + awsentity: + scrape_datapoint_attribute: true + platform: ec2 receivers: telegraf_statsd: collection_interval: 10s @@ -33,7 +37,8 @@ service: metrics/host: exporters: - awscloudwatch - processors: [] + processors: + - awsentity receivers: - telegraf_statsd telemetry: diff --git a/translator/tocwconfig/sampleConfig/statsd_config_windows.conf b/translator/tocwconfig/sampleConfig/statsd_config_windows.conf index bb3b7fd983..7db7ad9c19 100644 --- a/translator/tocwconfig/sampleConfig/statsd_config_windows.conf +++ b/translator/tocwconfig/sampleConfig/statsd_config_windows.conf @@ -23,8 +23,6 @@ service_address = ":8125" [inputs.statsd.tags] "aws:StorageResolution" = "true" - "deployment.environment" = "" - "service.name" = "" [outputs] diff --git a/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml b/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml index 57c2a820bd..d05b9a0c4e 100644 --- a/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml @@ -20,6 +20,10 @@ extensions: entitystore: mode: ec2 region: us-west-2 +processors: + awsentity: + scrape_datapoint_attribute: true + platform: ec2 receivers: telegraf_statsd: collection_interval: 10s @@ -33,7 +37,8 @@ service: metrics/host: exporters: - awscloudwatch - processors: [] + processors: + - awsentity receivers: - telegraf_statsd telemetry: diff --git a/translator/tocwconfig/totomlconfig/testdata/agentToml.conf b/translator/tocwconfig/totomlconfig/testdata/agentToml.conf index 82fa874fce..4b9a68b829 100644 --- 
a/translator/tocwconfig/totomlconfig/testdata/agentToml.conf +++ b/translator/tocwconfig/totomlconfig/testdata/agentToml.conf @@ -103,8 +103,7 @@ service_address = "udp://127.0.0.1:25826" [inputs.socket_listener.tags] "aws:AggregationInterval" = "60s" - "deployment.environment" = "" - "service.name" = "" + [[inputs.statsd]] interval = "10s" @@ -113,8 +112,7 @@ service_address = ":8125" [inputs.statsd.tags] "aws:AggregationInterval" = "60s" - "deployment.environment" = "" - "service.name" = "" + [[inputs.swap]] fieldpass = ["used", "free", "used_percent"] diff --git a/translator/translate/metrics/metrics_collect/collectd/collectd_test.go b/translator/translate/metrics/metrics_collect/collectd/collectd_test.go index ed11e36471..a0ad8d3865 100644 --- a/translator/translate/metrics/metrics_collect/collectd/collectd_test.go +++ b/translator/translate/metrics/metrics_collect/collectd/collectd_test.go @@ -33,8 +33,7 @@ func TestCollectD_HappyCase(t *testing.T) { "collectd_auth_file": "/etc/collectd/_auth_file", "collectd_security_level": "none", "collectd_typesdb": []interface{}{"/usr/share/collectd/types.db", "/custom_location/types.db"}, - "tags": map[string]interface{}{"aws:AggregationInterval": "30s", - "deployment.environment": "", "service.name": ""}, + "tags": map[string]interface{}{"aws:AggregationInterval": "30s"}, }, } @@ -57,8 +56,7 @@ func TestCollectD_MinimumConfig(t *testing.T) { "collectd_auth_file": "/etc/collectd/auth_file", "collectd_security_level": "encrypt", "collectd_typesdb": []interface{}{"/usr/share/collectd/types.db"}, - "tags": map[string]interface{}{"aws:AggregationInterval": "60s", - "deployment.environment": "", "service.name": ""}, + "tags": map[string]interface{}{"aws:AggregationInterval": "60s"}, }, } diff --git a/translator/translate/metrics/metrics_collect/collectd/ruleDeploymentEnvironment.go b/translator/translate/metrics/metrics_collect/collectd/ruleDeploymentEnvironment.go index dc674fc1af..ed9cae0df8 100644 --- 
a/translator/translate/metrics/metrics_collect/collectd/ruleDeploymentEnvironment.go +++ b/translator/translate/metrics/metrics_collect/collectd/ruleDeploymentEnvironment.go @@ -6,6 +6,7 @@ package collected import ( "github.com/aws/amazon-cloudwatch-agent/translator" "github.com/aws/amazon-cloudwatch-agent/translator/translate/metrics" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" ) type DeploymentEnvironment struct { @@ -14,13 +15,15 @@ type DeploymentEnvironment struct { const SectionkeyDeploymentEnvironment = "deployment.environment" func (obj *DeploymentEnvironment) ApplyRule(input interface{}) (string, interface{}) { - _, returnVal := translator.DefaultCase(SectionkeyDeploymentEnvironment, "", input) - returnKey := "deployment.environment" + returnKey, returnVal := translator.DefaultCase(SectionkeyDeploymentEnvironment, "", input) - if returnVal == "" { - returnVal = metrics.GlobalMetricConfig.DeploymentEnvironment + parentKeyVal := metrics.GlobalMetricConfig.DeploymentEnvironment + if returnVal != "" { + return common.Tags, map[string]interface{}{returnKey: returnVal} + } else if parentKeyVal != "" { + return common.Tags, map[string]interface{}{returnKey: parentKeyVal} } - return "tags", map[string]interface{}{returnKey: returnVal} + return "", nil } func init() { diff --git a/translator/translate/metrics/metrics_collect/collectd/ruleServiceName.go b/translator/translate/metrics/metrics_collect/collectd/ruleServiceName.go index de083dcbf1..45fe4ddee2 100644 --- a/translator/translate/metrics/metrics_collect/collectd/ruleServiceName.go +++ b/translator/translate/metrics/metrics_collect/collectd/ruleServiceName.go @@ -6,6 +6,7 @@ package collected import ( "github.com/aws/amazon-cloudwatch-agent/translator" "github.com/aws/amazon-cloudwatch-agent/translator/translate/metrics" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" ) type ServiceName struct { @@ -14,13 +15,15 @@ type ServiceName struct { const 
SectionkeyServicename = "service.name" func (obj *ServiceName) ApplyRule(input interface{}) (string, interface{}) { - _, returnVal := translator.DefaultCase(SectionkeyServicename, "", input) - returnKey := "service.name" + returnKey, returnVal := translator.DefaultCase(SectionkeyServicename, "", input) - if returnVal == "" { - returnVal = metrics.GlobalMetricConfig.ServiceName + parentKeyVal := metrics.GlobalMetricConfig.ServiceName + if returnVal != "" { + return common.Tags, map[string]interface{}{returnKey: returnVal} + } else if parentKeyVal != "" { + return common.Tags, map[string]interface{}{returnKey: parentKeyVal} } - return "tags", map[string]interface{}{returnKey: returnVal} + return "", nil } func init() { diff --git a/translator/translate/metrics/metrics_collect/statsd/ruleDeploymentEnvironment.go b/translator/translate/metrics/metrics_collect/statsd/ruleDeploymentEnvironment.go index d1b58ee5ab..02817e5eb5 100644 --- a/translator/translate/metrics/metrics_collect/statsd/ruleDeploymentEnvironment.go +++ b/translator/translate/metrics/metrics_collect/statsd/ruleDeploymentEnvironment.go @@ -6,6 +6,7 @@ package statsd import ( "github.com/aws/amazon-cloudwatch-agent/translator" "github.com/aws/amazon-cloudwatch-agent/translator/translate/metrics" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" ) type DeploymentEnvironment struct { @@ -14,13 +15,15 @@ type DeploymentEnvironment struct { const SectionkeyDeploymentEnvironment = "deployment.environment" func (obj *DeploymentEnvironment) ApplyRule(input interface{}) (string, interface{}) { - _, returnVal := translator.DefaultCase(SectionkeyDeploymentEnvironment, "", input) - returnKey := "deployment.environment" + returnKey, returnVal := translator.DefaultCase(SectionkeyDeploymentEnvironment, "", input) - if returnVal == "" { - returnVal = metrics.GlobalMetricConfig.DeploymentEnvironment + parentKeyVal := metrics.GlobalMetricConfig.DeploymentEnvironment + if returnVal != "" { + 
return common.Tags, map[string]interface{}{returnKey: returnVal} + } else if parentKeyVal != "" { + return common.Tags, map[string]interface{}{returnKey: parentKeyVal} } - return "tags", map[string]interface{}{returnKey: returnVal} + return "", nil } func init() { diff --git a/translator/translate/metrics/metrics_collect/statsd/ruleServiceName.go b/translator/translate/metrics/metrics_collect/statsd/ruleServiceName.go index d45214da7e..cd80e706c2 100644 --- a/translator/translate/metrics/metrics_collect/statsd/ruleServiceName.go +++ b/translator/translate/metrics/metrics_collect/statsd/ruleServiceName.go @@ -6,6 +6,7 @@ package statsd import ( "github.com/aws/amazon-cloudwatch-agent/translator" "github.com/aws/amazon-cloudwatch-agent/translator/translate/metrics" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" ) type ServiceName struct { @@ -14,13 +15,15 @@ type ServiceName struct { const SectionkeyServicename = "service.name" func (obj *ServiceName) ApplyRule(input interface{}) (string, interface{}) { - _, returnVal := translator.DefaultCase(SectionkeyServicename, "", input) - returnKey := "service.name" + returnKey, returnVal := translator.DefaultCase(SectionkeyServicename, "", input) - if returnVal == "" { - returnVal = metrics.GlobalMetricConfig.ServiceName + parentKeyVal := metrics.GlobalMetricConfig.ServiceName + if returnVal != "" { + return common.Tags, map[string]interface{}{returnKey: returnVal} + } else if parentKeyVal != "" { + return common.Tags, map[string]interface{}{returnKey: parentKeyVal} } - return "tags", map[string]interface{}{returnKey: returnVal} + return "", nil } func init() { diff --git a/translator/translate/metrics/metrics_collect/statsd/statsd_test.go b/translator/translate/metrics/metrics_collect/statsd/statsd_test.go index d4914b8e43..9b878277bf 100644 --- a/translator/translate/metrics/metrics_collect/statsd/statsd_test.go +++ b/translator/translate/metrics/metrics_collect/statsd/statsd_test.go @@ -29,8 
+29,7 @@ func TestStatsD_HappyCase(t *testing.T) { "service_address": ":12345", "interval": "5s", "parse_data_dog_tags": true, - "tags": map[string]interface{}{"aws:AggregationInterval": "30s", - "deployment.environment": "", "service.name": ""}, + "tags": map[string]interface{}{"aws:AggregationInterval": "30s"}, }, } @@ -50,8 +49,7 @@ func TestStatsD_MinimumConfig(t *testing.T) { "service_address": ":8125", "interval": "10s", "parse_data_dog_tags": true, - "tags": map[string]interface{}{"aws:AggregationInterval": "60s", - "deployment.environment": "", "service.name": ""}, + "tags": map[string]interface{}{"aws:AggregationInterval": "60s"}, }, } @@ -73,8 +71,7 @@ func TestStatsD_DisableAggregation(t *testing.T) { "service_address": ":8125", "interval": "10s", "parse_data_dog_tags": true, - "tags": map[string]interface{}{"aws:StorageResolution": "true", - "deployment.environment": "", "service.name": ""}, + "tags": map[string]interface{}{"aws:StorageResolution": "true"}, }, } @@ -96,9 +93,8 @@ func TestStatsD_MetricSeparator(t *testing.T) { "service_address": ":8125", "interval": "10s", "parse_data_dog_tags": true, - "tags": map[string]interface{}{"aws:AggregationInterval": "60s", - "deployment.environment": "", "service.name": ""}, - "metric_separator": ".", + "tags": map[string]interface{}{"aws:AggregationInterval": "60s"}, + "metric_separator": ".", }, } diff --git a/translator/translate/otel/common/common.go b/translator/translate/otel/common/common.go index 705f93b781..3bb7f49085 100644 --- a/translator/translate/otel/common/common.go +++ b/translator/translate/otel/common/common.go @@ -14,6 +14,8 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" "gopkg.in/yaml.v3" + + "github.com/aws/amazon-cloudwatch-agent/internal/util/collections" ) const ( @@ -61,6 +63,7 @@ const ( Udp = "udp" Tcp = "tcp" TlsKey = "tls" + Tags = "tags" Region = "region" LogGroupName = "log_group_name" LogStreamName = "log_stream_name" @@ -69,6 
+72,31 @@ const ( UnitKey = "unit" ) +const ( + CollectDMetricKey = "collectd" + CPUMetricKey = "cpu" + DiskMetricKey = "disk" + DiskIoMetricKey = "diskio" + StatsDMetricKey = "statsd" + SwapMetricKey = "swap" + MemMetricKey = "mem" + NetMetricKey = "net" + NetStatMetricKey = "netstat" + ProcessMetricKey = "process" + ProcStatMetricKey = "procstat" + + //Windows Plugins + MemMetricKeyWindows = "Memory" + LogicalDiskMetricKeyWindows = "LogicalDisk" + NetworkMetricKeyWindows = "Network Interface" + PagingMetricKeyWindows = "Paging" + PhysicalDiskMetricKeyWindows = "PhysicalDisk" + ProcessorMetricKeyWindows = "Processor" + SystemMetricKeyWindows = "System" + TCPv4MetricKeyWindows = "TCPv4" + TCPv6MetricKeyWindows = "TCPv6" +) + const ( PipelineNameHost = "host" PipelineNameHostDeltaMetrics = "hostDeltaMetrics" @@ -93,6 +121,12 @@ var ( JmxTargets = []string{"activemq", "cassandra", "hbase", "hadoop", "jetty", "jvm", "kafka", "kafka-consumer", "kafka-producer", "solr", "tomcat", "wildfly"} AgentDebugConfigKey = ConfigKey(AgentKey, DebugKey) + + TelegrafPlugins = collections.NewSet[string](CollectDMetricKey, CPUMetricKey, DiskMetricKey, DiskIoMetricKey, + StatsDMetricKey, SwapMetricKey, MemMetricKey, NetMetricKey, NetStatMetricKey, ProcessMetricKey, ProcStatMetricKey, + //Windows Plugins + MemMetricKeyWindows, LogicalDiskMetricKeyWindows, NetworkMetricKeyWindows, PagingMetricKeyWindows, PhysicalDiskMetricKeyWindows, + ProcessorMetricKeyWindows, SystemMetricKeyWindows, TCPv4MetricKeyWindows, TCPv6MetricKeyWindows) ) // Translator is used to translate the JSON config into an @@ -403,3 +437,13 @@ func IsAnySet(conf *confmap.Conf, keys []string) bool { } return false } + +// TelegrafMetricsEnabled checks if any telegraf plugin is present in the configuration. 
+func TelegrafMetricsEnabled(conf *confmap.Conf) bool { + for plugin := range TelegrafPlugins { + if conf.IsSet(ConfigKey(MetricsKey, MetricsCollectedKey, plugin)) { + return true + } + } + return false +} diff --git a/translator/translate/otel/pipeline/host/translator.go b/translator/translate/otel/pipeline/host/translator.go index 1b7b01598f..e4faf97f1a 100644 --- a/translator/translate/otel/pipeline/host/translator.go +++ b/translator/translate/otel/pipeline/host/translator.go @@ -12,6 +12,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/exporter/awscloudwatch" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/agenthealth" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/awsentity" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/cumulativetodeltaprocessor" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/ec2taggerprocessor" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/metricsdecorator" @@ -66,7 +67,7 @@ func (t translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators, translators := common.ComponentTranslators{ Receivers: t.receivers, - Processors: common.NewTranslatorMap[component.Config](), + Processors: common.NewTranslatorMap(awsentity.NewTranslator()), Exporters: common.NewTranslatorMap(awscloudwatch.NewTranslator()), Extensions: common.NewTranslatorMap(agenthealth.NewTranslator(component.DataTypeMetrics, []string{agenthealth.OperationPutMetricData})), } diff --git a/translator/translate/otel/pipeline/host/translator_test.go b/translator/translate/otel/pipeline/host/translator_test.go index f41a3e1d83..4ed90b8270 100644 --- a/translator/translate/otel/pipeline/host/translator_test.go +++ b/translator/translate/otel/pipeline/host/translator_test.go @@ -59,7 +59,7 @@ func TestTranslator(t *testing.T) { 
want: &want{ pipelineID: "metrics/host", receivers: []string{"nop", "other"}, - processors: []string{}, + processors: []string{"awsentity"}, exporters: []string{"awscloudwatch"}, extensions: []string{"agenthealth/metrics"}, }, @@ -76,7 +76,7 @@ func TestTranslator(t *testing.T) { want: &want{ pipelineID: "metrics/hostDeltaMetrics", receivers: []string{"nop", "other"}, - processors: []string{"cumulativetodelta/hostDeltaMetrics"}, + processors: []string{"awsentity", "cumulativetodelta/hostDeltaMetrics"}, exporters: []string{"awscloudwatch"}, extensions: []string{"agenthealth/metrics"}, }, @@ -100,7 +100,7 @@ func TestTranslator(t *testing.T) { want: &want{ pipelineID: "metrics/host", receivers: []string{"nop", "other"}, - processors: []string{"transform"}, + processors: []string{"awsentity", "transform"}, exporters: []string{"awscloudwatch"}, extensions: []string{"agenthealth/metrics"}, }, @@ -121,7 +121,7 @@ func TestTranslator(t *testing.T) { want: &want{ pipelineID: "metrics/host", receivers: []string{"nop", "other"}, - processors: []string{}, + processors: []string{"awsentity"}, exporters: []string{"awscloudwatch"}, extensions: []string{"agenthealth/metrics"}, }, diff --git a/translator/translate/otel/processor/awsentity/translator.go b/translator/translate/otel/processor/awsentity/translator.go index dc6892bfb0..17f9a50d5e 100644 --- a/translator/translate/otel/processor/awsentity/translator.go +++ b/translator/translate/otel/processor/awsentity/translator.go @@ -13,6 +13,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/logs/util" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" + "github.com/aws/amazon-cloudwatch-agent/translator/util/ecsutil" ) const name = "awsentity" @@ -34,6 +35,10 @@ func (t *translator) ID() component.ID { func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { cfg := 
t.factory.CreateDefaultConfig().(*awsentity.Config) + if common.TelegrafMetricsEnabled(conf) { + cfg.ScrapeDatapointAttribute = true + } + hostedInConfigKey := common.ConfigKey(common.LogsKey, common.MetricsCollectedKey, common.AppSignals, "hosted_in") hostedIn, hostedInConfigured := common.GetString(conf, hostedInConfigKey) if !hostedInConfigured { @@ -46,10 +51,32 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { } } - cfg.KubernetesMode = context.CurrentContext().KubernetesMode() - switch cfg.KubernetesMode { + //TODO: This logic is more or less identical to what AppSignals does. This should be moved to a common place for reuse + ctx := context.CurrentContext() + mode := ctx.KubernetesMode() + cfg.KubernetesMode = mode + if mode == "" { + mode = ctx.Mode() + } + if mode == config.ModeEC2 { + if ecsutil.GetECSUtilSingleton().IsECS() { + mode = config.ModeECS + } + } + + switch mode { case config.ModeEKS: cfg.ClusterName = hostedIn + cfg.Platform = config.ModeEKS + case config.ModeK8sEC2: + cfg.ClusterName = hostedIn + cfg.Platform = config.ModeK8sEC2 + case config.ModeK8sOnPrem: + cfg.Platform = config.ModeK8sOnPrem + case config.ModeEC2: + cfg.Platform = config.ModeEC2 + case config.ModeECS: + cfg.Platform = config.ModeECS } return cfg, nil } diff --git a/translator/translate/otel/processor/awsentity/translator_test.go b/translator/translate/otel/processor/awsentity/translator_test.go index e80ffda76e..27991b390e 100644 --- a/translator/translate/otel/processor/awsentity/translator_test.go +++ b/translator/translate/otel/processor/awsentity/translator_test.go @@ -32,6 +32,7 @@ func TestTranslate(t *testing.T) { want: &awsentity.Config{ ClusterName: "test", KubernetesMode: config.ModeEKS, + Platform: config.ModeEKS, }, }, } From 68a041bb96855065c541dd94cabbb62e921378a0 Mon Sep 17 00:00:00 2001 From: Lisa Guo Date: Wed, 25 Sep 2024 14:49:10 -0400 Subject: [PATCH 13/47] Prometheus translation for entity emission on EC2 and K8s (#812) 
--- .../sampleConfig/prometheus_config_linux.yaml | 291 ++++++++++++++++++ .../prometheus_config_windows.yaml | 291 ++++++++++++++++++ .../otel/pipeline/prometheus/translator.go | 30 +- .../pipeline/prometheus/translator_test.go | 31 +- 4 files changed, 634 insertions(+), 9 deletions(-) diff --git a/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml b/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml index ce4ad17041..e83a7ccc20 100644 --- a/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml @@ -81,11 +81,300 @@ extensions: mode: ec2 region: us-east-1 processors: + awsentity: + platform: ec2 batch/prometheus: metadata_cardinality_limit: 1000 send_batch_max_size: 0 send_batch_size: 8192 timeout: 30s + resourcedetection: + aks: + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + k8s.cluster.name: + enabled: false + azure: + resource_attributes: + azure.resourcegroup.name: + enabled: true + azure.vm.name: + enabled: true + azure.vm.scaleset.name: + enabled: true + azure.vm.size: + enabled: true + cloud.account.id: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + host.id: + enabled: true + host.name: + enabled: true + tags: [] + compression: "" + consul: + address: "" + datacenter: "" + namespace: "" + resource_attributes: + cloud.region: + enabled: true + host.id: + enabled: true + host.name: + enabled: true + token_file: "" + detectors: + - eks + - env + - ec2 + disable_keep_alives: false + docker: + resource_attributes: + host.name: + enabled: true + os.type: + enabled: true + ec2: + resource_attributes: + cloud.account.id: + enabled: true + cloud.availability_zone: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + host.id: + enabled: true + host.image.id: + enabled: true + 
host.name: + enabled: true + host.type: + enabled: true + tags: + - ^kubernetes.io/cluster/.*$ + - ^aws:autoscaling:groupName + ecs: + resource_attributes: + aws.ecs.cluster.arn: + enabled: true + aws.ecs.launchtype: + enabled: true + aws.ecs.task.arn: + enabled: true + aws.ecs.task.family: + enabled: true + aws.ecs.task.id: + enabled: true + aws.ecs.task.revision: + enabled: true + aws.log.group.arns: + enabled: true + aws.log.group.names: + enabled: true + aws.log.stream.arns: + enabled: true + aws.log.stream.names: + enabled: true + cloud.account.id: + enabled: true + cloud.availability_zone: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + eks: + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + k8s.cluster.name: + enabled: false + elasticbeanstalk: + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + deployment.environment: + enabled: true + service.instance.id: + enabled: true + service.version: + enabled: true + endpoint: "" + gcp: + resource_attributes: + cloud.account.id: + enabled: true + cloud.availability_zone: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + faas.id: + enabled: true + faas.instance: + enabled: true + faas.name: + enabled: true + faas.version: + enabled: true + gcp.cloud_run.job.execution: + enabled: true + gcp.cloud_run.job.task_index: + enabled: true + gcp.gce.instance.hostname: + enabled: false + gcp.gce.instance.name: + enabled: false + host.id: + enabled: true + host.name: + enabled: true + host.type: + enabled: true + k8s.cluster.name: + enabled: true + heroku: + resource_attributes: + cloud.provider: + enabled: true + heroku.app.id: + enabled: true + heroku.dyno.id: + enabled: true + heroku.release.commit: + enabled: true + heroku.release.creation_timestamp: + enabled: true + service.instance.id: + enabled: true + 
service.name: + enabled: true + service.version: + enabled: true + http2_ping_timeout: 0s + http2_read_idle_timeout: 0s + idle_conn_timeout: 1m30s + k8snode: + auth_type: serviceAccount + context: "" + kube_config_path: "" + node_from_env_var: "" + resource_attributes: + k8s.node.name: + enabled: true + k8s.node.uid: + enabled: true + lambda: + resource_attributes: + aws.log.group.names: + enabled: true + aws.log.stream.names: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + faas.instance: + enabled: true + faas.max_memory: + enabled: true + faas.name: + enabled: true + faas.version: + enabled: true + max_idle_conns: 100 + openshift: + address: "" + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + k8s.cluster.name: + enabled: true + tls: + ca_file: "" + cert_file: "" + include_system_ca_certs_pool: false + insecure: false + insecure_skip_verify: false + key_file: "" + max_version: "" + min_version: "" + reload_interval: 0s + server_name_override: "" + token: "" + override: true + proxy_url: "" + read_buffer_size: 0 + system: + resource_attributes: + host.arch: + enabled: false + host.cpu.cache.l2.size: + enabled: false + host.cpu.family: + enabled: false + host.cpu.model.id: + enabled: false + host.cpu.model.name: + enabled: false + host.cpu.stepping: + enabled: false + host.cpu.vendor.id: + enabled: false + host.id: + enabled: false + host.ip: + enabled: false + host.mac: + enabled: false + host.name: + enabled: true + os.description: + enabled: false + os.type: + enabled: true + timeout: 2s + tls: + ca_file: "" + cert_file: "" + include_system_ca_certs_pool: false + insecure: false + insecure_skip_verify: false + key_file: "" + max_version: "" + min_version: "" + reload_interval: 0s + server_name_override: "" + write_buffer_size: 0 receivers: telegraf_prometheus: collection_interval: 1m0s @@ -101,6 +390,8 @@ service: - 
awsemf/prometheus processors: - batch/prometheus + - resourcedetection + - awsentity receivers: - telegraf_prometheus telemetry: diff --git a/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml b/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml index 2a47f34ae3..9879b1355d 100644 --- a/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml @@ -63,11 +63,300 @@ extensions: mode: ec2 region: us-east-1 processors: + awsentity: + platform: ec2 batch/prometheus: metadata_cardinality_limit: 1000 send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s + resourcedetection: + aks: + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + k8s.cluster.name: + enabled: false + azure: + resource_attributes: + azure.resourcegroup.name: + enabled: true + azure.vm.name: + enabled: true + azure.vm.scaleset.name: + enabled: true + azure.vm.size: + enabled: true + cloud.account.id: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + host.id: + enabled: true + host.name: + enabled: true + tags: [] + compression: "" + consul: + address: "" + datacenter: "" + namespace: "" + resource_attributes: + cloud.region: + enabled: true + host.id: + enabled: true + host.name: + enabled: true + token_file: "" + detectors: + - eks + - env + - ec2 + disable_keep_alives: false + docker: + resource_attributes: + host.name: + enabled: true + os.type: + enabled: true + ec2: + resource_attributes: + cloud.account.id: + enabled: true + cloud.availability_zone: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + host.id: + enabled: true + host.image.id: + enabled: true + host.name: + enabled: true + host.type: + enabled: true + tags: + - ^kubernetes.io/cluster/.*$ + - ^aws:autoscaling:groupName + ecs: + resource_attributes: + 
aws.ecs.cluster.arn: + enabled: true + aws.ecs.launchtype: + enabled: true + aws.ecs.task.arn: + enabled: true + aws.ecs.task.family: + enabled: true + aws.ecs.task.id: + enabled: true + aws.ecs.task.revision: + enabled: true + aws.log.group.arns: + enabled: true + aws.log.group.names: + enabled: true + aws.log.stream.arns: + enabled: true + aws.log.stream.names: + enabled: true + cloud.account.id: + enabled: true + cloud.availability_zone: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + eks: + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + k8s.cluster.name: + enabled: false + elasticbeanstalk: + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + deployment.environment: + enabled: true + service.instance.id: + enabled: true + service.version: + enabled: true + endpoint: "" + gcp: + resource_attributes: + cloud.account.id: + enabled: true + cloud.availability_zone: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + faas.id: + enabled: true + faas.instance: + enabled: true + faas.name: + enabled: true + faas.version: + enabled: true + gcp.cloud_run.job.execution: + enabled: true + gcp.cloud_run.job.task_index: + enabled: true + gcp.gce.instance.hostname: + enabled: false + gcp.gce.instance.name: + enabled: false + host.id: + enabled: true + host.name: + enabled: true + host.type: + enabled: true + k8s.cluster.name: + enabled: true + heroku: + resource_attributes: + cloud.provider: + enabled: true + heroku.app.id: + enabled: true + heroku.dyno.id: + enabled: true + heroku.release.commit: + enabled: true + heroku.release.creation_timestamp: + enabled: true + service.instance.id: + enabled: true + service.name: + enabled: true + service.version: + enabled: true + http2_ping_timeout: 0s + http2_read_idle_timeout: 0s + idle_conn_timeout: 1m30s + k8snode: 
+ auth_type: serviceAccount + context: "" + kube_config_path: "" + node_from_env_var: "" + resource_attributes: + k8s.node.name: + enabled: true + k8s.node.uid: + enabled: true + lambda: + resource_attributes: + aws.log.group.names: + enabled: true + aws.log.stream.names: + enabled: true + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + faas.instance: + enabled: true + faas.max_memory: + enabled: true + faas.name: + enabled: true + faas.version: + enabled: true + max_idle_conns: 100 + openshift: + address: "" + resource_attributes: + cloud.platform: + enabled: true + cloud.provider: + enabled: true + cloud.region: + enabled: true + k8s.cluster.name: + enabled: true + tls: + ca_file: "" + cert_file: "" + include_system_ca_certs_pool: false + insecure: false + insecure_skip_verify: false + key_file: "" + max_version: "" + min_version: "" + reload_interval: 0s + server_name_override: "" + token: "" + override: true + proxy_url: "" + read_buffer_size: 0 + system: + resource_attributes: + host.arch: + enabled: false + host.cpu.cache.l2.size: + enabled: false + host.cpu.family: + enabled: false + host.cpu.model.id: + enabled: false + host.cpu.model.name: + enabled: false + host.cpu.stepping: + enabled: false + host.cpu.vendor.id: + enabled: false + host.id: + enabled: false + host.ip: + enabled: false + host.mac: + enabled: false + host.name: + enabled: true + os.description: + enabled: false + os.type: + enabled: true + timeout: 2s + tls: + ca_file: "" + cert_file: "" + include_system_ca_certs_pool: false + insecure: false + insecure_skip_verify: false + key_file: "" + max_version: "" + min_version: "" + reload_interval: 0s + server_name_override: "" + write_buffer_size: 0 receivers: telegraf_prometheus: collection_interval: 1m0s @@ -83,6 +372,8 @@ service: - awsemf/prometheus processors: - batch/prometheus + - resourcedetection + - awsentity receivers: - telegraf_prometheus telemetry: diff --git 
a/translator/translate/otel/pipeline/prometheus/translator.go b/translator/translate/otel/pipeline/prometheus/translator.go index 2943273c59..da8366dcf5 100644 --- a/translator/translate/otel/pipeline/prometheus/translator.go +++ b/translator/translate/otel/pipeline/prometheus/translator.go @@ -9,12 +9,16 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/logs/metrics_collected/prometheus" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/exporter/awsemf" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/agenthealth" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/awsentity" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/batchprocessor" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/resourcedetection" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/receiver/adapter" + "github.com/aws/amazon-cloudwatch-agent/translator/util/ecsutil" ) const ( @@ -41,12 +45,30 @@ func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators if conf == nil || !conf.IsSet(key) { return nil, &common.MissingKeyError{ID: t.ID(), JsonKey: key} } + return &common.ComponentTranslators{ - Receivers: common.NewTranslatorMap(adapter.NewTranslator(prometheus.SectionKey, key, time.Minute)), - Processors: common.NewTranslatorMap( - batchprocessor.NewTranslatorWithNameAndSection(pipelineName, common.LogsKey), // prometheus sits under metrics_collected in "logs" - ), + Receivers: common.NewTranslatorMap(adapter.NewTranslator(prometheus.SectionKey, key, time.Minute)), + Processors: t.translateProcessors(), Exporters: common.NewTranslatorMap(awsemf.NewTranslatorWithName(pipelineName)), Extensions: 
common.NewTranslatorMap(agenthealth.NewTranslator(component.DataTypeLogs, []string{agenthealth.OperationPutLogEvents})), }, nil } + +func (t *translator) translateProcessors() common.TranslatorMap[component.Config] { + mode := context.CurrentContext().KubernetesMode() + if mode != "" || ecsutil.GetECSUtilSingleton().IsECS() { + // we are on kubernetes or ECS we do not want resource detection processor + return common.NewTranslatorMap( + batchprocessor.NewTranslatorWithNameAndSection(pipelineName, common.LogsKey), // prometheus sits under metrics_collected in "logs" + awsentity.NewTranslator(), + ) + } else { + // we are on ec2/onprem + return common.NewTranslatorMap( + batchprocessor.NewTranslatorWithNameAndSection(pipelineName, common.LogsKey), // prometheus sits under metrics_collected in "logs" + resourcedetection.NewTranslator(), + awsentity.NewTranslator(), + ) + } + +} diff --git a/translator/translate/otel/pipeline/prometheus/translator_test.go b/translator/translate/otel/pipeline/prometheus/translator_test.go index b83aa7eea0..c479e1f559 100644 --- a/translator/translate/otel/pipeline/prometheus/translator_test.go +++ b/translator/translate/otel/pipeline/prometheus/translator_test.go @@ -12,6 +12,8 @@ import ( "go.opentelemetry.io/collector/confmap" "github.com/aws/amazon-cloudwatch-agent/internal/util/collections" + translatorConfig "github.com/aws/amazon-cloudwatch-agent/translator/config" + "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" ) @@ -25,15 +27,16 @@ func TestTranslator(t *testing.T) { cit := NewTranslator() require.EqualValues(t, "metrics/prometheus", cit.ID().String()) testCases := map[string]struct { - input map[string]interface{} - want *want - wantErr error + input map[string]interface{} + kubernetesMode string + want *want + wantErr error }{ "WithoutPrometheusKey": { input: map[string]interface{}{}, wantErr: &common.MissingKeyError{ID: cit.ID(), JsonKey: 
"logs::metrics_collected::prometheus"}, }, - "WithPrometheusKey": { + "WithPrometheusKeyAndOnK8s": { input: map[string]interface{}{ "logs": map[string]interface{}{ "metrics_collected": map[string]interface{}{ @@ -41,9 +44,26 @@ func TestTranslator(t *testing.T) { }, }, }, + kubernetesMode: translatorConfig.ModeEKS, want: &want{ receivers: []string{"telegraf_prometheus"}, - processors: []string{"batch/prometheus"}, + processors: []string{"batch/prometheus", "awsentity"}, + exporters: []string{"awsemf/prometheus"}, + extensions: []string{"agenthealth/logs"}, + }, + }, + "WithPrometheusKeyAndNotOnK8s": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "prometheus": nil, + }, + }, + }, + kubernetesMode: "", + want: &want{ + receivers: []string{"telegraf_prometheus"}, + processors: []string{"batch/prometheus", "resourcedetection", "awsentity"}, exporters: []string{"awsemf/prometheus"}, extensions: []string{"agenthealth/logs"}, }, @@ -52,6 +72,7 @@ func TestTranslator(t *testing.T) { for name, testCase := range testCases { t.Run(name, func(t *testing.T) { conf := confmap.NewFromStringMap(testCase.input) + context.CurrentContext().SetKubernetesMode(testCase.kubernetesMode) got, err := cit.Translate(conf) assert.Equal(t, testCase.wantErr, err) if testCase.want == nil { From fafeb3eec691877f0cbc0d7c7ebd136e1bd7dbe8 Mon Sep 17 00:00:00 2001 From: Dinakar Chappa Date: Thu, 26 Sep 2024 09:39:04 -0400 Subject: [PATCH 14/47] Clear terminated pods from PodToServiceEnvironment map using a TTL of 5 minutes (#804) --- extension/entitystore/eksInfo.go | 24 ++++--- extension/entitystore/eksInfo_test.go | 90 +++++++++++++++++++---- extension/entitystore/extension.go | 45 +++++++++--- extension/entitystore/extension_test.go | 94 +++++++++++++++++++++++-- extension/server/extension.go | 17 ++++- extension/server/extension_test.go | 53 +++++++++++--- 6 files changed, 272 insertions(+), 51 deletions(-) diff --git 
a/extension/entitystore/eksInfo.go b/extension/entitystore/eksInfo.go index b8929b771d..3c885e032f 100644 --- a/extension/entitystore/eksInfo.go +++ b/extension/entitystore/eksInfo.go @@ -3,7 +3,14 @@ package entitystore -import "go.uber.org/zap" +import ( + "time" + + "github.com/jellydator/ttlcache/v3" + "go.uber.org/zap" +) + +const ttlDuration = 5 * time.Minute type ServiceEnvironment struct { ServiceName string @@ -13,27 +20,28 @@ type ServiceEnvironment struct { type eksInfo struct { logger *zap.Logger - podToServiceEnvMap map[string]ServiceEnvironment + podToServiceEnvMap *ttlcache.Cache[string, ServiceEnvironment] } func newEKSInfo(logger *zap.Logger) *eksInfo { - podToServiceEnvMap := make(map[string]ServiceEnvironment) return &eksInfo{ - logger: logger, - podToServiceEnvMap: podToServiceEnvMap, + logger: logger, + podToServiceEnvMap: ttlcache.New[string, ServiceEnvironment]( + ttlcache.WithTTL[string, ServiceEnvironment](ttlDuration), + ), } } func (eks *eksInfo) AddPodServiceEnvironmentMapping(podName string, serviceName string, environmentName string, serviceNameSource string) { if eks.podToServiceEnvMap != nil { - eks.podToServiceEnvMap[podName] = ServiceEnvironment{ + eks.podToServiceEnvMap.Set(podName, ServiceEnvironment{ ServiceName: serviceName, Environment: environmentName, ServiceNameSource: serviceNameSource, - } + }, ttlcache.DefaultTTL) } } -func (eks *eksInfo) GetPodServiceEnvironmentMapping() map[string]ServiceEnvironment { +func (eks *eksInfo) GetPodServiceEnvironmentMapping() *ttlcache.Cache[string, ServiceEnvironment] { return eks.podToServiceEnvMap } diff --git a/extension/entitystore/eksInfo_test.go b/extension/entitystore/eksInfo_test.go index 75e0924cb7..42b66a116e 100644 --- a/extension/entitystore/eksInfo_test.go +++ b/extension/entitystore/eksInfo_test.go @@ -5,7 +5,9 @@ package entitystore import ( "testing" + "time" + "github.com/jellydator/ttlcache/v3" "github.com/stretchr/testify/assert" "go.uber.org/zap" ) @@ -13,7 +15,7 @@ 
import ( func TestAddPodServiceEnvironmentMapping(t *testing.T) { tests := []struct { name string - want map[string]ServiceEnvironment + want *ttlcache.Cache[string, ServiceEnvironment] podName string service string env string @@ -22,35 +24,35 @@ func TestAddPodServiceEnvironmentMapping(t *testing.T) { }{ { name: "AddPodWithServiceMapping", - want: map[string]ServiceEnvironment{ + want: setupTTLCacheForTesting(map[string]ServiceEnvironment{ "test-pod": { ServiceName: "test-service", }, - }, + }, ttlDuration), podName: "test-pod", service: "test-service", }, { name: "AddPodWithServiceEnvMapping", - want: map[string]ServiceEnvironment{ + want: setupTTLCacheForTesting(map[string]ServiceEnvironment{ "test-pod": { ServiceName: "test-service", Environment: "test-env", }, - }, + }, ttlDuration), podName: "test-pod", service: "test-service", env: "test-env", }, { name: "AddPodWithServiceEnvMapping", - want: map[string]ServiceEnvironment{ + want: setupTTLCacheForTesting(map[string]ServiceEnvironment{ "test-pod": { ServiceName: "test-service", Environment: "test-env", ServiceNameSource: ServiceNameSourceInstrumentation, }, - }, + }, ttlDuration), podName: "test-pod", service: "test-service", env: "test-env", @@ -69,31 +71,59 @@ func TestAddPodServiceEnvironmentMapping(t *testing.T) { ei.podToServiceEnvMap = nil } ei.AddPodServiceEnvironmentMapping(tt.podName, tt.service, tt.env, tt.serviceNameSource) - assert.Equal(t, tt.want, ei.podToServiceEnvMap) + if tt.mapNil { + assert.Nil(t, ei.podToServiceEnvMap) + } else { + for pod, se := range tt.want.Items() { + assert.Equal(t, se.Value(), ei.GetPodServiceEnvironmentMapping().Get(pod).Value()) + } + assert.Equal(t, tt.want.Len(), ei.GetPodServiceEnvironmentMapping().Len()) + } }) } } +func TestAddPodServiceEnvironmentMapping_TtlRefresh(t *testing.T) { + logger, _ := zap.NewDevelopment() + ei := newEKSInfo(logger) + + //adds new pod to service environment mapping + ei.AddPodServiceEnvironmentMapping("test-pod", "test-service", 
"test-environment", "Instrumentation") + assert.Equal(t, 1, ei.podToServiceEnvMap.Len()) + expiration := ei.podToServiceEnvMap.Get("test-pod").ExpiresAt() + + //sleep for 1 second to simulate ttl refresh + time.Sleep(1 * time.Second) + + // simulate adding the same pod to service environment mapping + ei.AddPodServiceEnvironmentMapping("test-pod", "test-service", "test-environment", "Instrumentation") + newExpiration := ei.podToServiceEnvMap.Get("test-pod").ExpiresAt() + + // assert that the expiration time is updated + assert.True(t, newExpiration.After(expiration)) + assert.Equal(t, 1, ei.podToServiceEnvMap.Len()) +} + func TestGetPodServiceEnvironmentMapping(t *testing.T) { tests := []struct { name string - want map[string]ServiceEnvironment + want *ttlcache.Cache[string, ServiceEnvironment] addMap bool }{ { name: "GetPodWithServiceEnvMapping", - want: map[string]ServiceEnvironment{ + want: setupTTLCacheForTesting(map[string]ServiceEnvironment{ "test-pod": { ServiceName: "test-service", Environment: "test-env", ServiceNameSource: "test-service-name-source", }, - }, + }, ttlDuration), addMap: true, }, { name: "GetWhenPodToServiceMapIsEmpty", - want: map[string]ServiceEnvironment{}, + want: setupTTLCacheForTesting(map[string]ServiceEnvironment{}, ttlDuration), }, } for _, tt := range tests { @@ -103,7 +133,41 @@ func TestGetPodServiceEnvironmentMapping(t *testing.T) { if tt.addMap { ei.AddPodServiceEnvironmentMapping("test-pod", "test-service", "test-env", "test-service-name-source") } - assert.Equal(t, tt.want, ei.GetPodServiceEnvironmentMapping()) + for pod, se := range tt.want.Items() { + assert.Equal(t, se.Value(), ei.GetPodServiceEnvironmentMapping().Get(pod).Value()) + } + assert.Equal(t, tt.want.Len(), ei.GetPodServiceEnvironmentMapping().Len()) }) } } + +func TestTTLServicePodEnvironmentMapping(t *testing.T) { + logger, _ := zap.NewDevelopment() + ei := newEKSInfo(logger) + + ei.podToServiceEnvMap = setupTTLCacheForTesting(map[string]ServiceEnvironment{ + 
"pod": { + ServiceName: "service", + Environment: "environment", + }, + }, time.Microsecond) + assert.Equal(t, 1, ei.podToServiceEnvMap.Len()) + + //starting the ttl cache like we do in code. This will automatically evict expired pods. + go ei.podToServiceEnvMap.Start() + defer ei.podToServiceEnvMap.Stop() + + //sleep for 1 second to simulate ttl refresh + time.Sleep(1 * time.Second) + + //stops the ttl cache. + assert.Equal(t, 0, ei.podToServiceEnvMap.Len()) +} + +func setupTTLCacheForTesting(podToServiceMap map[string]ServiceEnvironment, ttlDuration time.Duration) *ttlcache.Cache[string, ServiceEnvironment] { + cache := ttlcache.New[string, ServiceEnvironment](ttlcache.WithTTL[string, ServiceEnvironment](ttlDuration)) + for pod, serviceEnv := range podToServiceMap { + cache.Set(pod, serviceEnv, ttlcache.DefaultTTL) + } + return cache +} diff --git a/extension/entitystore/extension.go b/extension/entitystore/extension.go index 99297a48d9..69a077cb27 100644 --- a/extension/entitystore/extension.go +++ b/extension/entitystore/extension.go @@ -5,12 +5,14 @@ package entitystore import ( "context" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/aws/aws-sdk-go/service/sts/stsiface" + "github.com/jellydator/ttlcache/v3" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/extension" "go.uber.org/zap" @@ -23,15 +25,16 @@ import ( ) const ( - Service = "Service" - InstanceIDKey = "EC2.InstanceId" - ASGKey = "EC2.AutoScalingGroup" - ServiceNameSourceKey = "AWS.ServiceNameSource" - PlatformType = "PlatformType" - EC2PlatForm = "AWS::EC2" - Type = "Type" - Name = "Name" - Environment = "Environment" + Service = "Service" + InstanceIDKey = "EC2.InstanceId" + ASGKey = "EC2.AutoScalingGroup" + ServiceNameSourceKey = "AWS.ServiceNameSource" + PlatformType = "PlatformType" + EC2PlatForm = "AWS::EC2" + Type = "Type" + Name = "Name" 
+ Environment = "Environment" + podTerminationCheckInterval = 5 * time.Minute ) type ec2ProviderType func(string, *configaws.CredentialConfig) ec2iface.EC2API @@ -71,6 +74,8 @@ type EntityStore struct { metadataprovider ec2metadataprovider.MetadataProvider stsClient stsiface.STSAPI + + podTerminationCheckInterval time.Duration } var _ extension.Extension = (*EntityStore)(nil) @@ -83,6 +88,7 @@ func (e *EntityStore) Start(ctx context.Context, host component.Host) error { e.metadataprovider = getMetaDataProvider() e.mode = e.config.Mode e.kubernetesMode = e.config.KubernetesMode + e.podTerminationCheckInterval = podTerminationCheckInterval ec2CredentialConfig := &configaws.CredentialConfig{ Profile: e.config.Profile, Filename: e.config.Filename, @@ -94,6 +100,8 @@ func (e *EntityStore) Start(ctx context.Context, host component.Host) error { } if e.kubernetesMode != "" { e.eksInfo = newEKSInfo(e.logger) + // Starting the ttl cache will automatically evict all expired pods from the map + go e.StartPodToServiceEnvironmentMappingTtlCache(e.done) } e.serviceprovider = newServiceProvider(e.mode, e.config.Region, &e.ec2Info, e.metadataprovider, getEC2Provider, ec2CredentialConfig, e.done) go e.serviceprovider.startServiceProvider() @@ -175,11 +183,26 @@ func (e *EntityStore) AddPodServiceEnvironmentMapping(podName string, serviceNam } } -func (e *EntityStore) GetPodServiceEnvironmentMapping() map[string]ServiceEnvironment { +func (e *EntityStore) StartPodToServiceEnvironmentMappingTtlCache(done chan struct{}) { + if e.eksInfo != nil { + e.eksInfo.podToServiceEnvMap.Start() + + // Start a goroutine to stop the cache when done channel is closed + go func() { + <-done + e.eksInfo.podToServiceEnvMap.Stop() + e.logger.Info("Pod to Service Environment Mapping TTL Cache stopped") + }() + } +} + +func (e *EntityStore) GetPodServiceEnvironmentMapping() *ttlcache.Cache[string, ServiceEnvironment] { if e.eksInfo != nil { return e.eksInfo.GetPodServiceEnvironmentMapping() } - return 
map[string]ServiceEnvironment{} + return ttlcache.New[string, ServiceEnvironment]( + ttlcache.WithTTL[string, ServiceEnvironment](ttlDuration), + ) } func (e *EntityStore) createAttributeMap() map[string]*string { diff --git a/extension/entitystore/extension_test.go b/extension/entitystore/extension_test.go index a79d1ced61..66a879fb1c 100644 --- a/extension/entitystore/extension_test.go +++ b/extension/entitystore/extension_test.go @@ -8,12 +8,14 @@ import ( "errors" "reflect" "testing" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/sts" "github.com/aws/aws-sdk-go/service/sts/stsiface" + "github.com/jellydator/ttlcache/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "go.uber.org/zap" @@ -364,23 +366,23 @@ func TestEntityStore_AddAndGetPodServiceEnvironmentMapping(t *testing.T) { logger, _ := zap.NewProduction() tests := []struct { name string - want map[string]ServiceEnvironment + want *ttlcache.Cache[string, ServiceEnvironment] eks *eksInfo }{ { name: "HappyPath", - want: map[string]ServiceEnvironment{ + want: setupTTLCacheForTesting(map[string]ServiceEnvironment{ "pod1": { ServiceName: "service1", Environment: "env1", ServiceNameSource: ServiceNameSourceK8sWorkload, }, - }, + }, ttlDuration), eks: newEKSInfo(logger), }, { name: "Empty EKS Info", - want: map[string]ServiceEnvironment{}, + want: setupTTLCacheForTesting(map[string]ServiceEnvironment{}, ttlDuration), eks: nil, }, } @@ -388,7 +390,89 @@ func TestEntityStore_AddAndGetPodServiceEnvironmentMapping(t *testing.T) { t.Run(tt.name, func(t *testing.T) { e := EntityStore{eksInfo: tt.eks} e.AddPodServiceEnvironmentMapping("pod1", "service1", "env1", ServiceNameSourceK8sWorkload) - assert.Equal(t, tt.want, e.GetPodServiceEnvironmentMapping()) + for pod, se := range tt.want.Items() { + assert.Equal(t, se.Value(), e.GetPodServiceEnvironmentMapping().Get(pod).Value()) + } + 
assert.Equal(t, tt.want.Len(), e.GetPodServiceEnvironmentMapping().Len()) }) } } + +func TestEntityStore_ClearTerminatedPodsFromServiceMap(t *testing.T) { + logger, _ := zap.NewProduction() + tests := []struct { + name string + podToServiceMap *ttlcache.Cache[string, ServiceEnvironment] + want *ttlcache.Cache[string, ServiceEnvironment] + eks *eksInfo + }{ + { + name: "HappyPath_NoClear", + podToServiceMap: setupTTLCacheForTesting(map[string]ServiceEnvironment{ + "pod1": { + ServiceName: "service1", + Environment: "env1", + }, + }, ttlDuration), + want: setupTTLCacheForTesting(map[string]ServiceEnvironment{ + "pod1": { + ServiceName: "service1", + Environment: "env1", + }, + }, ttlDuration), + eks: newEKSInfo(logger), + }, + { + name: "HappyPath_Clear", + podToServiceMap: setupTTLCacheForTesting(map[string]ServiceEnvironment{ + "pod1": { + ServiceName: "service1", + Environment: "env1", + }, + }, time.Nanosecond), + want: setupTTLCacheForTesting(map[string]ServiceEnvironment{}, time.Nanosecond), + eks: newEKSInfo(logger), + }, + { + name: "Empty EKS Info", + want: setupTTLCacheForTesting(map[string]ServiceEnvironment{}, ttlDuration), + eks: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := EntityStore{eksInfo: tt.eks} + if tt.eks != nil { + e.eksInfo.podToServiceEnvMap = tt.podToServiceMap + go e.eksInfo.podToServiceEnvMap.Start() + } + //sleep for 1 second to allow the cache to update + time.Sleep(1 * time.Second) + for pod, se := range tt.want.Items() { + assert.Equal(t, se.Value(), e.GetPodServiceEnvironmentMapping().Get(pod).Value()) + } + if tt.eks != nil { + e.eksInfo.podToServiceEnvMap.Stop() + } + assert.Equal(t, tt.want.Len(), e.GetPodServiceEnvironmentMapping().Len()) + }) + } +} + +func TestEntityStore_StartPodToServiceEnvironmentMappingTtlCache(t *testing.T) { + e := EntityStore{eksInfo: newEKSInfo(zap.NewExample())} + e.done = make(chan struct{}) + e.eksInfo.podToServiceEnvMap = 
setupTTLCacheForTesting(map[string]ServiceEnvironment{}, time.Microsecond) + + go e.StartPodToServiceEnvironmentMappingTtlCache(e.done) + assert.Equal(t, 0, e.GetPodServiceEnvironmentMapping().Len()) + e.AddPodServiceEnvironmentMapping("pod", "service", "env", "Instrumentation") + assert.Equal(t, 1, e.GetPodServiceEnvironmentMapping().Len()) + + // sleep for 1 second to allow the cache to update + time.Sleep(time.Second) + + //cache should be cleared + assert.Equal(t, 0, e.GetPodServiceEnvironmentMapping().Len()) + +} diff --git a/extension/server/extension.go b/extension/server/extension.go index 514db40641..351439075b 100644 --- a/extension/server/extension.go +++ b/extension/server/extension.go @@ -10,6 +10,7 @@ import ( "time" "github.com/gin-gonic/gin" + "github.com/jellydator/ttlcache/v3" jsoniter "github.com/json-iterator/go" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/extension" @@ -95,17 +96,19 @@ func (s *Server) Shutdown(ctx context.Context) error { } func (s *Server) k8sPodToServiceMapHandler(c *gin.Context) { - podServiceEnvironmentMap := getPodServiceEnvironmentMapping() + podServiceEnvironmentMap := convertTtlCacheToMap(getPodServiceEnvironmentMapping()) s.jsonHandler(c.Writer, podServiceEnvironmentMap) } // Added this for testing purpose -var getPodServiceEnvironmentMapping = func() map[string]entitystore.ServiceEnvironment { +var getPodServiceEnvironmentMapping = func() *ttlcache.Cache[string, entitystore.ServiceEnvironment] { es := entitystore.GetEntityStore() if es != nil && es.GetPodServiceEnvironmentMapping() != nil { return es.GetPodServiceEnvironmentMapping() } - return map[string]entitystore.ServiceEnvironment{} + return ttlcache.New[string, entitystore.ServiceEnvironment]( + ttlcache.WithTTL[string, entitystore.ServiceEnvironment](time.Hour * 1), + ) } func (s *Server) jsonHandler(w http.ResponseWriter, data interface{}) { @@ -115,3 +118,11 @@ func (s *Server) jsonHandler(w http.ResponseWriter, data interface{}) 
{ s.logger.Error("failed to encode data for http response", zap.Error(err)) } } + +func convertTtlCacheToMap(cache *ttlcache.Cache[string, entitystore.ServiceEnvironment]) map[string]entitystore.ServiceEnvironment { + m := make(map[string]entitystore.ServiceEnvironment) + for pod, se := range cache.Items() { + m[pod] = se.Value() + } + return m +} diff --git a/extension/server/extension_test.go b/extension/server/extension_test.go index 3b14347d29..a60ad842f8 100644 --- a/extension/server/extension_test.go +++ b/extension/server/extension_test.go @@ -13,6 +13,7 @@ import ( "time" "github.com/gin-gonic/gin" + "github.com/jellydator/ttlcache/v3" "github.com/stretchr/testify/assert" "go.uber.org/zap" @@ -20,24 +21,26 @@ import ( ) type mockEntityStore struct { - podToServiceEnvironmentMap map[string]entitystore.ServiceEnvironment + podToServiceEnvironmentMap *ttlcache.Cache[string, entitystore.ServiceEnvironment] } func newMockEntityStore() *mockEntityStore { return &mockEntityStore{ - podToServiceEnvironmentMap: make(map[string]entitystore.ServiceEnvironment), + podToServiceEnvironmentMap: ttlcache.New[string, entitystore.ServiceEnvironment]( + ttlcache.WithTTL[string, entitystore.ServiceEnvironment](time.Hour), + ), } } func (es *mockEntityStore) AddPodServiceEnvironmentMapping(podName string, service string, env string) { - es.podToServiceEnvironmentMap[podName] = entitystore.ServiceEnvironment{ + es.podToServiceEnvironmentMap.Set(podName, entitystore.ServiceEnvironment{ ServiceName: service, Environment: env, - } + }, time.Hour) } -func newMockGetPodServiceEnvironmentMapping(es *mockEntityStore) func() map[string]entitystore.ServiceEnvironment { - return func() map[string]entitystore.ServiceEnvironment { +func newMockGetPodServiceEnvironmentMapping(es *mockEntityStore) func() *ttlcache.Cache[string, entitystore.ServiceEnvironment] { + return func() *ttlcache.Cache[string, entitystore.ServiceEnvironment] { return es.podToServiceEnvironmentMap } } @@ -127,12 +130,12 
@@ func TestK8sPodToServiceMapHandler(t *testing.T) { } tests := []struct { name string - want map[string]entitystore.ServiceEnvironment + want *ttlcache.Cache[string, entitystore.ServiceEnvironment] emptyMap bool }{ { name: "HappyPath", - want: map[string]entitystore.ServiceEnvironment{ + want: setupTTLCacheForTesting(map[string]entitystore.ServiceEnvironment{ "pod1": { ServiceName: "service1", Environment: "env1", @@ -141,11 +144,11 @@ func TestK8sPodToServiceMapHandler(t *testing.T) { ServiceName: "service2", Environment: "env2", }, - }, + }), }, { name: "Empty Map", - want: map[string]entitystore.ServiceEnvironment{}, + want: setupTTLCacheForTesting(map[string]entitystore.ServiceEnvironment{}), emptyMap: true, }, } @@ -167,7 +170,11 @@ func TestK8sPodToServiceMapHandler(t *testing.T) { var actualMap map[string]entitystore.ServiceEnvironment err := json.Unmarshal(w.Body.Bytes(), &actualMap) assert.NoError(t, err) - assert.Equal(t, tt.want, actualMap) + actualTtlCache := setupTTLCacheForTesting(actualMap) + for pod, se := range tt.want.Items() { + assert.Equal(t, se.Value(), actualTtlCache.Get(pod).Value()) + } + assert.Equal(t, tt.want.Len(), actualTtlCache.Len()) }) } } @@ -252,3 +259,27 @@ func TestServerStartAndShutdown(t *testing.T) { }) } } + +func TestConvertTtlCacheToMap(t *testing.T) { + podToServiceMap := map[string]entitystore.ServiceEnvironment{ + "pod1": { + ServiceName: "service1", + Environment: "env1", + }, + "pod2": { + ServiceName: "service2", + Environment: "env2", + }, + } + ttlcache := setupTTLCacheForTesting(podToServiceMap) + convertedMap := convertTtlCacheToMap(ttlcache) + assert.Equal(t, convertedMap, podToServiceMap) +} + +func setupTTLCacheForTesting(podToServiceMap map[string]entitystore.ServiceEnvironment) *ttlcache.Cache[string, entitystore.ServiceEnvironment] { + cache := ttlcache.New[string, entitystore.ServiceEnvironment](ttlcache.WithTTL[string, entitystore.ServiceEnvironment](time.Minute)) + for pod, serviceEnv := range 
podToServiceMap { + cache.Set(pod, serviceEnv, ttlcache.DefaultTTL) + } + return cache +} From 60ceaaf6285a93b14868426c6a2b6fe121e5f638 Mon Sep 17 00:00:00 2001 From: Jason Polanco Date: Thu, 26 Sep 2024 09:46:04 -0400 Subject: [PATCH 15/47] remove entity store from ECS configs (#813) --- translator/tocwconfig/sampleConfig/log_ecs_metric_only.conf | 2 +- translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml | 6 +----- translator/tocwconfig/tocwconfig_test.go | 5 +++++ translator/translate/otel/translate_otel.go | 6 +++++- 4 files changed, 12 insertions(+), 7 deletions(-) diff --git a/translator/tocwconfig/sampleConfig/log_ecs_metric_only.conf b/translator/tocwconfig/sampleConfig/log_ecs_metric_only.conf index 1a25fe50c5..a3ef2aabaf 100644 --- a/translator/tocwconfig/sampleConfig/log_ecs_metric_only.conf +++ b/translator/tocwconfig/sampleConfig/log_ecs_metric_only.conf @@ -21,6 +21,6 @@ [[outputs.cloudwatchlogs]] endpoint_override = "https://fake_endpoint" force_flush_interval = "5s" - log_stream_name = "fake-host-name" + log_stream_name = "i-UNKNOWN" region = "us-west-2" diff --git a/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml b/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml index f8388b6a2b..e11863e4be 100644 --- a/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml +++ b/translator/tocwconfig/sampleConfig/log_ecs_metric_only.yaml @@ -7,7 +7,7 @@ exporters: local_mode: false log_group_name: emf/logs/default log_retention: 0 - log_stream_name: fake-host-name + log_stream_name: i-UNKNOWN max_retries: 2 middleware: agenthealth/logs no_verify_ssl: false @@ -97,9 +97,6 @@ extensions: usage_flags: mode: EC2 region_type: ACJ - entitystore: - mode: ec2 - region: us-west-2 processors: batch/containerinsights: metadata_cardinality_limit: 1000 @@ -169,7 +166,6 @@ receivers: service: extensions: - agenthealth/logs - - entitystore pipelines: logs/emf_logs: exporters: diff --git a/translator/tocwconfig/tocwconfig_test.go 
b/translator/tocwconfig/tocwconfig_test.go index 997d425e7f..b8d8eb0b92 100644 --- a/translator/tocwconfig/tocwconfig_test.go +++ b/translator/tocwconfig/tocwconfig_test.go @@ -38,6 +38,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" "github.com/aws/amazon-cloudwatch-agent/translator/util" + "github.com/aws/amazon-cloudwatch-agent/translator/util/ecsutil" "github.com/aws/amazon-cloudwatch-agent/translator/util/eksdetector" ) @@ -554,12 +555,16 @@ func TestECSNodeMetricConfig(t *testing.T) { resetContext(t) context.CurrentContext().SetRunInContainer(true) context.CurrentContext().SetMode(config.ModeEC2) + ecsSingleton := ecsutil.GetECSUtilSingleton() + ecsSingleton.Region = "us-west-2" t.Setenv("RUN_IN_CONTAINER", "True") t.Setenv("HOST_NAME", "fake-host-name") t.Setenv("HOST_IP", "127.0.0.1") expectedEnvVars := map[string]string{} checkTranslation(t, "log_ecs_metric_only", "linux", expectedEnvVars, "") checkTranslation(t, "log_ecs_metric_only", "darwin", nil, "") + //Reset back to default value to not impact other tests + ecsSingleton.Region = "" } func TestLogFilterConfig(t *testing.T) { diff --git a/translator/translate/otel/translate_otel.go b/translator/translate/otel/translate_otel.go index 7d77d8f831..758d5c6815 100644 --- a/translator/translate/otel/translate_otel.go +++ b/translator/translate/otel/translate_otel.go @@ -31,6 +31,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/nop" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/prometheus" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/pipeline/xray" + "github.com/aws/amazon-cloudwatch-agent/translator/util/ecsutil" ) var registry = common.NewTranslatorMap[*common.ComponentTranslators]() @@ -73,7 +74,10 @@ func Translate(jsonConfig interface{}, os string) (*otelcol.Config, error) { return nil, err } } - 
pipelines.Translators.Extensions.Set(entitystore.NewTranslator()) + // ECS is not in scope for entity association, so we only add the entity store in non ECS platforms + if !ecsutil.GetECSUtilSingleton().IsECS() { + pipelines.Translators.Extensions.Set(entitystore.NewTranslator()) + } if context.CurrentContext().KubernetesMode() != "" { pipelines.Translators.Extensions.Set(server.NewTranslator()) } From 4500f3b98baa7fe4535edd7ba74ff816cdfd1ba2 Mon Sep 17 00:00:00 2001 From: Chad Patel Date: Thu, 26 Sep 2024 15:42:15 -0500 Subject: [PATCH 16/47] Update model for latest sdk changes on PutMetricData (#815) --- sdk/service/cloudwatch/api.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/sdk/service/cloudwatch/api.go b/sdk/service/cloudwatch/api.go index db2109d91f..17db77ac6b 100644 --- a/sdk/service/cloudwatch/api.go +++ b/sdk/service/cloudwatch/api.go @@ -11268,6 +11268,8 @@ type PutMetricDataInput struct { // // Namespace is a required field Namespace *string `min:"1" type:"string" required:"true"` + + StrictEntityValidation *bool `type:"boolean"` } // String returns the string representation. @@ -11345,6 +11347,12 @@ func (s *PutMetricDataInput) SetNamespace(v string) *PutMetricDataInput { return s } +// SetStrictEntityValidation sets the StrictEntityValidation field's value. 
+func (s *PutMetricDataInput) SetStrictEntityValidation(v bool) *PutMetricDataInput { + s.StrictEntityValidation = &v + return s +} + type PutMetricDataOutput struct { _ struct{} `type:"structure"` } From 308b60b05672fc72117baaf794cdf0a6c41bafaa Mon Sep 17 00:00:00 2001 From: Jason Polanco Date: Fri, 27 Sep 2024 14:31:51 -0400 Subject: [PATCH 17/47] [Compass] Implement ServiceNameSource logic into aws entity processor (#817) --- extension/entitystore/extension.go | 6 ++ extension/entitystore/extension_test.go | 23 +++++ extension/entitystore/serviceprovider.go | 9 ++ extension/entitystore/serviceprovider_test.go | 22 +++++ plugins/processors/awsentity/processor.go | 34 ++++++- .../processors/awsentity/processor_test.go | 98 ++++++++++++------- 6 files changed, 152 insertions(+), 40 deletions(-) diff --git a/extension/entitystore/extension.go b/extension/entitystore/extension.go index 69a077cb27..9809772a07 100644 --- a/extension/entitystore/extension.go +++ b/extension/entitystore/extension.go @@ -44,6 +44,7 @@ type serviceProviderInterface interface { addEntryForLogFile(LogFileGlob, ServiceAttribute) addEntryForLogGroup(LogGroupName, ServiceAttribute) logFileServiceAttribute(LogFileGlob, LogGroupName) ServiceAttribute + getServiceNameAndSource() (string, string) } type EntityStore struct { @@ -151,6 +152,11 @@ func (e *EntityStore) CreateLogFileEntity(logFileGlob LogFileGlob, logGroupName } } +// GetMetricServiceNameAndSource gets the service name source for service metrics if not customer provided +func (e *EntityStore) GetMetricServiceNameAndSource() (string, string) { + return e.serviceprovider.getServiceNameAndSource() +} + // GetServiceMetricAttributesMap creates the attribute map for service metrics. This will be expanded upon in a later PR'S, // but for now is just covering the EC2 attributes for service metrics. 
func (e *EntityStore) GetServiceMetricAttributesMap() map[string]*string { diff --git a/extension/entitystore/extension_test.go b/extension/entitystore/extension_test.go index 66a879fb1c..f8f6e3dc1e 100644 --- a/extension/entitystore/extension_test.go +++ b/extension/entitystore/extension_test.go @@ -43,6 +43,10 @@ func (s *mockServiceProvider) logFileServiceAttribute(glob LogFileGlob, name Log return args.Get(0).(ServiceAttribute) } +func (s *mockServiceProvider) getServiceNameAndSource() (string, string) { + return "test-service-name", "UserConfiguration" +} + type mockSTSClient struct { stsiface.STSAPI accountId string @@ -476,3 +480,22 @@ func TestEntityStore_StartPodToServiceEnvironmentMappingTtlCache(t *testing.T) { assert.Equal(t, 0, e.GetPodServiceEnvironmentMapping().Len()) } + +func TestEntityStore_GetMetricServiceNameSource(t *testing.T) { + instanceId := "i-abcd1234" + accountId := "123456789012" + sp := new(mockServiceProvider) + e := EntityStore{ + mode: config.ModeEC2, + ec2Info: ec2Info{InstanceID: instanceId}, + serviceprovider: sp, + metadataprovider: mockMetadataProviderWithAccountId(accountId), + stsClient: &mockSTSClient{accountId: accountId}, + nativeCredential: &session.Session{}, + } + + serviceName, serviceNameSource := e.GetMetricServiceNameAndSource() + + assert.Equal(t, "test-service-name", serviceName) + assert.Equal(t, "UserConfiguration", serviceNameSource) +} diff --git a/extension/entitystore/serviceprovider.go b/extension/entitystore/serviceprovider.go index 40eaffca24..33516edb2d 100644 --- a/extension/entitystore/serviceprovider.go +++ b/extension/entitystore/serviceprovider.go @@ -149,6 +149,15 @@ func (s *serviceprovider) logFileServiceAttribute(logFile LogFileGlob, logGroup }) } +func (s *serviceprovider) getServiceNameAndSource() (string, string) { + sa := mergeServiceAttributes([]serviceAttributeProvider{ + s.serviceAttributeFromEc2Tags, + s.serviceAttributeFromIamRole, + s.serviceAttributeFallback, + }) + return 
sa.ServiceName, sa.ServiceNameSource +} + func (s *serviceprovider) serviceAttributeForLogGroup(logGroup LogGroupName) ServiceAttribute { if logGroup == "" { return ServiceAttribute{} diff --git a/extension/entitystore/serviceprovider_test.go b/extension/entitystore/serviceprovider_test.go index 678b9d15f6..55961250a2 100644 --- a/extension/entitystore/serviceprovider_test.go +++ b/extension/entitystore/serviceprovider_test.go @@ -246,6 +246,28 @@ func Test_serviceprovider_logFileServiceAttribute(t *testing.T) { assert.Equal(t, ServiceAttribute{ServiceName: "test-service-from-loggroup", ServiceNameSource: ServiceNameSourceInstrumentation, Environment: "ec2:test-asg"}, s.logFileServiceAttribute("glob", "group")) } +func Test_serviceprovider_getServiceNameSource(t *testing.T) { + s := &serviceprovider{ + mode: config.ModeEC2, + logGroups: make(map[LogGroupName]ServiceAttribute), + logFiles: make(map[LogFileGlob]ServiceAttribute), + } + + serviceName, serviceNameSource := s.getServiceNameAndSource() + assert.Equal(t, ServiceNameUnknown, serviceName) + assert.Equal(t, ServiceNameSourceUnknown, serviceNameSource) + + s.iamRole = "test-role" + serviceName, serviceNameSource = s.getServiceNameAndSource() + assert.Equal(t, s.iamRole, serviceName) + assert.Equal(t, ServiceNameSourceClientIamRole, serviceNameSource) + + s.ec2TagServiceName = "test-service-from-tags" + serviceName, serviceNameSource = s.getServiceNameAndSource() + assert.Equal(t, s.ec2TagServiceName, serviceName) + assert.Equal(t, ServiceNameSourceResourceTags, serviceNameSource) +} + func Test_serviceprovider_getIAMRole(t *testing.T) { type fields struct { metadataProvider ec2metadataprovider.MetadataProvider diff --git a/plugins/processors/awsentity/processor.go b/plugins/processors/awsentity/processor.go index 9c05937e58..c21347bd6a 100644 --- a/plugins/processors/awsentity/processor.go +++ b/plugins/processors/awsentity/processor.go @@ -19,11 +19,12 @@ import ( ) const ( - attributeAwsLogGroupNames = 
"aws.log.group.names" - attributeDeploymentEnvironment = "deployment.environment" - attributeServiceName = "service.name" - attributeService = "Service" - EMPTY = "" + attributeAwsLogGroupNames = "aws.log.group.names" + attributeDeploymentEnvironment = "deployment.environment" + attributeServiceName = "service.name" + attributeService = "Service" + attributeServiceNameSourceUserConfig = "UserConfiguration" + EMPTY = "" ) type scraper interface { @@ -57,6 +58,14 @@ var getMetricAttributesFromEntityStore = func() map[string]*string { return es.GetServiceMetricAttributesMap() } +var getServiceNameSource = func() (string, string) { + es := entitystore.GetEntityStore() + if es == nil { + return EMPTY, EMPTY + } + return es.GetMetricServiceNameAndSource() +} + // awsEntityProcessor looks for metrics that have the aws.log.group.names and either the service.name or // deployment.environment resource attributes set, then adds the association between the log group(s) and the // service/environment names to the entitystore extension. 
@@ -107,6 +116,12 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric entityEnvironmentName := environmentName.Str() if (entityServiceName == EMPTY || entityEnvironmentName == EMPTY) && p.config.ScrapeDatapointAttribute { entityServiceName, entityEnvironmentName = p.scrapeServiceAttribute(rm.At(i).ScopeMetrics()) + // If the entityServiceNameSource is empty here, that means it was not configured via instrumentation + // If entityServiceName is a datapoint attribute, that means the service name is coming from the UserConfiguration source + if entityServiceNameSource == EMPTY && entityServiceName != EMPTY { + entityServiceNameSource = attributeServiceNameSourceUserConfig + resourceAttrs.PutStr(entityattributes.AttributeEntityServiceNameSource, attributeServiceNameSourceUserConfig) + } } if entityServiceName != EMPTY { resourceAttrs.PutStr(entityattributes.AttributeEntityServiceName, entityServiceName) @@ -115,6 +130,15 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric resourceAttrs.PutStr(entityattributes.AttributeEntityDeploymentEnvironment, entityEnvironmentName) } if p.config.Platform == config.ModeEC2 { + //If entityServiceNameSource is empty, it was not configured via the config. Get the source in descending priority + // 1. Incoming telemetry attributes + // 2. CWA config + // 3. instance tags - The tags attached to the EC2 instance. Only scrape for tag with the following key: service, application, app + // 4. 
IAM Role - The IAM role name retrieved through IMDS(Instance Metadata Service) + if entityServiceNameSource == EMPTY { + entityServiceName, entityServiceNameSource = getServiceNameSource() + resourceAttrs.PutStr(entityattributes.AttributeEntityServiceNameSource, entityServiceNameSource) + } if platformType != EMPTY { resourceAttrs.PutStr(entityattributes.AttributeEntityPlatformType, platformType) } diff --git a/plugins/processors/awsentity/processor_test.go b/plugins/processors/awsentity/processor_test.go index f5ccdb633f..745926ad3b 100644 --- a/plugins/processors/awsentity/processor_test.go +++ b/plugins/processors/awsentity/processor_test.go @@ -64,9 +64,9 @@ func newMockGetMetricAttributesFromEntityStore() func() map[string]*string { } } -func newMockGetMetricAttributesFromEntityStoreReset() func() map[string]*string { - return func() map[string]*string { - return map[string]*string{} +func newMockGetServiceNameAndSource(service, source string) func() (string, string) { + return func() (string, string) { + return service, source } } @@ -254,60 +254,66 @@ func TestProcessMetricsResourceAttributeScraping(t *testing.T) { logger, _ := zap.NewDevelopment() ctx := context.Background() tests := []struct { - name string - metrics pmetric.Metrics - want map[string]any - containsMetrics bool + name string + metrics pmetric.Metrics + mockServiceNameSource func() (string, string) + mockGetMetricAttributesFromEntityStore func() map[string]*string + want map[string]any }{ { name: "EmptyMetrics", metrics: pmetric.NewMetrics(), want: map[string]any{}, }, + //NOTE 2 SELF: These tests assume that we are on the EC2 platform, so make sure to mock the ServiceNameSource function { - name: "ResourceAttributeServiceNameOnly", - metrics: generateMetrics(attributeServiceName, "test-service"), + name: "ResourceAttributeServiceNameOnly", + metrics: generateMetrics(attributeServiceName, "test-service"), + mockServiceNameSource: newMockGetServiceNameAndSource("test-service-name", 
"Instrumentation"), want: map[string]any{ - entityattributes.AttributeEntityServiceName: "test-service", - attributeServiceName: "test-service", - entityattributes.AttributeEntityPlatformType: "AWS::EC2", - entityattributes.AttributeEntityInstanceID: "i-123456789", - entityattributes.AttributeEntityAutoScalingGroup: "auto-scaling", + entityattributes.AttributeEntityServiceName: "test-service", + attributeServiceName: "test-service", + entityattributes.AttributeEntityServiceNameSource: "Instrumentation", }, - containsMetrics: true, }, { - name: "ResourceAttributeEnvironmentOnly", - metrics: generateMetrics(attributeDeploymentEnvironment, "test-environment"), + name: "ResourceAttributeEnvironmentOnly", + metrics: generateMetrics(attributeDeploymentEnvironment, "test-environment"), + mockServiceNameSource: newMockGetServiceNameAndSource("unknown_service", "Unknown"), want: map[string]any{ entityattributes.AttributeEntityDeploymentEnvironment: "test-environment", attributeDeploymentEnvironment: "test-environment", - entityattributes.AttributeEntityPlatformType: "AWS::EC2", - entityattributes.AttributeEntityInstanceID: "i-123456789", - entityattributes.AttributeEntityAutoScalingGroup: "auto-scaling", + entityattributes.AttributeEntityServiceNameSource: "Unknown", }, - containsMetrics: true, }, { - name: "ResourceAttributeServiceNameAndEnvironment", - metrics: generateMetrics(attributeServiceName, "test-service", attributeDeploymentEnvironment, "test-environment"), + name: "ResourceAttributeServiceNameAndEnvironment", + metrics: generateMetrics(attributeServiceName, "test-service", attributeDeploymentEnvironment, "test-environment"), + mockServiceNameSource: newMockGetServiceNameAndSource("test-service-name", "Instrumentation"), + mockGetMetricAttributesFromEntityStore: newMockGetMetricAttributesFromEntityStore(), want: map[string]any{ entityattributes.AttributeEntityServiceName: "test-service", entityattributes.AttributeEntityDeploymentEnvironment: "test-environment", 
attributeServiceName: "test-service", attributeDeploymentEnvironment: "test-environment", + entityattributes.AttributeEntityServiceNameSource: "Instrumentation", entityattributes.AttributeEntityPlatformType: "AWS::EC2", entityattributes.AttributeEntityInstanceID: "i-123456789", entityattributes.AttributeEntityAutoScalingGroup: "auto-scaling", }, - containsMetrics: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if tt.containsMetrics { - getMetricAttributesFromEntityStore = newMockGetMetricAttributesFromEntityStore() + // Make copy of original functions to use as resets later to prevent failing test when tests are ran in bulk + resetServiceNameSource := getServiceNameSource + resetGetMetricAttributesFromEntityStore := getMetricAttributesFromEntityStore + if tt.mockServiceNameSource != nil { + getServiceNameSource = tt.mockServiceNameSource + } + if tt.mockGetMetricAttributesFromEntityStore != nil { + getMetricAttributesFromEntityStore = tt.mockGetMetricAttributesFromEntityStore } p := newAwsEntityProcessor(&Config{}, logger) p.config.Platform = config.ModeEC2 @@ -317,7 +323,8 @@ func TestProcessMetricsResourceAttributeScraping(t *testing.T) { if rm.Len() > 0 { assert.Equal(t, tt.want, rm.At(0).Resource().Attributes().AsRaw()) } - getMetricAttributesFromEntityStore = newMockGetMetricAttributesFromEntityStoreReset() + getServiceNameSource = resetServiceNameSource + getMetricAttributesFromEntityStore = resetGetMetricAttributesFromEntityStore }) } } @@ -326,9 +333,11 @@ func TestProcessMetricsDatapointAttributeScraping(t *testing.T) { logger, _ := zap.NewDevelopment() ctx := context.Background() tests := []struct { - name string - metrics pmetric.Metrics - want map[string]any + name string + metrics pmetric.Metrics + mockServiceNameAndSource func() (string, string) + mockGetMetricAttributesFromEntityStore func() map[string]*string + want map[string]any }{ { name: "EmptyMetrics", @@ -336,17 +345,24 @@ func 
TestProcessMetricsDatapointAttributeScraping(t *testing.T) { want: map[string]any{}, }, { - name: "DatapointAttributeServiceNameOnly", - metrics: generateDatapointMetrics(attributeServiceName, "test-service"), + name: "DatapointAttributeServiceNameOnly", + metrics: generateDatapointMetrics(attributeServiceName, "test-service"), + mockGetMetricAttributesFromEntityStore: newMockGetMetricAttributesFromEntityStore(), want: map[string]any{ - entityattributes.AttributeEntityServiceName: "test-service", + entityattributes.AttributeEntityServiceName: "test-service", + entityattributes.AttributeEntityServiceNameSource: "UserConfiguration", + entityattributes.AttributeEntityPlatformType: "AWS::EC2", + entityattributes.AttributeEntityInstanceID: "i-123456789", + entityattributes.AttributeEntityAutoScalingGroup: "auto-scaling", }, }, { - name: "DatapointAttributeEnvironmentOnly", - metrics: generateDatapointMetrics(attributeDeploymentEnvironment, "test-environment"), + name: "DatapointAttributeEnvironmentOnly", + metrics: generateDatapointMetrics(attributeDeploymentEnvironment, "test-environment"), + mockServiceNameAndSource: newMockGetServiceNameAndSource("test-service-name", "ClientIamRole"), want: map[string]any{ entityattributes.AttributeEntityDeploymentEnvironment: "test-environment", + entityattributes.AttributeEntityServiceNameSource: "ClientIamRole", }, }, { @@ -355,12 +371,22 @@ func TestProcessMetricsDatapointAttributeScraping(t *testing.T) { want: map[string]any{ entityattributes.AttributeEntityServiceName: "test-service", entityattributes.AttributeEntityDeploymentEnvironment: "test-environment", + entityattributes.AttributeEntityServiceNameSource: "UserConfiguration", }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + // Make copy of original functions to use as resets later to prevent failing test when tests are ran in bulk + resetServiceNameSource := getServiceNameSource + resetGetMetricAttributesFromEntityStore := 
getMetricAttributesFromEntityStore + if tt.mockServiceNameAndSource != nil { + getServiceNameSource = tt.mockServiceNameAndSource + } + if tt.mockGetMetricAttributesFromEntityStore != nil { + getMetricAttributesFromEntityStore = tt.mockGetMetricAttributesFromEntityStore + } p := newAwsEntityProcessor(&Config{ScrapeDatapointAttribute: true}, logger) p.config.Platform = config.ModeEC2 _, err := p.processMetrics(ctx, tt.metrics) @@ -369,6 +395,8 @@ func TestProcessMetricsDatapointAttributeScraping(t *testing.T) { if rm.Len() > 0 { assert.Equal(t, tt.want, rm.At(0).Resource().Attributes().AsRaw()) } + getServiceNameSource = resetServiceNameSource + getMetricAttributesFromEntityStore = resetGetMetricAttributesFromEntityStore }) } } From c4219f57204925eac2238b864f3f768219c81b3b Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Fri, 27 Sep 2024 16:01:57 -0400 Subject: [PATCH 18/47] Add resource entity processing logics and adjust host pipeline (#818) --- extension/entitystore/ec2Info.go | 22 +-- extension/entitystore/ec2Info_test.go | 40 ++--- extension/entitystore/extension.go | 4 +- extension/entitystore/extension_test.go | 20 +-- extension/entitystore/serviceprovider.go | 4 +- extension/entitystore/serviceprovider_test.go | 6 +- plugins/processors/awsentity/config.go | 3 + .../entityattributes/entityattributes.go | 13 +- plugins/processors/awsentity/processor.go | 142 ++++++++++-------- .../processors/awsentity/processor_test.go | 65 +++++++- .../sampleConfig/advanced_config_darwin.yaml | 7 +- .../sampleConfig/advanced_config_linux.yaml | 7 +- .../sampleConfig/advanced_config_windows.yaml | 5 +- .../sampleConfig/basic_config_linux.yaml | 5 +- .../sampleConfig/basic_config_windows.yaml | 5 +- .../sampleConfig/collectd_config_linux.yaml | 7 +- .../sampleConfig/compass_linux_config.yaml | 7 +- .../sampleConfig/complete_darwin_config.yaml | 23 ++- .../sampleConfig/complete_linux_config.yaml | 23 ++- 
.../sampleConfig/complete_windows_config.yaml | 19 ++- .../sampleConfig/delta_config_linux.yaml | 5 +- .../sampleConfig/delta_net_config_linux.yaml | 5 +- .../sampleConfig/drop_origin_linux.yaml | 5 +- .../ignore_append_dimensions.yaml | 5 +- .../sampleConfig/invalid_input_linux.yaml | 5 +- .../sampleConfig/jmx_config_linux.yaml | 5 +- .../sampleConfig/standard_config_linux.yaml | 7 +- ...ndard_config_linux_with_common_config.yaml | 7 +- .../sampleConfig/standard_config_windows.yaml | 5 +- ...ard_config_windows_with_common_config.yaml | 5 +- .../sampleConfig/statsd_config_linux.yaml | 7 +- .../sampleConfig/statsd_config_windows.yaml | 7 +- translator/translate/otel/common/common.go | 16 +- .../otel/pipeline/host/translator.go | 8 +- .../otel/pipeline/host/translator_test.go | 25 ++- .../otel/pipeline/host/translators.go | 7 + .../otel/pipeline/host/translators_test.go | 14 ++ .../otel/processor/awsentity/translator.go | 26 +++- 38 files changed, 403 insertions(+), 188 deletions(-) diff --git a/extension/entitystore/ec2Info.go b/extension/entitystore/ec2Info.go index 7619d898b4..82eb6ea7b0 100644 --- a/extension/entitystore/ec2Info.go +++ b/extension/entitystore/ec2Info.go @@ -29,7 +29,7 @@ const ( autoScalingGroupSizeMax = 255 ) -type ec2Info struct { +type EC2Info struct { InstanceID string AutoScalingGroup string @@ -44,8 +44,8 @@ type ec2Info struct { done chan struct{} } -func (ei *ec2Info) initEc2Info() { - ei.logger.Debug("Initializing ec2Info") +func (ei *EC2Info) initEc2Info() { + ei.logger.Debug("Initializing EC2Info") if err := ei.setInstanceId(); err != nil { return } @@ -53,11 +53,11 @@ func (ei *ec2Info) initEc2Info() { if err := ei.setAutoScalingGroup(); err != nil { return } - ei.logger.Debug("Finished initializing ec2Info") + ei.logger.Debug("Finished initializing EC2Info") ei.ignoreInvalidFields() } -func (ei *ec2Info) setInstanceId() error { +func (ei *EC2Info) setInstanceId() error { for { metadataDoc, err := 
ei.metadataProvider.Get(context.Background()) if err != nil { @@ -77,7 +77,7 @@ func (ei *ec2Info) setInstanceId() error { } } -func (ei *ec2Info) setAutoScalingGroup() error { +func (ei *EC2Info) setAutoScalingGroup() error { retry := 0 for { var waitDuration time.Duration @@ -115,7 +115,7 @@ func (ei *ec2Info) setAutoScalingGroup() error { This can also be implemented by just calling the InstanceTagValue and then DescribeTags on failure. But preferred the current implementation as we need to distinguish the tags not being fetchable at all, from the ASG tag in particular not existing. */ -func (ei *ec2Info) retrieveAsgName(ec2API ec2iface.EC2API) error { +func (ei *EC2Info) retrieveAsgName(ec2API ec2iface.EC2API) error { tags, err := ei.metadataProvider.InstanceTags(context.Background()) if err != nil { ei.logger.Debug("Failed to get tags through metadata provider", zap.Error(err)) @@ -132,7 +132,7 @@ func (ei *ec2Info) retrieveAsgName(ec2API ec2iface.EC2API) error { return nil } -func (ei *ec2Info) retrieveAsgNameWithDescribeTags(ec2API ec2iface.EC2API) error { +func (ei *EC2Info) retrieveAsgNameWithDescribeTags(ec2API ec2iface.EC2API) error { tagFilters := []*ec2.Filter{ { Name: aws.String("resource-type"), @@ -171,8 +171,8 @@ func (ei *ec2Info) retrieveAsgNameWithDescribeTags(ec2API ec2iface.EC2API) error return nil } -func newEC2Info(metadataProvider ec2metadataprovider.MetadataProvider, providerType ec2ProviderType, ec2Credential *configaws.CredentialConfig, done chan struct{}, region string, logger *zap.Logger) *ec2Info { - return &ec2Info{ +func newEC2Info(metadataProvider ec2metadataprovider.MetadataProvider, providerType ec2ProviderType, ec2Credential *configaws.CredentialConfig, done chan struct{}, region string, logger *zap.Logger) *EC2Info { + return &EC2Info{ metadataProvider: metadataProvider, ec2Provider: providerType, ec2Credential: ec2Credential, @@ -182,7 +182,7 @@ func newEC2Info(metadataProvider ec2metadataprovider.MetadataProvider, providerT } 
} -func (ei *ec2Info) ignoreInvalidFields() { +func (ei *EC2Info) ignoreInvalidFields() { if idLength := len(ei.InstanceID); idLength > instanceIdSizeMax { ei.logger.Warn("InstanceId length exceeds characters limit and will be ignored", zap.Int("length", idLength), zap.Int("character limit", instanceIdSizeMax)) ei.InstanceID = "" diff --git a/extension/entitystore/ec2Info_test.go b/extension/entitystore/ec2Info_test.go index f5ca24a084..566e4dad02 100644 --- a/extension/entitystore/ec2Info_test.go +++ b/extension/entitystore/ec2Info_test.go @@ -82,7 +82,7 @@ func TestSetInstanceIdAndRegion(t *testing.T) { name string args args wantErr bool - want ec2Info + want EC2Info }{ { name: "happy path", @@ -90,7 +90,7 @@ func TestSetInstanceIdAndRegion(t *testing.T) { metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc}, }, wantErr: false, - want: ec2Info{ + want: EC2Info{ InstanceID: mockedInstanceIdentityDoc.InstanceID, }, }, @@ -98,7 +98,7 @@ func TestSetInstanceIdAndRegion(t *testing.T) { for _, tt := range tests { logger, _ := zap.NewDevelopment() t.Run(tt.name, func(t *testing.T) { - ei := &ec2Info{ + ei := &EC2Info{ metadataProvider: tt.args.metadataProvider, logger: logger, } @@ -119,7 +119,7 @@ func TestRetrieveASGName(t *testing.T) { name string args args wantErr bool - want ec2Info + want EC2Info }{ { name: "happy path", @@ -128,7 +128,7 @@ func TestRetrieveASGName(t *testing.T) { metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc, Tags: "aws:autoscaling:groupName", TagValue: tagVal3}, }, wantErr: false, - want: ec2Info{ + want: EC2Info{ AutoScalingGroup: tagVal3, }, }, @@ -139,7 +139,7 @@ func TestRetrieveASGName(t *testing.T) { metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc, Tags: "aws:autoscaling:groupName\nenv\nname", TagValue: tagVal3}, }, wantErr: false, - want: ec2Info{ + want: EC2Info{ AutoScalingGroup: tagVal3, }, }, @@ -150,7 
+150,7 @@ func TestRetrieveASGName(t *testing.T) { metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc, Tags: "name", TagValue: tagVal3}, }, wantErr: false, - want: ec2Info{ + want: EC2Info{ AutoScalingGroup: "", }, }, @@ -158,7 +158,7 @@ func TestRetrieveASGName(t *testing.T) { for _, tt := range tests { logger, _ := zap.NewDevelopment() t.Run(tt.name, func(t *testing.T) { - ei := &ec2Info{metadataProvider: tt.args.metadataProvider, logger: logger} + ei := &EC2Info{metadataProvider: tt.args.metadataProvider, logger: logger} if err := ei.retrieveAsgName(tt.args.ec2Client); (err != nil) != tt.wantErr { t.Errorf("retrieveAsgName() error = %v, wantErr %v", err, tt.wantErr) } @@ -175,7 +175,7 @@ func TestRetrieveASGNameWithDescribeTags(t *testing.T) { name string args args wantErr bool - want ec2Info + want EC2Info }{ { name: "happy path", @@ -183,7 +183,7 @@ func TestRetrieveASGNameWithDescribeTags(t *testing.T) { ec2Client: &mockEC2Client{withASG: true}, }, wantErr: false, - want: ec2Info{ + want: EC2Info{ AutoScalingGroup: tagVal3, }, }, @@ -193,7 +193,7 @@ func TestRetrieveASGNameWithDescribeTags(t *testing.T) { ec2Client: &mockEC2Client{withASG: false}, }, wantErr: false, - want: ec2Info{ + want: EC2Info{ AutoScalingGroup: "", }, }, @@ -201,7 +201,7 @@ func TestRetrieveASGNameWithDescribeTags(t *testing.T) { for _, tt := range tests { logger, _ := zap.NewDevelopment() t.Run(tt.name, func(t *testing.T) { - ei := &ec2Info{logger: logger} + ei := &EC2Info{logger: logger} if err := ei.retrieveAsgNameWithDescribeTags(tt.args.ec2Client); (err != nil) != tt.wantErr { t.Errorf("retrieveAsgName() error = %v, wantErr %v", err, tt.wantErr) } @@ -218,12 +218,12 @@ func TestIgnoreInvalidFields(t *testing.T) { } tests := []struct { name string - args *ec2Info + args *EC2Info want want }{ { name: "Happy path", - args: &ec2Info{ + args: &EC2Info{ InstanceID: "i-01d2417c27a396e44", AutoScalingGroup: "asg", logger: logger, @@ -235,7 +235,7 @@ 
func TestIgnoreInvalidFields(t *testing.T) { }, { name: "InstanceId too large", - args: &ec2Info{ + args: &EC2Info{ InstanceID: strings.Repeat("a", 20), AutoScalingGroup: "asg", logger: logger, @@ -247,7 +247,7 @@ func TestIgnoreInvalidFields(t *testing.T) { }, { name: "AutoScalingGroup too large", - args: &ec2Info{ + args: &EC2Info{ InstanceID: "i-01d2417c27a396e44", AutoScalingGroup: strings.Repeat("a", 256), logger: logger, @@ -274,14 +274,14 @@ func TestLogMessageDoesNotIncludeResourceInfo(t *testing.T) { tests := []struct { name string args args - want ec2Info + want EC2Info }{ { name: "AutoScalingGroupWithDescribeTags", args: args{ metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc, InstanceTagError: true}, }, - want: ec2Info{ + want: EC2Info{ InstanceID: mockedInstanceIdentityDoc.InstanceID, }, }, @@ -290,7 +290,7 @@ func TestLogMessageDoesNotIncludeResourceInfo(t *testing.T) { args: args{ metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc, Tags: "aws:autoscaling:groupName", TagValue: tagVal3}, }, - want: ec2Info{ + want: EC2Info{ InstanceID: mockedInstanceIdentityDoc.InstanceID, }, }, @@ -308,7 +308,7 @@ func TestLogMessageDoesNotIncludeResourceInfo(t *testing.T) { logger := zap.New(core) done := make(chan struct{}) - ei := &ec2Info{ + ei := &EC2Info{ metadataProvider: tt.args.metadataProvider, ec2Provider: mockEC2Provider, logger: logger, diff --git a/extension/entitystore/extension.go b/extension/entitystore/extension.go index 9809772a07..0eac021944 100644 --- a/extension/entitystore/extension.go +++ b/extension/entitystore/extension.go @@ -59,7 +59,7 @@ type EntityStore struct { // ec2Info stores information about EC2 instances such as instance ID and // auto scaling groups - ec2Info ec2Info + ec2Info EC2Info // eksInfo stores information about EKS such as pod to service Env map eksInfo *eksInfo @@ -126,7 +126,7 @@ func (e *EntityStore) EKSInfo() *eksInfo { return e.eksInfo 
} -func (e *EntityStore) EC2Info() ec2Info { +func (e *EntityStore) EC2Info() EC2Info { return e.ec2Info } diff --git a/extension/entitystore/extension_test.go b/extension/entitystore/extension_test.go index f8f6e3dc1e..163b481434 100644 --- a/extension/entitystore/extension_test.go +++ b/extension/entitystore/extension_test.go @@ -104,16 +104,16 @@ func (m *mockMetadataProvider) InstanceTagValue(ctx context.Context, tagKey stri func TestEntityStore_EC2Info(t *testing.T) { tests := []struct { name string - ec2InfoInput ec2Info - want ec2Info + ec2InfoInput EC2Info + want EC2Info }{ { name: "happypath", - ec2InfoInput: ec2Info{ + ec2InfoInput: EC2Info{ InstanceID: "i-1234567890", AutoScalingGroup: "test-asg", }, - want: ec2Info{ + want: EC2Info{ InstanceID: "i-1234567890", AutoScalingGroup: "test-asg", }, @@ -175,7 +175,7 @@ func TestEntityStore_KubernetesMode(t *testing.T) { func TestEntityStore_createAttributeMaps(t *testing.T) { type fields struct { - ec2Info ec2Info + ec2Info EC2Info mode string } tests := []struct { @@ -186,7 +186,7 @@ func TestEntityStore_createAttributeMaps(t *testing.T) { { name: "HappyPath", fields: fields{ - ec2Info: ec2Info{ + ec2Info: EC2Info{ InstanceID: "i-123456789", AutoScalingGroup: "test-asg", }, @@ -201,7 +201,7 @@ func TestEntityStore_createAttributeMaps(t *testing.T) { { name: "HappyPath_AsgMissing", fields: fields{ - ec2Info: ec2Info{ + ec2Info: EC2Info{ InstanceID: "i-123456789", }, mode: config.ModeEC2, @@ -223,7 +223,7 @@ func TestEntityStore_createAttributeMaps(t *testing.T) { { name: "NonEC2", fields: fields{ - ec2Info: ec2Info{ + ec2Info: EC2Info{ InstanceID: "i-123456789", AutoScalingGroup: "test-asg", }, @@ -297,7 +297,7 @@ func TestEntityStore_createLogFileRID(t *testing.T) { sp.On("logFileServiceAttribute", glob, group).Return(serviceAttr) e := EntityStore{ mode: config.ModeEC2, - ec2Info: ec2Info{InstanceID: instanceId}, + ec2Info: EC2Info{InstanceID: instanceId}, serviceprovider: sp, metadataprovider: 
mockMetadataProviderWithAccountId(accountId), stsClient: &mockSTSClient{accountId: accountId}, @@ -487,7 +487,7 @@ func TestEntityStore_GetMetricServiceNameSource(t *testing.T) { sp := new(mockServiceProvider) e := EntityStore{ mode: config.ModeEC2, - ec2Info: ec2Info{InstanceID: instanceId}, + ec2Info: EC2Info{InstanceID: instanceId}, serviceprovider: sp, metadataprovider: mockMetadataProviderWithAccountId(accountId), stsClient: &mockSTSClient{accountId: accountId}, diff --git a/extension/entitystore/serviceprovider.go b/extension/entitystore/serviceprovider.go index 33516edb2d..5c50405dc7 100644 --- a/extension/entitystore/serviceprovider.go +++ b/extension/entitystore/serviceprovider.go @@ -63,7 +63,7 @@ type LogFileGlob string type serviceprovider struct { mode string - ec2Info *ec2Info + ec2Info *EC2Info metadataProvider ec2metadataprovider.MetadataProvider ec2API ec2iface.EC2API ec2Provider ec2ProviderType @@ -303,7 +303,7 @@ func (s *serviceprovider) getEC2TagFilters() ([]*ec2.Filter, error) { return tagFilters, nil } -func newServiceProvider(mode string, region string, ec2Info *ec2Info, metadataProvider ec2metadataprovider.MetadataProvider, providerType ec2ProviderType, ec2Credential *configaws.CredentialConfig, done chan struct{}) serviceProviderInterface { +func newServiceProvider(mode string, region string, ec2Info *EC2Info, metadataProvider ec2metadataprovider.MetadataProvider, providerType ec2ProviderType, ec2Credential *configaws.CredentialConfig, done chan struct{}) serviceProviderInterface { return &serviceprovider{ mode: mode, region: region, diff --git a/extension/entitystore/serviceprovider_test.go b/extension/entitystore/serviceprovider_test.go index 55961250a2..f63930f07f 100644 --- a/extension/entitystore/serviceprovider_test.go +++ b/extension/entitystore/serviceprovider_test.go @@ -202,10 +202,10 @@ func Test_serviceprovider_serviceAttributeFromAsg(t *testing.T) { s := &serviceprovider{} assert.Equal(t, ServiceAttribute{}, 
s.serviceAttributeFromAsg()) - s = &serviceprovider{ec2Info: &ec2Info{}} + s = &serviceprovider{ec2Info: &EC2Info{}} assert.Equal(t, ServiceAttribute{}, s.serviceAttributeFromAsg()) - s = &serviceprovider{ec2Info: &ec2Info{AutoScalingGroup: "test-asg"}} + s = &serviceprovider{ec2Info: &EC2Info{AutoScalingGroup: "test-asg"}} assert.Equal(t, ServiceAttribute{Environment: "ec2:test-asg"}, s.serviceAttributeFromAsg()) } @@ -230,7 +230,7 @@ func Test_serviceprovider_logFileServiceAttribute(t *testing.T) { assert.Equal(t, ServiceAttribute{ServiceName: ServiceNameUnknown, ServiceNameSource: ServiceNameSourceUnknown, Environment: "ec2:default"}, s.logFileServiceAttribute("glob", "group")) - s.ec2Info = &ec2Info{AutoScalingGroup: "test-asg"} + s.ec2Info = &EC2Info{AutoScalingGroup: "test-asg"} assert.Equal(t, ServiceAttribute{ServiceName: ServiceNameUnknown, ServiceNameSource: ServiceNameSourceUnknown, Environment: "ec2:test-asg"}, s.logFileServiceAttribute("glob", "group")) s.iamRole = "test-role" diff --git a/plugins/processors/awsentity/config.go b/plugins/processors/awsentity/config.go index fdf9966df6..54c662a088 100644 --- a/plugins/processors/awsentity/config.go +++ b/plugins/processors/awsentity/config.go @@ -20,6 +20,9 @@ type Config struct { KubernetesMode string `mapstructure:"kubernetes_mode,omitempty"` // Specific Mode agent is running on (i.e. EC2, EKS, ECS etc) Platform string `mapstructure:"platform,omitempty"` + // EntityType determines the type of entity processing done for + // telemetry. Possible values are Service and Resource + EntityType string `mapstructure:"entity_type,omitempty"` } // Verify Config implements Processor interface. 
diff --git a/plugins/processors/awsentity/internal/entityattributes/entityattributes.go b/plugins/processors/awsentity/internal/entityattributes/entityattributes.go index b92d8c72c1..49e73281c0 100644 --- a/plugins/processors/awsentity/internal/entityattributes/entityattributes.go +++ b/plugins/processors/awsentity/internal/entityattributes/entityattributes.go @@ -4,7 +4,18 @@ package entityattributes const ( - AWSEntityPrefix = "com.amazonaws.cloudwatch.entity.internal." + // The following are the possible values for EntityType config options + Resource = "Resource" + Service = "Service" + + // The following are entity related attributes + AWSEntityPrefix = "com.amazonaws.cloudwatch.entity.internal." + AttributeEntityType = AWSEntityPrefix + "type" + AttributeEntityAWSResource = "AWS::Resource" + AttributeEntityResourceType = AWSEntityPrefix + "resource.type" + AttributeEntityEC2InstanceResource = "AWS::EC2::Instance" + AttributeEntityIdentifier = AWSEntityPrefix + "identifier" + AttributeEntityServiceName = AWSEntityPrefix + "service.name" AttributeEntityDeploymentEnvironment = AWSEntityPrefix + "deployment.environment" AttributeEntityCluster = AWSEntityPrefix + "k8s.cluster.name" diff --git a/plugins/processors/awsentity/processor.go b/plugins/processors/awsentity/processor.go index c21347bd6a..bd95d4b04a 100644 --- a/plugins/processors/awsentity/processor.go +++ b/plugins/processors/awsentity/processor.go @@ -58,6 +58,15 @@ var getMetricAttributesFromEntityStore = func() map[string]*string { return es.GetServiceMetricAttributesMap() } +var getEC2InfoFromEntityStore = func() entitystore.EC2Info { + es := entitystore.GetEntityStore() + if es == nil { + return entitystore.EC2Info{} + } + + return es.EC2Info() +} + var getServiceNameSource = func() (string, string) { es := entitystore.GetEntityStore() if es == nil { @@ -105,77 +114,88 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric p.k8sscraper.Scrape(rm.At(i).Resource()) } 
resourceAttrs := rm.At(i).Resource().Attributes() - logGroupNames, _ := resourceAttrs.Get(attributeAwsLogGroupNames) - serviceName, _ := resourceAttrs.Get(attributeServiceName) - environmentName, _ := resourceAttrs.Get(attributeDeploymentEnvironment) - if serviceNameSource, sourceExists := resourceAttrs.Get(entityattributes.AttributeEntityServiceNameSource); sourceExists { - entityServiceNameSource = serviceNameSource.Str() - } - - entityServiceName := getServiceAttributes(resourceAttrs) - entityEnvironmentName := environmentName.Str() - if (entityServiceName == EMPTY || entityEnvironmentName == EMPTY) && p.config.ScrapeDatapointAttribute { - entityServiceName, entityEnvironmentName = p.scrapeServiceAttribute(rm.At(i).ScopeMetrics()) - // If the entityServiceNameSource is empty here, that means it was not configured via instrumentation - // If entityServiceName is a datapoint attribute, that means the service name is coming from the UserConfiguration source - if entityServiceNameSource == EMPTY && entityServiceName != EMPTY { - entityServiceNameSource = attributeServiceNameSourceUserConfig - resourceAttrs.PutStr(entityattributes.AttributeEntityServiceNameSource, attributeServiceNameSourceUserConfig) + switch p.config.EntityType { + case entityattributes.Resource: + ec2Info := getEC2InfoFromEntityStore() + if p.config.Platform == config.ModeEC2 && ec2Info.InstanceID != EMPTY { + resourceAttrs.PutStr(entityattributes.AttributeEntityType, entityattributes.AttributeEntityAWSResource) + resourceAttrs.PutStr(entityattributes.AttributeEntityResourceType, entityattributes.AttributeEntityEC2InstanceResource) + resourceAttrs.PutStr(entityattributes.AttributeEntityIdentifier, ec2Info.InstanceID) } - } - if entityServiceName != EMPTY { - resourceAttrs.PutStr(entityattributes.AttributeEntityServiceName, entityServiceName) - } - if entityEnvironmentName != EMPTY { - resourceAttrs.PutStr(entityattributes.AttributeEntityDeploymentEnvironment, entityEnvironmentName) - } - if 
p.config.Platform == config.ModeEC2 { - //If entityServiceNameSource is empty, it was not configured via the config. Get the source in descending priority - // 1. Incoming telemetry attributes - // 2. CWA config - // 3. instance tags - The tags attached to the EC2 instance. Only scrape for tag with the following key: service, application, app - // 4. IAM Role - The IAM role name retrieved through IMDS(Instance Metadata Service) - if entityServiceNameSource == EMPTY { - entityServiceName, entityServiceNameSource = getServiceNameSource() - resourceAttrs.PutStr(entityattributes.AttributeEntityServiceNameSource, entityServiceNameSource) + case entityattributes.Service: + logGroupNames, _ := resourceAttrs.Get(attributeAwsLogGroupNames) + serviceName, _ := resourceAttrs.Get(attributeServiceName) + environmentName, _ := resourceAttrs.Get(attributeDeploymentEnvironment) + if serviceNameSource, sourceExists := resourceAttrs.Get(entityattributes.AttributeEntityServiceNameSource); sourceExists { + entityServiceNameSource = serviceNameSource.Str() } - if platformType != EMPTY { - resourceAttrs.PutStr(entityattributes.AttributeEntityPlatformType, platformType) + + entityServiceName := getServiceAttributes(resourceAttrs) + entityEnvironmentName := environmentName.Str() + if (entityServiceName == EMPTY || entityEnvironmentName == EMPTY) && p.config.ScrapeDatapointAttribute { + entityServiceName, entityEnvironmentName = p.scrapeServiceAttribute(rm.At(i).ScopeMetrics()) + // If the entityServiceNameSource is empty here, that means it was not configured via instrumentation + // If entityServiceName is a datapoint attribute, that means the service name is coming from the UserConfiguration source + if entityServiceNameSource == EMPTY && entityServiceName != EMPTY { + entityServiceNameSource = attributeServiceNameSourceUserConfig + resourceAttrs.PutStr(entityattributes.AttributeEntityServiceNameSource, attributeServiceNameSourceUserConfig) + } } - if instanceID != EMPTY { - 
resourceAttrs.PutStr(entityattributes.AttributeEntityInstanceID, instanceID) + if entityServiceName != EMPTY { + resourceAttrs.PutStr(entityattributes.AttributeEntityServiceName, entityServiceName) } - if autoScalingGroup != EMPTY { - resourceAttrs.PutStr(entityattributes.AttributeEntityAutoScalingGroup, autoScalingGroup) + if entityEnvironmentName != EMPTY { + resourceAttrs.PutStr(entityattributes.AttributeEntityDeploymentEnvironment, entityEnvironmentName) } - } - if p.config.KubernetesMode != "" { - fallbackEnvironment := entityEnvironmentName - podInfo, ok := p.k8sscraper.(*k8sattributescraper.K8sAttributeScraper) - if fallbackEnvironment == EMPTY && p.config.KubernetesMode == config.ModeEKS && ok && podInfo.Cluster != EMPTY && podInfo.Namespace != EMPTY { - fallbackEnvironment = "eks:" + p.config.ClusterName + "/" + podInfo.Namespace - } else if fallbackEnvironment == EMPTY && (p.config.KubernetesMode == config.ModeK8sEC2 || p.config.KubernetesMode == config.ModeK8sOnPrem) && ok && podInfo.Cluster != EMPTY && podInfo.Namespace != EMPTY { - fallbackEnvironment = "k8s:" + p.config.ClusterName + "/" + podInfo.Namespace + if p.config.Platform == config.ModeEC2 { + //If entityServiceNameSource is empty, it was not configured via the config. Get the source in descending priority + // 1. Incoming telemetry attributes + // 2. CWA config + // 3. instance tags - The tags attached to the EC2 instance. Only scrape for tag with the following key: service, application, app + // 4. 
IAM Role - The IAM role name retrieved through IMDS(Instance Metadata Service) + if entityServiceNameSource == EMPTY { + entityServiceName, entityServiceNameSource = getServiceNameSource() + resourceAttrs.PutStr(entityattributes.AttributeEntityServiceNameSource, entityServiceNameSource) + } + if platformType != EMPTY { + resourceAttrs.PutStr(entityattributes.AttributeEntityPlatformType, platformType) + } + if instanceID != EMPTY { + resourceAttrs.PutStr(entityattributes.AttributeEntityInstanceID, instanceID) + } + if autoScalingGroup != EMPTY { + resourceAttrs.PutStr(entityattributes.AttributeEntityAutoScalingGroup, autoScalingGroup) + } + } + if p.config.KubernetesMode != "" { + fallbackEnvironment := entityEnvironmentName + podInfo, ok := p.k8sscraper.(*k8sattributescraper.K8sAttributeScraper) + if fallbackEnvironment == EMPTY && p.config.KubernetesMode == config.ModeEKS && ok && podInfo.Cluster != EMPTY && podInfo.Namespace != EMPTY { + fallbackEnvironment = "eks:" + p.config.ClusterName + "/" + podInfo.Namespace + } else if fallbackEnvironment == EMPTY && (p.config.KubernetesMode == config.ModeK8sEC2 || p.config.KubernetesMode == config.ModeK8sOnPrem) && ok && podInfo.Cluster != EMPTY && podInfo.Namespace != EMPTY { + fallbackEnvironment = "k8s:" + p.config.ClusterName + "/" + podInfo.Namespace + } + fullPodName := scrapeK8sPodName(resourceAttrs) + if fullPodName != EMPTY && entityServiceName != EMPTY && entityServiceNameSource != EMPTY { + addPodToServiceEnvironmentMap(fullPodName, entityServiceName, fallbackEnvironment, entityServiceNameSource) + } else if fullPodName != EMPTY && entityServiceName != EMPTY && entityServiceNameSource == EMPTY { + addPodToServiceEnvironmentMap(fullPodName, entityServiceName, fallbackEnvironment, entitystore.ServiceNameSourceUnknown) + } } - fullPodName := scrapeK8sPodName(resourceAttrs) - if fullPodName != EMPTY && entityServiceName != EMPTY && entityServiceNameSource != EMPTY { - addPodToServiceEnvironmentMap(fullPodName, 
entityServiceName, fallbackEnvironment, entityServiceNameSource) - } else if fullPodName != EMPTY && entityServiceName != EMPTY && entityServiceNameSource == EMPTY { - addPodToServiceEnvironmentMap(fullPodName, entityServiceName, fallbackEnvironment, entitystore.ServiceNameSourceUnknown) + p.k8sscraper.Reset() + if logGroupNames.Str() == EMPTY || (serviceName.Str() == EMPTY && environmentName.Str() == EMPTY) { + continue } - } - p.k8sscraper.Reset() - if logGroupNames.Str() == EMPTY || (serviceName.Str() == EMPTY && environmentName.Str() == EMPTY) { - continue - } - logGroupNamesSlice := strings.Split(logGroupNames.Str(), "&") - for _, logGroupName := range logGroupNamesSlice { - if logGroupName == EMPTY { - continue + logGroupNamesSlice := strings.Split(logGroupNames.Str(), "&") + for _, logGroupName := range logGroupNamesSlice { + if logGroupName == EMPTY { + continue + } + addToEntityStore(entitystore.LogGroupName(logGroupName), serviceName.Str(), environmentName.Str()) } - addToEntityStore(entitystore.LogGroupName(logGroupName), serviceName.Str(), environmentName.Str()) } + } return md, nil } diff --git a/plugins/processors/awsentity/processor_test.go b/plugins/processors/awsentity/processor_test.go index 745926ad3b..5050caa655 100644 --- a/plugins/processors/awsentity/processor_test.go +++ b/plugins/processors/awsentity/processor_test.go @@ -70,9 +70,19 @@ func newMockGetServiceNameAndSource(service, source string) func() (string, stri } } +func newMockGetEC2InfoFromEntityStore(instance string) func() entitystore.EC2Info { + return func() entitystore.EC2Info { + return entitystore.EC2Info{ + InstanceID: instance, + } + } +} + func TestProcessMetricsLogGroupAssociation(t *testing.T) { logger, _ := zap.NewDevelopment() - p := newAwsEntityProcessor(&Config{}, logger) + p := newAwsEntityProcessor(&Config{ + EntityType: attributeService, + }, logger) ctx := context.Background() // empty metrics, no action @@ -157,7 +167,7 @@ func 
TestProcessMetricsLogGroupAssociation(t *testing.T) { func TestProcessMetricsForAddingPodToServiceMap(t *testing.T) { logger, _ := zap.NewDevelopment() - p := newAwsEntityProcessor(&Config{ClusterName: "test-cluster"}, logger) + p := newAwsEntityProcessor(&Config{ClusterName: "test-cluster", EntityType: attributeService}, logger) ctx := context.Background() tests := []struct { name string @@ -315,7 +325,7 @@ func TestProcessMetricsResourceAttributeScraping(t *testing.T) { if tt.mockGetMetricAttributesFromEntityStore != nil { getMetricAttributesFromEntityStore = tt.mockGetMetricAttributesFromEntityStore } - p := newAwsEntityProcessor(&Config{}, logger) + p := newAwsEntityProcessor(&Config{EntityType: attributeService}, logger) p.config.Platform = config.ModeEC2 _, err := p.processMetrics(ctx, tt.metrics) assert.NoError(t, err) @@ -329,6 +339,53 @@ func TestProcessMetricsResourceAttributeScraping(t *testing.T) { } } +func TestProcessMetricsResourceEntityProcessing(t *testing.T) { + logger, _ := zap.NewDevelopment() + ctx := context.Background() + tests := []struct { + name string + metrics pmetric.Metrics + want map[string]any + instance string + }{ + { + name: "EmptyMetrics", + metrics: pmetric.NewMetrics(), + want: map[string]any{}, + }, + { + name: "ResourceEntityEC2", + metrics: generateMetrics(), + instance: "i-123456789", + want: map[string]any{ + "com.amazonaws.cloudwatch.entity.internal.type": "AWS::Resource", + "com.amazonaws.cloudwatch.entity.internal.resource.type": "AWS::EC2::Instance", + "com.amazonaws.cloudwatch.entity.internal.identifier": "i-123456789", + }, + }, + { + name: "ResourceEntityEC2NoInstance", + metrics: generateMetrics(), + instance: "", + want: map[string]any{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + getEC2InfoFromEntityStore = newMockGetEC2InfoFromEntityStore(tt.instance) + p := newAwsEntityProcessor(&Config{EntityType: entityattributes.Resource}, logger) + p.config.Platform = config.ModeEC2 + _, 
err := p.processMetrics(ctx, tt.metrics) + assert.NoError(t, err) + rm := tt.metrics.ResourceMetrics() + if rm.Len() > 0 { + assert.Equal(t, tt.want, rm.At(0).Resource().Attributes().AsRaw()) + } + }) + } +} + func TestProcessMetricsDatapointAttributeScraping(t *testing.T) { logger, _ := zap.NewDevelopment() ctx := context.Background() @@ -387,7 +444,7 @@ func TestProcessMetricsDatapointAttributeScraping(t *testing.T) { if tt.mockGetMetricAttributesFromEntityStore != nil { getMetricAttributesFromEntityStore = tt.mockGetMetricAttributesFromEntityStore } - p := newAwsEntityProcessor(&Config{ScrapeDatapointAttribute: true}, logger) + p := newAwsEntityProcessor(&Config{ScrapeDatapointAttribute: true, EntityType: attributeService}, logger) p.config.Platform = config.ModeEC2 _, err := p.processMetrics(ctx, tt.metrics) assert.NoError(t, err) diff --git a/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml b/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml index 30359e7c85..0d263f3231 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml @@ -21,7 +21,8 @@ extensions: mode: ec2 region: us-west-2 processors: - awsentity: + awsentity/resource: + entity_type: Resource scrape_datapoint_attribute: true platform: ec2 cumulativetodelta/hostDeltaMetrics: @@ -77,7 +78,7 @@ service: exporters: - awscloudwatch processors: - - awsentity + - awsentity/resource - ec2tagger receivers: - telegraf_disk @@ -89,7 +90,7 @@ service: exporters: - awscloudwatch processors: - - awsentity + - awsentity/resource - cumulativetodelta/hostDeltaMetrics - ec2tagger receivers: diff --git a/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml b/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml index bd8d7e88ae..c8bfcc520f 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml @@ -21,7 
+21,8 @@ extensions: mode: ec2 region: us-west-2 processors: - awsentity: + awsentity/resource: + entity_type: Resource scrape_datapoint_attribute: true platform: ec2 cumulativetodelta/hostDeltaMetrics: @@ -85,7 +86,7 @@ service: exporters: - awscloudwatch processors: - - awsentity + - awsentity/resource - ec2tagger receivers: - telegraf_cpu @@ -99,7 +100,7 @@ service: exporters: - awscloudwatch processors: - - awsentity + - awsentity/resource - cumulativetodelta/hostDeltaMetrics - ec2tagger receivers: diff --git a/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml b/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml index ac7fbc286f..30a234e297 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml @@ -21,7 +21,8 @@ extensions: mode: ec2 region: us-west-2 processors: - awsentity: + awsentity/resource: + entity_type: Resource scrape_datapoint_attribute: true platform: ec2 ec2tagger: @@ -78,7 +79,7 @@ service: exporters: - awscloudwatch processors: - - awsentity + - awsentity/resource - ec2tagger receivers: - telegraf_win_perf_counters/2039663244 diff --git a/translator/tocwconfig/sampleConfig/basic_config_linux.yaml b/translator/tocwconfig/sampleConfig/basic_config_linux.yaml index c8ba47c880..046ae18f0b 100644 --- a/translator/tocwconfig/sampleConfig/basic_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/basic_config_linux.yaml @@ -21,7 +21,8 @@ extensions: mode: ec2 region: us-east-1 processors: - awsentity: + awsentity/resource: + entity_type: Resource scrape_datapoint_attribute: true platform: ec2 ec2tagger: @@ -51,7 +52,7 @@ service: exporters: - awscloudwatch processors: - - awsentity + - awsentity/resource - ec2tagger receivers: - telegraf_mem diff --git a/translator/tocwconfig/sampleConfig/basic_config_windows.yaml b/translator/tocwconfig/sampleConfig/basic_config_windows.yaml index 2ca73c3375..af56afd903 100644 --- 
a/translator/tocwconfig/sampleConfig/basic_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/basic_config_windows.yaml @@ -21,7 +21,8 @@ extensions: mode: ec2 region: us-west-2 processors: - awsentity: + awsentity/resource: + entity_type: Resource scrape_datapoint_attribute: true platform: ec2 ec2tagger: @@ -53,7 +54,7 @@ service: exporters: - awscloudwatch processors: - - awsentity + - awsentity/resource - ec2tagger receivers: - telegraf_win_perf_counters/1492679118 diff --git a/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml b/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml index ecfcdceca0..b9506b7381 100644 --- a/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml @@ -21,7 +21,8 @@ extensions: mode: ec2 region: us-west-2 processors: - awsentity: + awsentity/service: + entity_type: Service scrape_datapoint_attribute: true platform: ec2 receivers: @@ -34,11 +35,11 @@ service: - agenthealth/metrics - entitystore pipelines: - metrics/host: + metrics/hostCustomMetrics: exporters: - awscloudwatch processors: - - awsentity + - awsentity/service receivers: - telegraf_socket_listener telemetry: diff --git a/translator/tocwconfig/sampleConfig/compass_linux_config.yaml b/translator/tocwconfig/sampleConfig/compass_linux_config.yaml index 5a2eccf32a..95bab71eeb 100644 --- a/translator/tocwconfig/sampleConfig/compass_linux_config.yaml +++ b/translator/tocwconfig/sampleConfig/compass_linux_config.yaml @@ -32,7 +32,8 @@ extensions: mode: ec2 region: us-west-2 processors: - awsentity: + awsentity/service: + entity_type: Service scrape_datapoint_attribute: true platform: ec2 ec2tagger: @@ -58,11 +59,11 @@ service: - agenthealth/metrics - entitystore pipelines: - metrics/host: + metrics/hostCustomMetrics: exporters: - awscloudwatch processors: - - awsentity + - awsentity/service - ec2tagger receivers: - telegraf_socket_listener diff --git 
a/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml b/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml index 4f032c0c92..a50514557a 100644 --- a/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml @@ -95,7 +95,12 @@ extensions: mode: ec2 region: us-west-2 processors: - awsentity: + awsentity/resource: + entity_type: Resource + scrape_datapoint_attribute: true + platform: ec2 + awsentity/service: + entity_type: Service scrape_datapoint_attribute: true platform: ec2 batch/emf_logs: @@ -267,24 +272,32 @@ service: exporters: - awscloudwatch processors: - - awsentity + - awsentity/resource - ec2tagger - transform receivers: - telegraf_procstat/1917393364 - - telegraf_socket_listener - telegraf_cpu - telegraf_mem - telegraf_netstat - telegraf_processes - - telegraf_statsd - telegraf_swap - telegraf_disk + metrics/hostCustomMetrics: + exporters: + - awscloudwatch + processors: + - awsentity/service + - ec2tagger + - transform + receivers: + - telegraf_socket_listener + - telegraf_statsd metrics/hostDeltaMetrics: exporters: - awscloudwatch processors: - - awsentity + - awsentity/resource - cumulativetodelta/hostDeltaMetrics - ec2tagger - transform diff --git a/translator/tocwconfig/sampleConfig/complete_linux_config.yaml b/translator/tocwconfig/sampleConfig/complete_linux_config.yaml index 41456c9b94..c6203a2e74 100644 --- a/translator/tocwconfig/sampleConfig/complete_linux_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_linux_config.yaml @@ -100,9 +100,14 @@ extensions: mode: ec2 region: us-west-2 processors: - awsentity: + awsentity/resource: + entity_type: Resource scrape_datapoint_attribute: true platform: ec2 + awsentity/service: + entity_type: Service + scrape_datapoint_attribute: true + platform: ec2 batch/emf_logs: metadata_cardinality_limit: 1000 send_batch_max_size: 0 @@ -373,24 +378,32 @@ service: exporters: - awscloudwatch processors: - - 
awsentity + - awsentity/resource - ec2tagger - transform receivers: - telegraf_mem - telegraf_netstat - - telegraf_socket_listener - - telegraf_statsd - telegraf_procstat/1917393364 - telegraf_swap - telegraf_cpu - telegraf_disk - telegraf_processes + metrics/hostCustomMetrics: + exporters: + - awscloudwatch + processors: + - awsentity/service + - ec2tagger + - transform + receivers: + - telegraf_socket_listener + - telegraf_statsd metrics/hostDeltaMetrics: exporters: - awscloudwatch processors: - - awsentity + - awsentity/resource - cumulativetodelta/hostDeltaMetrics - ec2tagger - transform diff --git a/translator/tocwconfig/sampleConfig/complete_windows_config.yaml b/translator/tocwconfig/sampleConfig/complete_windows_config.yaml index 9b4bc107d7..918a9f5d70 100644 --- a/translator/tocwconfig/sampleConfig/complete_windows_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_windows_config.yaml @@ -95,7 +95,12 @@ extensions: mode: ec2 region: us-west-2 processors: - awsentity: + awsentity/resource: + entity_type: Resource + scrape_datapoint_attribute: true + platform: ec2 + awsentity/service: + entity_type: Service scrape_datapoint_attribute: true platform: ec2 batch/emf_logs: @@ -254,7 +259,7 @@ service: exporters: - awscloudwatch processors: - - awsentity + - awsentity/resource - ec2tagger - transform receivers: @@ -262,11 +267,19 @@ service: - telegraf_nvidia_smi - telegraf_win_perf_counters/4283769065 - telegraf_win_perf_counters/3081249416 - - telegraf_statsd - telegraf_win_perf_counters/1492679118 - telegraf_win_perf_counters/2402387132 - telegraf_win_perf_counters/3762679655 - telegraf_procstat/1340600742 + metrics/hostCustomMetrics: + exporters: + - awscloudwatch + processors: + - awsentity/service + - ec2tagger + - transform + receivers: + - telegraf_statsd traces/xray: exporters: - awsxray diff --git a/translator/tocwconfig/sampleConfig/delta_config_linux.yaml b/translator/tocwconfig/sampleConfig/delta_config_linux.yaml index 
53498d833c..63eecf2382 100644 --- a/translator/tocwconfig/sampleConfig/delta_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/delta_config_linux.yaml @@ -21,7 +21,8 @@ extensions: mode: ec2 region: us-east-1 processors: - awsentity: + awsentity/resource: + entity_type: Resource scrape_datapoint_attribute: true platform: ec2 cumulativetodelta/hostDeltaMetrics: @@ -71,7 +72,7 @@ service: exporters: - awscloudwatch processors: - - awsentity + - awsentity/resource - cumulativetodelta/hostDeltaMetrics - ec2tagger - transform diff --git a/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml b/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml index b85e0e47a1..d7eeffb216 100644 --- a/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml @@ -21,7 +21,8 @@ extensions: mode: ec2 region: us-east-1 processors: - awsentity: + awsentity/resource: + entity_type: Resource scrape_datapoint_attribute: true platform: ec2 cumulativetodelta/hostDeltaMetrics: @@ -54,7 +55,7 @@ service: exporters: - awscloudwatch processors: - - awsentity + - awsentity/resource - cumulativetodelta/hostDeltaMetrics - ec2tagger receivers: diff --git a/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml b/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml index 219fd3d93a..1a73d782ca 100644 --- a/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml +++ b/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml @@ -26,7 +26,8 @@ extensions: mode: ec2 region: us-west-2 processors: - awsentity: + awsentity/resource: + entity_type: Resource scrape_datapoint_attribute: true platform: ec2 ec2tagger: @@ -71,7 +72,7 @@ service: exporters: - awscloudwatch processors: - - awsentity + - awsentity/resource - ec2tagger - transform receivers: diff --git a/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml b/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml index 
96989cc17e..b484ce567a 100644 --- a/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml +++ b/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml @@ -21,7 +21,8 @@ extensions: mode: ec2 region: us-east-1 processors: - awsentity: + awsentity/resource: + entity_type: Resource scrape_datapoint_attribute: true platform: ec2 ec2tagger: @@ -45,7 +46,7 @@ service: exporters: - awscloudwatch processors: - - awsentity + - awsentity/resource - ec2tagger receivers: - telegraf_disk diff --git a/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml b/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml index c8ba47c880..046ae18f0b 100644 --- a/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml +++ b/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml @@ -21,7 +21,8 @@ extensions: mode: ec2 region: us-east-1 processors: - awsentity: + awsentity/resource: + entity_type: Resource scrape_datapoint_attribute: true platform: ec2 ec2tagger: @@ -51,7 +52,7 @@ service: exporters: - awscloudwatch processors: - - awsentity + - awsentity/resource - ec2tagger receivers: - telegraf_mem diff --git a/translator/tocwconfig/sampleConfig/jmx_config_linux.yaml b/translator/tocwconfig/sampleConfig/jmx_config_linux.yaml index e2c318a2b4..d3cfee9ce1 100644 --- a/translator/tocwconfig/sampleConfig/jmx_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/jmx_config_linux.yaml @@ -24,7 +24,8 @@ extensions: mode: ec2 region: us-west-2 processors: - awsentity: + awsentity/resource: + entity_type: Resource scrape_datapoint_attribute: true platform: ec2 cumulativetodelta/jmx: @@ -112,7 +113,7 @@ service: exporters: - awscloudwatch processors: - - awsentity + - awsentity/resource - transform receivers: - telegraf_disk diff --git a/translator/tocwconfig/sampleConfig/standard_config_linux.yaml b/translator/tocwconfig/sampleConfig/standard_config_linux.yaml index 68eea45ead..616d7589dc 100644 --- 
a/translator/tocwconfig/sampleConfig/standard_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_linux.yaml @@ -21,7 +21,8 @@ extensions: mode: ec2 region: us-west-2 processors: - awsentity: + awsentity/resource: + entity_type: Resource scrape_datapoint_attribute: true platform: ec2 cumulativetodelta/hostDeltaMetrics: @@ -72,7 +73,7 @@ service: exporters: - awscloudwatch processors: - - awsentity + - awsentity/resource - ec2tagger receivers: - telegraf_cpu @@ -83,7 +84,7 @@ service: exporters: - awscloudwatch processors: - - awsentity + - awsentity/resource - cumulativetodelta/hostDeltaMetrics - ec2tagger receivers: diff --git a/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml b/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml index 8c82bf9553..ff82b2f699 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml @@ -25,7 +25,8 @@ extensions: region: us-west-2 shared_credential_file: fake-path processors: - awsentity: + awsentity/resource: + entity_type: Resource scrape_datapoint_attribute: true platform: ec2 cumulativetodelta/hostDeltaMetrics: @@ -79,7 +80,7 @@ service: exporters: - awscloudwatch processors: - - awsentity + - awsentity/resource - ec2tagger receivers: - telegraf_cpu @@ -90,7 +91,7 @@ service: exporters: - awscloudwatch processors: - - awsentity + - awsentity/resource - cumulativetodelta/hostDeltaMetrics - ec2tagger receivers: diff --git a/translator/tocwconfig/sampleConfig/standard_config_windows.yaml b/translator/tocwconfig/sampleConfig/standard_config_windows.yaml index 966aa73064..682831e467 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_windows.yaml @@ -21,7 +21,8 @@ extensions: mode: ec2 region: us-west-2 processors: - awsentity: + awsentity/resource: 
+ entity_type: Resource scrape_datapoint_attribute: true platform: ec2 ec2tagger: @@ -67,7 +68,7 @@ service: exporters: - awscloudwatch processors: - - awsentity + - awsentity/resource - ec2tagger receivers: - telegraf_win_perf_counters/4283769065 diff --git a/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml b/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml index 0b99767a19..fd788a5c17 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml @@ -25,7 +25,8 @@ extensions: region: us-west-2 shared_credential_file: fake-path processors: - awsentity: + awsentity/resource: + entity_type: Resource scrape_datapoint_attribute: true platform: ec2 ec2tagger: @@ -74,7 +75,7 @@ service: exporters: - awscloudwatch processors: - - awsentity + - awsentity/resource - ec2tagger receivers: - telegraf_win_perf_counters/3762679655 diff --git a/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml b/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml index a45eb107ee..88980baa0d 100644 --- a/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml @@ -21,7 +21,8 @@ extensions: mode: ec2 region: us-west-2 processors: - awsentity: + awsentity/service: + entity_type: Service scrape_datapoint_attribute: true platform: ec2 receivers: @@ -34,11 +35,11 @@ service: - agenthealth/metrics - entitystore pipelines: - metrics/host: + metrics/hostCustomMetrics: exporters: - awscloudwatch processors: - - awsentity + - awsentity/service receivers: - telegraf_statsd telemetry: diff --git a/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml b/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml index d05b9a0c4e..906f053a31 100644 --- a/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml +++ 
b/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml @@ -21,7 +21,8 @@ extensions: mode: ec2 region: us-west-2 processors: - awsentity: + awsentity/service: + entity_type: Service scrape_datapoint_attribute: true platform: ec2 receivers: @@ -34,11 +35,11 @@ service: - agenthealth/metrics - entitystore pipelines: - metrics/host: + metrics/hostCustomMetrics: exporters: - awscloudwatch processors: - - awsentity + - awsentity/service receivers: - telegraf_statsd telemetry: diff --git a/translator/translate/otel/common/common.go b/translator/translate/otel/common/common.go index 3bb7f49085..71d8dd1d85 100644 --- a/translator/translate/otel/common/common.go +++ b/translator/translate/otel/common/common.go @@ -74,6 +74,7 @@ const ( const ( CollectDMetricKey = "collectd" + CollectDPluginKey = "socket_listener" CPUMetricKey = "cpu" DiskMetricKey = "disk" DiskIoMetricKey = "diskio" @@ -98,13 +99,14 @@ const ( ) const ( - PipelineNameHost = "host" - PipelineNameHostDeltaMetrics = "hostDeltaMetrics" - PipelineNameJmx = "jmx" - PipelineNameEmfLogs = "emf_logs" - AppSignals = "application_signals" - AppSignalsFallback = "app_signals" - AppSignalsRules = "rules" + PipelineNameHost = "host" + PipelineNameHostCustomMetrics = "hostCustomMetrics" + PipelineNameHostDeltaMetrics = "hostDeltaMetrics" + PipelineNameJmx = "jmx" + PipelineNameEmfLogs = "emf_logs" + AppSignals = "application_signals" + AppSignalsFallback = "app_signals" + AppSignalsRules = "rules" ) var ( diff --git a/translator/translate/otel/pipeline/host/translator.go b/translator/translate/otel/pipeline/host/translator.go index e4faf97f1a..66763ab76f 100644 --- a/translator/translate/otel/pipeline/host/translator.go +++ b/translator/translate/otel/pipeline/host/translator.go @@ -64,10 +64,16 @@ func (t translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators, log.Printf("D! 
pipeline %s has no receivers", t.name) return nil, nil } + var entityProcessor common.Translator[component.Config] + if common.PipelineNameHost == t.name || common.PipelineNameHostDeltaMetrics == t.name { + entityProcessor = awsentity.NewTranslatorWithEntityType(awsentity.Resource) + } else { + entityProcessor = awsentity.NewTranslatorWithEntityType(awsentity.Service) + } translators := common.ComponentTranslators{ Receivers: t.receivers, - Processors: common.NewTranslatorMap(awsentity.NewTranslator()), + Processors: common.NewTranslatorMap(entityProcessor), Exporters: common.NewTranslatorMap(awscloudwatch.NewTranslator()), Extensions: common.NewTranslatorMap(agenthealth.NewTranslator(component.DataTypeMetrics, []string{agenthealth.OperationPutMetricData})), } diff --git a/translator/translate/otel/pipeline/host/translator_test.go b/translator/translate/otel/pipeline/host/translator_test.go index 4ed90b8270..3ec48c72f0 100644 --- a/translator/translate/otel/pipeline/host/translator_test.go +++ b/translator/translate/otel/pipeline/host/translator_test.go @@ -59,7 +59,7 @@ func TestTranslator(t *testing.T) { want: &want{ pipelineID: "metrics/host", receivers: []string{"nop", "other"}, - processors: []string{"awsentity"}, + processors: []string{"awsentity/resource"}, exporters: []string{"awscloudwatch"}, extensions: []string{"agenthealth/metrics"}, }, @@ -76,7 +76,24 @@ func TestTranslator(t *testing.T) { want: &want{ pipelineID: "metrics/hostDeltaMetrics", receivers: []string{"nop", "other"}, - processors: []string{"awsentity", "cumulativetodelta/hostDeltaMetrics"}, + processors: []string{"awsentity/resource", "cumulativetodelta/hostDeltaMetrics"}, + exporters: []string{"awscloudwatch"}, + extensions: []string{"agenthealth/metrics"}, + }, + }, + "WithMetricsKeyStatsD": { + input: map[string]interface{}{ + "metrics": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "statsd": map[string]interface{}{}, + }, + }, + }, + pipelineName: 
common.PipelineNameHostCustomMetrics, + want: &want{ + pipelineID: "metrics/hostCustomMetrics", + receivers: []string{"nop", "other"}, + processors: []string{"awsentity/service"}, exporters: []string{"awscloudwatch"}, extensions: []string{"agenthealth/metrics"}, }, @@ -100,7 +117,7 @@ func TestTranslator(t *testing.T) { want: &want{ pipelineID: "metrics/host", receivers: []string{"nop", "other"}, - processors: []string{"awsentity", "transform"}, + processors: []string{"awsentity/resource", "transform"}, exporters: []string{"awscloudwatch"}, extensions: []string{"agenthealth/metrics"}, }, @@ -121,7 +138,7 @@ func TestTranslator(t *testing.T) { want: &want{ pipelineID: "metrics/host", receivers: []string{"nop", "other"}, - processors: []string{"awsentity"}, + processors: []string{"awsentity/resource"}, exporters: []string{"awscloudwatch"}, extensions: []string{"agenthealth/metrics"}, }, diff --git a/translator/translate/otel/pipeline/host/translators.go b/translator/translate/otel/pipeline/host/translators.go index 11f2ad6217..4b8e94ecec 100644 --- a/translator/translate/otel/pipeline/host/translators.go +++ b/translator/translate/otel/pipeline/host/translators.go @@ -25,20 +25,27 @@ func NewTranslators(conf *confmap.Conf, os string) (pipeline.TranslatorMap, erro // split out delta receiver types deltaReceivers := common.NewTranslatorMap[component.Config]() hostReceivers := common.NewTranslatorMap[component.Config]() + hostCustomReceivers := common.NewTranslatorMap[component.Config]() adapterReceivers.Range(func(translator common.Translator[component.Config]) { if translator.ID().Type() == adapter.Type(common.DiskIOKey) || translator.ID().Type() == adapter.Type(common.NetKey) { deltaReceivers.Set(translator) + } else if translator.ID().Type() == adapter.Type(common.StatsDMetricKey) || translator.ID().Type() == adapter.Type(common.CollectDPluginKey) { + hostCustomReceivers.Set(translator) } else { hostReceivers.Set(translator) } }) hasHostPipeline := 
hostReceivers.Len() != 0 + hasHostCustomPipeline := hostCustomReceivers.Len() != 0 hasDeltaPipeline := deltaReceivers.Len() != 0 if hasHostPipeline { translators.Set(NewTranslator(common.PipelineNameHost, hostReceivers)) } + if hasHostCustomPipeline { + translators.Set(NewTranslator(common.PipelineNameHostCustomMetrics, hostCustomReceivers)) + } if hasDeltaPipeline { translators.Set(NewTranslator(common.PipelineNameHostDeltaMetrics, deltaReceivers)) } diff --git a/translator/translate/otel/pipeline/host/translators_test.go b/translator/translate/otel/pipeline/host/translators_test.go index 52ada63de8..88c03a6792 100644 --- a/translator/translate/otel/pipeline/host/translators_test.go +++ b/translator/translate/otel/pipeline/host/translators_test.go @@ -57,6 +57,20 @@ func TestTranslators(t *testing.T) { }, }, }, + "WithCustomMetrics": { + input: map[string]interface{}{ + "metrics": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "statsd": map[string]interface{}{}, + }, + }, + }, + want: map[string]want{ + "metrics/hostCustomMetrics": { + receivers: []string{"telegraf_statsd"}, + }, + }, + }, } for name, testCase := range testCases { t.Run(name, func(t *testing.T) { diff --git a/translator/translate/otel/processor/awsentity/translator.go b/translator/translate/otel/processor/awsentity/translator.go index 17f9a50d5e..7e275e790b 100644 --- a/translator/translate/otel/processor/awsentity/translator.go +++ b/translator/translate/otel/processor/awsentity/translator.go @@ -4,6 +4,8 @@ package awsentity import ( + "strings" + "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/processor" @@ -16,10 +18,16 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/util/ecsutil" ) -const name = "awsentity" +const ( + name = "awsentity" + Service = "Service" + Resource = "Resource" +) type translator struct { - factory processor.Factory + factory processor.Factory + entityType string + 
name string } func NewTranslator() common.Translator[component.Config] { @@ -28,13 +36,25 @@ func NewTranslator() common.Translator[component.Config] { } } +func NewTranslatorWithEntityType(entityType string) common.Translator[component.Config] { + return &translator{ + factory: awsentity.NewFactory(), + entityType: entityType, + name: strings.ToLower(entityType), + } +} + func (t *translator) ID() component.ID { - return component.NewIDWithName(t.factory.Type(), "") + return component.NewIDWithName(t.factory.Type(), t.name) } func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { cfg := t.factory.CreateDefaultConfig().(*awsentity.Config) + if t.entityType != "" { + cfg.EntityType = t.entityType + } + if common.TelegrafMetricsEnabled(conf) { cfg.ScrapeDatapointAttribute = true } From b2fa315522a7b893d53e32382a614a4c64aaab32 Mon Sep 17 00:00:00 2001 From: Lisa Guo Date: Fri, 27 Sep 2024 16:23:59 -0400 Subject: [PATCH 19/47] [Prometheus] Add relabel configs for pod service discovery to ingest k8s node, namespace, and pod entity data (#814) --- plugins/inputs/prometheus/start.go | 33 +++++++++++++++ .../k8sattributescraper.go | 21 ++++++++++ .../k8sattributescraper_test.go | 40 +++++++++++++++++++ 3 files changed, 94 insertions(+) diff --git a/plugins/inputs/prometheus/start.go b/plugins/inputs/prometheus/start.go index d5b81e1fe5..b3538eb73c 100644 --- a/plugins/inputs/prometheus/start.go +++ b/plugins/inputs/prometheus/start.go @@ -39,6 +39,7 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" _ "github.com/prometheus/prometheus/discovery/install" + "github.com/prometheus/prometheus/discovery/kubernetes" "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/storage" @@ -286,6 +287,11 @@ const ( savedScrapeInstanceLabel = "cwagent_saved_scrape_instance" scrapeInstanceLabel = "__address__" savedScrapeNameLabel = 
"cwagent_saved_scrape_name" // just arbitrary name that end user won't override in relabel config + + // Labels for Entity population + EntityK8sPodLabel = "cwagent_entity_k8s_pod_name" + EntityK8sNamespaceLabel = "cwagent_entity_k8s_namespace" + EntityK8sNodeLabel = "cwagent_entity_K8s_node" ) func reloadConfig(filename string, logger log.Logger, rls ...func(*config.Config) error) (err error) { @@ -331,6 +337,33 @@ func reloadConfig(filename string, logger log.Logger, rls ...func(*config.Config }, } + for _, sdc := range sc.ServiceDiscoveryConfigs { + if sdc.(*kubernetes.SDConfig).Role == kubernetes.RolePod { + relabelConfigs = append(relabelConfigs, []*relabel.Config{ + { + Action: relabel.Replace, + Regex: relabel.MustNewRegexp("(.*)"), + Replacement: "$1", + SourceLabels: model.LabelNames{"__meta_kubernetes_pod_name"}, + TargetLabel: EntityK8sPodLabel, + }, + { + Action: relabel.Replace, + Regex: relabel.MustNewRegexp("(.*)"), + Replacement: "$1", + SourceLabels: model.LabelNames{"__meta_kubernetes_namespace"}, + TargetLabel: EntityK8sNamespaceLabel, + }, + { + Action: relabel.Replace, + Regex: relabel.MustNewRegexp("(.*)"), + Replacement: "$1", + SourceLabels: model.LabelNames{"__meta_kubernetes_pod_node_name"}, + TargetLabel: EntityK8sNodeLabel, + }, + }...) + } + } level.Info(logger).Log("msg", "Add extra relabel_configs and metric_relabel_configs to save job, instance and __name__ before user relabel") sc.RelabelConfigs = append(relabelConfigs, sc.RelabelConfigs...) 
diff --git a/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper.go b/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper.go index 151332024a..597e48fc5b 100644 --- a/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper.go +++ b/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper.go @@ -7,6 +7,7 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" semconv "go.opentelemetry.io/collector/semconv/v1.22.0" + "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/prometheus" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/internal/entityattributes" ) @@ -45,12 +46,32 @@ func NewK8sAttributeScraper(clusterName string) *K8sAttributeScraper { func (e *K8sAttributeScraper) Scrape(rm pcommon.Resource) { resourceAttrs := rm.Attributes() + e.relabelPrometheus(resourceAttrs) + e.scrapeNamespace(resourceAttrs) e.scrapeWorkload(resourceAttrs) e.scrapeNode(resourceAttrs) e.decorateEntityAttributes(resourceAttrs) } +func (e *K8sAttributeScraper) relabelPrometheus(p pcommon.Map) { + // TODO: Retrieve workload from pod label + if podName, exists := p.Get(prometheus.EntityK8sPodLabel); exists { + p.PutStr(semconv.AttributeK8SPodName, podName.Str()) + p.Remove(prometheus.EntityK8sPodLabel) + } + + if namespace, exists := p.Get(prometheus.EntityK8sNamespaceLabel); exists { + p.PutStr(semconv.AttributeK8SNamespaceName, namespace.Str()) + p.Remove(prometheus.EntityK8sNamespaceLabel) + } + + if nodeName, exists := p.Get(prometheus.EntityK8sNodeLabel); exists { + p.PutStr(semconv.AttributeK8SNodeName, nodeName.Str()) + p.Remove(prometheus.EntityK8sNodeLabel) + } +} + func (e *K8sAttributeScraper) scrapeNamespace(p pcommon.Map) { for _, namespace := range namespaceAllowlist { if namespaceAttr, ok := p.Get(namespace); ok { diff --git a/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper_test.go 
b/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper_test.go index 2e69862b0a..27e1e7e528 100644 --- a/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper_test.go +++ b/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper_test.go @@ -4,6 +4,7 @@ package k8sattributescraper import ( + "fmt" "testing" "github.com/stretchr/testify/assert" @@ -11,6 +12,7 @@ import ( "go.opentelemetry.io/collector/pdata/pmetric" semconv "go.opentelemetry.io/collector/semconv/v1.22.0" + "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/prometheus" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/internal/entityattributes" ) @@ -319,6 +321,44 @@ func Test_k8sattributescraper_scrapeWorkload(t *testing.T) { } } +func TestK8sAttributeScraper_relabelPrometheus(t *testing.T) { + tests := []struct { + name string + attributes pcommon.Map + want pcommon.Map + }{ + { + name: "PrometheusPod", + attributes: getAttributeMap(map[string]any{ + prometheus.EntityK8sPodLabel: "test-pod", + prometheus.EntityK8sNamespaceLabel: "test-namespace", + prometheus.EntityK8sNodeLabel: "test-node", + }), + want: getAttributeMap(map[string]any{ + semconv.AttributeK8SPodName: "test-pod", + semconv.AttributeK8SNamespaceName: "test-namespace", + semconv.AttributeK8SNodeName: "test-node", + }), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &K8sAttributeScraper{} + e.relabelPrometheus(tt.attributes) + assert.Equal(t, tt.attributes.Len(), tt.want.Len()) + tt.want.Range(func(k string, v pcommon.Value) bool { + actualValue, exists := tt.attributes.Get(k) + if !exists { + assert.Fail(t, fmt.Sprintf("%s does not exist in the attribute map", k)) + return false + } + assert.Equal(t, actualValue.Str(), v.Str()) + return true + }) + }) + } +} + func getAttributeMap(attributes map[string]any) pcommon.Map { attrMap := pcommon.NewMap() attrMap.FromRaw(attributes) From 
35fa953cc16e7405acfd2f4cab8a075b3b650a81 Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Tue, 1 Oct 2024 10:15:50 -0400 Subject: [PATCH 20/47] Fix DescribeTags retry on high-frequency and delay refresh interval (#819) --- extension/entitystore/extension.go | 2 +- extension/entitystore/retryer.go | 103 +++++++++++++ extension/entitystore/retryer_test.go | 141 ++++++++++++++++++ extension/entitystore/serviceprovider.go | 81 +++------- extension/entitystore/serviceprovider_test.go | 71 ++------- plugins/processors/ec2tagger/constants.go | 1 + 6 files changed, 276 insertions(+), 123 deletions(-) create mode 100644 extension/entitystore/retryer.go create mode 100644 extension/entitystore/retryer_test.go diff --git a/extension/entitystore/extension.go b/extension/entitystore/extension.go index 0eac021944..4928ee0000 100644 --- a/extension/entitystore/extension.go +++ b/extension/entitystore/extension.go @@ -104,7 +104,7 @@ func (e *EntityStore) Start(ctx context.Context, host component.Host) error { // Starting the ttl cache will automatically evict all expired pods from the map go e.StartPodToServiceEnvironmentMappingTtlCache(e.done) } - e.serviceprovider = newServiceProvider(e.mode, e.config.Region, &e.ec2Info, e.metadataprovider, getEC2Provider, ec2CredentialConfig, e.done) + e.serviceprovider = newServiceProvider(e.mode, e.config.Region, &e.ec2Info, e.metadataprovider, getEC2Provider, ec2CredentialConfig, e.done, e.logger) go e.serviceprovider.startServiceProvider() return nil } diff --git a/extension/entitystore/retryer.go b/extension/entitystore/retryer.go new file mode 100644 index 0000000000..cefa06d374 --- /dev/null +++ b/extension/entitystore/retryer.go @@ -0,0 +1,103 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package entitystore + +import ( + "math/rand" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "go.uber.org/zap" +) + +const ( + RequestLimitExceeded = "RequestLimitExceeded" + infRetry = -1 +) + +var ( + retryableErrorMap = map[string]bool{ + "RequestLimitExceeded": true, + } +) + +type Retryer struct { + oneTime bool + retryAnyError bool + successRetryMin int + successRetryMax int + backoffArray []time.Duration + maxRetry int + done chan struct{} + logger *zap.Logger +} + +func NewRetryer(onetime bool, retryAnyError bool, successRetryMin int, successRetryMax int, backoffArray []time.Duration, maxRetry int, done chan struct{}, logger *zap.Logger) *Retryer { + return &Retryer{ + oneTime: onetime, + retryAnyError: retryAnyError, + successRetryMin: successRetryMin, + successRetryMax: successRetryMax, + backoffArray: backoffArray, + maxRetry: maxRetry, + done: done, + logger: logger, + } +} + +func (r *Retryer) refreshLoop(updateFunc func() error) int { + // Offset retry by 1 so we can start with 1 minute wait time + // instead of immediately retrying + retry := 1 + for { + if r.maxRetry != -1 && retry > r.maxRetry { + return retry + } + err := updateFunc() + if err == nil && r.oneTime { + return retry + } else if awsErr, ok := err.(awserr.Error); ok && !r.retryAnyError && !retryableErrorMap[awsErr.Code()] { + return retry + } + + waitDuration := calculateWaitTime(retry-1, err, r.successRetryMin, r.successRetryMax, r.backoffArray) + wait := time.NewTimer(waitDuration) + select { + case <-r.done: + r.logger.Debug("Shutting down retryer") + wait.Stop() + return retry + case <-wait.C: + } + + if retry > 1 { + r.logger.Debug("attribute retrieval retry count", zap.Int("retry", retry-1)) + } + + if err != nil { + retry++ + r.logger.Debug("there was an error when retrieving service attribute.", zap.Error(err)) + } else { + retry = 1 + } + + } + return retry +} + +// calculateWaitTime returns different time based on whether if +// a 
function call was returned with error. If returned with error, +// follow exponential backoff wait time, otherwise, refresh with jitter +func calculateWaitTime(retry int, err error, successRetryMin int, successRetryMax int, backoffArray []time.Duration) time.Duration { + var waitDuration time.Duration + if err == nil { + return time.Duration(rand.Intn(successRetryMax-successRetryMin)+successRetryMin) * time.Second + } + if retry < len(backoffArray) { + waitDuration = backoffArray[retry] + } else { + waitDuration = backoffArray[len(backoffArray)-1] + } + return waitDuration +} diff --git a/extension/entitystore/retryer_test.go b/extension/entitystore/retryer_test.go new file mode 100644 index 0000000000..46521a36b7 --- /dev/null +++ b/extension/entitystore/retryer_test.go @@ -0,0 +1,141 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package entitystore + +import ( + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/service/ec2/ec2iface" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" + "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/ec2tagger" +) + +var ( + FastBackOffArray = []time.Duration{0, 0, 0} +) + +func TestRetryer_refreshLoop(t *testing.T) { + type fields struct { + metadataProvider ec2metadataprovider.MetadataProvider + ec2API ec2iface.EC2API + iamRole string + ec2TagServiceName string + oneTime bool + } + type expectedInfo struct { + iamRole string + ec2TagServiceName string + } + tests := []struct { + name string + fields fields + expectedInfo expectedInfo + }{ + { + name: "HappyPath_CorrectRefresh", + fields: fields{ + metadataProvider: &mockMetadataProvider{ + InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ + InstanceID: "i-123456789"}, + }, + ec2API: 
&mockServiceNameEC2Client{}, + iamRole: "original-role", + ec2TagServiceName: "original-tag-name", + }, + expectedInfo: expectedInfo{ + iamRole: "TestRole", + ec2TagServiceName: "test-service", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + logger, _ := zap.NewDevelopment() + done := make(chan struct{}) + s := &serviceprovider{ + metadataProvider: tt.fields.metadataProvider, + ec2API: tt.fields.ec2API, + ec2Provider: func(s string, config *configaws.CredentialConfig) ec2iface.EC2API { + return tt.fields.ec2API + }, + iamRole: tt.fields.iamRole, + ec2TagServiceName: tt.fields.ec2TagServiceName, + done: done, + } + limitedRetryer := NewRetryer(tt.fields.oneTime, false, describeTagsJitterMin, describeTagsJitterMax, ec2tagger.ThrottleBackOffArray, maxRetry, s.done, logger) + unlimitedRetryer := NewRetryer(tt.fields.oneTime, true, defaultJitterMin, defaultJitterMax, ec2tagger.BackoffSleepArray, infRetry, s.done, logger) + go limitedRetryer.refreshLoop(s.getEC2TagServiceName) + go unlimitedRetryer.refreshLoop(s.getIAMRole) + time.Sleep(time.Second) + close(done) + assert.Equal(t, tt.expectedInfo.iamRole, s.iamRole) + assert.Equal(t, tt.expectedInfo.ec2TagServiceName, s.ec2TagServiceName) + }) + } +} + +func TestRetryer_refreshLoopRetry(t *testing.T) { + type fields struct { + metadataProvider ec2metadataprovider.MetadataProvider + ec2API ec2iface.EC2API + oneTime bool + } + tests := []struct { + name string + fields fields + expectedRetry int + }{ + { + name: "ThrottleLimitError", + fields: fields{ + metadataProvider: &mockMetadataProvider{ + InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ + InstanceID: "i-123456789"}, + }, + ec2API: &mockServiceNameEC2Client{ + throttleError: true, + }, + }, + expectedRetry: 4, + }, + { + name: "AuthError", + fields: fields{ + metadataProvider: &mockMetadataProvider{ + InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ + InstanceID: "i-123456789"}, + }, + ec2API: 
&mockServiceNameEC2Client{ + authError: true, + }, + }, + expectedRetry: 1, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + logger, _ := zap.NewDevelopment() + done := make(chan struct{}) + s := &serviceprovider{ + metadataProvider: tt.fields.metadataProvider, + ec2API: tt.fields.ec2API, + ec2Provider: func(s string, config *configaws.CredentialConfig) ec2iface.EC2API { + return tt.fields.ec2API + }, + done: done, + } + limitedRetryer := NewRetryer(tt.fields.oneTime, false, describeTagsJitterMin, describeTagsJitterMax, FastBackOffArray, maxRetry, s.done, logger) + retry := limitedRetryer.refreshLoop(s.getEC2TagServiceName) + time.Sleep(time.Second) + close(done) + assert.Equal(t, tt.expectedRetry, retry) + }) + } +} diff --git a/extension/entitystore/serviceprovider.go b/extension/entitystore/serviceprovider.go index 5c50405dc7..ffc527feae 100644 --- a/extension/entitystore/serviceprovider.go +++ b/extension/entitystore/serviceprovider.go @@ -7,15 +7,13 @@ import ( "context" "errors" "fmt" - "log" - "math/rand" "strings" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2/ec2iface" + "go.uber.org/zap" configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" @@ -40,8 +38,11 @@ const ( ServiceNameSourceUserConfiguration = "UserConfiguration" ServiceNameSourceK8sWorkload = "K8sWorkload" - jitterMax = 180 - jitterMin = 60 + describeTagsJitterMax = 3600 + describeTagsJitterMin = 3000 + defaultJitterMin = 60 + defaultJitterMax = 180 + maxRetry = 3 ) var ( @@ -72,7 +73,7 @@ type serviceprovider struct { ec2TagServiceName string region string done chan struct{} - + logger *zap.Logger // logFiles stores the service attributes that were configured for log files in CloudWatch Agent configuration. 
// Example: // "/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log": {ServiceName: "cloudwatch-agent"} @@ -85,12 +86,15 @@ type serviceprovider struct { } func (s *serviceprovider) startServiceProvider() { + oneTimeRetryer := NewRetryer(true, true, defaultJitterMin, defaultJitterMax, ec2tagger.BackoffSleepArray, infRetry, s.done, s.logger) + unlimitedRetryer := NewRetryer(false, true, defaultJitterMin, defaultJitterMax, ec2tagger.BackoffSleepArray, infRetry, s.done, s.logger) + limitedRetryer := NewRetryer(false, false, describeTagsJitterMin, describeTagsJitterMax, ec2tagger.ThrottleBackOffArray, maxRetry, s.done, s.logger) err := s.getEC2Client() if err != nil { - go refreshLoop(s.done, s.getEC2Client, true) + go oneTimeRetryer.refreshLoop(s.getEC2Client) } - go refreshLoop(s.done, s.getIAMRole, false) - go refreshLoop(s.done, s.getEC2TagServiceName, false) + go unlimitedRetryer.refreshLoop(s.getIAMRole) + go limitedRetryer.refreshLoop(s.getEC2TagServiceName) } // addEntryForLogFile adds an association between a log file glob and a service attribute, as configured in the @@ -221,11 +225,11 @@ func (s *serviceprovider) serviceAttributeFallback() ServiceAttribute { func (s *serviceprovider) getIAMRole() error { iamRole, err := s.metadataProvider.InstanceProfileIAMRole() if err != nil { - return fmt.Errorf("failed to get instance profile role: %s", err) + return err } iamRoleArn, err := arn.Parse(iamRole) if err != nil { - return fmt.Errorf("failed to parse IAM Role Arn: %s", err) + return err } iamRoleResource := iamRoleArn.Resource if strings.HasPrefix(iamRoleResource, INSTANCE_PROFILE) { @@ -252,7 +256,7 @@ func (s *serviceprovider) getEC2TagServiceName() error { } result, err := s.ec2API.DescribeTags(input) if err != nil { - continue + return err } for _, tag := range result.Tags { key := *tag.Key @@ -303,7 +307,7 @@ func (s *serviceprovider) getEC2TagFilters() ([]*ec2.Filter, error) { return tagFilters, nil } -func newServiceProvider(mode string, 
region string, ec2Info *EC2Info, metadataProvider ec2metadataprovider.MetadataProvider, providerType ec2ProviderType, ec2Credential *configaws.CredentialConfig, done chan struct{}) serviceProviderInterface { +func newServiceProvider(mode string, region string, ec2Info *EC2Info, metadataProvider ec2metadataprovider.MetadataProvider, providerType ec2ProviderType, ec2Credential *configaws.CredentialConfig, done chan struct{}, logger *zap.Logger) serviceProviderInterface { return &serviceprovider{ mode: mode, region: region, @@ -312,57 +316,8 @@ func newServiceProvider(mode string, region string, ec2Info *EC2Info, metadataPr ec2Provider: providerType, ec2Credential: ec2Credential, done: done, + logger: logger, logFiles: make(map[LogFileGlob]ServiceAttribute), logGroups: make(map[LogGroupName]ServiceAttribute), } } - -func refreshLoop(done chan struct{}, updateFunc func() error, oneTime bool) { - // Offset retry by 1 so we can start with 1 minute wait time - // instead of immediately retrying - retry := 1 - for { - err := updateFunc() - if err == nil && oneTime { - return - } - - waitDuration := calculateWaitTime(retry, err) - wait := time.NewTimer(waitDuration) - select { - case <-done: - log.Printf("D! serviceprovider: Shutting down now") - wait.Stop() - return - case <-wait.C: - } - - if retry > 1 { - log.Printf("D! serviceprovider: attribute retrieval retry count: %d", retry-1) - } - - if err != nil { - retry++ - log.Printf("D! serviceprovider: there was an error when retrieving service attribute. Reason: %s", err) - } else { - retry = 1 - } - - } -} - -// calculateWaitTime returns different time based on whether if -// a function call was returned with error. 
If returned with error, -// follow exponential backoff wait time, otherwise, refresh with jitter -func calculateWaitTime(retry int, err error) time.Duration { - var waitDuration time.Duration - if err == nil { - return time.Duration(rand.Intn(jitterMax-jitterMin)+jitterMin) * time.Second - } - if retry < len(ec2tagger.BackoffSleepArray) { - waitDuration = ec2tagger.BackoffSleepArray[retry] - } else { - waitDuration = ec2tagger.BackoffSleepArray[len(ec2tagger.BackoffSleepArray)-1] - } - return waitDuration -} diff --git a/extension/entitystore/serviceprovider_test.go b/extension/entitystore/serviceprovider_test.go index f63930f07f..70aa8da140 100644 --- a/extension/entitystore/serviceprovider_test.go +++ b/extension/entitystore/serviceprovider_test.go @@ -8,10 +8,12 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/stretchr/testify/assert" + "go.uber.org/zap" configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" @@ -20,6 +22,8 @@ import ( type mockServiceNameEC2Client struct { ec2iface.EC2API + throttleError bool + authError bool } // construct the return results for the mocked DescribeTags api @@ -30,6 +34,12 @@ var ( ) func (m *mockServiceNameEC2Client) DescribeTags(*ec2.DescribeTagsInput) (*ec2.DescribeTagsOutput, error) { + if m.throttleError { + return nil, awserr.New(RequestLimitExceeded, "throttle limit exceeded", nil) + } + if m.authError { + return nil, awserr.New("UnauthorizedOperation", "UnauthorizedOperation occurred", nil) + } testTags := ec2.DescribeTagsOutput{ NextToken: nil, Tags: []*ec2.TagDescription{&tagDesService}, @@ -64,6 +74,7 @@ func Test_serviceprovider_startServiceProvider(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { done := make(chan struct{}) + 
logger, _ := zap.NewDevelopment() s := serviceprovider{ metadataProvider: tt.args.metadataProvider, ec2Provider: func(s string, config *configaws.CredentialConfig) ec2iface.EC2API { @@ -71,6 +82,7 @@ func Test_serviceprovider_startServiceProvider(t *testing.T) { }, ec2API: tt.args.ec2Client, done: done, + logger: logger, } go s.startServiceProvider() time.Sleep(3 * time.Second) @@ -380,62 +392,3 @@ func Test_serviceprovider_getEC2TagServiceName(t *testing.T) { }) } } - -func Test_refreshLoop(t *testing.T) { - type fields struct { - metadataProvider ec2metadataprovider.MetadataProvider - ec2API ec2iface.EC2API - iamRole string - ec2TagServiceName string - refreshInterval time.Duration - oneTime bool - } - type expectedInfo struct { - iamRole string - ec2TagServiceName string - } - tests := []struct { - name string - fields fields - expectedInfo expectedInfo - }{ - { - name: "HappyPath_CorrectRefresh", - fields: fields{ - metadataProvider: &mockMetadataProvider{ - InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ - InstanceID: "i-123456789"}, - }, - ec2API: &mockServiceNameEC2Client{}, - iamRole: "original-role", - ec2TagServiceName: "original-tag-name", - refreshInterval: time.Millisecond, - }, - expectedInfo: expectedInfo{ - iamRole: "TestRole", - ec2TagServiceName: "test-service", - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - done := make(chan struct{}) - s := &serviceprovider{ - metadataProvider: tt.fields.metadataProvider, - ec2API: tt.fields.ec2API, - ec2Provider: func(s string, config *configaws.CredentialConfig) ec2iface.EC2API { - return tt.fields.ec2API - }, - iamRole: tt.fields.iamRole, - ec2TagServiceName: tt.fields.ec2TagServiceName, - done: done, - } - go refreshLoop(done, s.getEC2TagServiceName, tt.fields.oneTime) - go refreshLoop(done, s.getIAMRole, tt.fields.oneTime) - time.Sleep(time.Second) - close(done) - assert.Equal(t, tt.expectedInfo.iamRole, s.iamRole) - assert.Equal(t, 
tt.expectedInfo.ec2TagServiceName, s.ec2TagServiceName) - }) - } -} diff --git a/plugins/processors/ec2tagger/constants.go b/plugins/processors/ec2tagger/constants.go index b99054e863..32a6f960c2 100644 --- a/plugins/processors/ec2tagger/constants.go +++ b/plugins/processors/ec2tagger/constants.go @@ -70,5 +70,6 @@ const ( var ( // issue with newer versions of the sdk take longer when hop limit is 1 in eks defaultRefreshInterval = 180 * time.Second + ThrottleBackOffArray = []time.Duration{0, 1 * time.Minute, 3 * time.Minute} // backoff retry for ec2 describe instances API call. Assuming the throttle limit is 20 per second. 10 mins allow 12000 API calls. BackoffSleepArray = []time.Duration{0, 1 * time.Minute, 1 * time.Minute, 3 * time.Minute, 3 * time.Minute, 3 * time.Minute, 10 * time.Minute} // backoff retry for ec2 describe instances API call. Assuming the throttle limit is 20 per second. 10 mins allow 12000 API calls. ) From d8b5d973ffe61dc48b0feefb529b464dbba63f85 Mon Sep 17 00:00:00 2001 From: Lisa Guo Date: Tue, 1 Oct 2024 11:05:11 -0400 Subject: [PATCH 21/47] Upload container image in Compass Beta Release workflow (#797) --- .github/workflows/compass-beta-release.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/.github/workflows/compass-beta-release.yml b/.github/workflows/compass-beta-release.yml index 85599bd051..bc8d74df36 100644 --- a/.github/workflows/compass-beta-release.yml +++ b/.github/workflows/compass-beta-release.yml @@ -16,3 +16,14 @@ jobs: PackageBucketKey: "compass-beta-release" TerraformAWSAssumeRole: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} Bucket: "private-cloudwatch-agent-integration-test" + + BuildAndUploadContainer: + uses: ./.github/workflows/test-build-docker.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + ContainerRepositoryNameAndTag: "cwagent-compass-beta-release:latest" + BucketKey: "compass-beta-release" + PackageBucketKey: "compass-beta-release" \ No newline at end of file From 
10e4405cf0e00603736ffa2cdda53631c341b58b Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Wed, 2 Oct 2024 10:02:35 -0400 Subject: [PATCH 22/47] Add different processing for service and resource type entities (#826) --- .../entityattributes/entityattributes.go | 11 +- .../k8sattributescraper.go | 18 -- .../k8sattributescraper_test.go | 108 +---------- plugins/processors/awsentity/processor.go | 171 +++++++++++------- .../processors/awsentity/processor_test.go | 105 ++++++----- plugins/processors/awsentity/util.go | 12 ++ .../appsignals_and_eks_config.yaml | 7 +- .../appsignals_and_k8s_config.yaml | 11 +- .../appsignals_fallback_and_eks_config.yaml | 7 +- .../appsignals_over_fallback_config.yaml | 7 +- .../sampleConfig/base_appsignals_config.yaml | 2 - .../base_appsignals_fallback_config.yaml | 2 - .../sampleConfig/compass_linux_config.conf | 2 +- .../sampleConfig/prometheus_config_linux.yaml | 3 - .../prometheus_config_windows.yaml | 3 - translator/tocwconfig/tocwconfig_test.go | 2 +- .../pipeline/applicationsignals/translator.go | 6 +- .../applicationsignals/translator_test.go | 44 ++--- .../otel/pipeline/host/translator.go | 8 +- .../otel/pipeline/host/translator_test.go | 39 +++- .../otel/pipeline/prometheus/translator.go | 12 +- .../pipeline/prometheus/translator_test.go | 4 +- .../otel/processor/awsentity/translator.go | 26 +-- .../processor/awsentity/translator_test.go | 13 +- 24 files changed, 306 insertions(+), 317 deletions(-) create mode 100644 plugins/processors/awsentity/util.go diff --git a/plugins/processors/awsentity/internal/entityattributes/entityattributes.go b/plugins/processors/awsentity/internal/entityattributes/entityattributes.go index 49e73281c0..bfe39863b1 100644 --- a/plugins/processors/awsentity/internal/entityattributes/entityattributes.go +++ b/plugins/processors/awsentity/internal/entityattributes/entityattributes.go @@ -26,12 +26,9 @@ const ( AttributeEntityPlatformType = AWSEntityPrefix + 
"platform.type" AttributeEntityInstanceID = AWSEntityPrefix + "instance.id" AttributeEntityAutoScalingGroup = AWSEntityPrefix + "auto.scaling.group" -) -// Container Insights attributes used for scraping EKS related information -const ( - NodeName = "NodeName" - Namespace = "Namespace" - // PodName in Container Insights is the workload(Deployment, Daemonset, etc) name - PodName = "PodName" + // The following are possible platform values + AttributeEntityEC2Platform = "AWS::EC2" + AttributeEntityEKSPlatform = "AWS::EKS" + AttributeEntityK8sPlatform = "K8s" ) diff --git a/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper.go b/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper.go index 597e48fc5b..580f18e2d4 100644 --- a/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper.go +++ b/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper.go @@ -8,13 +8,11 @@ import ( semconv "go.opentelemetry.io/collector/semconv/v1.22.0" "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/prometheus" - "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/internal/entityattributes" ) var ( namespaceAllowlist = []string{ semconv.AttributeK8SNamespaceName, - entityattributes.Namespace, } workloadAllowlist = []string{ @@ -23,11 +21,9 @@ var ( semconv.AttributeK8SStatefulSetName, semconv.AttributeK8SReplicaSetName, semconv.AttributeK8SContainerName, - entityattributes.PodName, } nodeAllowlist = []string{ semconv.AttributeK8SNodeName, - entityattributes.NodeName, } ) @@ -51,7 +47,6 @@ func (e *K8sAttributeScraper) Scrape(rm pcommon.Resource) { e.scrapeNamespace(resourceAttrs) e.scrapeWorkload(resourceAttrs) e.scrapeNode(resourceAttrs) - e.decorateEntityAttributes(resourceAttrs) } func (e *K8sAttributeScraper) relabelPrometheus(p pcommon.Map) { @@ -100,21 +95,8 @@ func (e *K8sAttributeScraper) scrapeNode(p pcommon.Map) { } } -func (e *K8sAttributeScraper) 
decorateEntityAttributes(p pcommon.Map) { - addAttributeIfNonEmpty(p, entityattributes.AttributeEntityCluster, e.Cluster) - addAttributeIfNonEmpty(p, entityattributes.AttributeEntityNamespace, e.Namespace) - addAttributeIfNonEmpty(p, entityattributes.AttributeEntityWorkload, e.Workload) - addAttributeIfNonEmpty(p, entityattributes.AttributeEntityNode, e.Node) -} - func (e *K8sAttributeScraper) Reset() { *e = K8sAttributeScraper{ Cluster: e.Cluster, } } - -func addAttributeIfNonEmpty(p pcommon.Map, key string, value string) { - if value != "" { - p.PutStr(key, value) - } -} diff --git a/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper_test.go b/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper_test.go index 27e1e7e528..a1b37644bf 100644 --- a/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper_test.go +++ b/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper_test.go @@ -13,7 +13,6 @@ import ( semconv "go.opentelemetry.io/collector/semconv/v1.22.0" "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/prometheus" - "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/internal/entityattributes" ) func TestNewK8sAttributeScraper(t *testing.T) { @@ -27,113 +26,39 @@ func Test_k8sattributescraper_Scrape(t *testing.T) { name string clusterName string args pcommon.Resource - want pcommon.Map + want *K8sAttributeScraper }{ { name: "Empty", clusterName: "", args: pcommon.NewResource(), - want: pcommon.NewMap(), + want: &K8sAttributeScraper{}, }, { name: "ClusterOnly", clusterName: "test-cluster", args: pcommon.NewResource(), - want: getAttributeMap(map[string]any{ - entityattributes.AttributeEntityCluster: "test-cluster", - }), + want: &K8sAttributeScraper{ + Cluster: "test-cluster", + }, }, { name: "AllAppSignalAttributes", clusterName: "test-cluster", args: generateResourceMetrics(semconv.AttributeK8SNamespaceName, "test-namespace", 
semconv.AttributeK8SDeploymentName, "test-workload", semconv.AttributeK8SNodeName, "test-node"), - want: getAttributeMap(map[string]any{ - semconv.AttributeK8SNamespaceName: "test-namespace", - semconv.AttributeK8SDeploymentName: "test-workload", - semconv.AttributeK8SNodeName: "test-node", - entityattributes.AttributeEntityCluster: "test-cluster", - entityattributes.AttributeEntityNamespace: "test-namespace", - entityattributes.AttributeEntityWorkload: "test-workload", - entityattributes.AttributeEntityNode: "test-node", - }), - }, - { - name: "AllContainerInsightsAttributes", - clusterName: "test-cluster", - args: generateResourceMetrics(entityattributes.Namespace, "test-namespace", entityattributes.PodName, "test-workload", entityattributes.NodeName, "test-node"), - want: getAttributeMap(map[string]any{ - entityattributes.Namespace: "test-namespace", - entityattributes.PodName: "test-workload", - entityattributes.NodeName: "test-node", - entityattributes.AttributeEntityCluster: "test-cluster", - entityattributes.AttributeEntityNamespace: "test-namespace", - entityattributes.AttributeEntityWorkload: "test-workload", - entityattributes.AttributeEntityNode: "test-node", - }), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - e := NewK8sAttributeScraper(tt.clusterName) - e.Scrape(tt.args) - assert.Equal(t, tt.want.AsRaw(), tt.args.Attributes().AsRaw()) - }) - } -} - -func Test_k8sattributescraper_decorateEntityAttributes(t *testing.T) { - type fields struct { - Cluster string - Namespace string - Workload string - Node string - } - tests := []struct { - name string - fields fields - want pcommon.Map - }{ - { - name: "Empty", - fields: fields{}, - want: pcommon.NewMap(), - }, - { - name: "OneAttribute", - fields: fields{ - Cluster: "test-cluster", - }, - want: getAttributeMap(map[string]any{ - entityattributes.AttributeEntityCluster: "test-cluster", - }), - }, - { - name: "AllAttributes", - fields: fields{ + want: &K8sAttributeScraper{ 
Cluster: "test-cluster", Namespace: "test-namespace", Workload: "test-workload", Node: "test-node", }, - want: getAttributeMap(map[string]any{ - entityattributes.AttributeEntityCluster: "test-cluster", - entityattributes.AttributeEntityNamespace: "test-namespace", - entityattributes.AttributeEntityWorkload: "test-workload", - entityattributes.AttributeEntityNode: "test-node", - }), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - p := pcommon.NewMap() - e := &K8sAttributeScraper{ - Cluster: tt.fields.Cluster, - Namespace: tt.fields.Namespace, - Workload: tt.fields.Workload, - Node: tt.fields.Node, - } - e.decorateEntityAttributes(p) - assert.Equal(t, tt.want.AsRaw(), p.AsRaw()) + e := NewK8sAttributeScraper(tt.clusterName) + e.Scrape(tt.args) + assert.Equal(t, e, tt.want) }) } } @@ -207,11 +132,6 @@ func Test_k8sattributescraper_scrapeNamespace(t *testing.T) { args: getAttributeMap(map[string]any{semconv.AttributeK8SNamespaceName: "namespace-name"}), want: "namespace-name", }, - { - name: "ContainerInsightsNodeExists", - args: getAttributeMap(map[string]any{entityattributes.Namespace: "namespace-name"}), - want: "namespace-name", - }, { name: "NonmatchingNamespace", args: getAttributeMap(map[string]any{"namespace": "namespace-name"}), @@ -243,11 +163,6 @@ func Test_k8sattributescraper_scrapeNode(t *testing.T) { args: getAttributeMap(map[string]any{semconv.AttributeK8SNodeName: "node-name"}), want: "node-name", }, - { - name: "ContainerInsightNodeExists", - args: getAttributeMap(map[string]any{entityattributes.NodeName: "node-name"}), - want: "node-name", - }, { name: "NonmatchingNode", args: getAttributeMap(map[string]any{"node": "node-name"}), @@ -299,11 +214,6 @@ func Test_k8sattributescraper_scrapeWorkload(t *testing.T) { args: getAttributeMap(map[string]any{semconv.AttributeK8SContainerName: "test-container"}), want: "test-container", }, - { - name: "ContainerInsightPodNameWorkload", - args: 
getAttributeMap(map[string]any{entityattributes.PodName: "test-workload"}), - want: "test-workload", - }, { name: "MultipleWorkloads", args: getAttributeMap(map[string]any{ diff --git a/plugins/processors/awsentity/processor.go b/plugins/processors/awsentity/processor.go index bd95d4b04a..5484f54297 100644 --- a/plugins/processors/awsentity/processor.go +++ b/plugins/processors/awsentity/processor.go @@ -7,6 +7,7 @@ import ( "context" "strings" + "github.com/go-playground/validator/v10" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" semconv "go.opentelemetry.io/collector/semconv/v1.22.0" @@ -32,6 +33,24 @@ type scraper interface { Reset() } +type EC2ServiceAttributes struct { + InstanceId string `validate:"required"` + AutoScalingGroup string `validate:"omitempty"` + ServiceNameSource string `validate:"omitempty"` +} + +type K8sServiceAttributes struct { + Cluster string `validate:"required"` + Namespace string `validate:"required"` + Workload string `validate:"required"` + Node string `validate:"required"` + InstanceId string `validate:"omitempty"` + ServiceNameSource string `validate:"omitempty"` +} + +// use a single instance of Validate, it caches struct info +var validate = validator.New(validator.WithRequiredStructEnabled()) + // exposed as a variable for unit testing var addToEntityStore = func(logGroupName entitystore.LogGroupName, serviceName string, environmentName string) { es := entitystore.GetEntityStore() @@ -49,15 +68,6 @@ var addPodToServiceEnvironmentMap = func(podName string, serviceName string, env es.AddPodServiceEnvironmentMapping(podName, serviceName, environmentName, serviceNameSource) } -var getMetricAttributesFromEntityStore = func() map[string]*string { - es := entitystore.GetEntityStore() - if es == nil { - return map[string]*string{} - } - - return es.GetServiceMetricAttributesMap() -} - var getEC2InfoFromEntityStore = func() entitystore.EC2Info { es := entitystore.GetEntityStore() if es == 
nil { @@ -93,81 +103,60 @@ func newAwsEntityProcessor(config *Config, logger *zap.Logger) *awsEntityProcess } func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { - var entityServiceNameSource string // Get the following metric attributes from the EntityStore: PlatformType, EC2.InstanceId, EC2.AutoScalingGroup - metricAttributes := getMetricAttributesFromEntityStore() - - var platformType, instanceID, autoScalingGroup string - if metricAttributes[entitystore.PlatformType] != nil { - platformType = *metricAttributes[entitystore.PlatformType] - } - if metricAttributes[entitystore.InstanceIDKey] != nil { - instanceID = *metricAttributes[entitystore.InstanceIDKey] - } - if metricAttributes[entitystore.ASGKey] != nil { - autoScalingGroup = *metricAttributes[entitystore.ASGKey] - } rm := md.ResourceMetrics() for i := 0; i < rm.Len(); i++ { - if p.config.KubernetesMode != "" { - p.k8sscraper.Scrape(rm.At(i).Resource()) - } + var logGroupNames, serviceName, environmentName string + var entityServiceNameSource, entityPlatformType string + var ec2Info entitystore.EC2Info resourceAttrs := rm.At(i).Resource().Attributes() switch p.config.EntityType { case entityattributes.Resource: - ec2Info := getEC2InfoFromEntityStore() - if p.config.Platform == config.ModeEC2 && ec2Info.InstanceID != EMPTY { - resourceAttrs.PutStr(entityattributes.AttributeEntityType, entityattributes.AttributeEntityAWSResource) - resourceAttrs.PutStr(entityattributes.AttributeEntityResourceType, entityattributes.AttributeEntityEC2InstanceResource) - resourceAttrs.PutStr(entityattributes.AttributeEntityIdentifier, ec2Info.InstanceID) + if p.config.Platform == config.ModeEC2 { + ec2Info := getEC2InfoFromEntityStore() + if ec2Info.InstanceID != EMPTY { + resourceAttrs.PutStr(entityattributes.AttributeEntityType, entityattributes.AttributeEntityAWSResource) + resourceAttrs.PutStr(entityattributes.AttributeEntityResourceType, 
entityattributes.AttributeEntityEC2InstanceResource) + resourceAttrs.PutStr(entityattributes.AttributeEntityIdentifier, ec2Info.InstanceID) + } } case entityattributes.Service: - logGroupNames, _ := resourceAttrs.Get(attributeAwsLogGroupNames) - serviceName, _ := resourceAttrs.Get(attributeServiceName) - environmentName, _ := resourceAttrs.Get(attributeDeploymentEnvironment) + if logGroupNamesAttr, ok := resourceAttrs.Get(attributeAwsLogGroupNames); ok { + logGroupNames = logGroupNamesAttr.Str() + } + if serviceNameAttr, ok := resourceAttrs.Get(attributeServiceName); ok { + serviceName = serviceNameAttr.Str() + } + if environmentNameAttr, ok := resourceAttrs.Get(attributeDeploymentEnvironment); ok { + environmentName = environmentNameAttr.Str() + } if serviceNameSource, sourceExists := resourceAttrs.Get(entityattributes.AttributeEntityServiceNameSource); sourceExists { entityServiceNameSource = serviceNameSource.Str() } entityServiceName := getServiceAttributes(resourceAttrs) - entityEnvironmentName := environmentName.Str() + entityEnvironmentName := environmentName if (entityServiceName == EMPTY || entityEnvironmentName == EMPTY) && p.config.ScrapeDatapointAttribute { entityServiceName, entityEnvironmentName = p.scrapeServiceAttribute(rm.At(i).ScopeMetrics()) // If the entityServiceNameSource is empty here, that means it was not configured via instrumentation // If entityServiceName is a datapoint attribute, that means the service name is coming from the UserConfiguration source if entityServiceNameSource == EMPTY && entityServiceName != EMPTY { entityServiceNameSource = attributeServiceNameSourceUserConfig - resourceAttrs.PutStr(entityattributes.AttributeEntityServiceNameSource, attributeServiceNameSourceUserConfig) } } - if entityServiceName != EMPTY { - resourceAttrs.PutStr(entityattributes.AttributeEntityServiceName, entityServiceName) - } - if entityEnvironmentName != EMPTY { - resourceAttrs.PutStr(entityattributes.AttributeEntityDeploymentEnvironment, 
entityEnvironmentName) - } - if p.config.Platform == config.ModeEC2 { - //If entityServiceNameSource is empty, it was not configured via the config. Get the source in descending priority - // 1. Incoming telemetry attributes - // 2. CWA config - // 3. instance tags - The tags attached to the EC2 instance. Only scrape for tag with the following key: service, application, app - // 4. IAM Role - The IAM role name retrieved through IMDS(Instance Metadata Service) - if entityServiceNameSource == EMPTY { - entityServiceName, entityServiceNameSource = getServiceNameSource() - resourceAttrs.PutStr(entityattributes.AttributeEntityServiceNameSource, entityServiceNameSource) - } - if platformType != EMPTY { - resourceAttrs.PutStr(entityattributes.AttributeEntityPlatformType, platformType) - } - if instanceID != EMPTY { - resourceAttrs.PutStr(entityattributes.AttributeEntityInstanceID, instanceID) + if p.config.KubernetesMode != "" { + p.k8sscraper.Scrape(rm.At(i).Resource()) + if p.config.Platform == config.ModeEC2 { + ec2Info = getEC2InfoFromEntityStore() } - if autoScalingGroup != EMPTY { - resourceAttrs.PutStr(entityattributes.AttributeEntityAutoScalingGroup, autoScalingGroup) + + if p.config.KubernetesMode == config.ModeEKS { + entityPlatformType = entityattributes.AttributeEntityEKSPlatform + } else { + entityPlatformType = entityattributes.AttributeEntityK8sPlatform } - } - if p.config.KubernetesMode != "" { + fallbackEnvironment := entityEnvironmentName podInfo, ok := p.k8sscraper.(*k8sattributescraper.K8sAttributeScraper) if fallbackEnvironment == EMPTY && p.config.KubernetesMode == config.ModeEKS && ok && podInfo.Cluster != EMPTY && podInfo.Namespace != EMPTY { @@ -175,24 +164,78 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric } else if fallbackEnvironment == EMPTY && (p.config.KubernetesMode == config.ModeK8sEC2 || p.config.KubernetesMode == config.ModeK8sOnPrem) && ok && podInfo.Cluster != EMPTY && podInfo.Namespace != EMPTY { 
fallbackEnvironment = "k8s:" + p.config.ClusterName + "/" + podInfo.Namespace } + + // Add service information for a pod to the pod association map + // so that agent can host this information in a server fullPodName := scrapeK8sPodName(resourceAttrs) if fullPodName != EMPTY && entityServiceName != EMPTY && entityServiceNameSource != EMPTY { addPodToServiceEnvironmentMap(fullPodName, entityServiceName, fallbackEnvironment, entityServiceNameSource) } else if fullPodName != EMPTY && entityServiceName != EMPTY && entityServiceNameSource == EMPTY { addPodToServiceEnvironmentMap(fullPodName, entityServiceName, fallbackEnvironment, entitystore.ServiceNameSourceUnknown) } + eksAttributes := K8sServiceAttributes{ + Cluster: podInfo.Cluster, + Namespace: podInfo.Namespace, + Workload: podInfo.Workload, + Node: podInfo.Node, + InstanceId: ec2Info.InstanceID, + ServiceNameSource: entityServiceNameSource, + } + AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityType, entityattributes.Service) + AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityServiceName, entityServiceName) + AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityDeploymentEnvironment, entityEnvironmentName) + + if err := validate.Struct(eksAttributes); err == nil { + resourceAttrs.PutStr(entityattributes.AttributeEntityPlatformType, entityPlatformType) + resourceAttrs.PutStr(entityattributes.AttributeEntityCluster, eksAttributes.Cluster) + resourceAttrs.PutStr(entityattributes.AttributeEntityNamespace, eksAttributes.Namespace) + resourceAttrs.PutStr(entityattributes.AttributeEntityWorkload, eksAttributes.Workload) + resourceAttrs.PutStr(entityattributes.AttributeEntityNode, eksAttributes.Node) + AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityInstanceID, ec2Info.InstanceID) + AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityServiceNameSource, entityServiceNameSource) + } + p.k8sscraper.Reset() + } else if 
p.config.Platform == config.ModeEC2 { + //If entityServiceNameSource is empty, it was not configured via the config. Get the source in descending priority + // 1. Incoming telemetry attributes + // 2. CWA config + // 3. instance tags - The tags attached to the EC2 instance. Only scrape for tag with the following key: service, application, app + // 4. IAM Role - The IAM role name retrieved through IMDS(Instance Metadata Service) + if entityServiceName == EMPTY && entityServiceNameSource == EMPTY { + entityServiceName, entityServiceNameSource = getServiceNameSource() + } else if entityServiceName != EMPTY && entityServiceNameSource == EMPTY { + entityServiceNameSource = entitystore.ServiceNameSourceUnknown + } + + entityPlatformType = entityattributes.AttributeEntityEC2Platform + ec2Info = getEC2InfoFromEntityStore() + + AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityType, entityattributes.Service) + AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityServiceName, entityServiceName) + AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityDeploymentEnvironment, entityEnvironmentName) + + ec2Attributes := EC2ServiceAttributes{ + InstanceId: ec2Info.InstanceID, + AutoScalingGroup: ec2Info.AutoScalingGroup, + ServiceNameSource: entityServiceNameSource, + } + if err := validate.Struct(ec2Attributes); err == nil { + resourceAttrs.PutStr(entityattributes.AttributeEntityPlatformType, entityPlatformType) + AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityInstanceID, ec2Attributes.InstanceId) + AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityAutoScalingGroup, ec2Attributes.AutoScalingGroup) + AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityServiceNameSource, ec2Attributes.ServiceNameSource) + } } - p.k8sscraper.Reset() - if logGroupNames.Str() == EMPTY || (serviceName.Str() == EMPTY && environmentName.Str() == EMPTY) { + if logGroupNames == EMPTY || 
(serviceName == EMPTY && environmentName == EMPTY) { continue } - logGroupNamesSlice := strings.Split(logGroupNames.Str(), "&") + logGroupNamesSlice := strings.Split(logGroupNames, "&") for _, logGroupName := range logGroupNamesSlice { if logGroupName == EMPTY { continue } - addToEntityStore(entitystore.LogGroupName(logGroupName), serviceName.Str(), environmentName.Str()) + addToEntityStore(entitystore.LogGroupName(logGroupName), serviceName, environmentName) } } diff --git a/plugins/processors/awsentity/processor_test.go b/plugins/processors/awsentity/processor_test.go index 5050caa655..5ccabaf6aa 100644 --- a/plugins/processors/awsentity/processor_test.go +++ b/plugins/processors/awsentity/processor_test.go @@ -70,10 +70,11 @@ func newMockGetServiceNameAndSource(service, source string) func() (string, stri } } -func newMockGetEC2InfoFromEntityStore(instance string) func() entitystore.EC2Info { +func newMockGetEC2InfoFromEntityStore(instance string, asg string) func() entitystore.EC2Info { return func() entitystore.EC2Info { return entitystore.EC2Info{ - InstanceID: instance, + InstanceID: instance, + AutoScalingGroup: asg, } } } @@ -264,11 +265,11 @@ func TestProcessMetricsResourceAttributeScraping(t *testing.T) { logger, _ := zap.NewDevelopment() ctx := context.Background() tests := []struct { - name string - metrics pmetric.Metrics - mockServiceNameSource func() (string, string) - mockGetMetricAttributesFromEntityStore func() map[string]*string - want map[string]any + name string + metrics pmetric.Metrics + mockServiceNameSource func() (string, string) + mockGetEC2InfoFromEntityStore func() entitystore.EC2Info + want map[string]any }{ { name: "EmptyMetrics", @@ -277,39 +278,50 @@ func TestProcessMetricsResourceAttributeScraping(t *testing.T) { }, //NOTE 2 SELF: These tests assume that we are on the EC2 platform, so make sure to mock the ServiceNameSource function { - name: "ResourceAttributeServiceNameOnly", - metrics: generateMetrics(attributeServiceName, 
"test-service"), - mockServiceNameSource: newMockGetServiceNameAndSource("test-service-name", "Instrumentation"), + name: "ResourceAttributeServiceNameOnly", + metrics: generateMetrics(attributeServiceName, "test-service"), + mockServiceNameSource: newMockGetServiceNameAndSource("test-service-name", "Instrumentation"), + mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", ""), want: map[string]any{ + entityattributes.AttributeEntityType: "Service", entityattributes.AttributeEntityServiceName: "test-service", + entityattributes.AttributeEntityPlatformType: "AWS::EC2", + entityattributes.AttributeEntityInstanceID: "i-123456789", + entityattributes.AttributeEntityServiceNameSource: "Unknown", attributeServiceName: "test-service", - entityattributes.AttributeEntityServiceNameSource: "Instrumentation", }, }, { - name: "ResourceAttributeEnvironmentOnly", - metrics: generateMetrics(attributeDeploymentEnvironment, "test-environment"), - mockServiceNameSource: newMockGetServiceNameAndSource("unknown_service", "Unknown"), + name: "ResourceAttributeEnvironmentOnly", + metrics: generateMetrics(attributeDeploymentEnvironment, "test-environment"), + mockServiceNameSource: newMockGetServiceNameAndSource("unknown_service", "Unknown"), + mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", ""), want: map[string]any{ + entityattributes.AttributeEntityType: "Service", + entityattributes.AttributeEntityServiceName: "unknown_service", entityattributes.AttributeEntityDeploymentEnvironment: "test-environment", - attributeDeploymentEnvironment: "test-environment", + entityattributes.AttributeEntityPlatformType: "AWS::EC2", + entityattributes.AttributeEntityInstanceID: "i-123456789", entityattributes.AttributeEntityServiceNameSource: "Unknown", + + attributeDeploymentEnvironment: "test-environment", }, }, { - name: "ResourceAttributeServiceNameAndEnvironment", - metrics: generateMetrics(attributeServiceName, "test-service", 
attributeDeploymentEnvironment, "test-environment"), - mockServiceNameSource: newMockGetServiceNameAndSource("test-service-name", "Instrumentation"), - mockGetMetricAttributesFromEntityStore: newMockGetMetricAttributesFromEntityStore(), + name: "ResourceAttributeServiceNameAndEnvironment", + metrics: generateMetrics(attributeServiceName, "test-service", attributeDeploymentEnvironment, "test-environment"), + mockServiceNameSource: newMockGetServiceNameAndSource("test-service-name", "Instrumentation"), + mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", "test-auto-scaling"), want: map[string]any{ + entityattributes.AttributeEntityType: "Service", entityattributes.AttributeEntityServiceName: "test-service", entityattributes.AttributeEntityDeploymentEnvironment: "test-environment", attributeServiceName: "test-service", attributeDeploymentEnvironment: "test-environment", - entityattributes.AttributeEntityServiceNameSource: "Instrumentation", + entityattributes.AttributeEntityServiceNameSource: "Unknown", entityattributes.AttributeEntityPlatformType: "AWS::EC2", entityattributes.AttributeEntityInstanceID: "i-123456789", - entityattributes.AttributeEntityAutoScalingGroup: "auto-scaling", + entityattributes.AttributeEntityAutoScalingGroup: "test-auto-scaling", }, }, } @@ -318,12 +330,11 @@ func TestProcessMetricsResourceAttributeScraping(t *testing.T) { t.Run(tt.name, func(t *testing.T) { // Make copy of original functions to use as resets later to prevent failing test when tests are ran in bulk resetServiceNameSource := getServiceNameSource - resetGetMetricAttributesFromEntityStore := getMetricAttributesFromEntityStore if tt.mockServiceNameSource != nil { getServiceNameSource = tt.mockServiceNameSource } - if tt.mockGetMetricAttributesFromEntityStore != nil { - getMetricAttributesFromEntityStore = tt.mockGetMetricAttributesFromEntityStore + if tt.mockGetEC2InfoFromEntityStore != nil { + getEC2InfoFromEntityStore = 
tt.mockGetEC2InfoFromEntityStore } p := newAwsEntityProcessor(&Config{EntityType: attributeService}, logger) p.config.Platform = config.ModeEC2 @@ -334,7 +345,6 @@ func TestProcessMetricsResourceAttributeScraping(t *testing.T) { assert.Equal(t, tt.want, rm.At(0).Resource().Attributes().AsRaw()) } getServiceNameSource = resetServiceNameSource - getMetricAttributesFromEntityStore = resetGetMetricAttributesFromEntityStore }) } } @@ -347,6 +357,7 @@ func TestProcessMetricsResourceEntityProcessing(t *testing.T) { metrics pmetric.Metrics want map[string]any instance string + asg string }{ { name: "EmptyMetrics", @@ -373,7 +384,7 @@ func TestProcessMetricsResourceEntityProcessing(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - getEC2InfoFromEntityStore = newMockGetEC2InfoFromEntityStore(tt.instance) + getEC2InfoFromEntityStore = newMockGetEC2InfoFromEntityStore(tt.instance, tt.asg) p := newAwsEntityProcessor(&Config{EntityType: entityattributes.Resource}, logger) p.config.Platform = config.ModeEC2 _, err := p.processMetrics(ctx, tt.metrics) @@ -390,11 +401,11 @@ func TestProcessMetricsDatapointAttributeScraping(t *testing.T) { logger, _ := zap.NewDevelopment() ctx := context.Background() tests := []struct { - name string - metrics pmetric.Metrics - mockServiceNameAndSource func() (string, string) - mockGetMetricAttributesFromEntityStore func() map[string]*string - want map[string]any + name string + metrics pmetric.Metrics + mockServiceNameAndSource func() (string, string) + mockGetEC2InfoFromEntityStore func() entitystore.EC2Info + want map[string]any }{ { name: "EmptyMetrics", @@ -402,10 +413,11 @@ func TestProcessMetricsDatapointAttributeScraping(t *testing.T) { want: map[string]any{}, }, { - name: "DatapointAttributeServiceNameOnly", - metrics: generateDatapointMetrics(attributeServiceName, "test-service"), - mockGetMetricAttributesFromEntityStore: newMockGetMetricAttributesFromEntityStore(), + name: 
"DatapointAttributeServiceNameOnly", + metrics: generateDatapointMetrics(attributeServiceName, "test-service"), + mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", "auto-scaling"), want: map[string]any{ + entityattributes.AttributeEntityType: "Service", entityattributes.AttributeEntityServiceName: "test-service", entityattributes.AttributeEntityServiceNameSource: "UserConfiguration", entityattributes.AttributeEntityPlatformType: "AWS::EC2", @@ -414,20 +426,29 @@ func TestProcessMetricsDatapointAttributeScraping(t *testing.T) { }, }, { - name: "DatapointAttributeEnvironmentOnly", - metrics: generateDatapointMetrics(attributeDeploymentEnvironment, "test-environment"), - mockServiceNameAndSource: newMockGetServiceNameAndSource("test-service-name", "ClientIamRole"), + name: "DatapointAttributeEnvironmentOnly", + metrics: generateDatapointMetrics(attributeDeploymentEnvironment, "test-environment"), + mockServiceNameAndSource: newMockGetServiceNameAndSource("test-service-name", "ClientIamRole"), + mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", ""), want: map[string]any{ + entityattributes.AttributeEntityType: "Service", + entityattributes.AttributeEntityServiceName: "test-service-name", entityattributes.AttributeEntityDeploymentEnvironment: "test-environment", + entityattributes.AttributeEntityPlatformType: "AWS::EC2", + entityattributes.AttributeEntityInstanceID: "i-123456789", entityattributes.AttributeEntityServiceNameSource: "ClientIamRole", }, }, { - name: "DatapointAttributeServiceNameAndEnvironment", - metrics: generateDatapointMetrics(attributeServiceName, "test-service", attributeDeploymentEnvironment, "test-environment"), + name: "DatapointAttributeServiceNameAndEnvironment", + metrics: generateDatapointMetrics(attributeServiceName, "test-service", attributeDeploymentEnvironment, "test-environment"), + mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", ""), want: 
map[string]any{ + entityattributes.AttributeEntityType: "Service", entityattributes.AttributeEntityServiceName: "test-service", entityattributes.AttributeEntityDeploymentEnvironment: "test-environment", + entityattributes.AttributeEntityPlatformType: "AWS::EC2", + entityattributes.AttributeEntityInstanceID: "i-123456789", entityattributes.AttributeEntityServiceNameSource: "UserConfiguration", }, }, @@ -437,12 +458,11 @@ func TestProcessMetricsDatapointAttributeScraping(t *testing.T) { t.Run(tt.name, func(t *testing.T) { // Make copy of original functions to use as resets later to prevent failing test when tests are ran in bulk resetServiceNameSource := getServiceNameSource - resetGetMetricAttributesFromEntityStore := getMetricAttributesFromEntityStore if tt.mockServiceNameAndSource != nil { getServiceNameSource = tt.mockServiceNameAndSource } - if tt.mockGetMetricAttributesFromEntityStore != nil { - getMetricAttributesFromEntityStore = tt.mockGetMetricAttributesFromEntityStore + if tt.mockGetEC2InfoFromEntityStore != nil { + getEC2InfoFromEntityStore = tt.mockGetEC2InfoFromEntityStore } p := newAwsEntityProcessor(&Config{ScrapeDatapointAttribute: true, EntityType: attributeService}, logger) p.config.Platform = config.ModeEC2 @@ -453,7 +473,6 @@ func TestProcessMetricsDatapointAttributeScraping(t *testing.T) { assert.Equal(t, tt.want, rm.At(0).Resource().Attributes().AsRaw()) } getServiceNameSource = resetServiceNameSource - getMetricAttributesFromEntityStore = resetGetMetricAttributesFromEntityStore }) } } diff --git a/plugins/processors/awsentity/util.go b/plugins/processors/awsentity/util.go new file mode 100644 index 0000000000..804a97c14c --- /dev/null +++ b/plugins/processors/awsentity/util.go @@ -0,0 +1,12 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package awsentity + +import "go.opentelemetry.io/collector/pdata/pcommon" + +func AddAttributeIfNonEmpty(p pcommon.Map, key string, value string) { + if value != "" { + p.PutStr(key, value) + } +} diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml index 8ebde3435c..2787da64c5 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml @@ -302,10 +302,11 @@ processors: resolvers: - name: TestCluster platform: eks - awsentity: + awsentity/service: cluster_name: TestCluster + entity_type: Service kubernetes_mode: EKS - platform: EKS + platform: ec2 batch/containerinsights: metadata_cardinality_limit: 1000 send_batch_max_size: 0 @@ -678,7 +679,7 @@ service: exporters: - awsemf/application_signals processors: - - awsentity + - awsentity/service - resourcedetection - awsapplicationsignals receivers: diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml index 71aa5cde75..b0e3ec6463 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml @@ -302,10 +302,11 @@ processors: resolvers: - name: TestCluster platform: k8s - awsentity: - cluster_name: TestCluster - kubernetes_mode: K8sEC2 - platform: K8sEC2 + awsentity/service: + cluster_name: TestCluster + entity_type: Service + kubernetes_mode: K8sEC2 + platform: ec2 batch/containerinsights: metadata_cardinality_limit: 1000 send_batch_max_size: 0 @@ -658,7 +659,7 @@ service: exporters: - awsemf/application_signals processors: - - awsentity + - awsentity/service - resourcedetection - awsapplicationsignals receivers: diff --git a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml 
b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml index dba8a125b2..8ce428696d 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml @@ -302,10 +302,11 @@ processors: resolvers: - name: TestCluster platform: eks - awsentity: + awsentity/service: cluster_name: TestCluster + entity_type: Service kubernetes_mode: EKS - platform: EKS + platform: ec2 batch/containerinsights: metadata_cardinality_limit: 1000 send_batch_max_size: 0 @@ -678,7 +679,7 @@ service: exporters: - awsemf/application_signals processors: - - awsentity + - awsentity/service - resourcedetection - awsapplicationsignals receivers: diff --git a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml index 32ca642c27..36b4f53852 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml @@ -302,10 +302,11 @@ processors: resolvers: - name: TestCluster platform: eks - awsentity: + awsentity/service: cluster_name: TestCluster + entity_type: Service kubernetes_mode: EKS - platform: EKS + platform: ec2 batch/containerinsights: metadata_cardinality_limit: 1000 send_batch_max_size: 0 @@ -678,7 +679,7 @@ service: exporters: - awsemf/application_signals processors: - - awsentity + - awsentity/service - resourcedetection - awsapplicationsignals receivers: diff --git a/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml b/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml index ee33930115..0dce7c32ba 100644 --- a/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml +++ b/translator/tocwconfig/sampleConfig/base_appsignals_config.yaml @@ -162,7 +162,6 @@ extensions: region: us-east-1 shared_credential_file: fake-path processors: - awsentity: {} 
awsapplicationsignals: resolvers: - name: "" @@ -486,7 +485,6 @@ service: - debug/application_signals - awsemf/application_signals processors: - - awsentity - resourcedetection - awsapplicationsignals receivers: diff --git a/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml b/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml index 636aa701a2..9e4eca0b38 100644 --- a/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml +++ b/translator/tocwconfig/sampleConfig/base_appsignals_fallback_config.yaml @@ -162,7 +162,6 @@ processors: resolvers: - name: "" platform: generic - awsentity: {} resourcedetection: aks: resource_attributes: @@ -481,7 +480,6 @@ service: exporters: - awsemf/application_signals processors: - - awsentity - resourcedetection - awsapplicationsignals receivers: diff --git a/translator/tocwconfig/sampleConfig/compass_linux_config.conf b/translator/tocwconfig/sampleConfig/compass_linux_config.conf index f9c82ac5f3..c21200ebf6 100644 --- a/translator/tocwconfig/sampleConfig/compass_linux_config.conf +++ b/translator/tocwconfig/sampleConfig/compass_linux_config.conf @@ -3,7 +3,7 @@ debug = true flush_interval = "1s" flush_jitter = "0s" - hostname = "host_name_from_env" + hostname = "" interval = "10s" logfile = "/tmp/fake/log/hotdog.log" logtarget = "lumberjack" diff --git a/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml b/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml index e83a7ccc20..3b9747ba99 100644 --- a/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml @@ -81,8 +81,6 @@ extensions: mode: ec2 region: us-east-1 processors: - awsentity: - platform: ec2 batch/prometheus: metadata_cardinality_limit: 1000 send_batch_max_size: 0 @@ -391,7 +389,6 @@ service: processors: - batch/prometheus - resourcedetection - - awsentity receivers: - telegraf_prometheus telemetry: diff --git 
a/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml b/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml index 9879b1355d..cb615612ed 100644 --- a/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml @@ -63,8 +63,6 @@ extensions: mode: ec2 region: us-east-1 processors: - awsentity: - platform: ec2 batch/prometheus: metadata_cardinality_limit: 1000 send_batch_max_size: 0 @@ -373,7 +371,6 @@ service: processors: - batch/prometheus - resourcedetection - - awsentity receivers: - telegraf_prometheus telemetry: diff --git a/translator/tocwconfig/tocwconfig_test.go b/translator/tocwconfig/tocwconfig_test.go index b8d8eb0b92..a19e564d13 100644 --- a/translator/tocwconfig/tocwconfig_test.go +++ b/translator/tocwconfig/tocwconfig_test.go @@ -156,7 +156,7 @@ func TestAppSignalsAndNativeKubernetesConfig(t *testing.T) { func TestCompassConfig(t *testing.T) { resetContext(t) - context.CurrentContext().SetRunInContainer(true) + //context.CurrentContext().SetRunInContainer(true) context.CurrentContext().SetMode(config.ModeEC2) t.Setenv(config.HOST_NAME, "host_name_from_env") diff --git a/translator/translate/otel/pipeline/applicationsignals/translator.go b/translator/translate/otel/pipeline/applicationsignals/translator.go index 7eedb584f5..682b18fb66 100644 --- a/translator/translate/otel/pipeline/applicationsignals/translator.go +++ b/translator/translate/otel/pipeline/applicationsignals/translator.go @@ -9,6 +9,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/exporter/awsemf" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/exporter/awsxray" @@ -53,8 +54,9 @@ func (t *translator) Translate(conf 
*confmap.Conf) (*common.ComponentTranslators Extensions: common.NewTranslatorMap[component.Config](), } - if t.dataType == component.DataTypeMetrics { - translators.Processors.Set(awsentity.NewTranslator()) + mode := context.CurrentContext().KubernetesMode() + if t.dataType == component.DataTypeMetrics && mode != "" { + translators.Processors.Set(awsentity.NewTranslatorWithEntityType(awsentity.Service)) } translators.Processors.Set(resourcedetection.NewTranslator(resourcedetection.WithDataType(t.dataType))) translators.Processors.Set(awsapplicationsignals.NewTranslator(awsapplicationsignals.WithDataType(t.dataType))) diff --git a/translator/translate/otel/pipeline/applicationsignals/translator_test.go b/translator/translate/otel/pipeline/applicationsignals/translator_test.go index 913a61ff21..1abd3e0bde 100644 --- a/translator/translate/otel/pipeline/applicationsignals/translator_test.go +++ b/translator/translate/otel/pipeline/applicationsignals/translator_test.go @@ -105,11 +105,10 @@ func TestTranslatorMetricsForKubernetes(t *testing.T) { tt := NewTranslator(component.DataTypeMetrics) assert.EqualValues(t, "metrics/application_signals", tt.ID().String()) testCases := map[string]struct { - input map[string]interface{} - want *want - wantErr error - detector func() (eksdetector.Detector, error) - isEKSCache func() eksdetector.IsEKSCache + input map[string]interface{} + want *want + wantErr error + kubernetesMode string }{ "WithoutMetricsCollectedKey": { input: map[string]interface{}{}, @@ -125,12 +124,11 @@ func TestTranslatorMetricsForKubernetes(t *testing.T) { }, want: &want{ receivers: []string{"otlp/application_signals"}, - processors: []string{"awsentity", "resourcedetection", "awsapplicationsignals"}, + processors: []string{"awsentity/service", "resourcedetection", "awsapplicationsignals"}, exporters: []string{"awsemf/application_signals"}, extensions: []string{"agenthealth/logs"}, }, - detector: eksdetector.TestEKSDetector, - isEKSCache: 
eksdetector.TestIsEKSCacheEKS, + kubernetesMode: config.ModeEKS, }, "WithAppSignalsAndLoggingEnabled": { input: map[string]interface{}{ @@ -145,12 +143,11 @@ func TestTranslatorMetricsForKubernetes(t *testing.T) { }, want: &want{ receivers: []string{"otlp/application_signals"}, - processors: []string{"awsentity", "resourcedetection", "awsapplicationsignals"}, + processors: []string{"awsentity/service", "resourcedetection", "awsapplicationsignals"}, exporters: []string{"debug/application_signals", "awsemf/application_signals"}, extensions: []string{"agenthealth/logs"}, }, - detector: eksdetector.TestEKSDetector, - isEKSCache: eksdetector.TestIsEKSCacheEKS, + kubernetesMode: config.ModeEKS, }, "WithAppSignalsEnabledK8s": { input: map[string]interface{}{ @@ -162,19 +159,17 @@ func TestTranslatorMetricsForKubernetes(t *testing.T) { }, want: &want{ receivers: []string{"otlp/application_signals"}, - processors: []string{"awsentity", "resourcedetection", "awsapplicationsignals"}, + processors: []string{"awsentity/service", "resourcedetection", "awsapplicationsignals"}, exporters: []string{"awsemf/application_signals"}, extensions: []string{"agenthealth/logs"}, }, - detector: eksdetector.TestK8sDetector, - isEKSCache: eksdetector.TestIsEKSCacheK8s, + kubernetesMode: config.ModeK8sEC2, }, } for name, testCase := range testCases { t.Run(name, func(t *testing.T) { t.Setenv(common.KubernetesEnvVar, "TEST") - eksdetector.NewDetector = testCase.detector - eksdetector.IsEKS = testCase.isEKSCache + context.CurrentContext().SetKubernetesMode(testCase.kubernetesMode) conf := confmap.NewFromStringMap(testCase.input) got, err := tt.Translate(conf) assert.Equal(t, testCase.wantErr, err) @@ -200,11 +195,9 @@ func TestTranslatorMetricsForEC2(t *testing.T) { tt := NewTranslator(component.DataTypeMetrics) assert.EqualValues(t, "metrics/application_signals", tt.ID().String()) testCases := map[string]struct { - input map[string]interface{} - want *want - wantErr error - detector func() 
(eksdetector.Detector, error) - isEKSCache func() eksdetector.IsEKSCache + input map[string]interface{} + want *want + wantErr error }{ "WithoutMetricsCollectedKey": { input: map[string]interface{}{}, @@ -220,12 +213,10 @@ func TestTranslatorMetricsForEC2(t *testing.T) { }, want: &want{ receivers: []string{"otlp/application_signals"}, - processors: []string{"awsentity", "resourcedetection", "awsapplicationsignals"}, + processors: []string{"resourcedetection", "awsapplicationsignals"}, exporters: []string{"awsemf/application_signals"}, extensions: []string{"agenthealth/logs"}, }, - detector: eksdetector.TestEKSDetector, - isEKSCache: eksdetector.TestIsEKSCacheEKS, }, "WithAppSignalsAndLoggingEnabled": { input: map[string]interface{}{ @@ -240,17 +231,16 @@ func TestTranslatorMetricsForEC2(t *testing.T) { }, want: &want{ receivers: []string{"otlp/application_signals"}, - processors: []string{"awsentity", "resourcedetection", "awsapplicationsignals"}, + processors: []string{"resourcedetection", "awsapplicationsignals"}, exporters: []string{"debug/application_signals", "awsemf/application_signals"}, extensions: []string{"agenthealth/logs"}, }, - detector: eksdetector.TestEKSDetector, - isEKSCache: eksdetector.TestIsEKSCacheEKS, }, } for name, testCase := range testCases { t.Run(name, func(t *testing.T) { ctx := context.CurrentContext() + context.CurrentContext().SetKubernetesMode("") ctx.SetMode(config.ModeEC2) conf := confmap.NewFromStringMap(testCase.input) got, err := tt.Translate(conf) diff --git a/translator/translate/otel/pipeline/host/translator.go b/translator/translate/otel/pipeline/host/translator.go index 66763ab76f..c4b32b1cac 100644 --- a/translator/translate/otel/pipeline/host/translator.go +++ b/translator/translate/otel/pipeline/host/translator.go @@ -9,6 +9,8 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" + "github.com/aws/amazon-cloudwatch-agent/translator/config" + 
"github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/exporter/awscloudwatch" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/agenthealth" @@ -73,10 +75,14 @@ func (t translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators, translators := common.ComponentTranslators{ Receivers: t.receivers, - Processors: common.NewTranslatorMap(entityProcessor), + Processors: common.NewTranslatorMap[component.Config](), Exporters: common.NewTranslatorMap(awscloudwatch.NewTranslator()), Extensions: common.NewTranslatorMap(agenthealth.NewTranslator(component.DataTypeMetrics, []string{agenthealth.OperationPutMetricData})), } + currentContext := context.CurrentContext() + if currentContext.Mode() == config.ModeEC2 && !currentContext.RunInContainer() { + translators.Processors.Set(entityProcessor) + } // we need to add delta processor because (only) diskio and net input plugins report delta metric if common.PipelineNameHostDeltaMetrics == t.name { diff --git a/translator/translate/otel/pipeline/host/translator_test.go b/translator/translate/otel/pipeline/host/translator_test.go index 3ec48c72f0..02e106ab81 100644 --- a/translator/translate/otel/pipeline/host/translator_test.go +++ b/translator/translate/otel/pipeline/host/translator_test.go @@ -12,6 +12,8 @@ import ( "go.opentelemetry.io/collector/confmap" "github.com/aws/amazon-cloudwatch-agent/internal/util/collections" + "github.com/aws/amazon-cloudwatch-agent/translator/config" + "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" ) @@ -38,14 +40,17 @@ func TestTranslator(t *testing.T) { extensions []string } testCases := map[string]struct { - input map[string]interface{} - pipelineName string - want *want - wantErr error + input map[string]interface{} + 
pipelineName string + mode string + runInContainer bool + want *want + wantErr error }{ "WithoutMetricsKey": { input: map[string]interface{}{}, pipelineName: common.PipelineNameHost, + mode: config.ModeEC2, wantErr: &common.MissingKeyError{ ID: component.NewIDWithName(component.DataTypeMetrics, common.PipelineNameHost), JsonKey: common.MetricsKey, @@ -56,6 +61,7 @@ func TestTranslator(t *testing.T) { "metrics": map[string]interface{}{}, }, pipelineName: common.PipelineNameHost, + mode: config.ModeEC2, want: &want{ pipelineID: "metrics/host", receivers: []string{"nop", "other"}, @@ -73,6 +79,7 @@ func TestTranslator(t *testing.T) { }, }, pipelineName: common.PipelineNameHostDeltaMetrics, + mode: config.ModeEC2, want: &want{ pipelineID: "metrics/hostDeltaMetrics", receivers: []string{"nop", "other"}, @@ -90,6 +97,7 @@ func TestTranslator(t *testing.T) { }, }, pipelineName: common.PipelineNameHostCustomMetrics, + mode: config.ModeEC2, want: &want{ pipelineID: "metrics/hostCustomMetrics", receivers: []string{"nop", "other"}, @@ -98,6 +106,25 @@ func TestTranslator(t *testing.T) { extensions: []string{"agenthealth/metrics"}, }, }, + "WithMetricsKeyStatsDContainer": { + input: map[string]interface{}{ + "metrics": map[string]interface{}{ + "metrics_collected": map[string]interface{}{ + "statsd": map[string]interface{}{}, + }, + }, + }, + pipelineName: common.PipelineNameHostCustomMetrics, + mode: config.ModeEC2, + runInContainer: true, + want: &want{ + pipelineID: "metrics/hostCustomMetrics", + receivers: []string{"nop", "other"}, + processors: []string{}, + exporters: []string{"awscloudwatch"}, + extensions: []string{"agenthealth/metrics"}, + }, + }, "WithMetricDecoration": { input: map[string]interface{}{ "metrics": map[string]interface{}{ @@ -114,6 +141,7 @@ func TestTranslator(t *testing.T) { }, }, pipelineName: common.PipelineNameHost, + mode: config.ModeEC2, want: &want{ pipelineID: "metrics/host", receivers: []string{"nop", "other"}, @@ -135,6 +163,7 @@ func 
TestTranslator(t *testing.T) { }, }, pipelineName: common.PipelineNameHost, + mode: config.ModeEC2, want: &want{ pipelineID: "metrics/host", receivers: []string{"nop", "other"}, @@ -148,6 +177,8 @@ func TestTranslator(t *testing.T) { nopType, _ := component.NewType("nop") otherType, _ := component.NewType("other") t.Run(name, func(t *testing.T) { + context.CurrentContext().SetMode(testCase.mode) + context.CurrentContext().SetRunInContainer(testCase.runInContainer) ht := NewTranslator(testCase.pipelineName, common.NewTranslatorMap[component.Config]( &testTranslator{id: component.NewID(nopType)}, &testTranslator{id: component.NewID(otherType)}, diff --git a/translator/translate/otel/pipeline/prometheus/translator.go b/translator/translate/otel/pipeline/prometheus/translator.go index da8366dcf5..6c9e54defc 100644 --- a/translator/translate/otel/pipeline/prometheus/translator.go +++ b/translator/translate/otel/pipeline/prometheus/translator.go @@ -56,18 +56,22 @@ func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators func (t *translator) translateProcessors() common.TranslatorMap[component.Config] { mode := context.CurrentContext().KubernetesMode() - if mode != "" || ecsutil.GetECSUtilSingleton().IsECS() { - // we are on kubernetes or ECS we do not want resource detection processor + // if we are on kubernetes or ECS we do not want resource detection processor + // if we are on Kubernetes, enable entity processor + if mode != "" { + return common.NewTranslatorMap( + batchprocessor.NewTranslatorWithNameAndSection(pipelineName, common.LogsKey), // prometheus sits under metrics_collected in "logs" + awsentity.NewTranslatorWithEntityType(awsentity.Service), + ) + } else if mode != "" || ecsutil.GetECSUtilSingleton().IsECS() { return common.NewTranslatorMap( batchprocessor.NewTranslatorWithNameAndSection(pipelineName, common.LogsKey), // prometheus sits under metrics_collected in "logs" - awsentity.NewTranslator(), ) } else { // we are on 
ec2/onprem return common.NewTranslatorMap( batchprocessor.NewTranslatorWithNameAndSection(pipelineName, common.LogsKey), // prometheus sits under metrics_collected in "logs" resourcedetection.NewTranslator(), - awsentity.NewTranslator(), ) } diff --git a/translator/translate/otel/pipeline/prometheus/translator_test.go b/translator/translate/otel/pipeline/prometheus/translator_test.go index c479e1f559..628ef5e465 100644 --- a/translator/translate/otel/pipeline/prometheus/translator_test.go +++ b/translator/translate/otel/pipeline/prometheus/translator_test.go @@ -47,7 +47,7 @@ func TestTranslator(t *testing.T) { kubernetesMode: translatorConfig.ModeEKS, want: &want{ receivers: []string{"telegraf_prometheus"}, - processors: []string{"batch/prometheus", "awsentity"}, + processors: []string{"batch/prometheus", "awsentity/service"}, exporters: []string{"awsemf/prometheus"}, extensions: []string{"agenthealth/logs"}, }, @@ -63,7 +63,7 @@ func TestTranslator(t *testing.T) { kubernetesMode: "", want: &want{ receivers: []string{"telegraf_prometheus"}, - processors: []string{"batch/prometheus", "resourcedetection", "awsentity"}, + processors: []string{"batch/prometheus", "resourcedetection"}, exporters: []string{"awsemf/prometheus"}, extensions: []string{"agenthealth/logs"}, }, diff --git a/translator/translate/otel/processor/awsentity/translator.go b/translator/translate/otel/processor/awsentity/translator.go index 7e275e790b..463144394a 100644 --- a/translator/translate/otel/processor/awsentity/translator.go +++ b/translator/translate/otel/processor/awsentity/translator.go @@ -75,28 +75,22 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { ctx := context.CurrentContext() mode := ctx.KubernetesMode() cfg.KubernetesMode = mode - if mode == "" { - mode = ctx.Mode() - } - if mode == config.ModeEC2 { + + mode = ctx.Mode() + if context.CurrentContext().RunInContainer() { if ecsutil.GetECSUtilSingleton().IsECS() { mode = config.ModeECS } } - switch 
mode { - case config.ModeEKS: + if cfg.KubernetesMode != "" { cfg.ClusterName = hostedIn - cfg.Platform = config.ModeEKS - case config.ModeK8sEC2: - cfg.ClusterName = hostedIn - cfg.Platform = config.ModeK8sEC2 - case config.ModeK8sOnPrem: - cfg.Platform = config.ModeK8sOnPrem - case config.ModeEC2: - cfg.Platform = config.ModeEC2 - case config.ModeECS: - cfg.Platform = config.ModeECS } + + // We want to keep platform config variable to be + // anything that is non-Kubernetes related so the + // processor can perform different logics for EKS + // in EC2 or Non-EC2 + cfg.Platform = mode return cfg, nil } diff --git a/translator/translate/otel/processor/awsentity/translator_test.go b/translator/translate/otel/processor/awsentity/translator_test.go index 27991b390e..7301557782 100644 --- a/translator/translate/otel/processor/awsentity/translator_test.go +++ b/translator/translate/otel/processor/awsentity/translator_test.go @@ -15,10 +15,11 @@ import ( ) func TestTranslate(t *testing.T) { - context.CurrentContext().SetKubernetesMode(config.ModeEKS) testCases := map[string]struct { - input map[string]interface{} - want *awsentity.Config + input map[string]interface{} + mode string + kubernetesMode string + want *awsentity.Config }{ "OnlyProfile": { input: map[string]interface{}{ @@ -29,15 +30,19 @@ func TestTranslate(t *testing.T) { }, }, }}, + mode: config.ModeEC2, + kubernetesMode: config.ModeEKS, want: &awsentity.Config{ ClusterName: "test", KubernetesMode: config.ModeEKS, - Platform: config.ModeEKS, + Platform: config.ModeEC2, }, }, } for name, testCase := range testCases { t.Run(name, func(t *testing.T) { + context.CurrentContext().SetMode(testCase.mode) + context.CurrentContext().SetKubernetesMode(testCase.kubernetesMode) tt := NewTranslator() assert.Equal(t, "awsentity", tt.ID().String()) conf := confmap.NewFromStringMap(testCase.input) From f16e9eb1561b7a516224353544badaa3d4c5cd0e Mon Sep 17 00:00:00 2001 From: Dinakar Chappa Date: Wed, 2 Oct 2024 16:23:02 -0400 
Subject: [PATCH 23/47] Adds Entity on PMD request (#820) --- plugins/outputs/cloudwatch/aggregator.go | 1 + plugins/outputs/cloudwatch/cloudwatch.go | 49 ++++-- plugins/outputs/cloudwatch/cloudwatch_test.go | 33 ++++ plugins/outputs/cloudwatch/convert_otel.go | 62 +++++-- .../outputs/cloudwatch/convert_otel_test.go | 161 +++++++++++++++++- plugins/outputs/cloudwatch/util.go | 72 ++++++++ plugins/outputs/cloudwatch/util_test.go | 41 +++++ .../entityattributes/entityattributes.go | 77 +++++++++ .../entityattributes/entityattributes.go | 34 ---- plugins/processors/awsentity/processor.go | 2 +- .../processors/awsentity/processor_test.go | 2 +- 11 files changed, 471 insertions(+), 63 deletions(-) create mode 100644 plugins/processors/awsentity/entityattributes/entityattributes.go delete mode 100644 plugins/processors/awsentity/internal/entityattributes/entityattributes.go diff --git a/plugins/outputs/cloudwatch/aggregator.go b/plugins/outputs/cloudwatch/aggregator.go index 3295f93d87..8869035664 100644 --- a/plugins/outputs/cloudwatch/aggregator.go +++ b/plugins/outputs/cloudwatch/aggregator.go @@ -28,6 +28,7 @@ type aggregationDatum struct { cloudwatch.MetricDatum aggregationInterval time.Duration distribution distribution.Distribution + entity cloudwatch.Entity } type Aggregator interface { diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index e81605edfe..054c0879b9 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -17,6 +17,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/outputs" + "github.com/jellydator/ttlcache/v3" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/exporter" @@ -60,18 +61,19 @@ type CloudWatch struct { // todo: may want to increase the size of the chan since the type changed. 
// 1 telegraf Metric could have many Fields. // Each field corresponds to a MetricDatum. - metricChan chan *aggregationDatum - datumBatchChan chan []*cloudwatch.MetricDatum - metricDatumBatch *MetricDatumBatch - shutdownChan chan struct{} - retries int - publisher *publisher.Publisher - retryer *retryer.LogThrottleRetryer - droppingOriginMetrics collections.Set[string] - aggregator Aggregator - aggregatorShutdownChan chan struct{} - aggregatorWaitGroup sync.WaitGroup - lastRequestBytes int + metricChan chan *aggregationDatum + datumBatchChan chan []*cloudwatch.MetricDatum + metricDatumBatch *MetricDatumBatch + shutdownChan chan struct{} + retries int + publisher *publisher.Publisher + retryer *retryer.LogThrottleRetryer + droppingOriginMetrics collections.Set[string] + aggregator Aggregator + aggregatorShutdownChan chan struct{} + aggregatorWaitGroup sync.WaitGroup + lastRequestBytes int + entityToMetricDatumCache *ttlcache.Cache[string, []*cloudwatch.MetricDatum] } // Compile time interface check. 
@@ -115,6 +117,7 @@ func (c *CloudWatch) Start(_ context.Context, host component.Host) error { c.config.RollupDimensions = GetUniqueRollupList(c.config.RollupDimensions) c.svc = svc c.retryer = logThrottleRetryer + c.entityToMetricDatumCache = ttlcache.New[string, []*cloudwatch.MetricDatum](ttlcache.WithTTL[string, []*cloudwatch.MetricDatum](5 * time.Minute)) c.startRoutines() return nil } @@ -128,6 +131,7 @@ func (c *CloudWatch) startRoutines() { c.aggregator = NewAggregator(c.metricChan, c.aggregatorShutdownChan, &c.aggregatorWaitGroup) perRequestConstSize := overallConstPerRequestSize + len(c.config.Namespace) + namespaceOverheads c.metricDatumBatch = newMetricDatumBatch(c.config.MaxDatumsPerCall, perRequestConstSize) + go c.entityToMetricDatumCache.Start() go c.pushMetricDatum() go c.publish() } @@ -148,6 +152,7 @@ func (c *CloudWatch) Shutdown(ctx context.Context) error { close(c.shutdownChan) c.publisher.Close() c.retryer.Stop() + c.entityToMetricDatumCache.Stop() log.Println("D! Stopped the CloudWatch output plugin") return nil } @@ -334,11 +339,26 @@ func (c *CloudWatch) backoffSleep() { time.Sleep(d) } +func createEntityMetricData(entityToMetricDatumCache *ttlcache.Cache[string, []*cloudwatch.MetricDatum]) []*cloudwatch.EntityMetricData { + var entityMetricData []*cloudwatch.EntityMetricData + for _, item := range entityToMetricDatumCache.Items() { + entity := stringToEntity(item.Key()) + entityMetricData = append(entityMetricData, &cloudwatch.EntityMetricData{ + Entity: &entity, + MetricData: item.Value(), + }) + } + return entityMetricData +} + func (c *CloudWatch) WriteToCloudWatch(req interface{}) { datums := req.([]*cloudwatch.MetricDatum) + entityMetricData := createEntityMetricData(c.entityToMetricDatumCache) params := &cloudwatch.PutMetricDataInput{ - MetricData: datums, - Namespace: aws.String(c.config.Namespace), + MetricData: datums, + Namespace: aws.String(c.config.Namespace), + EntityMetricData: entityMetricData, + StrictEntityValidation: 
aws.Bool(false), } var err error for i := 0; i < defaultRetryCount; i++ { @@ -437,6 +457,7 @@ func (c *CloudWatch) BuildMetricDatum(metric *aggregationDatum) []*cloudwatch.Me } } } + c.entityToMetricDatumCache.Set(entityToString(metric.entity), datums, ttlcache.DefaultTTL) return datums } diff --git a/plugins/outputs/cloudwatch/cloudwatch_test.go b/plugins/outputs/cloudwatch/cloudwatch_test.go index 2c3ea73a22..7181f7ace3 100644 --- a/plugins/outputs/cloudwatch/cloudwatch_test.go +++ b/plugins/outputs/cloudwatch/cloudwatch_test.go @@ -19,6 +19,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/jellydator/ttlcache/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -386,6 +387,7 @@ func newCloudWatchClient( MaxDatumsPerCall: defaultMaxDatumsPerCall, MaxValuesPerDatum: defaultMaxValuesPerDatum, }, + entityToMetricDatumCache: ttlcache.New[string, []*cloudwatch.MetricDatum](), } cloudwatch.startRoutines() return cloudwatch @@ -495,6 +497,7 @@ func TestPublish(t *testing.T) { // 10K metrics in batches of 20... 
time.Sleep(interval) assert.Equal(t, expectedCalls, len(svc.Calls)) + assert.Equal(t, 0, metrics.ResourceMetrics().At(0).Resource().Attributes().Len()) cw.Shutdown(ctx) } @@ -582,3 +585,33 @@ func TestCloudWatch_metricDatumBatchFull(t *testing.T) { <-c.datumBatchChan assert.False(t, c.metricDatumBatchFull()) } + +func TestCreateEntityMetricData(t *testing.T) { + svc := new(mockCloudWatchClient) + cw := newCloudWatchClient(svc, time.Second) + entity := cloudwatch.Entity{ + KeyAttributes: map[string]*string{ + "Type": aws.String("Service"), + "Environment": aws.String("Environment"), + "Name": aws.String("MyServiceName"), + }, + Attributes: map[string]*string{ + "InstanceID": aws.String("i-123456789"), + "Platform": aws.String("AWS::EC2"), + }, + } + entityToAttributesMap := ttlcache.New[string, []*cloudwatch.MetricDatum](ttlcache.WithTTL[string, []*cloudwatch.MetricDatum](5 * time.Minute)) + metrics := createTestMetrics(1, 1, 1, "s") + assert.Equal(t, 6, metrics.ResourceMetrics().At(0).Resource().Attributes().Len()) + aggregations := ConvertOtelMetrics(metrics) + assert.Equal(t, 0, metrics.ResourceMetrics().At(0).Resource().Attributes().Len()) + metricDatum := cw.BuildMetricDatum(aggregations[0]) + entityToAttributesMap.Set(entityToString(entity), metricDatum, ttlcache.DefaultTTL) + wantedEntityMetricData := []*cloudwatch.EntityMetricData{ + { + Entity: &entity, + MetricData: metricDatum, + }, + } + assert.Equal(t, wantedEntityMetricData, createEntityMetricData(entityToAttributesMap)) +} diff --git a/plugins/outputs/cloudwatch/convert_otel.go b/plugins/outputs/cloudwatch/convert_otel.go index 6df75a36bc..a9f85d13d4 100644 --- a/plugins/outputs/cloudwatch/convert_otel.go +++ b/plugins/outputs/cloudwatch/convert_otel.go @@ -14,6 +14,7 @@ import ( cloudwatchutil "github.com/aws/amazon-cloudwatch-agent/internal/cloudwatch" "github.com/aws/amazon-cloudwatch-agent/metric/distribution" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/entityattributes" 
"github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatch" ) @@ -22,12 +23,22 @@ func ConvertOtelDimensions(attributes pcommon.Map) []*cloudwatch.Dimension { // Loop through map, similar to EMF exporter createLabels(). mTags := make(map[string]string, attributes.Len()) attributes.Range(func(k string, v pcommon.Value) bool { + // we don't want to export entity related attributes as dimensions, so we skip these + if isEntityAttribute(k) { + return true + } mTags[k] = v.AsString() return true }) return BuildDimensions(mTags) } +func isEntityAttribute(k string) bool { + _, ok := entityattributes.KeyAttributeEntityToShortNameMap[k] + _, ok2 := entityattributes.AttributeEntityToShortNameMap[k] + return ok || ok2 +} + // NumberDataPointValue converts to float64 since that is what AWS SDK will use. func NumberDataPointValue(dp pmetric.NumberDataPoint) float64 { switch dp.ValueType() { @@ -76,6 +87,7 @@ func ConvertOtelNumberDataPoints( name string, unit string, scale float64, + entity cloudwatch.Entity, ) []*aggregationDatum { // Could make() with attrs.Len() * len(c.RollupDimensions). datums := make([]*aggregationDatum, 0, dataPoints.Len()) @@ -96,6 +108,7 @@ func ConvertOtelNumberDataPoints( StorageResolution: aws.Int64(storageResolution), }, aggregationInterval: aggregationInterval, + entity: entity, } datums = append(datums, &ad) } @@ -109,6 +122,7 @@ func ConvertOtelHistogramDataPoints( name string, unit string, scale float64, + entity cloudwatch.Entity, ) []*aggregationDatum { datums := make([]*aggregationDatum, 0, dataPoints.Len()) for i := 0; i < dataPoints.Len(); i++ { @@ -126,6 +140,7 @@ func ConvertOtelHistogramDataPoints( StorageResolution: aws.Int64(storageResolution), }, aggregationInterval: aggregationInterval, + entity: entity, } // Assume function pointer is valid. ad.distribution = distribution.NewDistribution() @@ -139,7 +154,7 @@ func ConvertOtelHistogramDataPoints( // metric and returns it. Only supports the metric DataTypes that we plan to use. 
// Intentionally not caching previous values and converting cumulative to delta. // Instead use cumulativetodeltaprocessor which supports monotonic cumulative sums. -func ConvertOtelMetric(m pmetric.Metric) []*aggregationDatum { +func ConvertOtelMetric(m pmetric.Metric, entity cloudwatch.Entity) []*aggregationDatum { name := m.Name() unit, scale, err := cloudwatchutil.ToStandardUnit(m.Unit()) if err != nil { @@ -147,34 +162,57 @@ func ConvertOtelMetric(m pmetric.Metric) []*aggregationDatum { } switch m.Type() { case pmetric.MetricTypeGauge: - return ConvertOtelNumberDataPoints(m.Gauge().DataPoints(), name, unit, scale) + return ConvertOtelNumberDataPoints(m.Gauge().DataPoints(), name, unit, scale, entity) case pmetric.MetricTypeSum: - return ConvertOtelNumberDataPoints(m.Sum().DataPoints(), name, unit, scale) + return ConvertOtelNumberDataPoints(m.Sum().DataPoints(), name, unit, scale, entity) case pmetric.MetricTypeHistogram: - return ConvertOtelHistogramDataPoints(m.Histogram().DataPoints(), name, unit, scale) + return ConvertOtelHistogramDataPoints(m.Histogram().DataPoints(), name, unit, scale, entity) default: log.Printf("E! cloudwatch: Unsupported type, %s", m.Type()) } return []*aggregationDatum{} } -// ConvertOtelMetrics only uses dimensions/attributes on each "datapoint", -// not each "Resource". -// This is acceptable because ResourceToTelemetrySettings defaults to true. 
func ConvertOtelMetrics(m pmetric.Metrics) []*aggregationDatum { datums := make([]*aggregationDatum, 0, m.DataPointCount()) - // Metrics -> ResourceMetrics -> ScopeMetrics -> MetricSlice -> DataPoints - resourceMetrics := m.ResourceMetrics() - for i := 0; i < resourceMetrics.Len(); i++ { - scopeMetrics := resourceMetrics.At(i).ScopeMetrics() + for i := 0; i < m.ResourceMetrics().Len(); i++ { + entity := fetchEntityFields(m.ResourceMetrics().At(i).Resource().Attributes()) + scopeMetrics := m.ResourceMetrics().At(i).ScopeMetrics() for j := 0; j < scopeMetrics.Len(); j++ { metrics := scopeMetrics.At(j).Metrics() for k := 0; k < metrics.Len(); k++ { metric := metrics.At(k) - newDatums := ConvertOtelMetric(metric) + newDatums := ConvertOtelMetric(metric, entity) datums = append(datums, newDatums...) + } } } return datums } + +func fetchEntityFields(resourceAttributes pcommon.Map) cloudwatch.Entity { + keyAttributesMap := map[string]*string{} + attributeMap := map[string]*string{} + + processEntityAttributes(entityattributes.KeyAttributeEntityToShortNameMap, keyAttributesMap, resourceAttributes) + processEntityAttributes(entityattributes.AttributeEntityToShortNameMap, attributeMap, resourceAttributes) + + return cloudwatch.Entity{ + KeyAttributes: keyAttributesMap, + Attributes: attributeMap, + } +} + +// processEntityAttributes fetches the aws.entity fields and creates an entity to be sent at the PutMetricData call. It also +// removes the entity attributes so that it is not tagged as a dimension, and reduces the size of the PMD payload. 
+func processEntityAttributes(entityMap map[string]string, targetMap map[string]*string, mutableResourceAttributes pcommon.Map) { + for entityField, shortName := range entityMap { + if val, ok := mutableResourceAttributes.Get(entityField); ok { + if strVal := val.Str(); strVal != "" { + targetMap[shortName] = aws.String(strVal) + } + mutableResourceAttributes.Remove(entityField) + } + } +} diff --git a/plugins/outputs/cloudwatch/convert_otel_test.go b/plugins/outputs/cloudwatch/convert_otel_test.go index 672e89c72f..f85d3c3a2a 100644 --- a/plugins/outputs/cloudwatch/convert_otel_test.go +++ b/plugins/outputs/cloudwatch/convert_otel_test.go @@ -9,12 +9,15 @@ import ( "testing" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "github.com/aws/amazon-cloudwatch-agent/metric/distribution" "github.com/aws/amazon-cloudwatch-agent/metric/distribution/regular" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/entityattributes" + "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatch" ) const ( @@ -51,6 +54,13 @@ func createTestMetrics( ) pmetric.Metrics { metrics := pmetric.NewMetrics() rm := metrics.ResourceMetrics().AppendEmpty() + rm.Resource().Attributes().PutStr(entityattributes.AttributeEntityType, "Service") + rm.Resource().Attributes().PutStr(entityattributes.AttributeEntityDeploymentEnvironment, "MyEnvironment") + rm.Resource().Attributes().PutStr(entityattributes.AttributeEntityServiceName, "MyServiceName") + rm.Resource().Attributes().PutStr(entityattributes.AttributeEntityInstanceID, "i-123456789") + rm.Resource().Attributes().PutStr(entityattributes.AttributeEntityAutoScalingGroup, "asg-123") + rm.Resource().Attributes().PutStr(entityattributes.AttributeEntityPlatformType, "AWS::EC2") + sm := rm.ScopeMetrics().AppendEmpty() for i := 0; i < numMetrics; i++ { @@ -210,9 +220,158 @@ func 
TestConvertOtelMetrics_Dimensions(t *testing.T) { } } +func TestConvertOtelMetrics_Entity(t *testing.T) { + metrics := createTestMetrics(1, 1, 1, "s") + datums := ConvertOtelMetrics(metrics) + expectedEntity := cloudwatch.Entity{ + KeyAttributes: map[string]*string{ + "Type": aws.String("Service"), + "Environment": aws.String("MyEnvironment"), + "Name": aws.String("MyServiceName"), + }, + Attributes: map[string]*string{ + "InstanceID": aws.String("i-123456789"), + "Platform": aws.String("AWS::EC2"), + "AutoScalingGroup": aws.String("asg-123"), + }, + } + assert.Equal(t, 1, len(datums)) + assert.Equal(t, expectedEntity, datums[0].entity) + +} + +func TestProcessEntityAttributes(t *testing.T) { + testCases := []struct { + name string + entityMap []map[string]string + resourceAttributes map[string]any + wantedAttributes map[string]*string + leftoverAttributes map[string]any + }{ + { + name: "key_attributes", + entityMap: []map[string]string{entityattributes.KeyAttributeEntityToShortNameMap}, + resourceAttributes: map[string]any{ + entityattributes.AttributeEntityServiceName: "my-service", + entityattributes.AttributeEntityDeploymentEnvironment: "my-environment", + }, + wantedAttributes: map[string]*string{ + entityattributes.ServiceName: aws.String("my-service"), + entityattributes.DeploymentEnvironment: aws.String("my-environment"), + }, + leftoverAttributes: make(map[string]any), + }, + { + name: "non-key_attributes", + entityMap: []map[string]string{entityattributes.AttributeEntityToShortNameMap}, + resourceAttributes: map[string]any{ + entityattributes.AttributeEntityCluster: "my-cluster", + entityattributes.AttributeEntityNamespace: "my-namespace", + entityattributes.AttributeEntityNode: "my-node", + entityattributes.AttributeEntityWorkload: "my-workload", + }, + wantedAttributes: map[string]*string{ + entityattributes.Cluster: aws.String("my-cluster"), + entityattributes.Namespace: aws.String("my-namespace"), + entityattributes.Node: aws.String("my-node"), + 
entityattributes.Workload: aws.String("my-workload"), + }, + leftoverAttributes: make(map[string]any), + }, + { + name: "key_and_non_key_attributes", + entityMap: []map[string]string{entityattributes.KeyAttributeEntityToShortNameMap, entityattributes.AttributeEntityToShortNameMap}, + resourceAttributes: map[string]any{ + entityattributes.AttributeEntityServiceName: "my-service", + entityattributes.AttributeEntityDeploymentEnvironment: "my-environment", + entityattributes.AttributeEntityCluster: "my-cluster", + entityattributes.AttributeEntityNamespace: "my-namespace", + entityattributes.AttributeEntityNode: "my-node", + entityattributes.AttributeEntityWorkload: "my-workload", + }, + wantedAttributes: map[string]*string{ + entityattributes.ServiceName: aws.String("my-service"), + entityattributes.DeploymentEnvironment: aws.String("my-environment"), + entityattributes.Cluster: aws.String("my-cluster"), + entityattributes.Namespace: aws.String("my-namespace"), + entityattributes.Node: aws.String("my-node"), + entityattributes.Workload: aws.String("my-workload"), + }, + leftoverAttributes: make(map[string]any), + }, + { + name: "key_and_non_key_attributes_plus_extras", + entityMap: []map[string]string{entityattributes.KeyAttributeEntityToShortNameMap, entityattributes.AttributeEntityToShortNameMap}, + resourceAttributes: map[string]any{ + "extra_attribute": "extra_value", + entityattributes.AttributeEntityServiceName: "my-service", + entityattributes.AttributeEntityDeploymentEnvironment: "my-environment", + entityattributes.AttributeEntityCluster: "my-cluster", + entityattributes.AttributeEntityNamespace: "my-namespace", + entityattributes.AttributeEntityNode: "my-node", + entityattributes.AttributeEntityWorkload: "my-workload", + }, + wantedAttributes: map[string]*string{ + entityattributes.ServiceName: aws.String("my-service"), + entityattributes.DeploymentEnvironment: aws.String("my-environment"), + entityattributes.Cluster: aws.String("my-cluster"), + 
entityattributes.Namespace: aws.String("my-namespace"), + entityattributes.Node: aws.String("my-node"), + entityattributes.Workload: aws.String("my-workload"), + }, + leftoverAttributes: map[string]any{ + "extra_attribute": "extra_value", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + attrs := pcommon.NewMap() + err := attrs.FromRaw(tc.resourceAttributes) + assert.Nil(t, err) + targetMap := make(map[string]*string) + for _, entityMap := range tc.entityMap { + processEntityAttributes(entityMap, targetMap, attrs) + } + assert.Equal(t, tc.leftoverAttributes, attrs.AsRaw()) + assert.Equal(t, tc.wantedAttributes, targetMap) + }) + } +} + +func TestFetchEntityFields(t *testing.T) { + resourceMetrics := pmetric.NewResourceMetrics() + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityType, "Service") + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityDeploymentEnvironment, "my-environment") + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityServiceName, "my-service") + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityNode, "my-node") + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityCluster, "my-cluster") + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityNamespace, "my-namespace") + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityWorkload, "my-workload") + assert.Equal(t, 7, resourceMetrics.Resource().Attributes().Len()) + + expectedEntity := cloudwatch.Entity{KeyAttributes: map[string]*string{ + entityattributes.EntityType: aws.String(entityattributes.Service), + entityattributes.ServiceName: aws.String("my-service"), + entityattributes.DeploymentEnvironment: aws.String("my-environment"), + }, + Attributes: map[string]*string{ + entityattributes.Node: aws.String("my-node"), + entityattributes.Cluster: 
aws.String("my-cluster"), + entityattributes.Namespace: aws.String("my-namespace"), + entityattributes.Workload: aws.String("my-workload"), + }, + } + entity := fetchEntityFields(resourceMetrics.Resource().Attributes()) + assert.Equal(t, 0, resourceMetrics.Resource().Attributes().Len()) + assert.Equal(t, expectedEntity, entity) + +} + func TestInvalidMetric(t *testing.T) { m := pmetric.NewMetric() m.SetName("name") m.SetUnit("unit") - assert.Empty(t, ConvertOtelMetric(m)) + assert.Empty(t, ConvertOtelMetric(m, cloudwatch.Entity{})) } diff --git a/plugins/outputs/cloudwatch/util.go b/plugins/outputs/cloudwatch/util.go index 805c22bb82..939428a76c 100644 --- a/plugins/outputs/cloudwatch/util.go +++ b/plugins/outputs/cloudwatch/util.go @@ -4,9 +4,11 @@ package cloudwatch import ( + "fmt" "log" "math/rand" "sort" + "strings" "time" "github.com/aws/amazon-cloudwatch-agent/metric/distribution" @@ -120,3 +122,73 @@ func payload(datum *cloudwatch.MetricDatum) (size int) { return } + +func entityToString(entity cloudwatch.Entity) string { + var attributes, keyAttributes string + if entity.Attributes != nil { + attributes = entityAttributesToString(entity.Attributes) + } + if entity.KeyAttributes != nil { + keyAttributes = entityAttributesToString(entity.KeyAttributes) + } + + data := fmt.Sprintf( + "%s|%s", + attributes, + keyAttributes, + ) + return data +} + +// Helper function to convert a map of entityAttributes to a consistent string representation +func entityAttributesToString(m map[string]*string) string { + if m == nil { + return "" + } + pairs := make([]string, 0, len(m)) + for k, v := range m { + if v == nil { + pairs = append(pairs, k+":") + } else { + pairs = append(pairs, k+":"+*v) + } + } + sort.Strings(pairs) // Ensure a consistent order + return strings.Join(pairs, ";") +} + +func stringToEntity(data string) cloudwatch.Entity { + parts := strings.Split(data, "|") + if len(parts) < 2 { + // Handle error: invalid input string + return cloudwatch.Entity{} + } 
+ + entity := cloudwatch.Entity{ + Attributes: make(map[string]*string), + KeyAttributes: make(map[string]*string), + } + + if parts[0] != "" { + entity.Attributes = stringToEntityAttributes(parts[0]) + } + + if parts[1] != "" { + entity.KeyAttributes = stringToEntityAttributes(parts[1]) + } + + return entity +} + +func stringToEntityAttributes(s string) map[string]*string { + result := make(map[string]*string) + pairs := strings.Split(s, ";") + for _, pair := range pairs { + kv := strings.SplitN(pair, ":", 2) + if len(kv) == 2 { + value := kv[1] + result[kv[0]] = &value + } + } + return result +} diff --git a/plugins/outputs/cloudwatch/util_test.go b/plugins/outputs/cloudwatch/util_test.go index 36acc387cd..aca977b9f2 100644 --- a/plugins/outputs/cloudwatch/util_test.go +++ b/plugins/outputs/cloudwatch/util_test.go @@ -141,3 +141,44 @@ func TestPayload_Min(t *testing.T) { datum.SetTimestamp(time.Now()) assert.Equal(t, 148, payload(datum)) } + +func TestEntityToString_StringToEntity(t *testing.T) { + testCases := []struct { + name string + entity cloudwatch.Entity + entityString string + }{ + { + name: "Full Entity", + entity: cloudwatch.Entity{ + KeyAttributes: map[string]*string{ + "Service": aws.String("Service"), + "Environment": aws.String("Environment"), + }, + Attributes: map[string]*string{ + "InstanceId": aws.String("InstanceId"), + "InstanceType": aws.String("InstanceType"), + }, + }, + entityString: "InstanceId:InstanceId;InstanceType:InstanceType|Environment:Environment;Service:Service", + }, + { + name: "Empty Attributes", + entity: cloudwatch.Entity{ + KeyAttributes: map[string]*string{ + "Service": aws.String("Service"), + "Environment": aws.String("Environment"), + }, + Attributes: map[string]*string{}, + }, + entityString: "|Environment:Environment;Service:Service", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.entityString, entityToString(tc.entity)) + assert.Equal(t, tc.entity, 
stringToEntity(tc.entityString)) + }) + } +} diff --git a/plugins/processors/awsentity/entityattributes/entityattributes.go b/plugins/processors/awsentity/entityattributes/entityattributes.go new file mode 100644 index 0000000000..c79dfb902e --- /dev/null +++ b/plugins/processors/awsentity/entityattributes/entityattributes.go @@ -0,0 +1,77 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package entityattributes + +const ( + + // The following are the possible values for EntityType config options + Resource = "Resource" + Service = "Service" + + // The following are entity related attributes + AWSEntityPrefix = "com.amazonaws.cloudwatch.entity.internal." + AttributeEntityType = AWSEntityPrefix + "type" + AttributeEntityAWSResource = "AWS::Resource" + AttributeEntityResourceType = AWSEntityPrefix + "resource.type" + AttributeEntityEC2InstanceResource = "AWS::EC2::Instance" + AttributeEntityIdentifier = AWSEntityPrefix + "identifier" + AttributeEntityServiceName = AWSEntityPrefix + "service.name" + AttributeEntityDeploymentEnvironment = AWSEntityPrefix + "deployment.environment" + AttributeEntityCluster = AWSEntityPrefix + "k8s.cluster.name" + AttributeEntityNamespace = AWSEntityPrefix + "k8s.namespace.name" + AttributeEntityWorkload = AWSEntityPrefix + "k8s.workload.name" + AttributeEntityNode = AWSEntityPrefix + "k8s.node.name" + AttributeEntityServiceNameSource = AWSEntityPrefix + "service.name.source" + AttributeEntityPlatformType = AWSEntityPrefix + "platform.type" + AttributeEntityInstanceID = AWSEntityPrefix + "instance.id" + AttributeEntityAutoScalingGroup = AWSEntityPrefix + "auto.scaling.group" + + // The following are possible platform values + AttributeEntityEC2Platform = "AWS::EC2" + AttributeEntityEKSPlatform = "AWS::EKS" + AttributeEntityK8sPlatform = "K8s" + + // The following Fields are the actual names attached to the Entity requests. 
+ ServiceName = "Name" + DeploymentEnvironment = "Environment" + EntityType = "Type" + ResourceType = "ResourceType" + Identifier = "Identifier" + Cluster = "Cluster" + Workload = "Workload" + Node = "Node" + ServiceNameSource = "Source" + Platform = "Platform" + InstanceID = "InstanceID" + AutoscalingGroup = "AutoScalingGroup" +) + +// KeyAttributeEntityToShortNameMap is used to map key attributes from otel to the actual values used in the Entity object +var KeyAttributeEntityToShortNameMap = map[string]string{ + AttributeEntityType: EntityType, + AttributeEntityResourceType: ResourceType, + AttributeEntityIdentifier: Identifier, + AttributeEntityServiceName: ServiceName, + AttributeEntityDeploymentEnvironment: DeploymentEnvironment, +} + +// AttributeEntityToShortNameMap is used to map attributes from otel to the actual values used in the Entity object +var AttributeEntityToShortNameMap = map[string]string{ + AttributeEntityCluster: Cluster, + AttributeEntityNamespace: Namespace, + AttributeEntityWorkload: Workload, + AttributeEntityNode: Node, + AttributeEntityPlatformType: Platform, + AttributeEntityInstanceID: InstanceID, + AttributeEntityAutoScalingGroup: AutoscalingGroup, + AttributeEntityServiceNameSource: ServiceNameSource, +} + +// Container Insights attributes used for scraping EKS related information +const ( + NodeName = "NodeName" + Namespace = "Namespace" + // PodName in Container Insights is the workload(Deployment, Daemonset, etc) name + PodName = "PodName" +) diff --git a/plugins/processors/awsentity/internal/entityattributes/entityattributes.go b/plugins/processors/awsentity/internal/entityattributes/entityattributes.go deleted file mode 100644 index bfe39863b1..0000000000 --- a/plugins/processors/awsentity/internal/entityattributes/entityattributes.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-// SPDX-License-Identifier: MIT - -package entityattributes - -const ( - // The following are the possible values for EntityType config options - Resource = "Resource" - Service = "Service" - - // The following are entity related attributes - AWSEntityPrefix = "com.amazonaws.cloudwatch.entity.internal." - AttributeEntityType = AWSEntityPrefix + "type" - AttributeEntityAWSResource = "AWS::Resource" - AttributeEntityResourceType = AWSEntityPrefix + "resource.type" - AttributeEntityEC2InstanceResource = "AWS::EC2::Instance" - AttributeEntityIdentifier = AWSEntityPrefix + "identifier" - - AttributeEntityServiceName = AWSEntityPrefix + "service.name" - AttributeEntityDeploymentEnvironment = AWSEntityPrefix + "deployment.environment" - AttributeEntityCluster = AWSEntityPrefix + "k8s.cluster.name" - AttributeEntityNamespace = AWSEntityPrefix + "k8s.namespace.name" - AttributeEntityWorkload = AWSEntityPrefix + "k8s.workload.name" - AttributeEntityNode = AWSEntityPrefix + "k8s.node.name" - AttributeEntityServiceNameSource = AWSEntityPrefix + "service.name.source" - AttributeEntityPlatformType = AWSEntityPrefix + "platform.type" - AttributeEntityInstanceID = AWSEntityPrefix + "instance.id" - AttributeEntityAutoScalingGroup = AWSEntityPrefix + "auto.scaling.group" - - // The following are possible platform values - AttributeEntityEC2Platform = "AWS::EC2" - AttributeEntityEKSPlatform = "AWS::EKS" - AttributeEntityK8sPlatform = "K8s" -) diff --git a/plugins/processors/awsentity/processor.go b/plugins/processors/awsentity/processor.go index 5484f54297..4574fd70fa 100644 --- a/plugins/processors/awsentity/processor.go +++ b/plugins/processors/awsentity/processor.go @@ -14,7 +14,7 @@ import ( "go.uber.org/zap" "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" - "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/internal/entityattributes" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/entityattributes" 
"github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/internal/k8sattributescraper" "github.com/aws/amazon-cloudwatch-agent/translator/config" ) diff --git a/plugins/processors/awsentity/processor_test.go b/plugins/processors/awsentity/processor_test.go index 5ccabaf6aa..9882f21ae7 100644 --- a/plugins/processors/awsentity/processor_test.go +++ b/plugins/processors/awsentity/processor_test.go @@ -13,7 +13,7 @@ import ( "go.uber.org/zap" "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" - "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/internal/entityattributes" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/entityattributes" "github.com/aws/amazon-cloudwatch-agent/translator/config" ) From 40655bbbfc099ba20ef741b5a4cd2083fd70472f Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Wed, 2 Oct 2024 18:33:26 -0400 Subject: [PATCH 24/47] Add fallback logic to use workload as service name (#829) --- .../k8sattributescraper.go | 10 ++++- plugins/processors/awsentity/processor.go | 22 ++++++---- .../processors/awsentity/processor_test.go | 40 ++++++++++++++++--- 3 files changed, 57 insertions(+), 15 deletions(-) diff --git a/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper.go b/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper.go index 580f18e2d4..6301166f1a 100644 --- a/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper.go +++ b/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper.go @@ -15,11 +15,17 @@ var ( semconv.AttributeK8SNamespaceName, } + // these kubernetes resource attributes are set by the openTelemtry operator + // see the code referecnes from upstream: + // * https://github.com/open-telemetry/opentelemetry-operator/blame/main/pkg/instrumentation/sdk.go#L421 workloadAllowlist = []string{ semconv.AttributeK8SDeploymentName, - 
semconv.AttributeK8SDaemonSetName, - semconv.AttributeK8SStatefulSetName, semconv.AttributeK8SReplicaSetName, + semconv.AttributeK8SStatefulSetName, + semconv.AttributeK8SDaemonSetName, + semconv.AttributeK8SCronJobName, + semconv.AttributeK8SJobName, + semconv.AttributeK8SPodName, semconv.AttributeK8SContainerName, } nodeAllowlist = []string{ diff --git a/plugins/processors/awsentity/processor.go b/plugins/processors/awsentity/processor.go index 4574fd70fa..d92a29bed0 100644 --- a/plugins/processors/awsentity/processor.go +++ b/plugins/processors/awsentity/processor.go @@ -157,21 +157,29 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric entityPlatformType = entityattributes.AttributeEntityK8sPlatform } - fallbackEnvironment := entityEnvironmentName podInfo, ok := p.k8sscraper.(*k8sattributescraper.K8sAttributeScraper) - if fallbackEnvironment == EMPTY && p.config.KubernetesMode == config.ModeEKS && ok && podInfo.Cluster != EMPTY && podInfo.Namespace != EMPTY { - fallbackEnvironment = "eks:" + p.config.ClusterName + "/" + podInfo.Namespace - } else if fallbackEnvironment == EMPTY && (p.config.KubernetesMode == config.ModeK8sEC2 || p.config.KubernetesMode == config.ModeK8sOnPrem) && ok && podInfo.Cluster != EMPTY && podInfo.Namespace != EMPTY { - fallbackEnvironment = "k8s:" + p.config.ClusterName + "/" + podInfo.Namespace + // Perform fallback mechanism for service and environment name if they + // are empty + if entityServiceName == EMPTY && podInfo.Workload != EMPTY { + entityServiceName = podInfo.Workload + entityServiceNameSource = entitystore.ServiceNameSourceK8sWorkload + } + + if entityEnvironmentName == EMPTY && ok && podInfo.Cluster != EMPTY && podInfo.Namespace != EMPTY { + if p.config.KubernetesMode == config.ModeEKS { + entityEnvironmentName = "eks:" + p.config.ClusterName + "/" + podInfo.Namespace + } else if p.config.KubernetesMode == config.ModeK8sEC2 || p.config.KubernetesMode == config.ModeK8sOnPrem { + 
entityEnvironmentName = "k8s:" + p.config.ClusterName + "/" + podInfo.Namespace + } } // Add service information for a pod to the pod association map // so that agent can host this information in a server fullPodName := scrapeK8sPodName(resourceAttrs) if fullPodName != EMPTY && entityServiceName != EMPTY && entityServiceNameSource != EMPTY { - addPodToServiceEnvironmentMap(fullPodName, entityServiceName, fallbackEnvironment, entityServiceNameSource) + addPodToServiceEnvironmentMap(fullPodName, entityServiceName, entityEnvironmentName, entityServiceNameSource) } else if fullPodName != EMPTY && entityServiceName != EMPTY && entityServiceNameSource == EMPTY { - addPodToServiceEnvironmentMap(fullPodName, entityServiceName, fallbackEnvironment, entitystore.ServiceNameSourceUnknown) + addPodToServiceEnvironmentMap(fullPodName, entityServiceName, entityEnvironmentName, entitystore.ServiceNameSourceUnknown) } eksAttributes := K8sServiceAttributes{ Cluster: podInfo.Cluster, diff --git a/plugins/processors/awsentity/processor_test.go b/plugins/processors/awsentity/processor_test.go index 9882f21ae7..3a99ac1da0 100644 --- a/plugins/processors/awsentity/processor_test.go +++ b/plugins/processors/awsentity/processor_test.go @@ -234,7 +234,7 @@ func TestProcessMetricsForAddingPodToServiceMap(t *testing.T) { name: "WithPodNameAndEmptyServiceAndEnvironmentName", metrics: generateMetrics(semconv.AttributeK8SPodName, "cloudwatch-agent-adhgaf"), k8sMode: config.ModeEKS, - want: map[string]entitystore.ServiceEnvironment{}, + want: map[string]entitystore.ServiceEnvironment{"cloudwatch-agent-adhgaf": {ServiceName: "cloudwatch-agent-adhgaf", ServiceNameSource: entitystore.ServiceNameSourceK8sWorkload}}, }, { name: "WithEmptyPodName", @@ -266,19 +266,24 @@ func TestProcessMetricsResourceAttributeScraping(t *testing.T) { ctx := context.Background() tests := []struct { name string + platform string + kubernetesMode string + clusterName string metrics pmetric.Metrics mockServiceNameSource 
func() (string, string) mockGetEC2InfoFromEntityStore func() entitystore.EC2Info want map[string]any }{ { - name: "EmptyMetrics", - metrics: pmetric.NewMetrics(), - want: map[string]any{}, + name: "EmptyMetrics", + platform: config.ModeEC2, + metrics: pmetric.NewMetrics(), + want: map[string]any{}, }, //NOTE 2 SELF: These tests assume that we are on the EC2 platform, so make sure to mock the ServiceNameSource function { name: "ResourceAttributeServiceNameOnly", + platform: config.ModeEC2, metrics: generateMetrics(attributeServiceName, "test-service"), mockServiceNameSource: newMockGetServiceNameAndSource("test-service-name", "Instrumentation"), mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", ""), @@ -293,6 +298,7 @@ func TestProcessMetricsResourceAttributeScraping(t *testing.T) { }, { name: "ResourceAttributeEnvironmentOnly", + platform: config.ModeEC2, metrics: generateMetrics(attributeDeploymentEnvironment, "test-environment"), mockServiceNameSource: newMockGetServiceNameAndSource("unknown_service", "Unknown"), mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", ""), @@ -309,6 +315,7 @@ func TestProcessMetricsResourceAttributeScraping(t *testing.T) { }, { name: "ResourceAttributeServiceNameAndEnvironment", + platform: config.ModeEC2, metrics: generateMetrics(attributeServiceName, "test-service", attributeDeploymentEnvironment, "test-environment"), mockServiceNameSource: newMockGetServiceNameAndSource("test-service-name", "Instrumentation"), mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", "test-auto-scaling"), @@ -324,6 +331,26 @@ func TestProcessMetricsResourceAttributeScraping(t *testing.T) { entityattributes.AttributeEntityAutoScalingGroup: "test-auto-scaling", }, }, + { + name: "ResourceAttributeWorkloadFallback", + kubernetesMode: config.ModeEKS, + clusterName: "test-cluster", + metrics: generateMetrics(semconv.AttributeK8SNamespaceName, "test-namespace", 
semconv.AttributeK8SDeploymentName, "test-workload", semconv.AttributeK8SNodeName, "test-node"), + want: map[string]any{ + entityattributes.AttributeEntityType: "Service", + entityattributes.AttributeEntityServiceName: "test-workload", + entityattributes.AttributeEntityDeploymentEnvironment: "eks:test-cluster/test-namespace", + entityattributes.AttributeEntityCluster: "test-cluster", + entityattributes.AttributeEntityNamespace: "test-namespace", + entityattributes.AttributeEntityNode: "test-node", + entityattributes.AttributeEntityWorkload: "test-workload", + entityattributes.AttributeEntityServiceNameSource: "K8sWorkload", + entityattributes.AttributeEntityPlatformType: "AWS::EKS", + semconv.AttributeK8SNamespaceName: "test-namespace", + semconv.AttributeK8SDeploymentName: "test-workload", + semconv.AttributeK8SNodeName: "test-node", + }, + }, } for _, tt := range tests { @@ -336,8 +363,9 @@ func TestProcessMetricsResourceAttributeScraping(t *testing.T) { if tt.mockGetEC2InfoFromEntityStore != nil { getEC2InfoFromEntityStore = tt.mockGetEC2InfoFromEntityStore } - p := newAwsEntityProcessor(&Config{EntityType: attributeService}, logger) - p.config.Platform = config.ModeEC2 + p := newAwsEntityProcessor(&Config{EntityType: attributeService, ClusterName: tt.clusterName}, logger) + p.config.Platform = tt.platform + p.config.KubernetesMode = tt.kubernetesMode _, err := p.processMetrics(ctx, tt.metrics) assert.NoError(t, err) rm := tt.metrics.ResourceMetrics() From 1bc8e206e2ef180e323deaf14d28bd3b925b6800 Mon Sep 17 00:00:00 2001 From: POOJA REDDY NATHALA Date: Wed, 2 Oct 2024 21:24:38 -0400 Subject: [PATCH 25/47] Implemented cert watcher for agent server tls certs (#821) --- extension/server/config.go | 3 + extension/server/extension.go | 77 ++++-- extension/server/extension_test.go | 106 +++++--- extension/server/factory_test.go | 4 +- extension/server/testdata/bad-CA-cert.pem | 3 + extension/server/testdata/example-CA-cert.pem | 20 ++ 
.../server/testdata/example-server-cert.pem | 20 ++ .../server/testdata/example-server-key.pem | 27 ++ internal/tls/certWatcher.go | 194 ++++++++++++++ internal/tls/certWatcher_test.go | 239 ++++++++++++++++++ internal/tls/testdata/bad-CA-cert.pem | 3 + .../appsignals_and_eks_config.yaml | 3 + .../appsignals_and_k8s_config.yaml | 3 + .../appsignals_fallback_and_eks_config.yaml | 3 + .../appsignals_over_fallback_config.yaml | 3 + .../otel/extension/server/translator.go | 10 +- .../otel/extension/server/translator_test.go | 2 +- 17 files changed, 663 insertions(+), 57 deletions(-) create mode 100644 extension/server/testdata/bad-CA-cert.pem create mode 100644 extension/server/testdata/example-CA-cert.pem create mode 100644 extension/server/testdata/example-server-cert.pem create mode 100644 extension/server/testdata/example-server-key.pem create mode 100644 internal/tls/certWatcher.go create mode 100644 internal/tls/certWatcher_test.go create mode 100644 internal/tls/testdata/bad-CA-cert.pem diff --git a/extension/server/config.go b/extension/server/config.go index e2aad3043c..a6d145d283 100644 --- a/extension/server/config.go +++ b/extension/server/config.go @@ -9,6 +9,9 @@ import ( type Config struct { ListenAddress string `mapstructure:"listen_addr"` + TLSCAPath string `mapstructure:"tls_ca_path, omitempty"` + TLSCertPath string `mapstructure:"tls_cert_path, omitempty"` + TLSKeyPath string `mapstructure:"tls_key_path, omitempty"` } var _ component.Config = (*Config)(nil) diff --git a/extension/server/extension.go b/extension/server/extension.go index 351439075b..4a34b6602f 100644 --- a/extension/server/extension.go +++ b/extension/server/extension.go @@ -20,17 +20,13 @@ import ( tlsInternal "github.com/aws/amazon-cloudwatch-agent/internal/tls" ) -const ( - tlsServerCertFilePath = "/etc/amazon-cloudwatch-observability-agent-server-cert/server.crt" - tlsServerKeyFilePath = "/etc/amazon-cloudwatch-observability-agent-server-cert/server.key" - caFilePath = 
"/etc/amazon-cloudwatch-observability-agent-client-cert/tls-ca.crt" -) - type Server struct { logger *zap.Logger config *Config jsonMarshaller jsoniter.API httpsServer *http.Server + ctx context.Context + watcher *tlsInternal.CertWatcher } var _ extension.Extension = (*Server)(nil) @@ -48,35 +44,45 @@ func NewServer(logger *zap.Logger, config *Config) *Server { logger: logger, config: config, jsonMarshaller: jsoniter.ConfigCompatibleWithStandardLibrary, + ctx: context.Background(), } gin.SetMode(gin.ReleaseMode) - tlsConfig, err := getTlsConfig() - if tlsConfig == nil { - s.logger.Error("failed to create TLS config", zap.Error(err)) + // Initialize a new cert watcher with cert/key pair + watcher, err := tlsInternal.NewCertWatcher(config.TLSCertPath, config.TLSKeyPath, config.TLSCAPath, logger) + if err != nil { + s.logger.Error("failed to initialize cert watcher", zap.Error(err)) return s } + s.watcher = watcher + + watcher.RegisterCallback(func() { + s.logger.Debug("Calling registered callback, reloading TLS server") + if err := s.reloadServer(watcher.GetTLSConfig()); err != nil { + s.logger.Error("Failed to reload TLS server", zap.Error(err)) + } + }) + + // Start goroutine with certwatcher running fsnotify against supplied certdir + go func() { + if err := watcher.Start(s.ctx); err != nil { + s.logger.Error("failed to start cert watcher", zap.Error(err)) + return + } + }() + httpsRouter := gin.New() s.setRouter(httpsRouter) - s.httpsServer = &http.Server{Addr: config.ListenAddress, Handler: httpsRouter, ReadHeaderTimeout: 90 * time.Second, TLSConfig: tlsConfig} + s.httpsServer = &http.Server{Addr: config.ListenAddress, Handler: httpsRouter, ReadHeaderTimeout: 90 * time.Second, TLSConfig: watcher.GetTLSConfig()} return s } -var getTlsConfig = func() (*tls.Config, error) { - serverConfig := &tlsInternal.ServerConfig{ - TLSCert: tlsServerCertFilePath, - TLSKey: tlsServerKeyFilePath, - TLSAllowedCACerts: []string{caFilePath}, - } - return serverConfig.TLSConfig() -} 
- func (s *Server) Start(context.Context, component.Host) error { if s.httpsServer != nil { - s.logger.Info("Starting HTTPS server...") + s.logger.Debug("Starting HTTPS server...") go func() { err := s.httpsServer.ListenAndServeTLS("", "") if err != nil { @@ -88,13 +94,42 @@ func (s *Server) Start(context.Context, component.Host) error { } func (s *Server) Shutdown(ctx context.Context) error { + s.ctx.Done() if s.httpsServer != nil { - s.logger.Info("Shutting down HTTPS server...") + s.logger.Debug("Shutting down HTTPS server...") return s.httpsServer.Shutdown(ctx) } return nil } +func (s *Server) reloadServer(config *tls.Config) error { + s.logger.Debug("Reloading TLS Server...") + // close the current server + if s.httpsServer != nil { + // closing the server gracefully + if err := s.httpsServer.Close(); err != nil { + s.logger.Error("Failed to shutdown HTTPS server", zap.Error(err)) + } + } + // Create a new HTTP server with the new router and updated TLS config + httpsRouter := gin.New() + s.setRouter(httpsRouter) + s.httpsServer = &http.Server{ + Addr: s.config.ListenAddress, + Handler: httpsRouter, + TLSConfig: config, + ReadHeaderTimeout: 90 * time.Second, + } + + go func() { + err := s.httpsServer.ListenAndServeTLS("", "") + if err != nil { + s.logger.Error("failed to serve and listen", zap.Error(err)) + } + }() + return nil +} + func (s *Server) k8sPodToServiceMapHandler(c *gin.Context) { podServiceEnvironmentMap := convertTtlCacheToMap(getPodServiceEnvironmentMapping()) s.jsonHandler(c.Writer, podServiceEnvironmentMap) diff --git a/extension/server/extension_test.go b/extension/server/extension_test.go index a60ad842f8..f6c801acf1 100644 --- a/extension/server/extension_test.go +++ b/extension/server/extension_test.go @@ -66,46 +66,82 @@ func newMockTLSConfig(c *mockServerConfig) func() (*tls.Config, error) { func TestNewServer(t *testing.T) { logger, _ := zap.NewProduction() - config := &Config{ - ListenAddress: ":8080", - } tests := []struct { - name 
string - want *Server - mockSvrCfg *mockServerConfig - isTLS bool + name string + want *Server + config *Config + isTLS bool }{ { - name: "HTTPSServer", + name: "Should load valid HTTPS server", want: &Server{ - config: config, logger: logger, }, - mockSvrCfg: &mockServerConfig{ - TLSCert: "cert", - TLSKey: "key", - TLSAllowedCACerts: []string{"ca"}, + config: &Config{ + TLSCertPath: "./testdata/example-server-cert.pem", + TLSKeyPath: "./testdata/example-server-key.pem", + TLSCAPath: "./testdata/example-CA-cert.pem", + ListenAddress: ":8080", }, isTLS: true, }, { - name: "EmptyHTTPSServer", + name: "should load server with empty HTTPS server as certs are empty", + want: &Server{ + logger: logger, + }, + config: &Config{ + ListenAddress: ":8080", + }, + isTLS: false, + }, + { + name: "should load server with empty HTTPS server as CA cert is not valid", + want: &Server{ + logger: logger, + }, + config: &Config{ + TLSCertPath: "./testdata/example-server-cert.pem", + TLSKeyPath: "./testdata/example-server-key.pem", + TLSCAPath: "./testdata/bad-CA-cert.pem", + ListenAddress: ":8080", + }, + isTLS: false, + }, + { + name: "should load server with empty HTTPS server as server cert is not valid", + want: &Server{ + logger: logger, + }, + config: &Config{ + TLSCertPath: "./testdata/bad-CA-cert.pem", + TLSKeyPath: "./testdata/example-server-key.pem", + TLSCAPath: "./testdata/example-CA-cert.pem", + ListenAddress: ":8080", + }, + isTLS: false, + }, + { + name: "should load server with empty HTTPS server as server key is empty", want: &Server{ - config: config, logger: logger, }, - mockSvrCfg: &mockServerConfig{}, - isTLS: false, + config: &Config{ + TLSCertPath: "./testdata/example-server-cert.pem", + TLSKeyPath: "", + TLSCAPath: "./testdata/example-CA-cert.pem", + ListenAddress: ":8080", + }, + isTLS: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - getTlsConfig = newMockTLSConfig(tt.mockSvrCfg) - server := NewServer(logger, config) + server := 
NewServer(logger, tt.config) assert.NotNil(t, server) - assert.Equal(t, config, server.config) + assert.Equal(t, tt.config, server.config) assert.NotNil(t, server.logger) if tt.isTLS { assert.NotNil(t, server.httpsServer) @@ -115,6 +151,8 @@ func TestNewServer(t *testing.T) { assert.Equal(t, tls.RequireAndVerifyClientCert, server.httpsServer.TLSConfig.ClientAuth) assert.NotNil(t, server.httpsServer.Handler) assert.Equal(t, 90*time.Second, server.httpsServer.ReadHeaderTimeout) + assert.NotNil(t, server.watcher) + assert.Equal(t, server.watcher.GetTLSConfig(), server.httpsServer.TLSConfig) } else { assert.Nil(t, server.httpsServer) } @@ -219,35 +257,37 @@ func TestJSONHandler(t *testing.T) { func TestServerStartAndShutdown(t *testing.T) { logger, _ := zap.NewProduction() - config := &Config{ - ListenAddress: ":8080", - } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() tests := []struct { - name string - mockSvrCfg *mockServerConfig + name string + config *Config }{ { name: "HTTPSServer", - mockSvrCfg: &mockServerConfig{ - TLSCert: "cert", - TLSKey: "key", - TLSAllowedCACerts: []string{"ca"}, + config: &Config{ + TLSCertPath: "./testdata/example-server-cert.pem", + TLSKeyPath: "./testdata/example-server-key.pem", + TLSCAPath: "./testdata/example-CA-cert.pem", + ListenAddress: ":8080", }, }, { - name: "EmptyHTTPSServer", - mockSvrCfg: &mockServerConfig{}, + name: "EmptyHTTPSServer", + config: &Config{ + TLSCertPath: "./testdata/example-server-cert.pem", + TLSKeyPath: "./testdata/example-server-key.pem", + TLSCAPath: "./testdata/bad-CA-cert.pem", + ListenAddress: ":8080", + }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - getTlsConfig = newMockTLSConfig(tt.mockSvrCfg) - server := NewServer(logger, config) + server := NewServer(logger, tt.config) err := server.Start(ctx, nil) assert.NoError(t, err) diff --git a/extension/server/factory_test.go b/extension/server/factory_test.go index aad68e8c49..0d571fd36e 
100644 --- a/extension/server/factory_test.go +++ b/extension/server/factory_test.go @@ -26,7 +26,9 @@ func TestCreateExtension(t *testing.T) { } func TestCreateExtensionWithConfig(t *testing.T) { - cfg := &Config{ListenAddress: ":8080"} + cfg := &Config{ListenAddress: ":8080", TLSCertPath: "./testdata/example-server-cert.pem", + TLSKeyPath: "./testdata/example-server-key.pem", + TLSCAPath: "./testdata/example-CA-cert.pem"} got, err := NewFactory().CreateExtension(context.Background(), extensiontest.NewNopCreateSettings(), cfg) assert.NoError(t, err) assert.NotNil(t, got) diff --git a/extension/server/testdata/bad-CA-cert.pem b/extension/server/testdata/bad-CA-cert.pem new file mode 100644 index 0000000000..3035f111d4 --- /dev/null +++ b/extension/server/testdata/bad-CA-cert.pem @@ -0,0 +1,3 @@ +-----BEGIN CERTIFICATE----- +bad certificate +-----END CERTIFICATE----- \ No newline at end of file diff --git a/extension/server/testdata/example-CA-cert.pem b/extension/server/testdata/example-CA-cert.pem new file mode 100644 index 0000000000..b5c6f1f7db --- /dev/null +++ b/extension/server/testdata/example-CA-cert.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDMTCCAhkCFCi65dSe1JONpNGghyam61+4gTL7MA0GCSqGSIb3DQEBCwUAMFUx +CzAJBgNVBAYTAkFVMRIwEAYDVQQIDAlBdXN0cmFsaWExDzANBgNVBAcMBlN5ZG5l +eTEQMA4GA1UECgwHTG9nei5pbzEPMA0GA1UEAwwGSmFlZ2VyMB4XDTIyMDkxMDAw +MjE0NFoXDTMyMDkwNzAwMjE0NFowVTELMAkGA1UEBhMCQVUxEjAQBgNVBAgMCUF1 +c3RyYWxpYTEPMA0GA1UEBwwGU3lkbmV5MRAwDgYDVQQKDAdMb2d6LmlvMQ8wDQYD +VQQDDAZKYWVnZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDLlEq/ +DF2pkhfSedvAd5h6BXCjpC/mUA6BN3RyMHUjTWr9hhBtaIYv68O12GMVf//ST/Fs +CjRrjOcqrz2QQn3P8UelGRd2vJfcMhJElQ/lnKmZZlAHEOMF8TC7nQfsReLCwcpj +T6bXqvDcfHjDye+45F2rPDpRGLzyysg7pgdINp0Duph0Z16ggrBgz7RVNBmWsYVe +sGD3VOR3hLd8GTDzJ5amRpkq8nfliJ+U3JLGcDG/7Wkuvl/YZZxf21v9f4yYVEZZ +aLAcKsHIUoFRDJtdrBeaPZRJjL/I9B1M6En+Styxb5wJw42h9BXtJd2IeQPp15pP +KfPbkmOj+X+2s9n1AgMBAAEwDQYJKoZIhvcNAQELBQADggEBAJbm7WXgQirWQbaa 
+E304K8tvdpC2E1ewxTTrUEN8jUONER4KC+epRnsTgkEpVlj7sehiAgSMnbT4E3ve +GjmsUrZiJcKPaf+ogn49Cj0weD99wbJtUNgbH4HiqR1ePOHIRDQ7GD5G0zdFq7oO +Il09eHAbbWM61x04I3XDQ0OwXyeVXIEWJcR1R6wnuNMJm54czbXvn6SrIuoMCvs6 +oSkVm43Q+plk0hlDZnA/KiOxqFRLVHBuX/SgRf5NBg8m7id3fNzIJnWWK+zqoDoZ +ryja7dFIJnLqEXJxJkc5ubT1/j9PDE51WbM5MyPB6lnuQKdZTbDziyKiVXg0au3E +QK5K/Ow= +-----END CERTIFICATE----- \ No newline at end of file diff --git a/extension/server/testdata/example-server-cert.pem b/extension/server/testdata/example-server-cert.pem new file mode 100644 index 0000000000..41d812c5cf --- /dev/null +++ b/extension/server/testdata/example-server-cert.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDUjCCAjqgAwIBAgIUE56RLVss9rH/ojHQlVqysg6vJQUwDQYJKoZIhvcNAQEL +BQAwVTELMAkGA1UEBhMCQVUxEjAQBgNVBAgMCUF1c3RyYWxpYTEPMA0GA1UEBwwG +U3lkbmV5MRAwDgYDVQQKDAdMb2d6LmlvMQ8wDQYDVQQDDAZKYWVnZXIwHhcNMjIw +OTEwMDAyMTQ0WhcNMzIwOTA3MDAyMTQ0WjBVMQswCQYDVQQGEwJBVTESMBAGA1UE +CAwJQXVzdHJhbGlhMQ8wDQYDVQQHDAZTeWRuZXkxEDAOBgNVBAoMB0xvZ3ouaW8x +DzANBgNVBAMMBkphZWdlcjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AN17nlVlHzFoEDnAA7kvrjzuKiZQZ70znDW5TrqtwXqHr5XG0m7rdQlt9xyr3HFg +DbXbkg7wBidqUySWZ7N/cxiqB/oMnfbntapwmBP77Ss8KLLQx17Geb8pryIHrhcE +a/E556epv3WRkoz3j8ph3DY7g+ghQWNtWI3UvBdaIkmPaS+wVfH6hwzpT4rbdVSF +1n7SnMcJccKPEPgqASiEsYZeQgnZUedayKzHRnJeQD3lOPXLHAOIGHajGvyQFMqE +fG9dJfWNVxH/+GxMNul9jsUfJMc99mG/vy3B1WROOl2EiTi8FzfM64lo8SvEs3Db +jcAFItI7BcyM/MJxqYtYFQ0CAwEAAaMaMBgwFgYDVR0RBA8wDYILZXhhbXBsZS5j +b20wDQYJKoZIhvcNAQELBQADggEBAFjZrgLJiezjX2enrh1pJDRrj9NClTKM8Vck +dnpI4OFmViqSyUkyY28PO9omoXUPAbcVuXcGQ/f4PR7tlKmv1lGH/4vGGgmvLjus +Mm0vYZoBos/KPN92RIUkpO1Lvt3es96CFI0k6G0JmstXn4EShQibm1424jTWU3tF +praOAsaTVWO/ukVPbULJ8dWzKoQVTyb/cNQiPiL0IXx7XYc/cqCB2yqzELtMOmIe +kQuyCmUNzK1qQaezxwkMl2P+121QdOvKkxcu7XlAEo0SRNNNkpOkyRqLvC2iou39 +SHxqc/Vbf+Pj9N6oC0twI7KAJELHMi9qhlQsNssxUMjYe7BRYmQ= +-----END CERTIFICATE----- \ No newline at end of file diff --git a/extension/server/testdata/example-server-key.pem 
b/extension/server/testdata/example-server-key.pem new file mode 100644 index 0000000000..403be1e535 --- /dev/null +++ b/extension/server/testdata/example-server-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEA3XueVWUfMWgQOcADuS+uPO4qJlBnvTOcNblOuq3BeoevlcbS +but1CW33HKvccWANtduSDvAGJ2pTJJZns39zGKoH+gyd9ue1qnCYE/vtKzwostDH +XsZ5vymvIgeuFwRr8Tnnp6m/dZGSjPePymHcNjuD6CFBY21YjdS8F1oiSY9pL7BV +8fqHDOlPitt1VIXWftKcxwlxwo8Q+CoBKISxhl5CCdlR51rIrMdGcl5APeU49csc +A4gYdqMa/JAUyoR8b10l9Y1XEf/4bEw26X2OxR8kxz32Yb+/LcHVZE46XYSJOLwX +N8zriWjxK8SzcNuNwAUi0jsFzIz8wnGpi1gVDQIDAQABAoIBAQCAE55J74IMRgsr ++hetHR966JbDNTfoN1Ib1x7p4NTDkHc++4xwzAQQAeEmWVPO1CbZhTF/JdnJLTkL +LVamfAsItjqKpIUsZG2vNBEdbU+G8vDuBsFj0w5QN0CpQxuu/8WT51JIqGapDBdd +IUOrWs/HJL9wmtp/LppI2j4ymtK9Cffce8AVTazfHspVF2e05b8GEeBjoMmvpPgw +bvHPLdCVoWPGsYOFUWG9V1eCo2CFtvspsa8CYghpaXg7EOElF73W1gEoEd5SdMx9 +svHeH4bJAzrWoqDrC5kOJUZRip9YjF8WXRudVmSaRPHptwN6qRvF8HWiGrYrdTtJ +j1seb87BAoGBAPUrxCI64EN/6YYNziM49RpORLVrZGLaZQCf0IJkoH3DcsBcrtF8 +hqJC73z75kj1Y+oOzulYPBlhQr+4hvbMSzHwsffi5nepPXSSGK2+D1O5rASou7b3 +Re/OiJNex7IrDAy354PV4B/7iFmgGOVUn+sXIKoprqnor7f3mALAa/yZAoGBAOdE +AMKktCQYIHweKPF0mYDsOnoJ8TEAydxShOan5r5gkVTnZhDHa3fD9eGh19Mfi9qC +cDro5Sq1+8OLoX6Ta/Ju3PNfI2Qn4KLF9CZrEQhrV90HmXluCflZXyL71SB8pGVo +5ybr8UtalUXVPXKi+inK7CXaJBZaboJWnqmaqJCVAoGAdIX0lgA9jldA+gGds4fi +ljoU1dTQxVrfHkjWpOKGlL9Lzrk+LTpuEriVcmWWsZ5PenLHTIgvKDDdtJlTLAE0 +y+uF6jbhKoY5OyokqI7oYfahFyXK8c7cYnla2A/4AWoMNA9D7Zi9CPZXe6Fns7dg +ui8nyzg8V2zL9zep+8TQjiECgYEAm2zTif0BaGSiqGfoomX3qHKa1lwKMiHSiHUZ +Bp9+7yGdas9dhBdSPZqAjJSlpSlFZ6RUYvMU2UCXJJOaBKR1XuhtLE8bTPuT+DFL +5en894iU82JhHf/7Sg5rZuqTERNTtSfsefcGItuNCPLIKlwn/qB3VvUlXbSHIqeu +WFQtx4UCgYEA1FIVEc4BjRE6jH80X7RSSOLJ6PwPglZzM8JEVyiYHAHE65zdORF1 +iCiuI+pRQc3yHkm2gbB+hY5HSrCmyJrJc0tcUd4QoMqOHV8UEGLVwxtr/4DPMsl4 +JIEmzmgvs56TJeKX0YlXnD612zjDWCPV6q+LWlUUzd8qLwk6L1+EFhE= +-----END RSA PRIVATE KEY----- \ No newline at end of file diff --git a/internal/tls/certWatcher.go b/internal/tls/certWatcher.go new 
file mode 100644 index 0000000000..e5666b5ead --- /dev/null +++ b/internal/tls/certWatcher.go @@ -0,0 +1,194 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +package tls + +import ( + "context" + "crypto/tls" + "errors" + "sync" + "time" + + "github.com/fsnotify/fsnotify" + "go.uber.org/zap" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" +) + +// CertWatcher watches certificate and key files for changes. When either file +// changes, it reads and parses both and calls an optional callback with the new +// certificate. +type CertWatcher struct { + sync.RWMutex + + watcher *fsnotify.Watcher + logger *zap.Logger + currentTLSConfig *tls.Config + + certPath string + keyPath string + caPath string + + // callback is a function to be invoked when the certificate changes. + callback func() +} + +var NewCertWatcherFunc = NewCertWatcher + +// NewCertWatcher returns a new CertWatcher watching the given server certificate and client certificate. +func NewCertWatcher(certPath, keyPath, caPath string, logger *zap.Logger) (*CertWatcher, error) { + if certPath == "" || keyPath == "" || caPath == "" { + return nil, errors.New("cert, key, and ca paths are required") + } + var err error + + cw := &CertWatcher{ + certPath: certPath, + keyPath: keyPath, + caPath: caPath, + logger: logger, + } + + cw.logger.Debug("Creating new certificate watcher with", zap.String("cert", certPath), zap.String("key", keyPath), zap.String("ca", caPath)) + + // Initial read of certificate and key. + if err := cw.ReadTlsConfig(); err != nil { + return nil, err + } + + cw.watcher, err = fsnotify.NewWatcher() + if err != nil { + return nil, err + } + + return cw, nil +} + +// RegisterCallback registers a callback to be invoked when the certificate changes. +func (cw *CertWatcher) RegisterCallback(callback func()) { + cw.callback = callback +} + +// GetTLSConfig fetches the currently loaded tls Config, which may be nil. 
+func (cw *CertWatcher) GetTLSConfig() *tls.Config { + cw.Lock() + defer cw.Unlock() + return cw.currentTLSConfig +} + +func (cw *CertWatcher) ReadTlsConfig() error { + cw.logger.Debug("Reading TLS certificate") + serverConfig := &ServerConfig{ + TLSCert: cw.certPath, + TLSKey: cw.keyPath, + TLSAllowedCACerts: []string{cw.caPath}, + } + //cw.printCerts(serverConfig) + tlsConfig, err := serverConfig.TLSConfig() + if err != nil { + cw.logger.Error("failed to read certificate", zap.Error(err)) + return err + } + + if tlsConfig != cw.currentTLSConfig { + cw.logger.Debug("TLS certificate changed") + cw.currentTLSConfig = tlsConfig + + // If a callback is registered, invoke it with the new certificate. + if cw.callback != nil { + go func() { + cw.logger.Debug("Invoking callback") + cw.callback() + }() + } + } + return nil +} + +// Start starts the watch on the certificate and key files. +func (cw *CertWatcher) Start(ctx context.Context) error { + cw.logger.Debug("Starting certificate watcher") + files := sets.New(cw.certPath, cw.keyPath, cw.caPath) + { + var watchErr error + if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 10*time.Second, true, func(ctx context.Context) (done bool, err error) { + for f := range files { + if err := cw.watcher.Add(f); err != nil { + watchErr = err + return false, nil //nolint:nilerr // We want to keep trying. + } + } + files.Clear() + return true, nil + }); err != nil { + cw.logger.Error("failed to add watches", zap.Error(err), zap.Error(watchErr)) + return errors.Join(err, watchErr) + } + } + + go cw.Watch() + + cw.logger.Debug("Successfully started certificate watcher") + + // Block until the context is done. + <-ctx.Done() + + return cw.watcher.Close() +} + +// Watch reads events from the watcher's channel and reacts to changes. +func (cw *CertWatcher) Watch() { + for { + select { + case event, ok := <-cw.watcher.Events: + // Channel is closed. 
+ if !ok { + return + } + + cw.handleEvent(event) + + case err, ok := <-cw.watcher.Errors: + // Channel is closed. + if !ok { + return + } + + cw.logger.Error("certificate watch error", zap.Error(err)) + } + + } +} + +func (cw *CertWatcher) handleEvent(event fsnotify.Event) { + // Only care about events which may modify the contents of the file. + if !(isWrite(event) || isRemove(event) || isCreate(event)) { + return + } + + cw.logger.Debug("certificate event", zap.Any("event", event)) + + // If the file was removed, re-add the watch. + if isRemove(event) { + if err := cw.watcher.Add(event.Name); err != nil { + cw.logger.Error("error re-watching file", zap.Error(err)) + } + } + + if err := cw.ReadTlsConfig(); err != nil { + cw.logger.Error("failed to re-read certificate", zap.Error(err)) + } +} + +func isWrite(event fsnotify.Event) bool { + return event.Op.Has(fsnotify.Write) +} + +func isCreate(event fsnotify.Event) bool { + return event.Op.Has(fsnotify.Create) +} + +func isRemove(event fsnotify.Event) bool { + return event.Op.Has(fsnotify.Remove) +} diff --git a/internal/tls/certWatcher_test.go b/internal/tls/certWatcher_test.go new file mode 100644 index 0000000000..eb527324c1 --- /dev/null +++ b/internal/tls/certWatcher_test.go @@ -0,0 +1,239 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package tls + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "net" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap/zaptest" +) + +const ( + // Set up paths for test certificates + testCertPath = "./testdata/server.crt" + testKeyPath = "./testdata/server.key" + testCAPath = "./testdata/tls-ca.crt" +) + +func createRootCert(caPath string) (*x509.Certificate, *rsa.PrivateKey, error) { + rootKey, err := rsa.GenerateKey(rand.Reader, 4096) + if err != nil { + return nil, nil, err + } + + rootTemplate := x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{ + Organization: []string{"Root CA"}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().AddDate(10, 0, 0), // Valid for 10 years + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + IsCA: true, + } + + rootCertDER, err := x509.CreateCertificate(rand.Reader, &rootTemplate, &rootTemplate, &rootKey.PublicKey, rootKey) + if err != nil { + return nil, nil, err + } + + rootCert, err := x509.ParseCertificate(rootCertDER) + if err != nil { + return nil, nil, err + } + + // Write the root certificate + rootCertOut, err := os.Create(caPath) + if err != nil { + return nil, nil, err + } + defer rootCertOut.Close() + if err := pem.Encode(rootCertOut, &pem.Block{Type: "CERTIFICATE", Bytes: rootCertDER}); err != nil { + return nil, nil, err + } + + return rootCert, rootKey, nil +} + +func writeCerts(certPath, keyPath, caPath, ip string) error { + // Generate root certificate + rootCert, rootKey, err := createRootCert(caPath) + if err != nil { + return err + } + + // Generate key for the actual certificate + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return err + } + + notBefore := time.Now() + notAfter := notBefore.Add(1 * time.Hour) 
+ + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return err + } + + template := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + Organization: []string{"Kubernetes"}, + }, + NotBefore: notBefore, + NotAfter: notAfter, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + template.IPAddresses = append(template.IPAddresses, net.ParseIP(ip)) + + // Create the certificate using the root certificate as the CA + derBytes, err := x509.CreateCertificate(rand.Reader, &template, rootCert, &priv.PublicKey, rootKey) + if err != nil { + return err + } + + // Write the certificate + certOut, err := os.Create(certPath) + if err != nil { + return err + } + defer certOut.Close() + if err := pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil { + return err + } + + // Write the private key + keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + defer keyOut.Close() + privBytes, err := x509.MarshalPKCS8PrivateKey(priv) + if err != nil { + return err + } + if err := pem.Encode(keyOut, &pem.Block{Type: "PRIVATE KEY", Bytes: privBytes}); err != nil { + return err + } + + return nil +} + +func TestMain(m *testing.M) { + // Setup + err := Init() + if err != nil { + panic(err) + } + + // Run tests + code := m.Run() + + // Teardown (if needed) + // You can add cleanup code here + + // Exit with the test result code + os.Exit(code) +} + +func Init() error { + + // Generate test certificates + err := writeCerts(testCertPath, testKeyPath, testCAPath, "127.0.0.1") + if err != nil { + return err + } + + return nil +} + +func TestNewCertWatcher(t *testing.T) { + // Setup + logger := zaptest.NewLogger(t) + + // Test case: Create a new CertWatcher + cw, err := 
NewCertWatcher(testCertPath, testKeyPath, testCAPath, logger) + assert.NoError(t, err, "Failed to create CertWatcher") + + // Check if the initial TLS config was loaded correctly + assert.NotNil(t, cw.GetTLSConfig(), "TLS config was not loaded correctly") +} + +func TestRegisterCallback(t *testing.T) { + // Setup + logger := zaptest.NewLogger(t) + + cw, err := NewCertWatcher(testCertPath, testKeyPath, testCAPath, logger) + assert.NoError(t, err, "Failed to create CertWatcher") + + // Test case: Register a callback + callbackCalled := false + callback := func() { + callbackCalled = true + } + cw.RegisterCallback(callback) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { + err := cw.Start(ctx) + assert.NoError(t, err, "Failed to start CertWatcher") + }() + + // Trigger a certificate change event + _, _, err = createRootCert(testCAPath) + assert.NoError(t, err, "Failed to update root certificate") + + // Wait for the callback to be called + time.Sleep(1 * time.Second) + + assert.True(t, callbackCalled) +} + +func TestWatchCertificateChanges(t *testing.T) { + // Setup + logger := zaptest.NewLogger(t) + + cw, err := NewCertWatcher(testCertPath, testKeyPath, testCAPath, logger) + assert.NoError(t, err, "Failed to create CertWatcher") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { + err := cw.Start(ctx) + assert.NoError(t, err, "Failed to start CertWatcher") + }() + + beforeTLSConfig := cw.GetTLSConfig() + + // Trigger a certificate change event + _, _, err = createRootCert(testCAPath) + assert.NoError(t, err, "Failed to update root certificate") + + // Wait for the certificate change to be processed + time.Sleep(2 * time.Second) + + // Check if the TLS config was updated + assert.True(t, beforeTLSConfig != cw.GetTLSConfig(), "TLS config was not updated after certificate change") +} diff --git a/internal/tls/testdata/bad-CA-cert.pem b/internal/tls/testdata/bad-CA-cert.pem new file 
mode 100644 index 0000000000..3035f111d4 --- /dev/null +++ b/internal/tls/testdata/bad-CA-cert.pem @@ -0,0 +1,3 @@ +-----BEGIN CERTIFICATE----- +bad certificate +-----END CERTIFICATE----- \ No newline at end of file diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml index 2787da64c5..072c1dded6 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml @@ -291,6 +291,9 @@ extensions: kubernetes_mode: EKS server: listen_addr: :4311 + tls_ca_path: "/etc/amazon-cloudwatch-observability-agent-client-cert/tls-ca.crt" + tls_cert_path: "/etc/amazon-cloudwatch-observability-agent-server-cert/server.crt" + tls_key_path: "/etc/amazon-cloudwatch-observability-agent-server-cert/server.key" processors: awsapplicationsignals: limiter: diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml index b0e3ec6463..2da7e40c08 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml @@ -291,6 +291,9 @@ extensions: kubernetes_mode: K8sEC2 server: listen_addr: :4311 + tls_ca_path: "/etc/amazon-cloudwatch-observability-agent-client-cert/tls-ca.crt" + tls_cert_path: "/etc/amazon-cloudwatch-observability-agent-server-cert/server.crt" + tls_key_path: "/etc/amazon-cloudwatch-observability-agent-server-cert/server.key" processors: awsapplicationsignals: limiter: diff --git a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml index 8ce428696d..cf23c1a1d3 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml @@ -291,6 
+291,9 @@ extensions: kubernetes_mode: EKS server: listen_addr: :4311 + tls_ca_path: "/etc/amazon-cloudwatch-observability-agent-client-cert/tls-ca.crt" + tls_cert_path: "/etc/amazon-cloudwatch-observability-agent-server-cert/server.crt" + tls_key_path: "/etc/amazon-cloudwatch-observability-agent-server-cert/server.key" processors: awsapplicationsignals: limiter: diff --git a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml index 36b4f53852..43b3268f7b 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml @@ -291,6 +291,9 @@ extensions: kubernetes_mode: EKS server: listen_addr: :4311 + tls_ca_path: "/etc/amazon-cloudwatch-observability-agent-client-cert/tls-ca.crt" + tls_cert_path: "/etc/amazon-cloudwatch-observability-agent-server-cert/server.crt" + tls_key_path: "/etc/amazon-cloudwatch-observability-agent-server-cert/server.key" processors: awsapplicationsignals: limiter: diff --git a/translator/translate/otel/extension/server/translator.go b/translator/translate/otel/extension/server/translator.go index 15e3186095..1be1d814ee 100644 --- a/translator/translate/otel/extension/server/translator.go +++ b/translator/translate/otel/extension/server/translator.go @@ -12,7 +12,12 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" ) -const defaultListenAddr = ":4311" +const ( + defaultListenAddr = ":4311" + tlsServerCertFilePath = "/etc/amazon-cloudwatch-observability-agent-server-cert/server.crt" + tlsServerKeyFilePath = "/etc/amazon-cloudwatch-observability-agent-server-cert/server.key" + caFilePath = "/etc/amazon-cloudwatch-observability-agent-client-cert/tls-ca.crt" +) type translator struct { name string @@ -35,5 +40,8 @@ func (t *translator) ID() component.ID { func (t *translator) Translate(conf *confmap.Conf) 
(component.Config, error) { cfg := t.factory.CreateDefaultConfig().(*server.Config) cfg.ListenAddress = defaultListenAddr + cfg.TLSCAPath = caFilePath + cfg.TLSCertPath = tlsServerCertFilePath + cfg.TLSKeyPath = tlsServerKeyFilePath return cfg, nil } diff --git a/translator/translate/otel/extension/server/translator_test.go b/translator/translate/otel/extension/server/translator_test.go index 666f3de868..010f275164 100644 --- a/translator/translate/otel/extension/server/translator_test.go +++ b/translator/translate/otel/extension/server/translator_test.go @@ -19,7 +19,7 @@ func TestTranslate(t *testing.T) { }{ "DefaultConfig": { input: map[string]interface{}{}, - want: &server.Config{ListenAddress: defaultListenAddr}, + want: &server.Config{ListenAddress: defaultListenAddr, TLSCAPath: caFilePath, TLSCertPath: tlsServerCertFilePath, TLSKeyPath: tlsServerKeyFilePath}, }, } for name, testCase := range testCases { From 5f7aba3a375bb72f30ab5b2a5829ea0ae3e8f680 Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Thu, 3 Oct 2024 13:01:29 -0400 Subject: [PATCH 26/47] Bug fix for duplicate log events by resetting the response body stream in agent health (#832) Co-authored-by: Bryce Carey --- extension/agenthealth/handler/stats/client/client.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/extension/agenthealth/handler/stats/client/client.go b/extension/agenthealth/handler/stats/client/client.go index 39c4c667d7..04ca7771f3 100644 --- a/extension/agenthealth/handler/stats/client/client.go +++ b/extension/agenthealth/handler/stats/client/client.go @@ -139,8 +139,11 @@ func rejectedEntityInfoExists(r *http.Response) bool { if r == nil || r.Body == nil { return false } - defer r.Body.Close() bodyBytes, err := io.ReadAll(r.Body) + r.Body.Close() + // Reset the response body stream since it can only be read once. Not doing this results in duplicate requests. 
+ // See https://stackoverflow.com/questions/33532374/in-go-how-can-i-reuse-a-readcloser + r.Body = io.NopCloser(bytes.NewReader(bodyBytes)) if err != nil { return false } From e70c6e84dcbabfdfb5ec4a7635a68389f5efa6d5 Mon Sep 17 00:00:00 2001 From: Kaushik Surya <108111936+sky333999@users.noreply.github.com> Date: Thu, 3 Oct 2024 17:32:24 -0400 Subject: [PATCH 27/47] Attach AwsAccountId as KeyAttribute on Compass Entity (#831) --- extension/entitystore/ec2Info.go | 10 ++-- extension/entitystore/ec2Info_test.go | 17 +++++- plugins/outputs/cloudwatch/cloudwatch_test.go | 9 +-- .../outputs/cloudwatch/convert_otel_test.go | 8 ++- .../entityattributes/entityattributes.go | 3 + plugins/processors/awsentity/processor.go | 3 + .../processors/awsentity/processor_test.go | 57 +++++++++++-------- 7 files changed, 70 insertions(+), 37 deletions(-) diff --git a/extension/entitystore/ec2Info.go b/extension/entitystore/ec2Info.go index 82eb6ea7b0..af6b078314 100644 --- a/extension/entitystore/ec2Info.go +++ b/extension/entitystore/ec2Info.go @@ -31,6 +31,7 @@ const ( type EC2Info struct { InstanceID string + AccountID string AutoScalingGroup string // region is used while making call to describeTags Ec2 API for AutoScalingGroup @@ -46,7 +47,7 @@ type EC2Info struct { func (ei *EC2Info) initEc2Info() { ei.logger.Debug("Initializing EC2Info") - if err := ei.setInstanceId(); err != nil { + if err := ei.setInstanceIDAccountID(); err != nil { return } ei.ec2API = ei.ec2Provider(ei.Region, ei.ec2Credential) @@ -57,11 +58,11 @@ func (ei *EC2Info) initEc2Info() { ei.ignoreInvalidFields() } -func (ei *EC2Info) setInstanceId() error { +func (ei *EC2Info) setInstanceIDAccountID() error { for { metadataDoc, err := ei.metadataProvider.Get(context.Background()) if err != nil { - ei.logger.Warn("Failed to get Instance Id through metadata provider", zap.Error(err)) + ei.logger.Warn("Failed to get Instance ID / Account ID through metadata provider", zap.Error(err)) wait := time.NewTimer(1 * 
time.Minute) select { case <-ei.done: @@ -71,8 +72,9 @@ func (ei *EC2Info) setInstanceId() error { continue } } - ei.logger.Debug("Successfully retrieved Instance ID") + ei.logger.Debug("Successfully retrieved Instance ID and Account ID") ei.InstanceID = metadataDoc.InstanceID + ei.AccountID = metadataDoc.AccountID return nil } } diff --git a/extension/entitystore/ec2Info_test.go b/extension/entitystore/ec2Info_test.go index 566e4dad02..b2170b6c63 100644 --- a/extension/entitystore/ec2Info_test.go +++ b/extension/entitystore/ec2Info_test.go @@ -23,6 +23,7 @@ import ( var mockedInstanceIdentityDoc = &ec2metadata.EC2InstanceIdentityDocument{ InstanceID: "i-01d2417c27a396e44", + AccountID: "874389809020", Region: "us-east-1", InstanceType: "m5ad.large", ImageID: "ami-09edd32d9b0990d49", @@ -70,7 +71,7 @@ func (m *mockEC2Client) DescribeTags(*ec2.DescribeTagsInput) (*ec2.DescribeTagsO return &allTags, nil } -func mockEC2Provider(region string, credential *configaws.CredentialConfig) ec2iface.EC2API { +func mockEC2Provider(_ string, _ *configaws.CredentialConfig) ec2iface.EC2API { return &mockEC2Client{withASG: true} } @@ -92,6 +93,7 @@ func TestSetInstanceIdAndRegion(t *testing.T) { wantErr: false, want: EC2Info{ InstanceID: mockedInstanceIdentityDoc.InstanceID, + AccountID: mockedInstanceIdentityDoc.AccountID, }, }, } @@ -102,10 +104,11 @@ func TestSetInstanceIdAndRegion(t *testing.T) { metadataProvider: tt.args.metadataProvider, logger: logger, } - if err := ei.setInstanceId(); (err != nil) != tt.wantErr { - t.Errorf("setInstanceId() error = %v, wantErr %v", err, tt.wantErr) + if err := ei.setInstanceIDAccountID(); (err != nil) != tt.wantErr { + t.Errorf("setInstanceIDAccountID() error = %v, wantErr %v", err, tt.wantErr) } assert.Equal(t, tt.want.InstanceID, ei.InstanceID) + assert.Equal(t, tt.want.AccountID, ei.AccountID) }) } } @@ -214,6 +217,7 @@ func TestIgnoreInvalidFields(t *testing.T) { logger, _ := zap.NewDevelopment() type want struct { instanceId string + 
accountId string autoScalingGroup string } tests := []struct { @@ -225,11 +229,13 @@ func TestIgnoreInvalidFields(t *testing.T) { name: "Happy path", args: &EC2Info{ InstanceID: "i-01d2417c27a396e44", + AccountID: "0123456789012", AutoScalingGroup: "asg", logger: logger, }, want: want{ instanceId: "i-01d2417c27a396e44", + accountId: "0123456789012", autoScalingGroup: "asg", }, }, @@ -237,11 +243,13 @@ func TestIgnoreInvalidFields(t *testing.T) { name: "InstanceId too large", args: &EC2Info{ InstanceID: strings.Repeat("a", 20), + AccountID: "0123456789012", AutoScalingGroup: "asg", logger: logger, }, want: want{ instanceId: "", + accountId: "0123456789012", autoScalingGroup: "asg", }, }, @@ -249,11 +257,13 @@ func TestIgnoreInvalidFields(t *testing.T) { name: "AutoScalingGroup too large", args: &EC2Info{ InstanceID: "i-01d2417c27a396e44", + AccountID: "0123456789012", AutoScalingGroup: strings.Repeat("a", 256), logger: logger, }, want: want{ instanceId: "i-01d2417c27a396e44", + accountId: "0123456789012", autoScalingGroup: "", }, }, @@ -262,6 +272,7 @@ func TestIgnoreInvalidFields(t *testing.T) { t.Run(tt.name, func(t *testing.T) { tt.args.ignoreInvalidFields() assert.Equal(t, tt.want.instanceId, tt.args.InstanceID) + assert.Equal(t, tt.want.accountId, tt.args.AccountID) assert.Equal(t, tt.want.autoScalingGroup, tt.args.AutoScalingGroup) }) } diff --git a/plugins/outputs/cloudwatch/cloudwatch_test.go b/plugins/outputs/cloudwatch/cloudwatch_test.go index 7181f7ace3..cffc9c5422 100644 --- a/plugins/outputs/cloudwatch/cloudwatch_test.go +++ b/plugins/outputs/cloudwatch/cloudwatch_test.go @@ -591,9 +591,10 @@ func TestCreateEntityMetricData(t *testing.T) { cw := newCloudWatchClient(svc, time.Second) entity := cloudwatch.Entity{ KeyAttributes: map[string]*string{ - "Type": aws.String("Service"), - "Environment": aws.String("Environment"), - "Name": aws.String("MyServiceName"), + "Type": aws.String("Service"), + "Environment": aws.String("Environment"), + "Name": 
aws.String("MyServiceName"), + "AwsAccountId": aws.String("0123456789012"), }, Attributes: map[string]*string{ "InstanceID": aws.String("i-123456789"), @@ -602,7 +603,7 @@ func TestCreateEntityMetricData(t *testing.T) { } entityToAttributesMap := ttlcache.New[string, []*cloudwatch.MetricDatum](ttlcache.WithTTL[string, []*cloudwatch.MetricDatum](5 * time.Minute)) metrics := createTestMetrics(1, 1, 1, "s") - assert.Equal(t, 6, metrics.ResourceMetrics().At(0).Resource().Attributes().Len()) + assert.Equal(t, 7, metrics.ResourceMetrics().At(0).Resource().Attributes().Len()) aggregations := ConvertOtelMetrics(metrics) assert.Equal(t, 0, metrics.ResourceMetrics().At(0).Resource().Attributes().Len()) metricDatum := cw.BuildMetricDatum(aggregations[0]) diff --git a/plugins/outputs/cloudwatch/convert_otel_test.go b/plugins/outputs/cloudwatch/convert_otel_test.go index f85d3c3a2a..dcf8f30357 100644 --- a/plugins/outputs/cloudwatch/convert_otel_test.go +++ b/plugins/outputs/cloudwatch/convert_otel_test.go @@ -58,6 +58,7 @@ func createTestMetrics( rm.Resource().Attributes().PutStr(entityattributes.AttributeEntityDeploymentEnvironment, "MyEnvironment") rm.Resource().Attributes().PutStr(entityattributes.AttributeEntityServiceName, "MyServiceName") rm.Resource().Attributes().PutStr(entityattributes.AttributeEntityInstanceID, "i-123456789") + rm.Resource().Attributes().PutStr(entityattributes.AttributeEntityAwsAccountId, "0123456789012") rm.Resource().Attributes().PutStr(entityattributes.AttributeEntityAutoScalingGroup, "asg-123") rm.Resource().Attributes().PutStr(entityattributes.AttributeEntityPlatformType, "AWS::EC2") @@ -225,9 +226,10 @@ func TestConvertOtelMetrics_Entity(t *testing.T) { datums := ConvertOtelMetrics(metrics) expectedEntity := cloudwatch.Entity{ KeyAttributes: map[string]*string{ - "Type": aws.String("Service"), - "Environment": aws.String("MyEnvironment"), - "Name": aws.String("MyServiceName"), + "Type": aws.String("Service"), + "Environment": 
aws.String("MyEnvironment"), + "Name": aws.String("MyServiceName"), + "AwsAccountId": aws.String("0123456789012"), }, Attributes: map[string]*string{ "InstanceID": aws.String("i-123456789"), diff --git a/plugins/processors/awsentity/entityattributes/entityattributes.go b/plugins/processors/awsentity/entityattributes/entityattributes.go index c79dfb902e..df54a57ab0 100644 --- a/plugins/processors/awsentity/entityattributes/entityattributes.go +++ b/plugins/processors/awsentity/entityattributes/entityattributes.go @@ -16,6 +16,7 @@ const ( AttributeEntityResourceType = AWSEntityPrefix + "resource.type" AttributeEntityEC2InstanceResource = "AWS::EC2::Instance" AttributeEntityIdentifier = AWSEntityPrefix + "identifier" + AttributeEntityAwsAccountId = AWSEntityPrefix + "aws.account.id" AttributeEntityServiceName = AWSEntityPrefix + "service.name" AttributeEntityDeploymentEnvironment = AWSEntityPrefix + "deployment.environment" AttributeEntityCluster = AWSEntityPrefix + "k8s.cluster.name" @@ -38,6 +39,7 @@ const ( EntityType = "Type" ResourceType = "ResourceType" Identifier = "Identifier" + AwsAccountId = "AwsAccountId" Cluster = "Cluster" Workload = "Workload" Node = "Node" @@ -52,6 +54,7 @@ var KeyAttributeEntityToShortNameMap = map[string]string{ AttributeEntityType: EntityType, AttributeEntityResourceType: ResourceType, AttributeEntityIdentifier: Identifier, + AttributeEntityAwsAccountId: AwsAccountId, AttributeEntityServiceName: ServiceName, AttributeEntityDeploymentEnvironment: DeploymentEnvironment, } diff --git a/plugins/processors/awsentity/processor.go b/plugins/processors/awsentity/processor.go index d92a29bed0..b3c399a1a9 100644 --- a/plugins/processors/awsentity/processor.go +++ b/plugins/processors/awsentity/processor.go @@ -120,6 +120,7 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric resourceAttrs.PutStr(entityattributes.AttributeEntityResourceType, entityattributes.AttributeEntityEC2InstanceResource) 
resourceAttrs.PutStr(entityattributes.AttributeEntityIdentifier, ec2Info.InstanceID) } + AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityAwsAccountId, ec2Info.AccountID) } case entityattributes.Service: if logGroupNamesAttr, ok := resourceAttrs.Get(attributeAwsLogGroupNames); ok { @@ -200,6 +201,7 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric resourceAttrs.PutStr(entityattributes.AttributeEntityWorkload, eksAttributes.Workload) resourceAttrs.PutStr(entityattributes.AttributeEntityNode, eksAttributes.Node) AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityInstanceID, ec2Info.InstanceID) + AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityAwsAccountId, ec2Info.AccountID) AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityServiceNameSource, entityServiceNameSource) } p.k8sscraper.Reset() @@ -221,6 +223,7 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityType, entityattributes.Service) AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityServiceName, entityServiceName) AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityDeploymentEnvironment, entityEnvironmentName) + AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityAwsAccountId, ec2Info.AccountID) ec2Attributes := EC2ServiceAttributes{ InstanceId: ec2Info.InstanceID, diff --git a/plugins/processors/awsentity/processor_test.go b/plugins/processors/awsentity/processor_test.go index 3a99ac1da0..b4a3075536 100644 --- a/plugins/processors/awsentity/processor_test.go +++ b/plugins/processors/awsentity/processor_test.go @@ -70,10 +70,11 @@ func newMockGetServiceNameAndSource(service, source string) func() (string, stri } } -func newMockGetEC2InfoFromEntityStore(instance string, asg string) func() entitystore.EC2Info { +func 
newMockGetEC2InfoFromEntityStore(instance, accountId, asg string) func() entitystore.EC2Info { return func() entitystore.EC2Info { return entitystore.EC2Info{ InstanceID: instance, + AccountID: accountId, AutoScalingGroup: asg, } } @@ -286,12 +287,13 @@ func TestProcessMetricsResourceAttributeScraping(t *testing.T) { platform: config.ModeEC2, metrics: generateMetrics(attributeServiceName, "test-service"), mockServiceNameSource: newMockGetServiceNameAndSource("test-service-name", "Instrumentation"), - mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", ""), + mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", "0123456789012", ""), want: map[string]any{ entityattributes.AttributeEntityType: "Service", entityattributes.AttributeEntityServiceName: "test-service", entityattributes.AttributeEntityPlatformType: "AWS::EC2", entityattributes.AttributeEntityInstanceID: "i-123456789", + entityattributes.AttributeEntityAwsAccountId: "0123456789012", entityattributes.AttributeEntityServiceNameSource: "Unknown", attributeServiceName: "test-service", }, @@ -301,13 +303,14 @@ func TestProcessMetricsResourceAttributeScraping(t *testing.T) { platform: config.ModeEC2, metrics: generateMetrics(attributeDeploymentEnvironment, "test-environment"), mockServiceNameSource: newMockGetServiceNameAndSource("unknown_service", "Unknown"), - mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", ""), + mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", "0123456789012", ""), want: map[string]any{ entityattributes.AttributeEntityType: "Service", entityattributes.AttributeEntityServiceName: "unknown_service", entityattributes.AttributeEntityDeploymentEnvironment: "test-environment", entityattributes.AttributeEntityPlatformType: "AWS::EC2", entityattributes.AttributeEntityInstanceID: "i-123456789", + entityattributes.AttributeEntityAwsAccountId: "0123456789012", 
entityattributes.AttributeEntityServiceNameSource: "Unknown", attributeDeploymentEnvironment: "test-environment", @@ -318,7 +321,7 @@ func TestProcessMetricsResourceAttributeScraping(t *testing.T) { platform: config.ModeEC2, metrics: generateMetrics(attributeServiceName, "test-service", attributeDeploymentEnvironment, "test-environment"), mockServiceNameSource: newMockGetServiceNameAndSource("test-service-name", "Instrumentation"), - mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", "test-auto-scaling"), + mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", "0123456789012", "test-auto-scaling"), want: map[string]any{ entityattributes.AttributeEntityType: "Service", entityattributes.AttributeEntityServiceName: "test-service", @@ -328,6 +331,7 @@ func TestProcessMetricsResourceAttributeScraping(t *testing.T) { entityattributes.AttributeEntityServiceNameSource: "Unknown", entityattributes.AttributeEntityPlatformType: "AWS::EC2", entityattributes.AttributeEntityInstanceID: "i-123456789", + entityattributes.AttributeEntityAwsAccountId: "0123456789012", entityattributes.AttributeEntityAutoScalingGroup: "test-auto-scaling", }, }, @@ -381,11 +385,12 @@ func TestProcessMetricsResourceEntityProcessing(t *testing.T) { logger, _ := zap.NewDevelopment() ctx := context.Background() tests := []struct { - name string - metrics pmetric.Metrics - want map[string]any - instance string - asg string + name string + metrics pmetric.Metrics + want map[string]any + instance string + accountId string + asg string }{ { name: "EmptyMetrics", @@ -393,26 +398,29 @@ func TestProcessMetricsResourceEntityProcessing(t *testing.T) { want: map[string]any{}, }, { - name: "ResourceEntityEC2", - metrics: generateMetrics(), - instance: "i-123456789", + name: "ResourceEntityEC2", + metrics: generateMetrics(), + instance: "i-123456789", + accountId: "0123456789012", want: map[string]any{ - "com.amazonaws.cloudwatch.entity.internal.type": 
"AWS::Resource", - "com.amazonaws.cloudwatch.entity.internal.resource.type": "AWS::EC2::Instance", - "com.amazonaws.cloudwatch.entity.internal.identifier": "i-123456789", + "com.amazonaws.cloudwatch.entity.internal.type": "AWS::Resource", + "com.amazonaws.cloudwatch.entity.internal.resource.type": "AWS::EC2::Instance", + "com.amazonaws.cloudwatch.entity.internal.identifier": "i-123456789", + "com.amazonaws.cloudwatch.entity.internal.aws.account.id": "0123456789012", }, }, { - name: "ResourceEntityEC2NoInstance", - metrics: generateMetrics(), - instance: "", - want: map[string]any{}, + name: "ResourceEntityEC2NoInstance", + metrics: generateMetrics(), + instance: "", + accountId: "", + want: map[string]any{}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - getEC2InfoFromEntityStore = newMockGetEC2InfoFromEntityStore(tt.instance, tt.asg) + getEC2InfoFromEntityStore = newMockGetEC2InfoFromEntityStore(tt.instance, tt.accountId, tt.asg) p := newAwsEntityProcessor(&Config{EntityType: entityattributes.Resource}, logger) p.config.Platform = config.ModeEC2 _, err := p.processMetrics(ctx, tt.metrics) @@ -443,13 +451,14 @@ func TestProcessMetricsDatapointAttributeScraping(t *testing.T) { { name: "DatapointAttributeServiceNameOnly", metrics: generateDatapointMetrics(attributeServiceName, "test-service"), - mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", "auto-scaling"), + mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", "0123456789012", "auto-scaling"), want: map[string]any{ entityattributes.AttributeEntityType: "Service", entityattributes.AttributeEntityServiceName: "test-service", entityattributes.AttributeEntityServiceNameSource: "UserConfiguration", entityattributes.AttributeEntityPlatformType: "AWS::EC2", entityattributes.AttributeEntityInstanceID: "i-123456789", + entityattributes.AttributeEntityAwsAccountId: "0123456789012", entityattributes.AttributeEntityAutoScalingGroup: 
"auto-scaling", }, }, @@ -457,26 +466,28 @@ func TestProcessMetricsDatapointAttributeScraping(t *testing.T) { name: "DatapointAttributeEnvironmentOnly", metrics: generateDatapointMetrics(attributeDeploymentEnvironment, "test-environment"), mockServiceNameAndSource: newMockGetServiceNameAndSource("test-service-name", "ClientIamRole"), - mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", ""), + mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", "0123456789012", ""), want: map[string]any{ entityattributes.AttributeEntityType: "Service", entityattributes.AttributeEntityServiceName: "test-service-name", entityattributes.AttributeEntityDeploymentEnvironment: "test-environment", entityattributes.AttributeEntityPlatformType: "AWS::EC2", entityattributes.AttributeEntityInstanceID: "i-123456789", + entityattributes.AttributeEntityAwsAccountId: "0123456789012", entityattributes.AttributeEntityServiceNameSource: "ClientIamRole", }, }, { name: "DatapointAttributeServiceNameAndEnvironment", metrics: generateDatapointMetrics(attributeServiceName, "test-service", attributeDeploymentEnvironment, "test-environment"), - mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", ""), + mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", "0123456789012", ""), want: map[string]any{ entityattributes.AttributeEntityType: "Service", entityattributes.AttributeEntityServiceName: "test-service", entityattributes.AttributeEntityDeploymentEnvironment: "test-environment", entityattributes.AttributeEntityPlatformType: "AWS::EC2", entityattributes.AttributeEntityInstanceID: "i-123456789", + entityattributes.AttributeEntityAwsAccountId: "0123456789012", entityattributes.AttributeEntityServiceNameSource: "UserConfiguration", }, }, From a7559f245eff1a53e09ab6cfd9bbea8ffd821904 Mon Sep 17 00:00:00 2001 From: Lisa Guo Date: Mon, 7 Oct 2024 13:53:02 -0400 Subject: [PATCH 28/47] Ensure entity is not 
populated on ECS by removing entity processor from translation (#834) --- .../otel/processor/awsentity/translator.go | 12 +++++----- .../processor/awsentity/translator_test.go | 22 ++++++++++++++++--- 2 files changed, 24 insertions(+), 10 deletions(-) diff --git a/translator/translate/otel/processor/awsentity/translator.go b/translator/translate/otel/processor/awsentity/translator.go index 463144394a..e057a65d17 100644 --- a/translator/translate/otel/processor/awsentity/translator.go +++ b/translator/translate/otel/processor/awsentity/translator.go @@ -11,7 +11,6 @@ import ( "go.opentelemetry.io/collector/processor" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity" - "github.com/aws/amazon-cloudwatch-agent/translator/config" "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/logs/util" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" @@ -49,6 +48,11 @@ func (t *translator) ID() component.ID { } func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { + // Do not send entity for ECS + if context.CurrentContext().RunInContainer() && ecsutil.GetECSUtilSingleton().IsECS() { + return nil, nil + } + cfg := t.factory.CreateDefaultConfig().(*awsentity.Config) if t.entityType != "" { @@ -77,12 +81,6 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { cfg.KubernetesMode = mode mode = ctx.Mode() - if context.CurrentContext().RunInContainer() { - if ecsutil.GetECSUtilSingleton().IsECS() { - mode = config.ModeECS - } - } - if cfg.KubernetesMode != "" { cfg.ClusterName = hostedIn } diff --git a/translator/translate/otel/processor/awsentity/translator_test.go b/translator/translate/otel/processor/awsentity/translator_test.go index 7301557782..11b44e37f4 100644 --- a/translator/translate/otel/processor/awsentity/translator_test.go +++ b/translator/translate/otel/processor/awsentity/translator_test.go @@ -12,6 +12,7 
@@ import ( "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity" "github.com/aws/amazon-cloudwatch-agent/translator/config" "github.com/aws/amazon-cloudwatch-agent/translator/context" + "github.com/aws/amazon-cloudwatch-agent/translator/util/ecsutil" ) func TestTranslate(t *testing.T) { @@ -38,17 +39,32 @@ func TestTranslate(t *testing.T) { Platform: config.ModeEC2, }, }, + "ECS": { + input: map[string]interface{}{}, + mode: config.ModeECS, + want: nil, + }, } for name, testCase := range testCases { t.Run(name, func(t *testing.T) { - context.CurrentContext().SetMode(testCase.mode) - context.CurrentContext().SetKubernetesMode(testCase.kubernetesMode) + if testCase.mode == config.ModeECS { + context.CurrentContext().SetRunInContainer(true) + t.Setenv(config.RUN_IN_CONTAINER, config.RUN_IN_CONTAINER_TRUE) + ecsutil.GetECSUtilSingleton().Region = "test" + } else { + context.CurrentContext().SetMode(testCase.mode) + context.CurrentContext().SetKubernetesMode(testCase.kubernetesMode) + } tt := NewTranslator() assert.Equal(t, "awsentity", tt.ID().String()) conf := confmap.NewFromStringMap(testCase.input) got, err := tt.Translate(conf) assert.NoError(t, err) - assert.Equal(t, testCase.want, got) + if testCase.want == nil { + assert.Nil(t, got) + } else { + assert.Equal(t, testCase.want, got) + } }) } } From 816082a88d7de1ca341125f11866e6e9135ed311 Mon Sep 17 00:00:00 2001 From: Dinakar Chappa Date: Tue, 8 Oct 2024 11:48:19 -0400 Subject: [PATCH 29/47] Drops all attributes with entity prefix from being exported to cloudwatch (#833) --- plugins/outputs/cloudwatch/convert_otel.go | 20 +++++++-------- .../outputs/cloudwatch/convert_otel_test.go | 25 ++++++++++++++++++- 2 files changed, 34 insertions(+), 11 deletions(-) diff --git a/plugins/outputs/cloudwatch/convert_otel.go b/plugins/outputs/cloudwatch/convert_otel.go index a9f85d13d4..78c2d00b94 100644 --- a/plugins/outputs/cloudwatch/convert_otel.go +++ b/plugins/outputs/cloudwatch/convert_otel.go @@ -24,7 
+24,7 @@ func ConvertOtelDimensions(attributes pcommon.Map) []*cloudwatch.Dimension { mTags := make(map[string]string, attributes.Len()) attributes.Range(func(k string, v pcommon.Value) bool { // we don't want to export entity related attributes as dimensions, so we skip these - if isEntityAttribute(k) { + if strings.HasPrefix(k, entityattributes.AWSEntityPrefix) { return true } mTags[k] = v.AsString() @@ -33,12 +33,6 @@ func ConvertOtelDimensions(attributes pcommon.Map) []*cloudwatch.Dimension { return BuildDimensions(mTags) } -func isEntityAttribute(k string) bool { - _, ok := entityattributes.KeyAttributeEntityToShortNameMap[k] - _, ok2 := entityattributes.AttributeEntityToShortNameMap[k] - return ok || ok2 -} - // NumberDataPointValue converts to float64 since that is what AWS SDK will use. func NumberDataPointValue(dp pmetric.NumberDataPoint) float64 { switch dp.ValueType() { @@ -197,6 +191,7 @@ func fetchEntityFields(resourceAttributes pcommon.Map) cloudwatch.Entity { processEntityAttributes(entityattributes.KeyAttributeEntityToShortNameMap, keyAttributesMap, resourceAttributes) processEntityAttributes(entityattributes.AttributeEntityToShortNameMap, attributeMap, resourceAttributes) + removeEntityFields(resourceAttributes) return cloudwatch.Entity{ KeyAttributes: keyAttributesMap, @@ -204,15 +199,20 @@ func fetchEntityFields(resourceAttributes pcommon.Map) cloudwatch.Entity { } } -// processEntityAttributes fetches the aws.entity fields and creates an entity to be sent at the PutMetricData call. It also -// removes the entity attributes so that it is not tagged as a dimension, and reduces the size of the PMD payload. +// processEntityAttributes fetches the fields with entity prefix and creates an entity to be sent at the PutMetricData call. 
func processEntityAttributes(entityMap map[string]string, targetMap map[string]*string, mutableResourceAttributes pcommon.Map) { for entityField, shortName := range entityMap { if val, ok := mutableResourceAttributes.Get(entityField); ok { if strVal := val.Str(); strVal != "" { targetMap[shortName] = aws.String(strVal) } - mutableResourceAttributes.Remove(entityField) } } } + +// removeEntityFields so that it is not tagged as a dimension, and reduces the size of the PMD payload. +func removeEntityFields(mutableResourceAttributes pcommon.Map) { + mutableResourceAttributes.RemoveIf(func(s string, _ pcommon.Value) bool { + return strings.HasPrefix(s, entityattributes.AWSEntityPrefix) + }) +} diff --git a/plugins/outputs/cloudwatch/convert_otel_test.go b/plugins/outputs/cloudwatch/convert_otel_test.go index dcf8f30357..a2992da5b2 100644 --- a/plugins/outputs/cloudwatch/convert_otel_test.go +++ b/plugins/outputs/cloudwatch/convert_otel_test.go @@ -242,7 +242,7 @@ func TestConvertOtelMetrics_Entity(t *testing.T) { } -func TestProcessEntityAttributes(t *testing.T) { +func TestProcessAndRemoveEntityAttributes(t *testing.T) { testCases := []struct { name string entityMap []map[string]string @@ -325,6 +325,28 @@ func TestProcessEntityAttributes(t *testing.T) { "extra_attribute": "extra_value", }, }, + { + name: "key_and_non_key_attributes_plus_unsupported_entity_field", + entityMap: []map[string]string{entityattributes.KeyAttributeEntityToShortNameMap, entityattributes.AttributeEntityToShortNameMap}, + resourceAttributes: map[string]any{ + entityattributes.AWSEntityPrefix + "not.real.values": "unsupported", + entityattributes.AttributeEntityServiceName: "my-service", + entityattributes.AttributeEntityDeploymentEnvironment: "my-environment", + entityattributes.AttributeEntityCluster: "my-cluster", + entityattributes.AttributeEntityNamespace: "my-namespace", + entityattributes.AttributeEntityNode: "my-node", + entityattributes.AttributeEntityWorkload: "my-workload", + }, + 
wantedAttributes: map[string]*string{ + entityattributes.ServiceName: aws.String("my-service"), + entityattributes.DeploymentEnvironment: aws.String("my-environment"), + entityattributes.Cluster: aws.String("my-cluster"), + entityattributes.Namespace: aws.String("my-namespace"), + entityattributes.Node: aws.String("my-node"), + entityattributes.Workload: aws.String("my-workload"), + }, + leftoverAttributes: map[string]any{}, + }, } for _, tc := range testCases { @@ -336,6 +358,7 @@ func TestProcessEntityAttributes(t *testing.T) { for _, entityMap := range tc.entityMap { processEntityAttributes(entityMap, targetMap, attrs) } + removeEntityFields(attrs) assert.Equal(t, tc.leftoverAttributes, attrs.AsRaw()) assert.Equal(t, tc.wantedAttributes, targetMap) }) From 1910a3669f2eb794e2b8c57395e466a60f144543 Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Thu, 10 Oct 2024 09:54:47 -0400 Subject: [PATCH 30/47] Add unit tests to ensure no user resource info is revealed in logs (#837) --- extension/entitystore/ec2Info_test.go | 8 +-- extension/entitystore/extension.go | 4 +- extension/entitystore/extension_test.go | 88 +++++++++++++++++++++++++ 3 files changed, 91 insertions(+), 9 deletions(-) diff --git a/extension/entitystore/ec2Info_test.go b/extension/entitystore/ec2Info_test.go index b2170b6c63..77f7f6303a 100644 --- a/extension/entitystore/ec2Info_test.go +++ b/extension/entitystore/ec2Info_test.go @@ -15,7 +15,6 @@ import ( "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/stretchr/testify/assert" "go.uber.org/zap" - "go.uber.org/zap/zapcore" configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" @@ -310,13 +309,8 @@ func TestLogMessageDoesNotIncludeResourceInfo(t *testing.T) { t.Run(tt.name, func(t *testing.T) { // Create a buffer to capture the logger output var buf bytes.Buffer - writer := zapcore.AddSync(&buf) - // Create a custom 
zapcore.Core that writes to the buffer - encoder := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()) - core := zapcore.NewCore(encoder, writer, zapcore.DebugLevel) - - logger := zap.New(core) + logger := CreateTestLogger(&buf) done := make(chan struct{}) ei := &EC2Info{ diff --git a/extension/entitystore/extension.go b/extension/entitystore/extension.go index 4928ee0000..aaa56399bc 100644 --- a/extension/entitystore/extension.go +++ b/extension/entitystore/extension.go @@ -235,12 +235,12 @@ func (e *EntityStore) createServiceKeyAttributes(serviceAttr ServiceAttribute) m return serviceKeyAttr } -func getMetaDataProvider() ec2metadataprovider.MetadataProvider { +var getMetaDataProvider = func() ec2metadataprovider.MetadataProvider { mdCredentialConfig := &configaws.CredentialConfig{} return ec2metadataprovider.NewMetadataProvider(mdCredentialConfig.Credentials(), retryer.GetDefaultRetryNumber()) } -func getEC2Provider(region string, ec2CredentialConfig *configaws.CredentialConfig) ec2iface.EC2API { +var getEC2Provider = func(region string, ec2CredentialConfig *configaws.CredentialConfig) ec2iface.EC2API { ec2CredentialConfig.Region = region return ec2.New( ec2CredentialConfig.Credentials(), diff --git a/extension/entitystore/extension_test.go b/extension/entitystore/extension_test.go index 163b481434..fb3be5a05a 100644 --- a/extension/entitystore/extension_test.go +++ b/extension/entitystore/extension_test.go @@ -4,8 +4,10 @@ package entitystore import ( + "bytes" "context" "errors" + "log" "reflect" "testing" "time" @@ -19,7 +21,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatchlogs" "github.com/aws/amazon-cloudwatch-agent/translator/config" ) @@ -28,6 +32,19 @@ type mockServiceProvider struct { mock.Mock } +// This helper function creates a test 
logger +// so that it can send the log messages into a +// temporary buffer for pattern matching +func CreateTestLogger(buf *bytes.Buffer) *zap.Logger { + writer := zapcore.AddSync(buf) + + // Create a custom zapcore.Core that writes to the buffer + encoder := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()) + core := zapcore.NewCore(encoder, writer, zapcore.DebugLevel) + logger := zap.New(core) + return logger +} + func (s *mockServiceProvider) startServiceProvider() {} func (s *mockServiceProvider) addEntryForLogGroup(logGroupName LogGroupName, serviceAttr ServiceAttribute) { @@ -63,6 +80,16 @@ type mockMetadataProvider struct { InstanceTagError bool } +func mockMetadataProviderFunc() ec2metadataprovider.MetadataProvider { + return &mockMetadataProvider{ + Tags: "aws:autoscaling:groupName", + TagValue: "ASG-1", + InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ + InstanceID: "i-123456789", + }, + } +} + func mockMetadataProviderWithAccountId(accountId string) *mockMetadataProvider { return &mockMetadataProvider{ InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ @@ -499,3 +526,64 @@ func TestEntityStore_GetMetricServiceNameSource(t *testing.T) { assert.Equal(t, "test-service-name", serviceName) assert.Equal(t, "UserConfiguration", serviceNameSource) } + +func TestEntityStore_LogMessageDoesNotIncludeResourceInfo(t *testing.T) { + type args struct { + metadataProvider ec2metadataprovider.MetadataProvider + mode string + kubernetesMode string + } + tests := []struct { + name string + args args + }{ + { + name: "AutoScalingGroupWithInstanceTagsEC2", + args: args{ + mode: config.ModeEC2, + }, + }, + { + name: "AutoScalingGroupWithInstanceTagsEKS", + args: args{ + kubernetesMode: config.ModeEKS, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a buffer to capture the logger output + var buf bytes.Buffer + + logger := CreateTestLogger(&buf) + done := make(chan struct{}) + config := 
&Config{ + Mode: tt.args.mode, + KubernetesMode: tt.args.kubernetesMode, + } + getEC2Provider = mockEC2Provider + getMetaDataProvider = mockMetadataProviderFunc + es := &EntityStore{ + logger: logger, + done: done, + metadataprovider: tt.args.metadataProvider, + config: config, + } + go es.Start(nil, nil) + time.Sleep(2 * time.Second) + + logOutput := buf.String() + log.Println(logOutput) + assertIfNonEmpty(t, logOutput, es.ec2Info.InstanceID) + assertIfNonEmpty(t, logOutput, es.ec2Info.AutoScalingGroup) + assertIfNonEmpty(t, logOutput, es.ec2Info.AccountID) + + }) + } +} + +func assertIfNonEmpty(t *testing.T, message string, pattern string) { + if pattern != "" { + assert.NotContains(t, message, pattern) + } +} From b2920e0b16f39ca8741b9465689b9cd834c0f6da Mon Sep 17 00:00:00 2001 From: Dinakar Chappa Date: Fri, 11 Oct 2024 11:11:30 -0400 Subject: [PATCH 31/47] Removes all instances of ec2 describe tags call from enity logic (#838) --- extension/entitystore/ec2Info.go | 63 +----- extension/entitystore/ec2Info_test.go | 119 ++--------- extension/entitystore/extension.go | 2 +- extension/entitystore/extension_test.go | 15 +- extension/entitystore/retryer_test.go | 106 +--------- extension/entitystore/serviceprovider.go | 120 +++-------- extension/entitystore/serviceprovider_test.go | 199 ++++++------------ 7 files changed, 129 insertions(+), 495 deletions(-) diff --git a/extension/entitystore/ec2Info.go b/extension/entitystore/ec2Info.go index af6b078314..703ff7db1d 100644 --- a/extension/entitystore/ec2Info.go +++ b/extension/entitystore/ec2Info.go @@ -9,12 +9,8 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "go.uber.org/zap" - configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/ec2tagger" ) @@ -38,9 +34,6 @@ type EC2Info 
struct { Region string metadataProvider ec2metadataprovider.MetadataProvider - ec2API ec2iface.EC2API - ec2Provider ec2ProviderType - ec2Credential *configaws.CredentialConfig logger *zap.Logger done chan struct{} } @@ -50,7 +43,6 @@ func (ei *EC2Info) initEc2Info() { if err := ei.setInstanceIDAccountID(); err != nil { return } - ei.ec2API = ei.ec2Provider(ei.Region, ei.ec2Credential) if err := ei.setAutoScalingGroup(); err != nil { return } @@ -101,8 +93,8 @@ func (ei *EC2Info) setAutoScalingGroup() error { ei.logger.Debug("Initial retrieval of tags and volumes", zap.Int("retry", retry)) } - if err := ei.retrieveAsgName(ei.ec2API); err != nil { - ei.logger.Warn("Unable to describe ec2 tags", zap.Int("retry", retry), zap.Error(err)) + if err := ei.retrieveAsgName(); err != nil { + ei.logger.Warn("Unable to fetch instance tags with imds", zap.Int("retry", retry), zap.Error(err)) } else { ei.logger.Debug("Retrieval of auto-scaling group tags succeeded") return nil @@ -113,15 +105,11 @@ func (ei *EC2Info) setAutoScalingGroup() error { } -/* -This can also be implemented by just calling the InstanceTagValue and then DescribeTags on failure. But preferred the current implementation -as we need to distinguish the tags not being fetchable at all, from the ASG tag in particular not existing. 
-*/ -func (ei *EC2Info) retrieveAsgName(ec2API ec2iface.EC2API) error { +func (ei *EC2Info) retrieveAsgName() error { tags, err := ei.metadataProvider.InstanceTags(context.Background()) if err != nil { ei.logger.Debug("Failed to get tags through metadata provider", zap.Error(err)) - return ei.retrieveAsgNameWithDescribeTags(ec2API) + return err } else if strings.Contains(tags, ec2tagger.Ec2InstanceTagKeyASG) { asg, err := ei.metadataProvider.InstanceTagValue(context.Background(), ec2tagger.Ec2InstanceTagKeyASG) if err != nil { @@ -134,50 +122,9 @@ func (ei *EC2Info) retrieveAsgName(ec2API ec2iface.EC2API) error { return nil } -func (ei *EC2Info) retrieveAsgNameWithDescribeTags(ec2API ec2iface.EC2API) error { - tagFilters := []*ec2.Filter{ - { - Name: aws.String("resource-type"), - Values: aws.StringSlice([]string{"instance"}), - }, - { - Name: aws.String("resource-id"), - Values: aws.StringSlice([]string{ei.InstanceID}), - }, - { - Name: aws.String("key"), - Values: aws.StringSlice([]string{ec2tagger.Ec2InstanceTagKeyASG}), - }, - } - input := &ec2.DescribeTagsInput{ - Filters: tagFilters, - } - for { - result, err := ec2API.DescribeTags(input) - if err != nil { - ei.logger.Error("Unable to retrieve EC2 AutoScalingGroup. 
This feature must only be used on an EC2 instance.") - return err - } - for _, tag := range result.Tags { - key := *tag.Key - if ec2tagger.Ec2InstanceTagKeyASG == key { - ei.AutoScalingGroup = *tag.Value - return nil - } - } - if result.NextToken == nil { - break - } - input.SetNextToken(*result.NextToken) - } - return nil -} - -func newEC2Info(metadataProvider ec2metadataprovider.MetadataProvider, providerType ec2ProviderType, ec2Credential *configaws.CredentialConfig, done chan struct{}, region string, logger *zap.Logger) *EC2Info { +func newEC2Info(metadataProvider ec2metadataprovider.MetadataProvider, done chan struct{}, region string, logger *zap.Logger) *EC2Info { return &EC2Info{ metadataProvider: metadataProvider, - ec2Provider: providerType, - ec2Credential: ec2Credential, done: done, Region: region, logger: logger, diff --git a/extension/entitystore/ec2Info_test.go b/extension/entitystore/ec2Info_test.go index 77f7f6303a..13b2674861 100644 --- a/extension/entitystore/ec2Info_test.go +++ b/extension/entitystore/ec2Info_test.go @@ -11,12 +11,9 @@ import ( "time" "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/stretchr/testify/assert" "go.uber.org/zap" - configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" ) @@ -28,52 +25,10 @@ var mockedInstanceIdentityDoc = &ec2metadata.EC2InstanceIdentityDocument{ ImageID: "ami-09edd32d9b0990d49", } -type mockEC2Client struct { - ec2iface.EC2API - withASG bool -} - -// construct the return results for the mocked DescribeTags api -var ( - tagKey1 = "tagKey1" - tagVal1 = "tagVal1" - tagDes1 = ec2.TagDescription{Key: &tagKey1, Value: &tagVal1} -) - -var ( - tagKey2 = "tagKey2" - tagVal2 = "tagVal2" - tagDes2 = ec2.TagDescription{Key: &tagKey2, Value: &tagVal2} -) - var ( - tagKey3 = "aws:autoscaling:groupName" tagVal3 = "ASG-1" - tagDes3 = 
ec2.TagDescription{Key: &tagKey3, Value: &tagVal3} ) -func (m *mockEC2Client) DescribeTags(*ec2.DescribeTagsInput) (*ec2.DescribeTagsOutput, error) { - //all tags are returned when the ec2 metadata service knows about all tags - var allTags ec2.DescribeTagsOutput - if m.withASG { - allTags = ec2.DescribeTagsOutput{ - NextToken: nil, - Tags: []*ec2.TagDescription{&tagDes1, &tagDes2, &tagDes3}, - } - } else { - allTags = ec2.DescribeTagsOutput{ - NextToken: nil, - Tags: []*ec2.TagDescription{&tagDes1, &tagDes2}, - } - } - - return &allTags, nil -} - -func mockEC2Provider(_ string, _ *configaws.CredentialConfig) ec2iface.EC2API { - return &mockEC2Client{withASG: true} -} - func TestSetInstanceIdAndRegion(t *testing.T) { type args struct { metadataProvider ec2metadataprovider.MetadataProvider @@ -114,7 +69,6 @@ func TestSetInstanceIdAndRegion(t *testing.T) { func TestRetrieveASGName(t *testing.T) { type args struct { - ec2Client ec2iface.EC2API metadataProvider ec2metadataprovider.MetadataProvider } tests := []struct { @@ -126,8 +80,7 @@ func TestRetrieveASGName(t *testing.T) { { name: "happy path", args: args{ - ec2Client: &mockEC2Client{}, - metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc, Tags: "aws:autoscaling:groupName", TagValue: tagVal3}, + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc, Tags: map[string]string{"aws:autoscaling:groupName": tagVal3}}, }, wantErr: false, want: EC2Info{ @@ -137,9 +90,15 @@ func TestRetrieveASGName(t *testing.T) { { name: "happy path with multiple tags", args: args{ - ec2Client: &mockEC2Client{}, - metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc, Tags: "aws:autoscaling:groupName\nenv\nname", TagValue: tagVal3}, + metadataProvider: &mockMetadataProvider{ + InstanceIdentityDocument: mockedInstanceIdentityDoc, + Tags: map[string]string{ + "aws:autoscaling:groupName": tagVal3, + "env": "test-env", + 
"name": "test-name", + }}, }, + wantErr: false, want: EC2Info{ AutoScalingGroup: tagVal3, @@ -148,8 +107,7 @@ func TestRetrieveASGName(t *testing.T) { { name: "Success IMDS tags call but no ASG", args: args{ - ec2Client: &mockEC2Client{}, - metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc, Tags: "name", TagValue: tagVal3}, + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc, Tags: map[string]string{"name": tagVal3}}, }, wantErr: false, want: EC2Info{ @@ -161,50 +119,7 @@ func TestRetrieveASGName(t *testing.T) { logger, _ := zap.NewDevelopment() t.Run(tt.name, func(t *testing.T) { ei := &EC2Info{metadataProvider: tt.args.metadataProvider, logger: logger} - if err := ei.retrieveAsgName(tt.args.ec2Client); (err != nil) != tt.wantErr { - t.Errorf("retrieveAsgName() error = %v, wantErr %v", err, tt.wantErr) - } - assert.Equal(t, tt.want.AutoScalingGroup, ei.AutoScalingGroup) - }) - } -} - -func TestRetrieveASGNameWithDescribeTags(t *testing.T) { - type args struct { - ec2Client ec2iface.EC2API - } - tests := []struct { - name string - args args - wantErr bool - want EC2Info - }{ - { - name: "happy path", - args: args{ - ec2Client: &mockEC2Client{withASG: true}, - }, - wantErr: false, - want: EC2Info{ - AutoScalingGroup: tagVal3, - }, - }, - { - name: "Success Describe tags call but no ASG", - args: args{ - ec2Client: &mockEC2Client{withASG: false}, - }, - wantErr: false, - want: EC2Info{ - AutoScalingGroup: "", - }, - }, - } - for _, tt := range tests { - logger, _ := zap.NewDevelopment() - t.Run(tt.name, func(t *testing.T) { - ei := &EC2Info{logger: logger} - if err := ei.retrieveAsgNameWithDescribeTags(tt.args.ec2Client); (err != nil) != tt.wantErr { + if err := ei.retrieveAsgName(); (err != nil) != tt.wantErr { t.Errorf("retrieveAsgName() error = %v, wantErr %v", err, tt.wantErr) } assert.Equal(t, tt.want.AutoScalingGroup, ei.AutoScalingGroup) @@ -286,19 +201,10 @@ func 
TestLogMessageDoesNotIncludeResourceInfo(t *testing.T) { args args want EC2Info }{ - { - name: "AutoScalingGroupWithDescribeTags", - args: args{ - metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc, InstanceTagError: true}, - }, - want: EC2Info{ - InstanceID: mockedInstanceIdentityDoc.InstanceID, - }, - }, { name: "AutoScalingGroupWithInstanceTags", args: args{ - metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc, Tags: "aws:autoscaling:groupName", TagValue: tagVal3}, + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc, Tags: map[string]string{"aws:autoscaling:groupName": tagVal3}}, }, want: EC2Info{ InstanceID: mockedInstanceIdentityDoc.InstanceID, @@ -315,7 +221,6 @@ func TestLogMessageDoesNotIncludeResourceInfo(t *testing.T) { ei := &EC2Info{ metadataProvider: tt.args.metadataProvider, - ec2Provider: mockEC2Provider, logger: logger, done: done, } diff --git a/extension/entitystore/extension.go b/extension/entitystore/extension.go index aaa56399bc..afbd1bf7a2 100644 --- a/extension/entitystore/extension.go +++ b/extension/entitystore/extension.go @@ -96,7 +96,7 @@ func (e *EntityStore) Start(ctx context.Context, host component.Host) error { } switch e.mode { case config.ModeEC2: - e.ec2Info = *newEC2Info(e.metadataprovider, getEC2Provider, ec2CredentialConfig, e.done, e.config.Region, e.logger) + e.ec2Info = *newEC2Info(e.metadataprovider, e.done, e.config.Region, e.logger) go e.ec2Info.initEc2Info() } if e.kubernetesMode != "" { diff --git a/extension/entitystore/extension_test.go b/extension/entitystore/extension_test.go index fb3be5a05a..0763e3afec 100644 --- a/extension/entitystore/extension_test.go +++ b/extension/entitystore/extension_test.go @@ -75,8 +75,7 @@ func (ms *mockSTSClient) GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.Ge type mockMetadataProvider struct { InstanceIdentityDocument 
*ec2metadata.EC2InstanceIdentityDocument - Tags string - TagValue string + Tags map[string]string InstanceTagError bool } @@ -121,11 +120,19 @@ func (m *mockMetadataProvider) InstanceTags(ctx context.Context) (string, error) if m.InstanceTagError { return "", errors.New("an error occurred for instance tag retrieval") } - return m.Tags, nil + var tagsString string + for key, val := range m.Tags { + tagsString += key + "=" + val + "," + } + return tagsString, nil } func (m *mockMetadataProvider) InstanceTagValue(ctx context.Context, tagKey string) (string, error) { - return m.TagValue, nil + tag, ok := m.Tags[tagKey] + if !ok { + return "", errors.New("tag not found") + } + return tag, nil } func TestEntityStore_EC2Info(t *testing.T) { diff --git a/extension/entitystore/retryer_test.go b/extension/entitystore/retryer_test.go index 46521a36b7..35883cf695 100644 --- a/extension/entitystore/retryer_test.go +++ b/extension/entitystore/retryer_test.go @@ -8,35 +8,23 @@ import ( "time" "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/stretchr/testify/assert" "go.uber.org/zap" - configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/ec2tagger" ) -var ( - FastBackOffArray = []time.Duration{0, 0, 0} -) - func TestRetryer_refreshLoop(t *testing.T) { type fields struct { - metadataProvider ec2metadataprovider.MetadataProvider - ec2API ec2iface.EC2API - iamRole string - ec2TagServiceName string - oneTime bool - } - type expectedInfo struct { - iamRole string - ec2TagServiceName string + metadataProvider ec2metadataprovider.MetadataProvider + iamRole string + oneTime bool } tests := []struct { - name string - fields fields - expectedInfo expectedInfo + name string + fields fields + wantIamRole string }{ { name: "HappyPath_CorrectRefresh", @@ -45,14 +33,9 @@ func TestRetryer_refreshLoop(t 
*testing.T) { InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ InstanceID: "i-123456789"}, }, - ec2API: &mockServiceNameEC2Client{}, - iamRole: "original-role", - ec2TagServiceName: "original-tag-name", - }, - expectedInfo: expectedInfo{ - iamRole: "TestRole", - ec2TagServiceName: "test-service", + iamRole: "original-role", }, + wantIamRole: "TestRole", }, } for _, tt := range tests { @@ -61,81 +44,14 @@ func TestRetryer_refreshLoop(t *testing.T) { done := make(chan struct{}) s := &serviceprovider{ metadataProvider: tt.fields.metadataProvider, - ec2API: tt.fields.ec2API, - ec2Provider: func(s string, config *configaws.CredentialConfig) ec2iface.EC2API { - return tt.fields.ec2API - }, - iamRole: tt.fields.iamRole, - ec2TagServiceName: tt.fields.ec2TagServiceName, - done: done, + iamRole: tt.fields.iamRole, + done: done, } - limitedRetryer := NewRetryer(tt.fields.oneTime, false, describeTagsJitterMin, describeTagsJitterMax, ec2tagger.ThrottleBackOffArray, maxRetry, s.done, logger) unlimitedRetryer := NewRetryer(tt.fields.oneTime, true, defaultJitterMin, defaultJitterMax, ec2tagger.BackoffSleepArray, infRetry, s.done, logger) - go limitedRetryer.refreshLoop(s.getEC2TagServiceName) go unlimitedRetryer.refreshLoop(s.getIAMRole) time.Sleep(time.Second) close(done) - assert.Equal(t, tt.expectedInfo.iamRole, s.iamRole) - assert.Equal(t, tt.expectedInfo.ec2TagServiceName, s.ec2TagServiceName) - }) - } -} - -func TestRetryer_refreshLoopRetry(t *testing.T) { - type fields struct { - metadataProvider ec2metadataprovider.MetadataProvider - ec2API ec2iface.EC2API - oneTime bool - } - tests := []struct { - name string - fields fields - expectedRetry int - }{ - { - name: "ThrottleLimitError", - fields: fields{ - metadataProvider: &mockMetadataProvider{ - InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ - InstanceID: "i-123456789"}, - }, - ec2API: &mockServiceNameEC2Client{ - throttleError: true, - }, - }, - expectedRetry: 4, - }, - { - name: 
"AuthError", - fields: fields{ - metadataProvider: &mockMetadataProvider{ - InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ - InstanceID: "i-123456789"}, - }, - ec2API: &mockServiceNameEC2Client{ - authError: true, - }, - }, - expectedRetry: 1, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - logger, _ := zap.NewDevelopment() - done := make(chan struct{}) - s := &serviceprovider{ - metadataProvider: tt.fields.metadataProvider, - ec2API: tt.fields.ec2API, - ec2Provider: func(s string, config *configaws.CredentialConfig) ec2iface.EC2API { - return tt.fields.ec2API - }, - done: done, - } - limitedRetryer := NewRetryer(tt.fields.oneTime, false, describeTagsJitterMin, describeTagsJitterMax, FastBackOffArray, maxRetry, s.done, logger) - retry := limitedRetryer.refreshLoop(s.getEC2TagServiceName) - time.Sleep(time.Second) - close(done) - assert.Equal(t, tt.expectedRetry, retry) + assert.Equal(t, tt.wantIamRole, s.iamRole) }) } } diff --git a/extension/entitystore/serviceprovider.go b/extension/entitystore/serviceprovider.go index ffc527feae..d9a32fc83c 100644 --- a/extension/entitystore/serviceprovider.go +++ b/extension/entitystore/serviceprovider.go @@ -5,14 +5,10 @@ package entitystore import ( "context" - "errors" "fmt" "strings" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "go.uber.org/zap" configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" @@ -46,11 +42,8 @@ const ( ) var ( - priorityMap = map[string]int{ - SERVICE: 2, - APPLICATION: 1, - APP: 0, - } + //priorityMap is ranking in how we prioritize which IMDS tag determines the service name + priorityMap = []string{SERVICE, APPLICATION, APP} ) type ServiceAttribute struct { @@ -63,17 +56,14 @@ type LogGroupName string type LogFileGlob string type serviceprovider struct { - mode string - ec2Info *EC2Info - metadataProvider 
ec2metadataprovider.MetadataProvider - ec2API ec2iface.EC2API - ec2Provider ec2ProviderType - ec2Credential *configaws.CredentialConfig - iamRole string - ec2TagServiceName string - region string - done chan struct{} - logger *zap.Logger + mode string + ec2Info *EC2Info + metadataProvider ec2metadataprovider.MetadataProvider + iamRole string + imdsServiceName string + region string + done chan struct{} + logger *zap.Logger // logFiles stores the service attributes that were configured for log files in CloudWatch Agent configuration. // Example: // "/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log": {ServiceName: "cloudwatch-agent"} @@ -86,15 +76,10 @@ type serviceprovider struct { } func (s *serviceprovider) startServiceProvider() { - oneTimeRetryer := NewRetryer(true, true, defaultJitterMin, defaultJitterMax, ec2tagger.BackoffSleepArray, infRetry, s.done, s.logger) unlimitedRetryer := NewRetryer(false, true, defaultJitterMin, defaultJitterMax, ec2tagger.BackoffSleepArray, infRetry, s.done, s.logger) limitedRetryer := NewRetryer(false, false, describeTagsJitterMin, describeTagsJitterMax, ec2tagger.ThrottleBackOffArray, maxRetry, s.done, s.logger) - err := s.getEC2Client() - if err != nil { - go oneTimeRetryer.refreshLoop(s.getEC2Client) - } go unlimitedRetryer.refreshLoop(s.getIAMRole) - go limitedRetryer.refreshLoop(s.getEC2TagServiceName) + go limitedRetryer.refreshLoop(s.getImdsServiceName) } // addEntryForLogFile adds an association between a log file glob and a service attribute, as configured in the @@ -146,7 +131,7 @@ func (s *serviceprovider) logFileServiceAttribute(logFile LogFileGlob, logGroup return mergeServiceAttributes([]serviceAttributeProvider{ func() ServiceAttribute { return s.serviceAttributeForLogGroup(logGroup) }, func() ServiceAttribute { return s.serviceAttributeForLogFile(logFile) }, - s.serviceAttributeFromEc2Tags, + s.serviceAttributeFromImdsTags, s.serviceAttributeFromIamRole, s.serviceAttributeFromAsg, 
s.serviceAttributeFallback, @@ -155,7 +140,7 @@ func (s *serviceprovider) logFileServiceAttribute(logFile LogFileGlob, logGroup func (s *serviceprovider) getServiceNameAndSource() (string, string) { sa := mergeServiceAttributes([]serviceAttributeProvider{ - s.serviceAttributeFromEc2Tags, + s.serviceAttributeFromImdsTags, s.serviceAttributeFromIamRole, s.serviceAttributeFallback, }) @@ -178,13 +163,13 @@ func (s *serviceprovider) serviceAttributeForLogFile(logFile LogFileGlob) Servic return s.logFiles[logFile] } -func (s *serviceprovider) serviceAttributeFromEc2Tags() ServiceAttribute { - if s.ec2TagServiceName == "" { +func (s *serviceprovider) serviceAttributeFromImdsTags() ServiceAttribute { + if s.imdsServiceName == "" { return ServiceAttribute{} } return ServiceAttribute{ - ServiceName: s.ec2TagServiceName, + ServiceName: s.imdsServiceName, ServiceNameSource: ServiceNameSourceResourceTags, } } @@ -240,81 +225,36 @@ func (s *serviceprovider) getIAMRole() error { } return nil } - -func (s *serviceprovider) getEC2TagServiceName() error { - if s.ec2API == nil { - return fmt.Errorf("can't get EC2 tag since client is not set up yet ") - } - serviceTagFilters, err := s.getEC2TagFilters() +func (s *serviceprovider) getImdsServiceName() error { + tags, err := s.metadataProvider.InstanceTags(context.Background()) if err != nil { - return fmt.Errorf("failed to get service name from EC2 tag: %s", err) + s.logger.Debug("Failed to get tags through metadata provider", zap.Error(err)) + return err } - currentTagPriority := -1 - for { - input := &ec2.DescribeTagsInput{ - Filters: serviceTagFilters, - } - result, err := s.ec2API.DescribeTags(input) - if err != nil { - return err - } - for _, tag := range result.Tags { - key := *tag.Key - value := *tag.Value - if priority, found := priorityMap[key]; found { - if priority > currentTagPriority { - s.ec2TagServiceName = value - currentTagPriority = priority - } + // This will check whether the tags contains SERVICE, APPLICATION, 
APP, in that order. + for _, value := range priorityMap { + if strings.Contains(tags, value) { + serviceName, err := s.metadataProvider.InstanceTagValue(context.Background(), value) + if err != nil { + continue + } else { + s.imdsServiceName = serviceName } - } - if result.NextToken == nil { break } - input.SetNextToken(*result.NextToken) } - return nil -} - -func (s *serviceprovider) getEC2Client() error { - if s.ec2API != nil { - return nil + if s.imdsServiceName == "" { + s.logger.Debug("Service name not found through IMDS") } - s.ec2API = s.ec2Provider(s.region, s.ec2Credential) return nil } -func (s *serviceprovider) getEC2TagFilters() ([]*ec2.Filter, error) { - instanceDocument, err := s.metadataProvider.Get(context.Background()) - if err != nil { - return nil, errors.New("failed to get instance document") - } - instanceID := instanceDocument.InstanceID - tagFilters := []*ec2.Filter{ - { - Name: aws.String("resource-type"), - Values: aws.StringSlice([]string{"instance"}), - }, - { - Name: aws.String("resource-id"), - Values: aws.StringSlice([]string{instanceID}), - }, - { - Name: aws.String("key"), - Values: aws.StringSlice([]string{SERVICE, APPLICATION, APP}), - }, - } - return tagFilters, nil -} - func newServiceProvider(mode string, region string, ec2Info *EC2Info, metadataProvider ec2metadataprovider.MetadataProvider, providerType ec2ProviderType, ec2Credential *configaws.CredentialConfig, done chan struct{}, logger *zap.Logger) serviceProviderInterface { return &serviceprovider{ mode: mode, region: region, ec2Info: ec2Info, metadataProvider: metadataProvider, - ec2Provider: providerType, - ec2Credential: ec2Credential, done: done, logger: logger, logFiles: make(map[LogFileGlob]ServiceAttribute), diff --git a/extension/entitystore/serviceprovider_test.go b/extension/entitystore/serviceprovider_test.go index 70aa8da140..a05f87f755 100644 --- a/extension/entitystore/serviceprovider_test.go +++ b/extension/entitystore/serviceprovider_test.go @@ -7,65 +7,27 
@@ import ( "testing" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/stretchr/testify/assert" "go.uber.org/zap" - configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" "github.com/aws/amazon-cloudwatch-agent/translator/config" ) -type mockServiceNameEC2Client struct { - ec2iface.EC2API - throttleError bool - authError bool -} - -// construct the return results for the mocked DescribeTags api -var ( - tagKeyService = "service" - tagValService = "test-service" - tagDesService = ec2.TagDescription{Key: &tagKeyService, Value: &tagValService} -) - -func (m *mockServiceNameEC2Client) DescribeTags(*ec2.DescribeTagsInput) (*ec2.DescribeTagsOutput, error) { - if m.throttleError { - return nil, awserr.New(RequestLimitExceeded, "throttle limit exceeded", nil) - } - if m.authError { - return nil, awserr.New("UnauthorizedOperation", "UnauthorizedOperation occurred", nil) - } - testTags := ec2.DescribeTagsOutput{ - NextToken: nil, - Tags: []*ec2.TagDescription{&tagDesService}, - } - return &testTags, nil -} - func Test_serviceprovider_startServiceProvider(t *testing.T) { - type args struct { - metadataProvider ec2metadataprovider.MetadataProvider - ec2Client ec2iface.EC2API - } tests := []struct { - name string - args args - wantIAM string - wantTag string + name string + metadataProvider ec2metadataprovider.MetadataProvider + wantIAM string + wantTag string }{ { name: "HappyPath_AllServiceNames", - args: args{ - metadataProvider: &mockMetadataProvider{ - InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ - InstanceID: "i-123456789"}, - }, - ec2Client: &mockServiceNameEC2Client{}, + metadataProvider: &mockMetadataProvider{ + InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ + InstanceID: 
"i-123456789"}, + Tags: map[string]string{"service": "test-service"}, }, wantIAM: "TestRole", wantTag: "test-service", @@ -76,20 +38,16 @@ func Test_serviceprovider_startServiceProvider(t *testing.T) { done := make(chan struct{}) logger, _ := zap.NewDevelopment() s := serviceprovider{ - metadataProvider: tt.args.metadataProvider, - ec2Provider: func(s string, config *configaws.CredentialConfig) ec2iface.EC2API { - return tt.args.ec2Client - }, - ec2API: tt.args.ec2Client, - done: done, - logger: logger, + metadataProvider: tt.metadataProvider, + done: done, + logger: logger, } go s.startServiceProvider() time.Sleep(3 * time.Second) close(done) assert.Equal(t, tt.wantIAM, s.iamRole) - assert.Equal(t, tt.wantTag, s.ec2TagServiceName) + assert.Equal(t, tt.wantTag, s.imdsServiceName) }) } } @@ -196,10 +154,10 @@ func Test_serviceprovider_serviceAttributeForLogFile(t *testing.T) { func Test_serviceprovider_serviceAttributeFromEc2Tags(t *testing.T) { s := &serviceprovider{} - assert.Equal(t, ServiceAttribute{}, s.serviceAttributeFromEc2Tags()) + assert.Equal(t, ServiceAttribute{}, s.serviceAttributeFromImdsTags()) - s = &serviceprovider{ec2TagServiceName: "test-service"} - assert.Equal(t, ServiceAttribute{ServiceName: "test-service", ServiceNameSource: ServiceNameSourceResourceTags}, s.serviceAttributeFromEc2Tags()) + s = &serviceprovider{imdsServiceName: "test-service"} + assert.Equal(t, ServiceAttribute{ServiceName: "test-service", ServiceNameSource: ServiceNameSourceResourceTags}, s.serviceAttributeFromImdsTags()) } func Test_serviceprovider_serviceAttributeFromIamRole(t *testing.T) { @@ -248,7 +206,7 @@ func Test_serviceprovider_logFileServiceAttribute(t *testing.T) { s.iamRole = "test-role" assert.Equal(t, ServiceAttribute{ServiceName: "test-role", ServiceNameSource: ServiceNameSourceClientIamRole, Environment: "ec2:test-asg"}, s.logFileServiceAttribute("glob", "group")) - s.ec2TagServiceName = "test-service-from-tags" + s.imdsServiceName = "test-service-from-tags" 
assert.Equal(t, ServiceAttribute{ServiceName: "test-service-from-tags", ServiceNameSource: ServiceNameSourceResourceTags, Environment: "ec2:test-asg"}, s.logFileServiceAttribute("glob", "group")) s.logFiles["glob"] = ServiceAttribute{ServiceName: "test-service-from-logfile", ServiceNameSource: ServiceNameSourceUserConfiguration} @@ -274,36 +232,29 @@ func Test_serviceprovider_getServiceNameSource(t *testing.T) { assert.Equal(t, s.iamRole, serviceName) assert.Equal(t, ServiceNameSourceClientIamRole, serviceNameSource) - s.ec2TagServiceName = "test-service-from-tags" + s.imdsServiceName = "test-service-from-tags" serviceName, serviceNameSource = s.getServiceNameAndSource() - assert.Equal(t, s.ec2TagServiceName, serviceName) + assert.Equal(t, s.imdsServiceName, serviceName) assert.Equal(t, ServiceNameSourceResourceTags, serviceNameSource) + } func Test_serviceprovider_getIAMRole(t *testing.T) { - type fields struct { - metadataProvider ec2metadataprovider.MetadataProvider - ec2API ec2iface.EC2API - } tests := []struct { - name string - fields fields - want string + name string + metadataProvider ec2metadataprovider.MetadataProvider + want string }{ { - name: "Happypath_MockMetadata", - fields: fields{ - metadataProvider: &mockMetadataProvider{}, - ec2API: &mockServiceNameEC2Client{}, - }, - want: "TestRole", + name: "Happypath_MockMetadata", + metadataProvider: &mockMetadataProvider{}, + want: "TestRole", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { s := serviceprovider{ - metadataProvider: tt.fields.metadataProvider, - ec2API: tt.fields.ec2API, + metadataProvider: tt.metadataProvider, } s.getIAMRole() assert.Equal(t, tt.want, s.iamRole) @@ -311,84 +262,52 @@ func Test_serviceprovider_getIAMRole(t *testing.T) { } } -func Test_serviceprovider_getEC2TagFilters(t *testing.T) { - type fields struct { - metadataProvider ec2metadataprovider.MetadataProvider - ec2API ec2iface.EC2API - } - tests := []struct { - name string - fields fields - want 
[]*ec2.Filter - wantErr assert.ErrorAssertionFunc - }{ - { - name: "HappyPath_MatchTags", - fields: fields{ - metadataProvider: &mockMetadataProvider{ - InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ - InstanceID: "i-123456789"}, - }, - ec2API: &mockServiceNameEC2Client{}, - }, - want: []*ec2.Filter{ - { - Name: aws.String("resource-type"), - Values: aws.StringSlice([]string{"instance"}), - }, { - Name: aws.String("resource-id"), - Values: aws.StringSlice([]string{"i-123456789"}), - }, { - Name: aws.String("key"), - Values: aws.StringSlice([]string{"service", "application", "app"}), - }, - }, - wantErr: nil, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &serviceprovider{ - metadataProvider: tt.fields.metadataProvider, - ec2API: tt.fields.ec2API, - } - got, err := s.getEC2TagFilters() - assert.NoError(t, err) - assert.Equalf(t, tt.want, got, "getEC2TagFilters()") - }) - } -} +func Test_serviceprovider_getImdsServiceName(t *testing.T) { -func Test_serviceprovider_getEC2TagServiceName(t *testing.T) { - type fields struct { - metadataProvider ec2metadataprovider.MetadataProvider - ec2API ec2iface.EC2API - } tests := []struct { name string - fields fields + metadataProvider ec2metadataprovider.MetadataProvider wantTagServiceName string }{ { - name: "HappyPath_ServiceExists", - fields: fields{ - metadataProvider: &mockMetadataProvider{ - InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ - InstanceID: "i-123456789"}, - }, - ec2API: &mockServiceNameEC2Client{}, - }, + name: "HappyPath_ServiceExists", + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc, Tags: map[string]string{"service": "test-service"}}, + wantTagServiceName: "test-service", + }, + { + name: "HappyPath_ApplicationExists", + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc, Tags: map[string]string{"application": "test-application"}}, + 
wantTagServiceName: "test-application", + }, + { + name: "HappyPath_AppExists", + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc, Tags: map[string]string{"app": "test-app"}}, + wantTagServiceName: "test-app", + }, + { + name: "HappyPath_PreferServiceOverApplication", + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc, Tags: map[string]string{"service": "test-service", "application": "test-application"}}, + wantTagServiceName: "test-service", + }, + { + name: "HappyPath_PreferApplicationOverApp", + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc, Tags: map[string]string{"application": "test-application", "app": "test-app"}}, + wantTagServiceName: "test-application", + }, + { + name: "HappyPath_PreferServiceOverApplicationAndApp", + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDoc, Tags: map[string]string{"service": "test-service", "application": "test-application", "app": "test-app"}}, wantTagServiceName: "test-service", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { s := &serviceprovider{ - metadataProvider: tt.fields.metadataProvider, - ec2API: tt.fields.ec2API, + logger: zap.NewExample(), + metadataProvider: tt.metadataProvider, } - s.getEC2TagServiceName() - assert.Equal(t, tt.wantTagServiceName, s.ec2TagServiceName) + s.getImdsServiceName() + assert.Equal(t, tt.wantTagServiceName, s.imdsServiceName) }) } } From ac8bbcebe6ee696e9112068f922e87a018723b98 Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Mon, 21 Oct 2024 11:24:37 -0400 Subject: [PATCH 32/47] Discard service and environment name dimensions from user configuration (#853) --- extension/entitystore/extension_test.go | 6 +- .../entityattributes/entityattributes.go | 4 ++ plugins/processors/awsentity/processor.go | 72 +++++++++++++++---- 
.../processors/awsentity/processor_test.go | 69 ++++++++++++++++-- .../sampleConfig/compass_linux_config.conf | 4 ++ .../collectd/ruleDeploymentEnvironment.go | 5 +- .../collectd/ruleServiceName.go | 5 +- .../statsd/ruleDeploymentEnvironment.go | 5 +- .../metrics_collect/statsd/ruleServiceName.go | 5 +- 9 files changed, 145 insertions(+), 30 deletions(-) diff --git a/extension/entitystore/extension_test.go b/extension/entitystore/extension_test.go index 0763e3afec..816d6470f1 100644 --- a/extension/entitystore/extension_test.go +++ b/extension/entitystore/extension_test.go @@ -81,8 +81,9 @@ type mockMetadataProvider struct { func mockMetadataProviderFunc() ec2metadataprovider.MetadataProvider { return &mockMetadataProvider{ - Tags: "aws:autoscaling:groupName", - TagValue: "ASG-1", + Tags: map[string]string{ + "aws:autoscaling:groupName": "ASG-1", + }, InstanceIdentityDocument: &ec2metadata.EC2InstanceIdentityDocument{ InstanceID: "i-123456789", }, @@ -568,7 +569,6 @@ func TestEntityStore_LogMessageDoesNotIncludeResourceInfo(t *testing.T) { Mode: tt.args.mode, KubernetesMode: tt.args.kubernetesMode, } - getEC2Provider = mockEC2Provider getMetaDataProvider = mockMetadataProviderFunc es := &EntityStore{ logger: logger, diff --git a/plugins/processors/awsentity/entityattributes/entityattributes.go b/plugins/processors/awsentity/entityattributes/entityattributes.go index df54a57ab0..5378221803 100644 --- a/plugins/processors/awsentity/entityattributes/entityattributes.go +++ b/plugins/processors/awsentity/entityattributes/entityattributes.go @@ -9,6 +9,10 @@ const ( Resource = "Resource" Service = "Service" + AttributeServiceNameSource = "service.name.source" + AttributeDeploymentEnvironmentSource = "deployment.environment.source" + AttributeServiceNameSourceUserConfig = "UserConfiguration" + // The following are entity related attributes AWSEntityPrefix = "com.amazonaws.cloudwatch.entity.internal." 
AttributeEntityType = AWSEntityPrefix + "type" diff --git a/plugins/processors/awsentity/processor.go b/plugins/processors/awsentity/processor.go index b3c399a1a9..1fa1dfadfd 100644 --- a/plugins/processors/awsentity/processor.go +++ b/plugins/processors/awsentity/processor.go @@ -20,12 +20,11 @@ import ( ) const ( - attributeAwsLogGroupNames = "aws.log.group.names" - attributeDeploymentEnvironment = "deployment.environment" - attributeServiceName = "service.name" - attributeService = "Service" - attributeServiceNameSourceUserConfig = "UserConfiguration" - EMPTY = "" + attributeAwsLogGroupNames = "aws.log.group.names" + attributeDeploymentEnvironment = "deployment.environment" + attributeServiceName = "service.name" + attributeService = "Service" + EMPTY = "" ) type scraper interface { @@ -139,11 +138,11 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric entityServiceName := getServiceAttributes(resourceAttrs) entityEnvironmentName := environmentName if (entityServiceName == EMPTY || entityEnvironmentName == EMPTY) && p.config.ScrapeDatapointAttribute { - entityServiceName, entityEnvironmentName = p.scrapeServiceAttribute(rm.At(i).ScopeMetrics()) + entityServiceName, entityEnvironmentName, entityServiceNameSource = p.scrapeServiceAttribute(rm.At(i).ScopeMetrics()) // If the entityServiceNameSource is empty here, that means it was not configured via instrumentation // If entityServiceName is a datapoint attribute, that means the service name is coming from the UserConfiguration source - if entityServiceNameSource == EMPTY && entityServiceName != EMPTY { - entityServiceNameSource = attributeServiceNameSourceUserConfig + if entityServiceNameSource == entityattributes.AttributeServiceNameSourceUserConfig && entityServiceName != EMPTY { + entityServiceNameSource = entityattributes.AttributeServiceNameSourceUserConfig } } if p.config.KubernetesMode != "" { @@ -257,14 +256,15 @@ func (p *awsEntityProcessor) processMetrics(_ 
context.Context, md pmetric.Metric // scrapeServiceAttribute expands the datapoint attributes and search for // service name and environment attributes. This is only used for components // that only emit attributes on datapoint level. -func (p *awsEntityProcessor) scrapeServiceAttribute(scopeMetric pmetric.ScopeMetricsSlice) (string, string) { +func (p *awsEntityProcessor) scrapeServiceAttribute(scopeMetric pmetric.ScopeMetricsSlice) (string, string, string) { entityServiceName := EMPTY + entityServiceNameSource := EMPTY entityEnvironmentName := EMPTY for j := 0; j < scopeMetric.Len(); j++ { metric := scopeMetric.At(j).Metrics() for k := 0; k < metric.Len(); k++ { - if entityServiceName != EMPTY && entityEnvironmentName != EMPTY { - return entityServiceName, entityEnvironmentName + if entityServiceName != EMPTY && entityEnvironmentName != EMPTY && entityServiceNameSource != EMPTY { + return entityServiceName, entityEnvironmentName, entityServiceNameSource } m := metric.At(k) switch m.Type() { @@ -275,9 +275,19 @@ func (p *awsEntityProcessor) scrapeServiceAttribute(scopeMetric pmetric.ScopeMet if dpService != EMPTY { entityServiceName = dpService } + if dpServiceNameSource, ok := dps.At(l).Attributes().Get(entityattributes.AttributeServiceNameSource); ok { + entityServiceNameSource = dpServiceNameSource.Str() + dps.At(l).Attributes().Remove(semconv.AttributeServiceName) + dps.At(l).Attributes().Remove(entityattributes.AttributeServiceNameSource) + } if dpEnvironment, ok := dps.At(l).Attributes().Get(semconv.AttributeDeploymentEnvironment); ok { entityEnvironmentName = dpEnvironment.Str() } + if _, ok := dps.At(l).Attributes().Get(entityattributes.AttributeDeploymentEnvironmentSource); ok { + dps.At(l).Attributes().Remove(semconv.AttributeDeploymentEnvironment) + dps.At(l).Attributes().Remove(entityattributes.AttributeDeploymentEnvironmentSource) + } + } case pmetric.MetricTypeSum: dps := m.Sum().DataPoints() @@ -286,9 +296,18 @@ func (p *awsEntityProcessor) 
scrapeServiceAttribute(scopeMetric pmetric.ScopeMet if dpService != EMPTY { entityServiceName = dpService } + if dpServiceNameSource, ok := dps.At(l).Attributes().Get(entityattributes.AttributeServiceNameSource); ok { + entityServiceNameSource = dpServiceNameSource.Str() + dps.At(l).Attributes().Remove(semconv.AttributeServiceName) + dps.At(l).Attributes().Remove(entityattributes.AttributeServiceNameSource) + } if dpEnvironment, ok := dps.At(l).Attributes().Get(semconv.AttributeDeploymentEnvironment); ok { entityEnvironmentName = dpEnvironment.Str() } + if _, ok := dps.At(l).Attributes().Get(entityattributes.AttributeDeploymentEnvironmentSource); ok { + dps.At(l).Attributes().Remove(semconv.AttributeDeploymentEnvironment) + dps.At(l).Attributes().Remove(entityattributes.AttributeDeploymentEnvironmentSource) + } } case pmetric.MetricTypeHistogram: dps := m.Histogram().DataPoints() @@ -297,9 +316,18 @@ func (p *awsEntityProcessor) scrapeServiceAttribute(scopeMetric pmetric.ScopeMet if dpService != EMPTY { entityServiceName = dpService } + if dpServiceNameSource, ok := dps.At(l).Attributes().Get(entityattributes.AttributeServiceNameSource); ok { + entityServiceNameSource = dpServiceNameSource.Str() + dps.At(l).Attributes().Remove(semconv.AttributeServiceName) + dps.At(l).Attributes().Remove(entityattributes.AttributeServiceNameSource) + } if dpEnvironment, ok := dps.At(l).Attributes().Get(semconv.AttributeDeploymentEnvironment); ok { entityEnvironmentName = dpEnvironment.Str() } + if _, ok := dps.At(l).Attributes().Get(entityattributes.AttributeDeploymentEnvironmentSource); ok { + dps.At(l).Attributes().Remove(semconv.AttributeDeploymentEnvironment) + dps.At(l).Attributes().Remove(entityattributes.AttributeDeploymentEnvironmentSource) + } } case pmetric.MetricTypeExponentialHistogram: dps := m.ExponentialHistogram().DataPoints() @@ -308,9 +336,18 @@ func (p *awsEntityProcessor) scrapeServiceAttribute(scopeMetric pmetric.ScopeMet if dpService != EMPTY { 
entityServiceName = dpService } + if dpServiceNameSource, ok := dps.At(l).Attributes().Get(entityattributes.AttributeServiceNameSource); ok { + entityServiceNameSource = dpServiceNameSource.Str() + dps.At(l).Attributes().Remove(semconv.AttributeServiceName) + dps.At(l).Attributes().Remove(entityattributes.AttributeServiceNameSource) + } if dpEnvironment, ok := dps.At(l).Attributes().Get(semconv.AttributeDeploymentEnvironment); ok { entityEnvironmentName = dpEnvironment.Str() } + if _, ok := dps.At(l).Attributes().Get(entityattributes.AttributeDeploymentEnvironmentSource); ok { + dps.At(l).Attributes().Remove(semconv.AttributeDeploymentEnvironment) + dps.At(l).Attributes().Remove(entityattributes.AttributeDeploymentEnvironmentSource) + } } case pmetric.MetricTypeSummary: dps := m.Sum().DataPoints() @@ -319,9 +356,18 @@ func (p *awsEntityProcessor) scrapeServiceAttribute(scopeMetric pmetric.ScopeMet if dpService != EMPTY { entityServiceName = dpService } + if dpServiceNameSource, ok := dps.At(l).Attributes().Get(entityattributes.AttributeServiceNameSource); ok { + entityServiceNameSource = dpServiceNameSource.Str() + dps.At(l).Attributes().Remove(semconv.AttributeServiceName) + dps.At(l).Attributes().Remove(entityattributes.AttributeServiceNameSource) + } if dpEnvironment, ok := dps.At(l).Attributes().Get(semconv.AttributeDeploymentEnvironment); ok { entityEnvironmentName = dpEnvironment.Str() } + if _, ok := dps.At(l).Attributes().Get(entityattributes.AttributeDeploymentEnvironmentSource); ok { + dps.At(l).Attributes().Remove(semconv.AttributeDeploymentEnvironment) + dps.At(l).Attributes().Remove(entityattributes.AttributeDeploymentEnvironmentSource) + } } default: p.logger.Debug("Ignore unknown metric type", zap.String("type", m.Type().String())) @@ -329,7 +375,7 @@ func (p *awsEntityProcessor) scrapeServiceAttribute(scopeMetric pmetric.ScopeMet } } - return entityServiceName, entityEnvironmentName + return entityServiceName, entityEnvironmentName, 
entityServiceNameSource } // getServiceAttributes prioritize service name retrieval based on diff --git a/plugins/processors/awsentity/processor_test.go b/plugins/processors/awsentity/processor_test.go index b4a3075536..a6e5c6d830 100644 --- a/plugins/processors/awsentity/processor_test.go +++ b/plugins/processors/awsentity/processor_test.go @@ -437,11 +437,13 @@ func TestProcessMetricsDatapointAttributeScraping(t *testing.T) { logger, _ := zap.NewDevelopment() ctx := context.Background() tests := []struct { - name string - metrics pmetric.Metrics - mockServiceNameAndSource func() (string, string) - mockGetEC2InfoFromEntityStore func() entitystore.EC2Info - want map[string]any + name string + checkDatapointAttributeRemoval bool + metrics pmetric.Metrics + mockServiceNameAndSource func() (string, string) + mockGetEC2InfoFromEntityStore func() entitystore.EC2Info + want map[string]any + wantDatapointAttributes map[string]any }{ { name: "EmptyMetrics", @@ -455,7 +457,7 @@ func TestProcessMetricsDatapointAttributeScraping(t *testing.T) { want: map[string]any{ entityattributes.AttributeEntityType: "Service", entityattributes.AttributeEntityServiceName: "test-service", - entityattributes.AttributeEntityServiceNameSource: "UserConfiguration", + entityattributes.AttributeEntityServiceNameSource: "Unknown", entityattributes.AttributeEntityPlatformType: "AWS::EC2", entityattributes.AttributeEntityInstanceID: "i-123456789", entityattributes.AttributeEntityAwsAccountId: "0123456789012", @@ -481,6 +483,37 @@ func TestProcessMetricsDatapointAttributeScraping(t *testing.T) { name: "DatapointAttributeServiceNameAndEnvironment", metrics: generateDatapointMetrics(attributeServiceName, "test-service", attributeDeploymentEnvironment, "test-environment"), mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", "0123456789012", ""), + want: map[string]any{ + entityattributes.AttributeEntityType: "Service", + entityattributes.AttributeEntityServiceName: 
"test-service", + entityattributes.AttributeEntityDeploymentEnvironment: "test-environment", + entityattributes.AttributeEntityPlatformType: "AWS::EC2", + entityattributes.AttributeEntityInstanceID: "i-123456789", + entityattributes.AttributeEntityAwsAccountId: "0123456789012", + entityattributes.AttributeEntityServiceNameSource: "Unknown", + }, + }, + { + name: "DatapointAttributeServiceAndEnvironmentNameUserConfiguration", + checkDatapointAttributeRemoval: true, + metrics: generateDatapointMetrics(attributeServiceName, "test-service", attributeDeploymentEnvironment, "test-environment", entityattributes.AttributeServiceNameSource, entityattributes.AttributeServiceNameSourceUserConfig, entityattributes.AttributeDeploymentEnvironmentSource, entityattributes.AttributeServiceNameSourceUserConfig), + mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", "0123456789012", ""), + want: map[string]any{ + entityattributes.AttributeEntityType: "Service", + entityattributes.AttributeEntityServiceName: "test-service", + entityattributes.AttributeEntityDeploymentEnvironment: "test-environment", + entityattributes.AttributeEntityPlatformType: "AWS::EC2", + entityattributes.AttributeEntityInstanceID: "i-123456789", + entityattributes.AttributeEntityAwsAccountId: "0123456789012", + entityattributes.AttributeEntityServiceNameSource: "UserConfiguration", + }, + wantDatapointAttributes: map[string]any{}, + }, + { + name: "DatapointAttributeServiceNameUserConfigurationAndUserEnvironment", + checkDatapointAttributeRemoval: true, + metrics: generateDatapointMetrics(attributeServiceName, "test-service", attributeDeploymentEnvironment, "test-environment", entityattributes.AttributeServiceNameSource, entityattributes.AttributeServiceNameSourceUserConfig), + mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", "0123456789012", ""), want: map[string]any{ entityattributes.AttributeEntityType: "Service", 
entityattributes.AttributeEntityServiceName: "test-service", @@ -490,6 +523,27 @@ func TestProcessMetricsDatapointAttributeScraping(t *testing.T) { entityattributes.AttributeEntityAwsAccountId: "0123456789012", entityattributes.AttributeEntityServiceNameSource: "UserConfiguration", }, + wantDatapointAttributes: map[string]any{ + attributeDeploymentEnvironment: "test-environment", + }, + }, + { + name: "DatapointAttributeEnvironmentNameUserConfigurationAndUserServiceName", + checkDatapointAttributeRemoval: true, + metrics: generateDatapointMetrics(attributeServiceName, "test-service", attributeDeploymentEnvironment, "test-environment", entityattributes.AttributeDeploymentEnvironmentSource, entityattributes.AttributeServiceNameSourceUserConfig), + mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", "0123456789012", ""), + want: map[string]any{ + entityattributes.AttributeEntityType: "Service", + entityattributes.AttributeEntityServiceName: "test-service", + entityattributes.AttributeEntityDeploymentEnvironment: "test-environment", + entityattributes.AttributeEntityPlatformType: "AWS::EC2", + entityattributes.AttributeEntityInstanceID: "i-123456789", + entityattributes.AttributeEntityAwsAccountId: "0123456789012", + entityattributes.AttributeEntityServiceNameSource: "Unknown", + }, + wantDatapointAttributes: map[string]any{ + attributeServiceName: "test-service", + }, }, } @@ -511,6 +565,9 @@ func TestProcessMetricsDatapointAttributeScraping(t *testing.T) { if rm.Len() > 0 { assert.Equal(t, tt.want, rm.At(0).Resource().Attributes().AsRaw()) } + if tt.checkDatapointAttributeRemoval { + assert.Equal(t, tt.wantDatapointAttributes, rm.At(0).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).Attributes().AsRaw()) + } getServiceNameSource = resetServiceNameSource }) } diff --git a/translator/tocwconfig/sampleConfig/compass_linux_config.conf b/translator/tocwconfig/sampleConfig/compass_linux_config.conf index c21200ebf6..deab98289f 
100644 --- a/translator/tocwconfig/sampleConfig/compass_linux_config.conf +++ b/translator/tocwconfig/sampleConfig/compass_linux_config.conf @@ -56,6 +56,8 @@ "aws:AggregationInterval" = "60s" "deployment.environment" = "plugin-level-environment" "service.name" = "plugin-level-service" + "service.name.source" = "UserConfiguration" + "deployment.environment.source" = "UserConfiguration" [[inputs.statsd]] interval = "10s" @@ -66,6 +68,8 @@ "aws:AggregationInterval" = "60s" "deployment.environment" = "agent-level-environment" "service.name" = "metric-level-service" + "service.name.source" = "UserConfiguration" + "deployment.environment.source" = "UserConfiguration" [outputs] diff --git a/translator/translate/metrics/metrics_collect/collectd/ruleDeploymentEnvironment.go b/translator/translate/metrics/metrics_collect/collectd/ruleDeploymentEnvironment.go index ed9cae0df8..04d9201996 100644 --- a/translator/translate/metrics/metrics_collect/collectd/ruleDeploymentEnvironment.go +++ b/translator/translate/metrics/metrics_collect/collectd/ruleDeploymentEnvironment.go @@ -4,6 +4,7 @@ package collected import ( + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/entityattributes" "github.com/aws/amazon-cloudwatch-agent/translator" "github.com/aws/amazon-cloudwatch-agent/translator/translate/metrics" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" @@ -19,9 +20,9 @@ func (obj *DeploymentEnvironment) ApplyRule(input interface{}) (string, interfac parentKeyVal := metrics.GlobalMetricConfig.DeploymentEnvironment if returnVal != "" { - return common.Tags, map[string]interface{}{returnKey: returnVal} + return common.Tags, map[string]interface{}{returnKey: returnVal, entityattributes.AttributeDeploymentEnvironmentSource: entityattributes.AttributeServiceNameSourceUserConfig} } else if parentKeyVal != "" { - return common.Tags, map[string]interface{}{returnKey: parentKeyVal} + return common.Tags, map[string]interface{}{returnKey: 
parentKeyVal, entityattributes.AttributeDeploymentEnvironmentSource: entityattributes.AttributeServiceNameSourceUserConfig} } return "", nil } diff --git a/translator/translate/metrics/metrics_collect/collectd/ruleServiceName.go b/translator/translate/metrics/metrics_collect/collectd/ruleServiceName.go index 45fe4ddee2..dc7113580a 100644 --- a/translator/translate/metrics/metrics_collect/collectd/ruleServiceName.go +++ b/translator/translate/metrics/metrics_collect/collectd/ruleServiceName.go @@ -4,6 +4,7 @@ package collected import ( + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/entityattributes" "github.com/aws/amazon-cloudwatch-agent/translator" "github.com/aws/amazon-cloudwatch-agent/translator/translate/metrics" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" @@ -19,9 +20,9 @@ func (obj *ServiceName) ApplyRule(input interface{}) (string, interface{}) { parentKeyVal := metrics.GlobalMetricConfig.ServiceName if returnVal != "" { - return common.Tags, map[string]interface{}{returnKey: returnVal} + return common.Tags, map[string]interface{}{returnKey: returnVal, entityattributes.AttributeServiceNameSource: entityattributes.AttributeServiceNameSourceUserConfig} } else if parentKeyVal != "" { - return common.Tags, map[string]interface{}{returnKey: parentKeyVal} + return common.Tags, map[string]interface{}{returnKey: parentKeyVal, entityattributes.AttributeServiceNameSource: entityattributes.AttributeServiceNameSourceUserConfig} } return "", nil } diff --git a/translator/translate/metrics/metrics_collect/statsd/ruleDeploymentEnvironment.go b/translator/translate/metrics/metrics_collect/statsd/ruleDeploymentEnvironment.go index 02817e5eb5..bb037a81f4 100644 --- a/translator/translate/metrics/metrics_collect/statsd/ruleDeploymentEnvironment.go +++ b/translator/translate/metrics/metrics_collect/statsd/ruleDeploymentEnvironment.go @@ -4,6 +4,7 @@ package statsd import ( + 
"github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/entityattributes" "github.com/aws/amazon-cloudwatch-agent/translator" "github.com/aws/amazon-cloudwatch-agent/translator/translate/metrics" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" @@ -19,9 +20,9 @@ func (obj *DeploymentEnvironment) ApplyRule(input interface{}) (string, interfac parentKeyVal := metrics.GlobalMetricConfig.DeploymentEnvironment if returnVal != "" { - return common.Tags, map[string]interface{}{returnKey: returnVal} + return common.Tags, map[string]interface{}{returnKey: returnVal, entityattributes.AttributeDeploymentEnvironmentSource: entityattributes.AttributeServiceNameSourceUserConfig} } else if parentKeyVal != "" { - return common.Tags, map[string]interface{}{returnKey: parentKeyVal} + return common.Tags, map[string]interface{}{returnKey: parentKeyVal, entityattributes.AttributeDeploymentEnvironmentSource: entityattributes.AttributeServiceNameSourceUserConfig} } return "", nil } diff --git a/translator/translate/metrics/metrics_collect/statsd/ruleServiceName.go b/translator/translate/metrics/metrics_collect/statsd/ruleServiceName.go index cd80e706c2..10832c9e60 100644 --- a/translator/translate/metrics/metrics_collect/statsd/ruleServiceName.go +++ b/translator/translate/metrics/metrics_collect/statsd/ruleServiceName.go @@ -4,6 +4,7 @@ package statsd import ( + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/entityattributes" "github.com/aws/amazon-cloudwatch-agent/translator" "github.com/aws/amazon-cloudwatch-agent/translator/translate/metrics" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" @@ -19,9 +20,9 @@ func (obj *ServiceName) ApplyRule(input interface{}) (string, interface{}) { parentKeyVal := metrics.GlobalMetricConfig.ServiceName if returnVal != "" { - return common.Tags, map[string]interface{}{returnKey: returnVal} + return common.Tags, map[string]interface{}{returnKey: returnVal, 
entityattributes.AttributeServiceNameSource: entityattributes.AttributeServiceNameSourceUserConfig} } else if parentKeyVal != "" { - return common.Tags, map[string]interface{}{returnKey: parentKeyVal} + return common.Tags, map[string]interface{}{returnKey: parentKeyVal, entityattributes.AttributeServiceNameSource: entityattributes.AttributeServiceNameSourceUserConfig} } return "", nil } From a1949778ae40236504d7b8418c8f156804d63b12 Mon Sep 17 00:00:00 2001 From: Jason Polanco Date: Mon, 21 Oct 2024 14:57:34 -0400 Subject: [PATCH 33/47] [Compass] Implement Deployment Environment Defaults (#854) --- .../entityattributes/entityattributes.go | 4 ++ plugins/processors/awsentity/processor.go | 8 +++ .../processors/awsentity/processor_test.go | 63 ++++++++++++++----- 3 files changed, 61 insertions(+), 14 deletions(-) diff --git a/plugins/processors/awsentity/entityattributes/entityattributes.go b/plugins/processors/awsentity/entityattributes/entityattributes.go index 5378221803..a5d2898901 100644 --- a/plugins/processors/awsentity/entityattributes/entityattributes.go +++ b/plugins/processors/awsentity/entityattributes/entityattributes.go @@ -51,6 +51,10 @@ const ( Platform = "Platform" InstanceID = "InstanceID" AutoscalingGroup = "AutoScalingGroup" + + // The following are values used for the environment fallbacks required on EC2 + DeploymentEnvironmentFallbackPrefix = "ec2:" + DeploymentEnvironmentDefault = DeploymentEnvironmentFallbackPrefix + "default" ) // KeyAttributeEntityToShortNameMap is used to map key attributes from otel to the actual values used in the Entity object diff --git a/plugins/processors/awsentity/processor.go b/plugins/processors/awsentity/processor.go index 1fa1dfadfd..526230077b 100644 --- a/plugins/processors/awsentity/processor.go +++ b/plugins/processors/awsentity/processor.go @@ -219,6 +219,14 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric entityPlatformType = entityattributes.AttributeEntityEC2Platform 
ec2Info = getEC2InfoFromEntityStore() + if entityEnvironmentName == EMPTY { + if ec2Info.AutoScalingGroup != EMPTY { + entityEnvironmentName = entityattributes.DeploymentEnvironmentFallbackPrefix + ec2Info.AutoScalingGroup + } else { + entityEnvironmentName = entityattributes.DeploymentEnvironmentDefault + } + } + AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityType, entityattributes.Service) AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityServiceName, entityServiceName) AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityDeploymentEnvironment, entityEnvironmentName) diff --git a/plugins/processors/awsentity/processor_test.go b/plugins/processors/awsentity/processor_test.go index a6e5c6d830..4d2f9216ce 100644 --- a/plugins/processors/awsentity/processor_test.go +++ b/plugins/processors/awsentity/processor_test.go @@ -289,13 +289,14 @@ func TestProcessMetricsResourceAttributeScraping(t *testing.T) { mockServiceNameSource: newMockGetServiceNameAndSource("test-service-name", "Instrumentation"), mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", "0123456789012", ""), want: map[string]any{ - entityattributes.AttributeEntityType: "Service", - entityattributes.AttributeEntityServiceName: "test-service", - entityattributes.AttributeEntityPlatformType: "AWS::EC2", - entityattributes.AttributeEntityInstanceID: "i-123456789", - entityattributes.AttributeEntityAwsAccountId: "0123456789012", - entityattributes.AttributeEntityServiceNameSource: "Unknown", - attributeServiceName: "test-service", + entityattributes.AttributeEntityType: "Service", + entityattributes.AttributeEntityServiceName: "test-service", + entityattributes.AttributeEntityPlatformType: "AWS::EC2", + entityattributes.AttributeEntityInstanceID: "i-123456789", + entityattributes.AttributeEntityAwsAccountId: "0123456789012", + entityattributes.AttributeEntityServiceNameSource: "Unknown", + 
entityattributes.AttributeEntityDeploymentEnvironment: "ec2:default", + attributeServiceName: "test-service", }, }, { @@ -355,6 +356,39 @@ func TestProcessMetricsResourceAttributeScraping(t *testing.T) { semconv.AttributeK8SNodeName: "test-node", }, }, + { + name: "ResourceAttributeEnvironmentFallbackToASG", + platform: config.ModeEC2, + metrics: generateMetrics(), + mockServiceNameSource: newMockGetServiceNameAndSource("unknown_service", "Unknown"), + mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", "0123456789012", "test-asg"), + want: map[string]any{ + entityattributes.AttributeEntityType: "Service", + entityattributes.AttributeEntityServiceName: "unknown_service", + entityattributes.AttributeEntityDeploymentEnvironment: "ec2:test-asg", + entityattributes.AttributeEntityPlatformType: "AWS::EC2", + entityattributes.AttributeEntityInstanceID: "i-123456789", + entityattributes.AttributeEntityAwsAccountId: "0123456789012", + entityattributes.AttributeEntityServiceNameSource: "Unknown", + entityattributes.AttributeEntityAutoScalingGroup: "test-asg", + }, + }, + { + name: "ResourceAttributeEnvironmentFallbackToDefault", + platform: config.ModeEC2, + metrics: generateMetrics(), + mockServiceNameSource: newMockGetServiceNameAndSource("unknown_service", "Unknown"), + mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", "0123456789012", ""), + want: map[string]any{ + entityattributes.AttributeEntityType: "Service", + entityattributes.AttributeEntityServiceName: "unknown_service", + entityattributes.AttributeEntityDeploymentEnvironment: "ec2:default", + entityattributes.AttributeEntityPlatformType: "AWS::EC2", + entityattributes.AttributeEntityInstanceID: "i-123456789", + entityattributes.AttributeEntityAwsAccountId: "0123456789012", + entityattributes.AttributeEntityServiceNameSource: "Unknown", + }, + }, } for _, tt := range tests { @@ -455,13 +489,14 @@ func TestProcessMetricsDatapointAttributeScraping(t 
*testing.T) { metrics: generateDatapointMetrics(attributeServiceName, "test-service"), mockGetEC2InfoFromEntityStore: newMockGetEC2InfoFromEntityStore("i-123456789", "0123456789012", "auto-scaling"), want: map[string]any{ - entityattributes.AttributeEntityType: "Service", - entityattributes.AttributeEntityServiceName: "test-service", - entityattributes.AttributeEntityServiceNameSource: "Unknown", - entityattributes.AttributeEntityPlatformType: "AWS::EC2", - entityattributes.AttributeEntityInstanceID: "i-123456789", - entityattributes.AttributeEntityAwsAccountId: "0123456789012", - entityattributes.AttributeEntityAutoScalingGroup: "auto-scaling", + entityattributes.AttributeEntityType: "Service", + entityattributes.AttributeEntityServiceName: "test-service", + entityattributes.AttributeEntityServiceNameSource: "Unknown", + entityattributes.AttributeEntityPlatformType: "AWS::EC2", + entityattributes.AttributeEntityInstanceID: "i-123456789", + entityattributes.AttributeEntityAwsAccountId: "0123456789012", + entityattributes.AttributeEntityAutoScalingGroup: "auto-scaling", + entityattributes.AttributeEntityDeploymentEnvironment: "ec2:auto-scaling", }, }, { From 4e4d072479c73d5a22f0aeb97959878684fe241b Mon Sep 17 00:00:00 2001 From: Pooja Reddy Nathala Date: Wed, 23 Oct 2024 15:50:38 -0400 Subject: [PATCH 34/47] Revert "Prometheus translation for entity emission on EC2 and K8s (#812)" This reverts commit 68a041bb96855065c541dd94cabbb62e921378a0. 
--- .../sampleConfig/prometheus_config_linux.yaml | 288 ------------------ .../prometheus_config_windows.yaml | 288 ------------------ .../otel/pipeline/prometheus/translator.go | 34 +-- .../pipeline/prometheus/translator_test.go | 31 +- 4 files changed, 9 insertions(+), 632 deletions(-) diff --git a/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml b/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml index 3b9747ba99..ce4ad17041 100644 --- a/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/prometheus_config_linux.yaml @@ -86,293 +86,6 @@ processors: send_batch_max_size: 0 send_batch_size: 8192 timeout: 30s - resourcedetection: - aks: - resource_attributes: - cloud.platform: - enabled: true - cloud.provider: - enabled: true - k8s.cluster.name: - enabled: false - azure: - resource_attributes: - azure.resourcegroup.name: - enabled: true - azure.vm.name: - enabled: true - azure.vm.scaleset.name: - enabled: true - azure.vm.size: - enabled: true - cloud.account.id: - enabled: true - cloud.platform: - enabled: true - cloud.provider: - enabled: true - cloud.region: - enabled: true - host.id: - enabled: true - host.name: - enabled: true - tags: [] - compression: "" - consul: - address: "" - datacenter: "" - namespace: "" - resource_attributes: - cloud.region: - enabled: true - host.id: - enabled: true - host.name: - enabled: true - token_file: "" - detectors: - - eks - - env - - ec2 - disable_keep_alives: false - docker: - resource_attributes: - host.name: - enabled: true - os.type: - enabled: true - ec2: - resource_attributes: - cloud.account.id: - enabled: true - cloud.availability_zone: - enabled: true - cloud.platform: - enabled: true - cloud.provider: - enabled: true - cloud.region: - enabled: true - host.id: - enabled: true - host.image.id: - enabled: true - host.name: - enabled: true - host.type: - enabled: true - tags: - - ^kubernetes.io/cluster/.*$ - - ^aws:autoscaling:groupName - 
ecs: - resource_attributes: - aws.ecs.cluster.arn: - enabled: true - aws.ecs.launchtype: - enabled: true - aws.ecs.task.arn: - enabled: true - aws.ecs.task.family: - enabled: true - aws.ecs.task.id: - enabled: true - aws.ecs.task.revision: - enabled: true - aws.log.group.arns: - enabled: true - aws.log.group.names: - enabled: true - aws.log.stream.arns: - enabled: true - aws.log.stream.names: - enabled: true - cloud.account.id: - enabled: true - cloud.availability_zone: - enabled: true - cloud.platform: - enabled: true - cloud.provider: - enabled: true - cloud.region: - enabled: true - eks: - resource_attributes: - cloud.platform: - enabled: true - cloud.provider: - enabled: true - k8s.cluster.name: - enabled: false - elasticbeanstalk: - resource_attributes: - cloud.platform: - enabled: true - cloud.provider: - enabled: true - deployment.environment: - enabled: true - service.instance.id: - enabled: true - service.version: - enabled: true - endpoint: "" - gcp: - resource_attributes: - cloud.account.id: - enabled: true - cloud.availability_zone: - enabled: true - cloud.platform: - enabled: true - cloud.provider: - enabled: true - cloud.region: - enabled: true - faas.id: - enabled: true - faas.instance: - enabled: true - faas.name: - enabled: true - faas.version: - enabled: true - gcp.cloud_run.job.execution: - enabled: true - gcp.cloud_run.job.task_index: - enabled: true - gcp.gce.instance.hostname: - enabled: false - gcp.gce.instance.name: - enabled: false - host.id: - enabled: true - host.name: - enabled: true - host.type: - enabled: true - k8s.cluster.name: - enabled: true - heroku: - resource_attributes: - cloud.provider: - enabled: true - heroku.app.id: - enabled: true - heroku.dyno.id: - enabled: true - heroku.release.commit: - enabled: true - heroku.release.creation_timestamp: - enabled: true - service.instance.id: - enabled: true - service.name: - enabled: true - service.version: - enabled: true - http2_ping_timeout: 0s - http2_read_idle_timeout: 0s - 
idle_conn_timeout: 1m30s - k8snode: - auth_type: serviceAccount - context: "" - kube_config_path: "" - node_from_env_var: "" - resource_attributes: - k8s.node.name: - enabled: true - k8s.node.uid: - enabled: true - lambda: - resource_attributes: - aws.log.group.names: - enabled: true - aws.log.stream.names: - enabled: true - cloud.platform: - enabled: true - cloud.provider: - enabled: true - cloud.region: - enabled: true - faas.instance: - enabled: true - faas.max_memory: - enabled: true - faas.name: - enabled: true - faas.version: - enabled: true - max_idle_conns: 100 - openshift: - address: "" - resource_attributes: - cloud.platform: - enabled: true - cloud.provider: - enabled: true - cloud.region: - enabled: true - k8s.cluster.name: - enabled: true - tls: - ca_file: "" - cert_file: "" - include_system_ca_certs_pool: false - insecure: false - insecure_skip_verify: false - key_file: "" - max_version: "" - min_version: "" - reload_interval: 0s - server_name_override: "" - token: "" - override: true - proxy_url: "" - read_buffer_size: 0 - system: - resource_attributes: - host.arch: - enabled: false - host.cpu.cache.l2.size: - enabled: false - host.cpu.family: - enabled: false - host.cpu.model.id: - enabled: false - host.cpu.model.name: - enabled: false - host.cpu.stepping: - enabled: false - host.cpu.vendor.id: - enabled: false - host.id: - enabled: false - host.ip: - enabled: false - host.mac: - enabled: false - host.name: - enabled: true - os.description: - enabled: false - os.type: - enabled: true - timeout: 2s - tls: - ca_file: "" - cert_file: "" - include_system_ca_certs_pool: false - insecure: false - insecure_skip_verify: false - key_file: "" - max_version: "" - min_version: "" - reload_interval: 0s - server_name_override: "" - write_buffer_size: 0 receivers: telegraf_prometheus: collection_interval: 1m0s @@ -388,7 +101,6 @@ service: - awsemf/prometheus processors: - batch/prometheus - - resourcedetection receivers: - telegraf_prometheus telemetry: diff --git 
a/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml b/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml index cb615612ed..2a47f34ae3 100644 --- a/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/prometheus_config_windows.yaml @@ -68,293 +68,6 @@ processors: send_batch_max_size: 0 send_batch_size: 8192 timeout: 5s - resourcedetection: - aks: - resource_attributes: - cloud.platform: - enabled: true - cloud.provider: - enabled: true - k8s.cluster.name: - enabled: false - azure: - resource_attributes: - azure.resourcegroup.name: - enabled: true - azure.vm.name: - enabled: true - azure.vm.scaleset.name: - enabled: true - azure.vm.size: - enabled: true - cloud.account.id: - enabled: true - cloud.platform: - enabled: true - cloud.provider: - enabled: true - cloud.region: - enabled: true - host.id: - enabled: true - host.name: - enabled: true - tags: [] - compression: "" - consul: - address: "" - datacenter: "" - namespace: "" - resource_attributes: - cloud.region: - enabled: true - host.id: - enabled: true - host.name: - enabled: true - token_file: "" - detectors: - - eks - - env - - ec2 - disable_keep_alives: false - docker: - resource_attributes: - host.name: - enabled: true - os.type: - enabled: true - ec2: - resource_attributes: - cloud.account.id: - enabled: true - cloud.availability_zone: - enabled: true - cloud.platform: - enabled: true - cloud.provider: - enabled: true - cloud.region: - enabled: true - host.id: - enabled: true - host.image.id: - enabled: true - host.name: - enabled: true - host.type: - enabled: true - tags: - - ^kubernetes.io/cluster/.*$ - - ^aws:autoscaling:groupName - ecs: - resource_attributes: - aws.ecs.cluster.arn: - enabled: true - aws.ecs.launchtype: - enabled: true - aws.ecs.task.arn: - enabled: true - aws.ecs.task.family: - enabled: true - aws.ecs.task.id: - enabled: true - aws.ecs.task.revision: - enabled: true - aws.log.group.arns: - enabled: true 
- aws.log.group.names: - enabled: true - aws.log.stream.arns: - enabled: true - aws.log.stream.names: - enabled: true - cloud.account.id: - enabled: true - cloud.availability_zone: - enabled: true - cloud.platform: - enabled: true - cloud.provider: - enabled: true - cloud.region: - enabled: true - eks: - resource_attributes: - cloud.platform: - enabled: true - cloud.provider: - enabled: true - k8s.cluster.name: - enabled: false - elasticbeanstalk: - resource_attributes: - cloud.platform: - enabled: true - cloud.provider: - enabled: true - deployment.environment: - enabled: true - service.instance.id: - enabled: true - service.version: - enabled: true - endpoint: "" - gcp: - resource_attributes: - cloud.account.id: - enabled: true - cloud.availability_zone: - enabled: true - cloud.platform: - enabled: true - cloud.provider: - enabled: true - cloud.region: - enabled: true - faas.id: - enabled: true - faas.instance: - enabled: true - faas.name: - enabled: true - faas.version: - enabled: true - gcp.cloud_run.job.execution: - enabled: true - gcp.cloud_run.job.task_index: - enabled: true - gcp.gce.instance.hostname: - enabled: false - gcp.gce.instance.name: - enabled: false - host.id: - enabled: true - host.name: - enabled: true - host.type: - enabled: true - k8s.cluster.name: - enabled: true - heroku: - resource_attributes: - cloud.provider: - enabled: true - heroku.app.id: - enabled: true - heroku.dyno.id: - enabled: true - heroku.release.commit: - enabled: true - heroku.release.creation_timestamp: - enabled: true - service.instance.id: - enabled: true - service.name: - enabled: true - service.version: - enabled: true - http2_ping_timeout: 0s - http2_read_idle_timeout: 0s - idle_conn_timeout: 1m30s - k8snode: - auth_type: serviceAccount - context: "" - kube_config_path: "" - node_from_env_var: "" - resource_attributes: - k8s.node.name: - enabled: true - k8s.node.uid: - enabled: true - lambda: - resource_attributes: - aws.log.group.names: - enabled: true - 
aws.log.stream.names: - enabled: true - cloud.platform: - enabled: true - cloud.provider: - enabled: true - cloud.region: - enabled: true - faas.instance: - enabled: true - faas.max_memory: - enabled: true - faas.name: - enabled: true - faas.version: - enabled: true - max_idle_conns: 100 - openshift: - address: "" - resource_attributes: - cloud.platform: - enabled: true - cloud.provider: - enabled: true - cloud.region: - enabled: true - k8s.cluster.name: - enabled: true - tls: - ca_file: "" - cert_file: "" - include_system_ca_certs_pool: false - insecure: false - insecure_skip_verify: false - key_file: "" - max_version: "" - min_version: "" - reload_interval: 0s - server_name_override: "" - token: "" - override: true - proxy_url: "" - read_buffer_size: 0 - system: - resource_attributes: - host.arch: - enabled: false - host.cpu.cache.l2.size: - enabled: false - host.cpu.family: - enabled: false - host.cpu.model.id: - enabled: false - host.cpu.model.name: - enabled: false - host.cpu.stepping: - enabled: false - host.cpu.vendor.id: - enabled: false - host.id: - enabled: false - host.ip: - enabled: false - host.mac: - enabled: false - host.name: - enabled: true - os.description: - enabled: false - os.type: - enabled: true - timeout: 2s - tls: - ca_file: "" - cert_file: "" - include_system_ca_certs_pool: false - insecure: false - insecure_skip_verify: false - key_file: "" - max_version: "" - min_version: "" - reload_interval: 0s - server_name_override: "" - write_buffer_size: 0 receivers: telegraf_prometheus: collection_interval: 1m0s @@ -370,7 +83,6 @@ service: - awsemf/prometheus processors: - batch/prometheus - - resourcedetection receivers: - telegraf_prometheus telemetry: diff --git a/translator/translate/otel/pipeline/prometheus/translator.go b/translator/translate/otel/pipeline/prometheus/translator.go index 6c9e54defc..2943273c59 100644 --- a/translator/translate/otel/pipeline/prometheus/translator.go +++ 
b/translator/translate/otel/pipeline/prometheus/translator.go @@ -9,16 +9,12 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" - "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/logs/metrics_collected/prometheus" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/exporter/awsemf" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/extension/agenthealth" - "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/awsentity" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/batchprocessor" - "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/processor/resourcedetection" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/receiver/adapter" - "github.com/aws/amazon-cloudwatch-agent/translator/util/ecsutil" ) const ( @@ -45,34 +41,12 @@ func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators if conf == nil || !conf.IsSet(key) { return nil, &common.MissingKeyError{ID: t.ID(), JsonKey: key} } - return &common.ComponentTranslators{ - Receivers: common.NewTranslatorMap(adapter.NewTranslator(prometheus.SectionKey, key, time.Minute)), - Processors: t.translateProcessors(), + Receivers: common.NewTranslatorMap(adapter.NewTranslator(prometheus.SectionKey, key, time.Minute)), + Processors: common.NewTranslatorMap( + batchprocessor.NewTranslatorWithNameAndSection(pipelineName, common.LogsKey), // prometheus sits under metrics_collected in "logs" + ), Exporters: common.NewTranslatorMap(awsemf.NewTranslatorWithName(pipelineName)), Extensions: common.NewTranslatorMap(agenthealth.NewTranslator(component.DataTypeLogs, []string{agenthealth.OperationPutLogEvents})), }, nil } - -func (t *translator) translateProcessors() common.TranslatorMap[component.Config] { - mode := 
context.CurrentContext().KubernetesMode() - // if we are on kubernetes or ECS we do not want resource detection processor - // if we are on Kubernetes, enable entity processor - if mode != "" { - return common.NewTranslatorMap( - batchprocessor.NewTranslatorWithNameAndSection(pipelineName, common.LogsKey), // prometheus sits under metrics_collected in "logs" - awsentity.NewTranslatorWithEntityType(awsentity.Service), - ) - } else if mode != "" || ecsutil.GetECSUtilSingleton().IsECS() { - return common.NewTranslatorMap( - batchprocessor.NewTranslatorWithNameAndSection(pipelineName, common.LogsKey), // prometheus sits under metrics_collected in "logs" - ) - } else { - // we are on ec2/onprem - return common.NewTranslatorMap( - batchprocessor.NewTranslatorWithNameAndSection(pipelineName, common.LogsKey), // prometheus sits under metrics_collected in "logs" - resourcedetection.NewTranslator(), - ) - } - -} diff --git a/translator/translate/otel/pipeline/prometheus/translator_test.go b/translator/translate/otel/pipeline/prometheus/translator_test.go index 628ef5e465..b83aa7eea0 100644 --- a/translator/translate/otel/pipeline/prometheus/translator_test.go +++ b/translator/translate/otel/pipeline/prometheus/translator_test.go @@ -12,8 +12,6 @@ import ( "go.opentelemetry.io/collector/confmap" "github.com/aws/amazon-cloudwatch-agent/internal/util/collections" - translatorConfig "github.com/aws/amazon-cloudwatch-agent/translator/config" - "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" ) @@ -27,16 +25,15 @@ func TestTranslator(t *testing.T) { cit := NewTranslator() require.EqualValues(t, "metrics/prometheus", cit.ID().String()) testCases := map[string]struct { - input map[string]interface{} - kubernetesMode string - want *want - wantErr error + input map[string]interface{} + want *want + wantErr error }{ "WithoutPrometheusKey": { input: map[string]interface{}{}, wantErr: 
&common.MissingKeyError{ID: cit.ID(), JsonKey: "logs::metrics_collected::prometheus"}, }, - "WithPrometheusKeyAndOnK8s": { + "WithPrometheusKey": { input: map[string]interface{}{ "logs": map[string]interface{}{ "metrics_collected": map[string]interface{}{ @@ -44,26 +41,9 @@ func TestTranslator(t *testing.T) { }, }, }, - kubernetesMode: translatorConfig.ModeEKS, want: &want{ receivers: []string{"telegraf_prometheus"}, - processors: []string{"batch/prometheus", "awsentity/service"}, - exporters: []string{"awsemf/prometheus"}, - extensions: []string{"agenthealth/logs"}, - }, - }, - "WithPrometheusKeyAndNotOnK8s": { - input: map[string]interface{}{ - "logs": map[string]interface{}{ - "metrics_collected": map[string]interface{}{ - "prometheus": nil, - }, - }, - }, - kubernetesMode: "", - want: &want{ - receivers: []string{"telegraf_prometheus"}, - processors: []string{"batch/prometheus", "resourcedetection"}, + processors: []string{"batch/prometheus"}, exporters: []string{"awsemf/prometheus"}, extensions: []string{"agenthealth/logs"}, }, @@ -72,7 +52,6 @@ func TestTranslator(t *testing.T) { for name, testCase := range testCases { t.Run(name, func(t *testing.T) { conf := confmap.NewFromStringMap(testCase.input) - context.CurrentContext().SetKubernetesMode(testCase.kubernetesMode) got, err := cit.Translate(conf) assert.Equal(t, testCase.wantErr, err) if testCase.want == nil { From b9f9109db8c9cfc1ca73f57d4a716360c675a2b7 Mon Sep 17 00:00:00 2001 From: Pooja Reddy Nathala Date: Wed, 23 Oct 2024 15:51:34 -0400 Subject: [PATCH 35/47] Revert "[Prometheus] Add relabel configs for pod service discovery to ingest k8s node, namespace, and pod entity data (#814)" This reverts commit b2fa315522a7b893d53e32382a614a4c64aaab32. 
--- plugins/inputs/prometheus/start.go | 33 --------------- .../k8sattributescraper.go | 22 ---------- .../k8sattributescraper_test.go | 41 ------------------- 3 files changed, 96 deletions(-) diff --git a/plugins/inputs/prometheus/start.go b/plugins/inputs/prometheus/start.go index b3538eb73c..d5b81e1fe5 100644 --- a/plugins/inputs/prometheus/start.go +++ b/plugins/inputs/prometheus/start.go @@ -39,7 +39,6 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" _ "github.com/prometheus/prometheus/discovery/install" - "github.com/prometheus/prometheus/discovery/kubernetes" "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/storage" @@ -287,11 +286,6 @@ const ( savedScrapeInstanceLabel = "cwagent_saved_scrape_instance" scrapeInstanceLabel = "__address__" savedScrapeNameLabel = "cwagent_saved_scrape_name" // just arbitrary name that end user won't override in relabel config - - // Labels for Entity population - EntityK8sPodLabel = "cwagent_entity_k8s_pod_name" - EntityK8sNamespaceLabel = "cwagent_entity_k8s_namespace" - EntityK8sNodeLabel = "cwagent_entity_K8s_node" ) func reloadConfig(filename string, logger log.Logger, rls ...func(*config.Config) error) (err error) { @@ -337,33 +331,6 @@ func reloadConfig(filename string, logger log.Logger, rls ...func(*config.Config }, } - for _, sdc := range sc.ServiceDiscoveryConfigs { - if sdc.(*kubernetes.SDConfig).Role == kubernetes.RolePod { - relabelConfigs = append(relabelConfigs, []*relabel.Config{ - { - Action: relabel.Replace, - Regex: relabel.MustNewRegexp("(.*)"), - Replacement: "$1", - SourceLabels: model.LabelNames{"__meta_kubernetes_pod_name"}, - TargetLabel: EntityK8sPodLabel, - }, - { - Action: relabel.Replace, - Regex: relabel.MustNewRegexp("(.*)"), - Replacement: "$1", - SourceLabels: model.LabelNames{"__meta_kubernetes_namespace"}, - TargetLabel: EntityK8sNamespaceLabel, - }, - { - Action: 
relabel.Replace, - Regex: relabel.MustNewRegexp("(.*)"), - Replacement: "$1", - SourceLabels: model.LabelNames{"__meta_kubernetes_pod_node_name"}, - TargetLabel: EntityK8sNodeLabel, - }, - }...) - } - } level.Info(logger).Log("msg", "Add extra relabel_configs and metric_relabel_configs to save job, instance and __name__ before user relabel") sc.RelabelConfigs = append(relabelConfigs, sc.RelabelConfigs...) diff --git a/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper.go b/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper.go index 6301166f1a..a563f3da96 100644 --- a/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper.go +++ b/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper.go @@ -6,8 +6,6 @@ package k8sattributescraper import ( "go.opentelemetry.io/collector/pdata/pcommon" semconv "go.opentelemetry.io/collector/semconv/v1.22.0" - - "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/prometheus" ) var ( @@ -48,31 +46,11 @@ func NewK8sAttributeScraper(clusterName string) *K8sAttributeScraper { func (e *K8sAttributeScraper) Scrape(rm pcommon.Resource) { resourceAttrs := rm.Attributes() - e.relabelPrometheus(resourceAttrs) - e.scrapeNamespace(resourceAttrs) e.scrapeWorkload(resourceAttrs) e.scrapeNode(resourceAttrs) } -func (e *K8sAttributeScraper) relabelPrometheus(p pcommon.Map) { - // TODO: Retrieve workload from pod label - if podName, exists := p.Get(prometheus.EntityK8sPodLabel); exists { - p.PutStr(semconv.AttributeK8SPodName, podName.Str()) - p.Remove(prometheus.EntityK8sPodLabel) - } - - if namespace, exists := p.Get(prometheus.EntityK8sNamespaceLabel); exists { - p.PutStr(semconv.AttributeK8SNamespaceName, namespace.Str()) - p.Remove(prometheus.EntityK8sNamespaceLabel) - } - - if nodeName, exists := p.Get(prometheus.EntityK8sNodeLabel); exists { - p.PutStr(semconv.AttributeK8SNodeName, nodeName.Str()) - p.Remove(prometheus.EntityK8sNodeLabel) - } 
-} - func (e *K8sAttributeScraper) scrapeNamespace(p pcommon.Map) { for _, namespace := range namespaceAllowlist { if namespaceAttr, ok := p.Get(namespace); ok { diff --git a/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper_test.go b/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper_test.go index a1b37644bf..81e46b35b3 100644 --- a/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper_test.go +++ b/plugins/processors/awsentity/internal/k8sattributescraper/k8sattributescraper_test.go @@ -4,15 +4,12 @@ package k8sattributescraper import ( - "fmt" "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" semconv "go.opentelemetry.io/collector/semconv/v1.22.0" - - "github.com/aws/amazon-cloudwatch-agent/plugins/inputs/prometheus" ) func TestNewK8sAttributeScraper(t *testing.T) { @@ -231,44 +228,6 @@ func Test_k8sattributescraper_scrapeWorkload(t *testing.T) { } } -func TestK8sAttributeScraper_relabelPrometheus(t *testing.T) { - tests := []struct { - name string - attributes pcommon.Map - want pcommon.Map - }{ - { - name: "PrometheusPod", - attributes: getAttributeMap(map[string]any{ - prometheus.EntityK8sPodLabel: "test-pod", - prometheus.EntityK8sNamespaceLabel: "test-namespace", - prometheus.EntityK8sNodeLabel: "test-node", - }), - want: getAttributeMap(map[string]any{ - semconv.AttributeK8SPodName: "test-pod", - semconv.AttributeK8SNamespaceName: "test-namespace", - semconv.AttributeK8SNodeName: "test-node", - }), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - e := &K8sAttributeScraper{} - e.relabelPrometheus(tt.attributes) - assert.Equal(t, tt.attributes.Len(), tt.want.Len()) - tt.want.Range(func(k string, v pcommon.Value) bool { - actualValue, exists := tt.attributes.Get(k) - if !exists { - assert.Fail(t, fmt.Sprintf("%s does not exist in the attribute map", k)) - return 
false - } - assert.Equal(t, actualValue.Str(), v.Str()) - return true - }) - }) - } -} - func getAttributeMap(attributes map[string]any) pcommon.Map { attrMap := pcommon.NewMap() attrMap.FromRaw(attributes) From fc9a9c3ad5a8a85d9c330ed3b21fc998a3b91080 Mon Sep 17 00:00:00 2001 From: Pooja Reddy Nathala Date: Wed, 23 Oct 2024 15:55:20 -0400 Subject: [PATCH 36/47] fixed lint errors --- .../files/collect_list/collect_list_test.go | 2 +- .../applicationsignals/translator_test.go | 22 +++++++++---------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/translator/translate/logs/logs_collected/files/collect_list/collect_list_test.go b/translator/translate/logs/logs_collected/files/collect_list/collect_list_test.go index 5f8bfe89b4..5f5e93b115 100644 --- a/translator/translate/logs/logs_collected/files/collect_list/collect_list_test.go +++ b/translator/translate/logs/logs_collected/files/collect_list/collect_list_test.go @@ -5,7 +5,6 @@ package collect_list import ( "encoding/json" - "github.com/aws/amazon-cloudwatch-agent/translator/translate/logs" "os" "path/filepath" "regexp" @@ -18,6 +17,7 @@ import ( "github.com/aws/amazon-cloudwatch-agent/translator" "github.com/aws/amazon-cloudwatch-agent/translator/context" "github.com/aws/amazon-cloudwatch-agent/translator/translate/agent" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/logs" ) func TestFileConfig(t *testing.T) { diff --git a/translator/translate/otel/pipeline/applicationsignals/translator_test.go b/translator/translate/otel/pipeline/applicationsignals/translator_test.go index c4d602bfe7..da566e57bd 100644 --- a/translator/translate/otel/pipeline/applicationsignals/translator_test.go +++ b/translator/translate/otel/pipeline/applicationsignals/translator_test.go @@ -105,11 +105,11 @@ func TestTranslatorMetricsForKubernetes(t *testing.T) { tt := NewTranslator(component.DataTypeMetrics) assert.EqualValues(t, "metrics/application_signals", tt.ID().String()) testCases := map[string]struct 
{ - input map[string]interface{} - want *want - wantErr error - detector func() (eksdetector.Detector, error) - isEKSCache func() eksdetector.IsEKSCache + input map[string]interface{} + want *want + wantErr error + detector func() (eksdetector.Detector, error) + isEKSCache func() eksdetector.IsEKSCache kubernetesMode string }{ "WithoutMetricsCollectedKey": { @@ -130,8 +130,8 @@ func TestTranslatorMetricsForKubernetes(t *testing.T) { exporters: []string{"awsemf/application_signals"}, extensions: []string{"agenthealth/logs"}, }, - detector: eksdetector.TestEKSDetector, - isEKSCache: eksdetector.TestIsEKSCacheEKS, + detector: eksdetector.TestEKSDetector, + isEKSCache: eksdetector.TestIsEKSCacheEKS, kubernetesMode: config.ModeEKS, }, "WithAppSignalsAndLoggingEnabled": { @@ -151,8 +151,8 @@ func TestTranslatorMetricsForKubernetes(t *testing.T) { exporters: []string{"debug/application_signals", "awsemf/application_signals"}, extensions: []string{"agenthealth/logs"}, }, - detector: eksdetector.TestEKSDetector, - isEKSCache: eksdetector.TestIsEKSCacheEKS, + detector: eksdetector.TestEKSDetector, + isEKSCache: eksdetector.TestIsEKSCacheEKS, kubernetesMode: config.ModeEKS, }, "WithAppSignalsEnabledK8s": { @@ -169,8 +169,8 @@ func TestTranslatorMetricsForKubernetes(t *testing.T) { exporters: []string{"awsemf/application_signals"}, extensions: []string{"agenthealth/logs"}, }, - detector: eksdetector.TestK8sDetector, - isEKSCache: eksdetector.TestIsEKSCacheK8s, + detector: eksdetector.TestK8sDetector, + isEKSCache: eksdetector.TestIsEKSCacheK8s, kubernetesMode: config.ModeEKS, }, } From 195ea347125f5769cd9a0f28dc5ecac68aaa5d20 Mon Sep 17 00:00:00 2001 From: Varun <48163435+varunch77@users.noreply.github.com> Date: Wed, 23 Oct 2024 10:03:39 -0400 Subject: [PATCH 37/47] Add unit test to ensure pod, env, and certificate info is not present in logs (#850) --- extension/server/extension_test.go | 112 ++++++++++++++++++++++++++--- 1 file changed, 103 insertions(+), 9 deletions(-) 
diff --git a/extension/server/extension_test.go b/extension/server/extension_test.go index f6c801acf1..02145cf0fa 100644 --- a/extension/server/extension_test.go +++ b/extension/server/extension_test.go @@ -4,6 +4,7 @@ package server import ( + "bytes" "context" "crypto/tls" "encoding/json" @@ -16,6 +17,7 @@ import ( "github.com/jellydator/ttlcache/v3" "github.com/stretchr/testify/assert" "go.uber.org/zap" + "go.uber.org/zap/zapcore" "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" ) @@ -24,6 +26,19 @@ type mockEntityStore struct { podToServiceEnvironmentMap *ttlcache.Cache[string, entitystore.ServiceEnvironment] } +// This helper function creates a test logger +// so that it can send the log messages into a +// temporary buffer for pattern matching +func CreateTestLogger(buf *bytes.Buffer) *zap.Logger { + writer := zapcore.AddSync(buf) + + // Create a custom zapcore.Core that writes to the buffer + encoder := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()) + core := zapcore.NewCore(encoder, writer, zapcore.DebugLevel) + logger := zap.New(core) + return logger +} + func newMockEntityStore() *mockEntityStore { return &mockEntityStore{ podToServiceEnvironmentMap: ttlcache.New[string, entitystore.ServiceEnvironment]( @@ -32,13 +47,18 @@ func newMockEntityStore() *mockEntityStore { } } -func (es *mockEntityStore) AddPodServiceEnvironmentMapping(podName string, service string, env string) { +func (es *mockEntityStore) AddPodServiceEnvironmentMapping(podName string, service string, env string, serviceSource string) { es.podToServiceEnvironmentMap.Set(podName, entitystore.ServiceEnvironment{ - ServiceName: service, - Environment: env, + ServiceName: service, + Environment: env, + ServiceNameSource: serviceSource, }, time.Hour) } +func (es *mockEntityStore) GetPodServiceEnvironmentMapping() *ttlcache.Cache[string, entitystore.ServiceEnvironment] { + return es.podToServiceEnvironmentMap +} + func newMockGetPodServiceEnvironmentMapping(es 
*mockEntityStore) func() *ttlcache.Cache[string, entitystore.ServiceEnvironment] { return func() *ttlcache.Cache[string, entitystore.ServiceEnvironment] { return es.podToServiceEnvironmentMap @@ -175,12 +195,14 @@ func TestK8sPodToServiceMapHandler(t *testing.T) { name: "HappyPath", want: setupTTLCacheForTesting(map[string]entitystore.ServiceEnvironment{ "pod1": { - ServiceName: "service1", - Environment: "env1", + ServiceName: "service1", + Environment: "env1", + ServiceNameSource: "source1", }, "pod2": { - ServiceName: "service2", - Environment: "env2", + ServiceName: "service2", + Environment: "env2", + ServiceNameSource: "source2", }, }), }, @@ -196,8 +218,8 @@ func TestK8sPodToServiceMapHandler(t *testing.T) { es := newMockEntityStore() getPodServiceEnvironmentMapping = newMockGetPodServiceEnvironmentMapping(es) if !tt.emptyMap { - es.AddPodServiceEnvironmentMapping("pod1", "service1", "env1") - es.AddPodServiceEnvironmentMapping("pod2", "service2", "env2") + es.AddPodServiceEnvironmentMapping("pod1", "service1", "env1", "source1") + es.AddPodServiceEnvironmentMapping("pod2", "service2", "env2", "source2") } w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) @@ -323,3 +345,75 @@ func setupTTLCacheForTesting(podToServiceMap map[string]entitystore.ServiceEnvir } return cache } + +func TestServerNoSensitiveInfoInLogs(t *testing.T) { + // Create a buffer to capture log output + var buf bytes.Buffer + logger := CreateTestLogger(&buf) + + config := &Config{ + TLSCertPath: "./testdata/example-server-cert.pem", + TLSKeyPath: "./testdata/example-server-key.pem", + TLSCAPath: "./testdata/example-CA-cert.pem", + ListenAddress: ":8080", + } + + tests := []struct { + name string + setupMockData func(*mockEntityStore) + }{ + { + name: "EmptyPodServiceMap", + setupMockData: func(es *mockEntityStore) {}, + }, + { + name: "PopulatedPodServiceMap", + setupMockData: func(es *mockEntityStore) { + es.AddPodServiceEnvironmentMapping("pod1", "service1", "env1", "source1") 
+ es.AddPodServiceEnvironmentMapping("pod2", "service2", "env2", "source2") + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Clear the buffer before each test + buf.Reset() + + server := NewServer(logger, config) + es := newMockEntityStore() + tt.setupMockData(es) + getPodServiceEnvironmentMapping = newMockGetPodServiceEnvironmentMapping(es) + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + server.k8sPodToServiceMapHandler(c) + + // Check logs for sensitive information + logOutput := buf.String() + assertNoSensitiveInfo(t, logOutput, config, es) + }) + } +} + +func assertNoSensitiveInfo(t *testing.T, logOutput string, config *Config, es *mockEntityStore) { + confidentialInfo := []string{ + "-----BEGIN CERTIFICATE-----", + "-----END CERTIFICATE-----", + "-----BEGIN RSA PRIVATE KEY-----", + "-----END RSA PRIVATE KEY-----", + } + + for _, pattern := range confidentialInfo { + assert.NotContains(t, logOutput, pattern) + } + + // Iterate through the pod service environment mapping + podServiceMap := es.GetPodServiceEnvironmentMapping() + for pod, serviceEnv := range podServiceMap.Items() { + assert.NotContains(t, logOutput, pod) + assert.NotContains(t, logOutput, serviceEnv.Value().ServiceName) + assert.NotContains(t, logOutput, serviceEnv.Value().Environment) + assert.NotContains(t, logOutput, serviceEnv.Value().ServiceNameSource) + } +} From 9849658464567a9bba0eeeebcfe2185111470bb8 Mon Sep 17 00:00:00 2001 From: Lisa Guo Date: Wed, 23 Oct 2024 16:11:59 +0100 Subject: [PATCH 38/47] [cloudwatch] Remove extra metric datum in PMD calls. Send entity per MetricDatum instead of using a ttlcache. 
Fix unit tests (#848) --- plugins/outputs/cloudwatch/cloudwatch.go | 81 +++++++------ plugins/outputs/cloudwatch/cloudwatch_test.go | 110 +++++++++++++++--- 2 files changed, 142 insertions(+), 49 deletions(-) diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index 054c0879b9..124ff76d22 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -17,7 +17,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/jellydator/ttlcache/v3" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/exporter" @@ -61,19 +60,18 @@ type CloudWatch struct { // todo: may want to increase the size of the chan since the type changed. // 1 telegraf Metric could have many Fields. // Each field corresponds to a MetricDatum. - metricChan chan *aggregationDatum - datumBatchChan chan []*cloudwatch.MetricDatum - metricDatumBatch *MetricDatumBatch - shutdownChan chan struct{} - retries int - publisher *publisher.Publisher - retryer *retryer.LogThrottleRetryer - droppingOriginMetrics collections.Set[string] - aggregator Aggregator - aggregatorShutdownChan chan struct{} - aggregatorWaitGroup sync.WaitGroup - lastRequestBytes int - entityToMetricDatumCache *ttlcache.Cache[string, []*cloudwatch.MetricDatum] + metricChan chan *aggregationDatum + datumBatchChan chan map[string][]*cloudwatch.MetricDatum + metricDatumBatch *MetricDatumBatch + shutdownChan chan struct{} + retries int + publisher *publisher.Publisher + retryer *retryer.LogThrottleRetryer + droppingOriginMetrics collections.Set[string] + aggregator Aggregator + aggregatorShutdownChan chan struct{} + aggregatorWaitGroup sync.WaitGroup + lastRequestBytes int } // Compile time interface check. 
@@ -117,7 +115,6 @@ func (c *CloudWatch) Start(_ context.Context, host component.Host) error { c.config.RollupDimensions = GetUniqueRollupList(c.config.RollupDimensions) c.svc = svc c.retryer = logThrottleRetryer - c.entityToMetricDatumCache = ttlcache.New[string, []*cloudwatch.MetricDatum](ttlcache.WithTTL[string, []*cloudwatch.MetricDatum](5 * time.Minute)) c.startRoutines() return nil } @@ -125,13 +122,12 @@ func (c *CloudWatch) Start(_ context.Context, host component.Host) error { func (c *CloudWatch) startRoutines() { setNewDistributionFunc(c.config.MaxValuesPerDatum) c.metricChan = make(chan *aggregationDatum, metricChanBufferSize) - c.datumBatchChan = make(chan []*cloudwatch.MetricDatum, datumBatchChanBufferSize) + c.datumBatchChan = make(chan map[string][]*cloudwatch.MetricDatum, datumBatchChanBufferSize) c.shutdownChan = make(chan struct{}) c.aggregatorShutdownChan = make(chan struct{}) c.aggregator = NewAggregator(c.metricChan, c.aggregatorShutdownChan, &c.aggregatorWaitGroup) perRequestConstSize := overallConstPerRequestSize + len(c.config.Namespace) + namespaceOverheads c.metricDatumBatch = newMetricDatumBatch(c.config.MaxDatumsPerCall, perRequestConstSize) - go c.entityToMetricDatumCache.Start() go c.pushMetricDatum() go c.publish() } @@ -152,7 +148,6 @@ func (c *CloudWatch) Shutdown(ctx context.Context) error { close(c.shutdownChan) c.publisher.Close() c.retryer.Stop() - c.entityToMetricDatumCache.Stop() log.Println("D! 
Stopped the CloudWatch output plugin") return nil } @@ -177,11 +172,13 @@ func (c *CloudWatch) pushMetricDatum() { for { select { case metric := <-c.metricChan: - datums := c.BuildMetricDatum(metric) + entity, datums := c.BuildMetricDatum(metric) numberOfPartitions := len(datums) for i := 0; i < numberOfPartitions; i++ { - c.metricDatumBatch.Partition = append(c.metricDatumBatch.Partition, datums[i]) + entityStr := entityToString(entity) + c.metricDatumBatch.Partition[entityStr] = append(c.metricDatumBatch.Partition[entityStr], datums[i]) c.metricDatumBatch.Size += payload(datums[i]) + c.metricDatumBatch.Count++ if c.metricDatumBatch.isFull() { // if batch is full c.datumBatchChan <- c.metricDatumBatch.Partition @@ -203,30 +200,33 @@ func (c *CloudWatch) pushMetricDatum() { type MetricDatumBatch struct { MaxDatumsPerCall int - Partition []*cloudwatch.MetricDatum + Partition map[string][]*cloudwatch.MetricDatum BeginTime time.Time Size int + Count int perRequestConstSize int } func newMetricDatumBatch(maxDatumsPerCall, perRequestConstSize int) *MetricDatumBatch { return &MetricDatumBatch{ MaxDatumsPerCall: maxDatumsPerCall, - Partition: make([]*cloudwatch.MetricDatum, 0, maxDatumsPerCall), + Partition: map[string][]*cloudwatch.MetricDatum{}, BeginTime: time.Now(), Size: perRequestConstSize, + Count: 0, perRequestConstSize: perRequestConstSize, } } func (b *MetricDatumBatch) clear() { - b.Partition = make([]*cloudwatch.MetricDatum, 0, b.MaxDatumsPerCall) + b.Partition = map[string][]*cloudwatch.MetricDatum{} b.BeginTime = time.Now() b.Size = b.perRequestConstSize + b.Count = 0 } func (b *MetricDatumBatch) isFull() bool { - return len(b.Partition) >= b.MaxDatumsPerCall || b.Size >= bottomLinePayloadSizeInBytesToPublish + return b.Count >= b.MaxDatumsPerCall || b.Size >= bottomLinePayloadSizeInBytesToPublish } func (c *CloudWatch) timeToPublish(b *MetricDatumBatch) bool { @@ -339,27 +339,37 @@ func (c *CloudWatch) backoffSleep() { time.Sleep(d) } -func 
createEntityMetricData(entityToMetricDatumCache *ttlcache.Cache[string, []*cloudwatch.MetricDatum]) []*cloudwatch.EntityMetricData { +func createEntityMetricData(entityToMetrics map[string][]*cloudwatch.MetricDatum) []*cloudwatch.EntityMetricData { var entityMetricData []*cloudwatch.EntityMetricData - for _, item := range entityToMetricDatumCache.Items() { - entity := stringToEntity(item.Key()) + for entityStr, metrics := range entityToMetrics { + if entityStr == "" { + continue + } + entity := stringToEntity(entityStr) entityMetricData = append(entityMetricData, &cloudwatch.EntityMetricData{ Entity: &entity, - MetricData: item.Value(), + MetricData: metrics, }) } return entityMetricData } func (c *CloudWatch) WriteToCloudWatch(req interface{}) { - datums := req.([]*cloudwatch.MetricDatum) - entityMetricData := createEntityMetricData(c.entityToMetricDatumCache) + entityToMetricDatum := req.(map[string][]*cloudwatch.MetricDatum) + + // PMD requires PutMetricData to have MetricData + metricData := entityToMetricDatum[""] + if _, ok := entityToMetricDatum[""]; !ok { + metricData = []*cloudwatch.MetricDatum{} + } + params := &cloudwatch.PutMetricDataInput{ - MetricData: datums, + MetricData: metricData, Namespace: aws.String(c.config.Namespace), - EntityMetricData: entityMetricData, + EntityMetricData: createEntityMetricData(entityToMetricDatum), StrictEntityValidation: aws.Bool(false), } + var err error for i := 0; i < defaultRetryCount; i++ { _, err = c.svc.PutMetricData(params) @@ -395,14 +405,14 @@ func (c *CloudWatch) WriteToCloudWatch(req interface{}) { // BuildMetricDatum may just return the datum as-is. // Or it might expand it into many datums due to dimension aggregation. // There may also be more datums due to resize() on a distribution. 
-func (c *CloudWatch) BuildMetricDatum(metric *aggregationDatum) []*cloudwatch.MetricDatum { +func (c *CloudWatch) BuildMetricDatum(metric *aggregationDatum) (cloudwatch.Entity, []*cloudwatch.MetricDatum) { var datums []*cloudwatch.MetricDatum var distList []distribution.Distribution if metric.distribution != nil { if metric.distribution.Size() == 0 { log.Printf("E! metric has a distribution with no entries, %s", *metric.MetricName) - return datums + return metric.entity, datums } if metric.distribution.Unit() != "" { metric.SetUnit(metric.distribution.Unit()) @@ -457,8 +467,7 @@ func (c *CloudWatch) BuildMetricDatum(metric *aggregationDatum) []*cloudwatch.Me } } } - c.entityToMetricDatumCache.Set(entityToString(metric.entity), datums, ttlcache.DefaultTTL) - return datums + return metric.entity, datums } func (c *CloudWatch) IsDropping(metricName string) bool { diff --git a/plugins/outputs/cloudwatch/cloudwatch_test.go b/plugins/outputs/cloudwatch/cloudwatch_test.go index cffc9c5422..0d05a2ee7c 100644 --- a/plugins/outputs/cloudwatch/cloudwatch_test.go +++ b/plugins/outputs/cloudwatch/cloudwatch_test.go @@ -19,7 +19,6 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - "github.com/jellydator/ttlcache/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -204,13 +203,13 @@ func TestBuildMetricDatumDropUnsupported(t *testing.T) { distribution.MinValue * 1.001, } for _, testCase := range testCases { - got := cw.BuildMetricDatum(&aggregationDatum{ + _, datums := cw.BuildMetricDatum(&aggregationDatum{ MetricDatum: cloudwatch.MetricDatum{ MetricName: aws.String("test"), Value: aws.Float64(testCase), }, }) - assert.Empty(t, got) + assert.Empty(t, datums) } } @@ -334,7 +333,9 @@ func TestIsFlushable(t *testing.T) { Dimensions: BuildDimensions(tags), Timestamp: aws.Time(time.Now()), } - batch.Partition = append(batch.Partition, &datum) + 
batch.Partition = map[string][]*cloudwatch.MetricDatum{ + "TestEntity": append([]*cloudwatch.MetricDatum{}, &datum), + } assert.False(cw.timeToPublish(batch)) time.Sleep(time.Second + cw.config.ForceFlushInterval) assert.True(cw.timeToPublish(batch)) @@ -352,13 +353,19 @@ func TestIsFull(t *testing.T) { Dimensions: BuildDimensions(tags), Timestamp: aws.Time(time.Now()), } + batch.Partition = map[string][]*cloudwatch.MetricDatum{ + "TestEntity": {}, + } + partition := batch.Partition["TestEntity"] for i := 0; i < 3; { - batch.Partition = append(batch.Partition, &datum) + batch.Partition["TestEntity"] = append(partition, &datum) + batch.Count++ i++ } assert.False(batch.isFull()) for i := 0; i < defaultMaxDatumsPerCall-3; { - batch.Partition = append(batch.Partition, &datum) + batch.Partition["TestEntity"] = append(partition, &datum) + batch.Count++ i++ } assert.True(batch.isFull()) @@ -387,7 +394,6 @@ func newCloudWatchClient( MaxDatumsPerCall: defaultMaxDatumsPerCall, MaxValuesPerDatum: defaultMaxValuesPerDatum, }, - entityToMetricDatumCache: ttlcache.New[string, []*cloudwatch.MetricDatum](), } cloudwatch.startRoutines() return cloudwatch @@ -575,11 +581,11 @@ func TestBackoffRetries(t *testing.T) { // Take 1 item out of the channel and verify it is no longer full. 
func TestCloudWatch_metricDatumBatchFull(t *testing.T) { c := &CloudWatch{ - datumBatchChan: make(chan []*cloudwatch.MetricDatum, datumBatchChanBufferSize), + datumBatchChan: make(chan map[string][]*cloudwatch.MetricDatum, datumBatchChanBufferSize), } assert.False(t, c.metricDatumBatchFull()) for i := 0; i < datumBatchChanBufferSize; i++ { - c.datumBatchChan <- []*cloudwatch.MetricDatum{} + c.datumBatchChan <- map[string][]*cloudwatch.MetricDatum{} } assert.True(t, c.metricDatumBatchFull()) <-c.datumBatchChan @@ -601,18 +607,96 @@ func TestCreateEntityMetricData(t *testing.T) { "Platform": aws.String("AWS::EC2"), }, } - entityToAttributesMap := ttlcache.New[string, []*cloudwatch.MetricDatum](ttlcache.WithTTL[string, []*cloudwatch.MetricDatum](5 * time.Minute)) metrics := createTestMetrics(1, 1, 1, "s") assert.Equal(t, 7, metrics.ResourceMetrics().At(0).Resource().Attributes().Len()) aggregations := ConvertOtelMetrics(metrics) assert.Equal(t, 0, metrics.ResourceMetrics().At(0).Resource().Attributes().Len()) - metricDatum := cw.BuildMetricDatum(aggregations[0]) - entityToAttributesMap.Set(entityToString(entity), metricDatum, ttlcache.DefaultTTL) + entity, metricDatum := cw.BuildMetricDatum(aggregations[0]) + + entityToMetrics := map[string][]*cloudwatch.MetricDatum{ + entityToString(entity): metricDatum, + } wantedEntityMetricData := []*cloudwatch.EntityMetricData{ { Entity: &entity, MetricData: metricDatum, }, } - assert.Equal(t, wantedEntityMetricData, createEntityMetricData(entityToAttributesMap)) + assert.Equal(t, wantedEntityMetricData, createEntityMetricData(entityToMetrics)) +} + +func TestWriteToCloudWatchEntity(t *testing.T) { + timestampNow := aws.Time(time.Now()) + expectedPMDInput := &cloudwatch.PutMetricDataInput{ + Namespace: aws.String(""), + StrictEntityValidation: aws.Bool(false), + EntityMetricData: []*cloudwatch.EntityMetricData{ + { + Entity: &cloudwatch.Entity{ + Attributes: map[string]*string{}, + KeyAttributes: map[string]*string{ + 
"Environment": aws.String("Environment"), + "Service": aws.String("Service"), + }, + }, + MetricData: []*cloudwatch.MetricDatum{ + { + MetricName: aws.String("TestMetricWithEntity"), + Value: aws.Float64(1), + Timestamp: timestampNow, + Dimensions: []*cloudwatch.Dimension{ + {Name: aws.String("Class"), Value: aws.String("class")}, + {Name: aws.String("Object"), Value: aws.String("object")}, + }, + }, + }, + }, + }, + MetricData: []*cloudwatch.MetricDatum{ + { + MetricName: aws.String("TestMetricNoEntity"), + Value: aws.Float64(1), + Timestamp: timestampNow, + Dimensions: []*cloudwatch.Dimension{ + {Name: aws.String("Class"), Value: aws.String("class")}, + {Name: aws.String("Object"), Value: aws.String("object")}, + }, + }, + }, + } + + var input *cloudwatch.PutMetricDataInput + svc := new(mockCloudWatchClient) + svc.On("PutMetricData", &cloudwatch.PutMetricDataInput{}).Return(&cloudwatch.PutMetricDataOutput{}, nil) + svc.On("PutMetricData", mock.Anything).Run(func(args mock.Arguments) { + input = args.Get(0).(*cloudwatch.PutMetricDataInput) + }).Return(&cloudwatch.PutMetricDataOutput{}, nil) + + cw := newCloudWatchClient(svc, time.Second) + cw.WriteToCloudWatch(map[string][]*cloudwatch.MetricDatum{ + "": { + { + MetricName: aws.String("TestMetricNoEntity"), + Value: aws.Float64(1), + Timestamp: timestampNow, + Dimensions: []*cloudwatch.Dimension{ + {Name: aws.String("Class"), Value: aws.String("class")}, + {Name: aws.String("Object"), Value: aws.String("object")}, + }, + }, + }, + "|Environment:Environment;Service:Service": { + { + MetricName: aws.String("TestMetricWithEntity"), + Value: aws.Float64(1), + Timestamp: timestampNow, + Dimensions: []*cloudwatch.Dimension{ + {Name: aws.String("Class"), Value: aws.String("class")}, + {Name: aws.String("Object"), Value: aws.String("object")}, + }, + }, + }, + }) + + assert.Equal(t, expectedPMDInput, input) } From 522152700f1fe1b55f880f93909a4819b94c6dbb Mon Sep 17 00:00:00 2001 From: Dinakar Chappa Date: Thu, 24 Oct 2024 
14:45:37 -0400 Subject: [PATCH 39/47] Fixes entity attribute variable names (#840) Co-authored-by: Zhihong Lin --- plugins/outputs/cloudwatch/convert_otel.go | 4 +- .../outputs/cloudwatch/convert_otel_test.go | 80 +++++++++++-------- .../entityattributes/entityattributes.go | 53 +++++++++--- 3 files changed, 89 insertions(+), 48 deletions(-) diff --git a/plugins/outputs/cloudwatch/convert_otel.go b/plugins/outputs/cloudwatch/convert_otel.go index 78c2d00b94..97c3a2e217 100644 --- a/plugins/outputs/cloudwatch/convert_otel.go +++ b/plugins/outputs/cloudwatch/convert_otel.go @@ -189,8 +189,8 @@ func fetchEntityFields(resourceAttributes pcommon.Map) cloudwatch.Entity { keyAttributesMap := map[string]*string{} attributeMap := map[string]*string{} - processEntityAttributes(entityattributes.KeyAttributeEntityToShortNameMap, keyAttributesMap, resourceAttributes) - processEntityAttributes(entityattributes.AttributeEntityToShortNameMap, attributeMap, resourceAttributes) + processEntityAttributes(entityattributes.GetKeyAttributeEntityShortNameMap(), keyAttributesMap, resourceAttributes) + processEntityAttributes(entityattributes.GetAttributeEntityShortNameMap(), attributeMap, resourceAttributes) removeEntityFields(resourceAttributes) return cloudwatch.Entity{ diff --git a/plugins/outputs/cloudwatch/convert_otel_test.go b/plugins/outputs/cloudwatch/convert_otel_test.go index a2992da5b2..d9d6b5cd38 100644 --- a/plugins/outputs/cloudwatch/convert_otel_test.go +++ b/plugins/outputs/cloudwatch/convert_otel_test.go @@ -18,6 +18,8 @@ import ( "github.com/aws/amazon-cloudwatch-agent/metric/distribution/regular" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/entityattributes" "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatch" + "github.com/aws/amazon-cloudwatch-agent/translator/config" + "github.com/aws/amazon-cloudwatch-agent/translator/context" ) const ( @@ -232,9 +234,9 @@ func TestConvertOtelMetrics_Entity(t *testing.T) { "AwsAccountId": 
aws.String("0123456789012"), }, Attributes: map[string]*string{ - "InstanceID": aws.String("i-123456789"), - "Platform": aws.String("AWS::EC2"), - "AutoScalingGroup": aws.String("asg-123"), + "EC2.InstanceId": aws.String("i-123456789"), + "PlatformType": aws.String("AWS::EC2"), + "EC2.AutoScalingGroup": aws.String("asg-123"), }, } assert.Equal(t, 1, len(datums)) @@ -245,14 +247,13 @@ func TestConvertOtelMetrics_Entity(t *testing.T) { func TestProcessAndRemoveEntityAttributes(t *testing.T) { testCases := []struct { name string - entityMap []map[string]string resourceAttributes map[string]any wantedAttributes map[string]*string leftoverAttributes map[string]any + kubernetesMode string }{ { - name: "key_attributes", - entityMap: []map[string]string{entityattributes.KeyAttributeEntityToShortNameMap}, + name: "key_attributes", resourceAttributes: map[string]any{ entityattributes.AttributeEntityServiceName: "my-service", entityattributes.AttributeEntityDeploymentEnvironment: "my-environment", @@ -264,8 +265,7 @@ func TestProcessAndRemoveEntityAttributes(t *testing.T) { leftoverAttributes: make(map[string]any), }, { - name: "non-key_attributes", - entityMap: []map[string]string{entityattributes.AttributeEntityToShortNameMap}, + name: "non-key_attributes", resourceAttributes: map[string]any{ entityattributes.AttributeEntityCluster: "my-cluster", entityattributes.AttributeEntityNamespace: "my-namespace", @@ -273,16 +273,16 @@ func TestProcessAndRemoveEntityAttributes(t *testing.T) { entityattributes.AttributeEntityWorkload: "my-workload", }, wantedAttributes: map[string]*string{ - entityattributes.Cluster: aws.String("my-cluster"), - entityattributes.Namespace: aws.String("my-namespace"), - entityattributes.Node: aws.String("my-node"), - entityattributes.Workload: aws.String("my-workload"), + entityattributes.EksCluster: aws.String("my-cluster"), + entityattributes.NamespaceField: aws.String("my-namespace"), + entityattributes.Node: aws.String("my-node"), + 
entityattributes.Workload: aws.String("my-workload"), }, leftoverAttributes: make(map[string]any), + kubernetesMode: config.ModeEKS, }, { - name: "key_and_non_key_attributes", - entityMap: []map[string]string{entityattributes.KeyAttributeEntityToShortNameMap, entityattributes.AttributeEntityToShortNameMap}, + name: "key_and_non_key_attributes", resourceAttributes: map[string]any{ entityattributes.AttributeEntityServiceName: "my-service", entityattributes.AttributeEntityDeploymentEnvironment: "my-environment", @@ -294,16 +294,16 @@ func TestProcessAndRemoveEntityAttributes(t *testing.T) { wantedAttributes: map[string]*string{ entityattributes.ServiceName: aws.String("my-service"), entityattributes.DeploymentEnvironment: aws.String("my-environment"), - entityattributes.Cluster: aws.String("my-cluster"), - entityattributes.Namespace: aws.String("my-namespace"), + entityattributes.K8sCluster: aws.String("my-cluster"), + entityattributes.NamespaceField: aws.String("my-namespace"), entityattributes.Node: aws.String("my-node"), entityattributes.Workload: aws.String("my-workload"), }, leftoverAttributes: make(map[string]any), + kubernetesMode: config.ModeK8sEC2, }, { - name: "key_and_non_key_attributes_plus_extras", - entityMap: []map[string]string{entityattributes.KeyAttributeEntityToShortNameMap, entityattributes.AttributeEntityToShortNameMap}, + name: "key_and_non_key_attributes_plus_extras", resourceAttributes: map[string]any{ "extra_attribute": "extra_value", entityattributes.AttributeEntityServiceName: "my-service", @@ -316,18 +316,18 @@ func TestProcessAndRemoveEntityAttributes(t *testing.T) { wantedAttributes: map[string]*string{ entityattributes.ServiceName: aws.String("my-service"), entityattributes.DeploymentEnvironment: aws.String("my-environment"), - entityattributes.Cluster: aws.String("my-cluster"), - entityattributes.Namespace: aws.String("my-namespace"), + entityattributes.K8sCluster: aws.String("my-cluster"), + entityattributes.NamespaceField: 
aws.String("my-namespace"), entityattributes.Node: aws.String("my-node"), entityattributes.Workload: aws.String("my-workload"), }, leftoverAttributes: map[string]any{ "extra_attribute": "extra_value", }, + kubernetesMode: config.ModeK8sOnPrem, }, { - name: "key_and_non_key_attributes_plus_unsupported_entity_field", - entityMap: []map[string]string{entityattributes.KeyAttributeEntityToShortNameMap, entityattributes.AttributeEntityToShortNameMap}, + name: "key_and_non_key_attributes_plus_unsupported_entity_field", resourceAttributes: map[string]any{ entityattributes.AWSEntityPrefix + "not.real.values": "unsupported", entityattributes.AttributeEntityServiceName: "my-service", @@ -340,22 +340,32 @@ func TestProcessAndRemoveEntityAttributes(t *testing.T) { wantedAttributes: map[string]*string{ entityattributes.ServiceName: aws.String("my-service"), entityattributes.DeploymentEnvironment: aws.String("my-environment"), - entityattributes.Cluster: aws.String("my-cluster"), - entityattributes.Namespace: aws.String("my-namespace"), + entityattributes.EksCluster: aws.String("my-cluster"), + entityattributes.NamespaceField: aws.String("my-namespace"), entityattributes.Node: aws.String("my-node"), entityattributes.Workload: aws.String("my-workload"), }, leftoverAttributes: map[string]any{}, + kubernetesMode: config.ModeEKS, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { + // resetting fields for current test case + ctx := context.CurrentContext() + ctx.SetKubernetesMode(tc.kubernetesMode) + entityAttrMap := []map[string]string{entityattributes.GetKeyAttributeEntityShortNameMap()} + if tc.kubernetesMode != "" { + delete(entityattributes.GetAttributeEntityShortNameMap(), entityattributes.AttributeEntityCluster) + entityAttrMap = append(entityAttrMap, entityattributes.GetAttributeEntityShortNameMap()) + } + attrs := pcommon.NewMap() err := attrs.FromRaw(tc.resourceAttributes) assert.Nil(t, err) targetMap := make(map[string]*string) - for _, entityMap := 
range tc.entityMap { + for _, entityMap := range entityAttrMap { processEntityAttributes(entityMap, targetMap, attrs) } removeEntityFields(attrs) @@ -375,17 +385,19 @@ func TestFetchEntityFields(t *testing.T) { resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityNamespace, "my-namespace") resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityWorkload, "my-workload") assert.Equal(t, 7, resourceMetrics.Resource().Attributes().Len()) + context.CurrentContext().SetKubernetesMode(config.ModeEKS) - expectedEntity := cloudwatch.Entity{KeyAttributes: map[string]*string{ - entityattributes.EntityType: aws.String(entityattributes.Service), - entityattributes.ServiceName: aws.String("my-service"), - entityattributes.DeploymentEnvironment: aws.String("my-environment"), - }, + expectedEntity := cloudwatch.Entity{ + KeyAttributes: map[string]*string{ + entityattributes.EntityType: aws.String(entityattributes.Service), + entityattributes.ServiceName: aws.String("my-service"), + entityattributes.DeploymentEnvironment: aws.String("my-environment"), + }, Attributes: map[string]*string{ - entityattributes.Node: aws.String("my-node"), - entityattributes.Cluster: aws.String("my-cluster"), - entityattributes.Namespace: aws.String("my-namespace"), - entityattributes.Workload: aws.String("my-workload"), + entityattributes.Node: aws.String("my-node"), + entityattributes.EksCluster: aws.String("my-cluster"), + entityattributes.NamespaceField: aws.String("my-namespace"), + entityattributes.Workload: aws.String("my-workload"), }, } entity := fetchEntityFields(resourceMetrics.Resource().Attributes()) diff --git a/plugins/processors/awsentity/entityattributes/entityattributes.go b/plugins/processors/awsentity/entityattributes/entityattributes.go index a5d2898901..c95b44d75d 100644 --- a/plugins/processors/awsentity/entityattributes/entityattributes.go +++ b/plugins/processors/awsentity/entityattributes/entityattributes.go @@ -3,6 +3,11 @@ 
package entityattributes +import ( + "github.com/aws/amazon-cloudwatch-agent/translator/config" + "github.com/aws/amazon-cloudwatch-agent/translator/context" +) + const ( // The following are the possible values for EntityType config options @@ -44,13 +49,15 @@ const ( ResourceType = "ResourceType" Identifier = "Identifier" AwsAccountId = "AwsAccountId" - Cluster = "Cluster" - Workload = "Workload" - Node = "Node" - ServiceNameSource = "Source" - Platform = "Platform" - InstanceID = "InstanceID" - AutoscalingGroup = "AutoScalingGroup" + EksCluster = "EKS.Cluster" + K8sCluster = "K8s.Cluster" + NamespaceField = "K8s.Namespace" + Workload = "K8s.Workload" + Node = "K8s.Node" + ServiceNameSource = "AWS.ServiceNameSource" + Platform = "PlatformType" + InstanceID = "EC2.InstanceId" + AutoscalingGroup = "EC2.AutoScalingGroup" // The following are values used for the environment fallbacks required on EC2 DeploymentEnvironmentFallbackPrefix = "ec2:" @@ -58,7 +65,7 @@ const ( ) // KeyAttributeEntityToShortNameMap is used to map key attributes from otel to the actual values used in the Entity object -var KeyAttributeEntityToShortNameMap = map[string]string{ +var keyAttributeEntityToShortNameMap = map[string]string{ AttributeEntityType: EntityType, AttributeEntityResourceType: ResourceType, AttributeEntityIdentifier: Identifier, @@ -67,10 +74,9 @@ var KeyAttributeEntityToShortNameMap = map[string]string{ AttributeEntityDeploymentEnvironment: DeploymentEnvironment, } -// AttributeEntityToShortNameMap is used to map attributes from otel to the actual values used in the Entity object -var AttributeEntityToShortNameMap = map[string]string{ - AttributeEntityCluster: Cluster, - AttributeEntityNamespace: Namespace, +// attributeEntityToShortNameMap is used to map attributes from otel to the actual values used in the Entity object +var attributeEntityToShortNameMap = map[string]string{ + AttributeEntityNamespace: NamespaceField, AttributeEntityWorkload: Workload, AttributeEntityNode: 
Node, AttributeEntityPlatformType: Platform, @@ -79,6 +85,18 @@ var AttributeEntityToShortNameMap = map[string]string{ AttributeEntityServiceNameSource: ServiceNameSource, } +func GetKeyAttributeEntityShortNameMap() map[string]string { + return keyAttributeEntityToShortNameMap +} + +// Cluster attribute prefix could be either EKS or K8s. We set the field once at runtime. +func GetAttributeEntityShortNameMap() map[string]string { + if _, ok := attributeEntityToShortNameMap[AttributeEntityCluster]; !ok { + attributeEntityToShortNameMap[AttributeEntityCluster] = clusterType() + } + return attributeEntityToShortNameMap +} + // Container Insights attributes used for scraping EKS related information const ( NodeName = "NodeName" @@ -86,3 +104,14 @@ const ( // PodName in Container Insights is the workload(Deployment, Daemonset, etc) name PodName = "PodName" ) + +func clusterType() string { + ctx := context.CurrentContext() + mode := ctx.KubernetesMode() + if mode == config.ModeEKS { + return EksCluster + } else if mode == config.ModeK8sEC2 || mode == config.ModeK8sOnPrem { + return K8sCluster + } + return "" +} From dad4f5130c51f0d91353cce4bd8ca0136f10ef6b Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Fri, 25 Oct 2024 16:43:47 -0400 Subject: [PATCH 40/47] Add nop receiver and exporter to allowed components (#861) Co-authored-by: Pooja Reddy Nathala --- internal/util/collections/collections.go | 4 +- service/defaultcomponents/components.go | 4 + service/defaultcomponents/components_test.go | 2 + translator/translate/agent/agent_test.go | 18 ++++ translator/translate/logs/logs_test.go | 39 ++++++++ .../otel/pipeline/nop/translator_test.go | 89 +++++++++++++++++++ 6 files changed, 154 insertions(+), 2 deletions(-) create mode 100644 translator/translate/otel/pipeline/nop/translator_test.go diff --git a/internal/util/collections/collections.go b/internal/util/collections/collections.go index 54e82100cb..dd352fa8c4 100644 --- 
a/internal/util/collections/collections.go +++ b/internal/util/collections/collections.go @@ -61,12 +61,12 @@ func NewPair[K any, V any](key K, value V) *Pair[K, V] { // Set is a map with a comparable K key and no // meaningful value. -type Set[K comparable] map[K]any +type Set[K comparable] map[K]struct{} // Add keys to the Set. func (s Set[K]) Add(keys ...K) { for _, key := range keys { - s[key] = nil + s[key] = struct{}{} } } diff --git a/service/defaultcomponents/components.go b/service/defaultcomponents/components.go index 3ab39a3516..9fc9661522 100644 --- a/service/defaultcomponents/components.go +++ b/service/defaultcomponents/components.go @@ -42,6 +42,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/debugexporter" + "go.opentelemetry.io/collector/exporter/nopexporter" "go.opentelemetry.io/collector/extension" "go.opentelemetry.io/collector/extension/ballastextension" "go.opentelemetry.io/collector/extension/zpagesextension" @@ -50,6 +51,7 @@ import ( "go.opentelemetry.io/collector/processor/batchprocessor" "go.opentelemetry.io/collector/processor/memorylimiterprocessor" "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/nopreceiver" "go.opentelemetry.io/collector/receiver/otlpreceiver" "github.com/aws/amazon-cloudwatch-agent/extension/agenthealth" @@ -75,6 +77,7 @@ func Factories() (otelcol.Factories, error) { jaegerreceiver.NewFactory(), jmxreceiver.NewFactory(), kafkareceiver.NewFactory(), + nopreceiver.NewFactory(), otlpreceiver.NewFactory(), prometheusreceiver.NewFactory(), statsdreceiver.NewFactory(), @@ -117,6 +120,7 @@ func Factories() (otelcol.Factories, error) { awsxrayexporter.NewFactory(), cloudwatch.NewFactory(), debugexporter.NewFactory(), + nopexporter.NewFactory(), prometheusremotewriteexporter.NewFactory(), ); err != nil { return otelcol.Factories{}, err diff --git 
a/service/defaultcomponents/components_test.go b/service/defaultcomponents/components_test.go index 116f5102cf..e912749dfa 100644 --- a/service/defaultcomponents/components_test.go +++ b/service/defaultcomponents/components_test.go @@ -24,6 +24,7 @@ func TestComponents(t *testing.T) { "jaeger", "jmx", "kafka", + "nop", "otlp", "prometheus", "statsd", @@ -72,6 +73,7 @@ func TestComponents(t *testing.T) { "awscloudwatch", "awsxray", "debug", + "nop", "prometheusremotewrite", } gotExporters := collections.MapSlice(maps.Keys(factories.Exporters), component.Type.String) diff --git a/translator/translate/agent/agent_test.go b/translator/translate/agent/agent_test.go index 4b3f221aed..f1158d4e29 100644 --- a/translator/translate/agent/agent_test.go +++ b/translator/translate/agent/agent_test.go @@ -174,3 +174,21 @@ func restoreProxyEnv() { os.Setenv("https_proxy", httpsProxy) os.Setenv("no_proxy", noProxy) } + +func TestAgentServiceAndEnvironmentConfig(t *testing.T) { + agentServiceAndEnvironmentConfig(t, config.OS_TYPE_LINUX) + agentServiceAndEnvironmentConfig(t, config.OS_TYPE_DARWIN) +} + +func agentServiceAndEnvironmentConfig(t *testing.T, osType string) { + a := new(Agent) + translator.SetTargetPlatform(osType) + var input interface{} + err := json.Unmarshal([]byte(`{"agent":{"region": "us-west-2", "service.name": "my-service", "deployment.environment":"test-environment"}}`), &input) + if err != nil { + assert.Fail(t, err.Error()) + } + _, _ = a.ApplyRule(input) + assert.Equal(t, "my-service", Global_Config.ServiceName) + assert.Equal(t, "test-environment", Global_Config.DeploymentEnvironment) +} diff --git a/translator/translate/logs/logs_test.go b/translator/translate/logs/logs_test.go index 923386164f..fe1dc0583d 100644 --- a/translator/translate/logs/logs_test.go +++ b/translator/translate/logs/logs_test.go @@ -200,3 +200,42 @@ func TestLogs_EndpointOverride(t *testing.T) { ctx.SetMode(config.ModeEC2) //reset back to default mode } + +func 
TestLogs_ServiceAndEnvironment(t *testing.T) { + l := new(Logs) + agent.Global_Config.Region = "us-east-1" + agent.Global_Config.RegionType = "any" + + context.ResetContext() + + var input interface{} + err := json.Unmarshal([]byte(`{"logs":{"service.name": "my-service", + "deployment.environment": "ec2:group","log_stream_name":"LOG_STREAM_NAME"}}`), &input) + if err != nil { + assert.Fail(t, err.Error()) + } + + _, _ = l.ApplyRule(input) + assert.Equal(t, "my-service", GlobalLogConfig.ServiceName) + assert.Equal(t, "ec2:group", GlobalLogConfig.DeploymentEnvironment) +} + +func TestLogs_ServiceAndEnvironmentMissing(t *testing.T) { + l := new(Logs) + agent.Global_Config.Region = "us-east-1" + agent.Global_Config.RegionType = "any" + agent.Global_Config.DeploymentEnvironment = "ec2:group" + agent.Global_Config.ServiceName = "my-service" + + context.ResetContext() + + var input interface{} + err := json.Unmarshal([]byte(`{"logs":{"log_stream_name":"LOG_STREAM_NAME"}}`), &input) + if err != nil { + assert.Fail(t, err.Error()) + } + + _, _ = l.ApplyRule(input) + assert.Equal(t, "my-service", GlobalLogConfig.ServiceName) + assert.Equal(t, "ec2:group", GlobalLogConfig.DeploymentEnvironment) +} diff --git a/translator/translate/otel/pipeline/nop/translator_test.go b/translator/translate/otel/pipeline/nop/translator_test.go new file mode 100644 index 0000000000..22c30371dd --- /dev/null +++ b/translator/translate/otel/pipeline/nop/translator_test.go @@ -0,0 +1,89 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: MIT + +package nop + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + + "github.com/aws/amazon-cloudwatch-agent/internal/util/collections" + "github.com/aws/amazon-cloudwatch-agent/translator/translate/otel/common" +) + +func TestTranslator(t *testing.T) { + type want struct { + receivers []string + processors []string + exporters []string + extensions []string + } + tt := NewTranslator() + assert.EqualValues(t, "metrics/nop", tt.ID().String()) + testCases := map[string]struct { + input map[string]interface{} + want *want + wantErr error + }{ + "WithoutKey": { + input: map[string]interface{}{}, + wantErr: &common.MissingKeyError{ID: tt.ID(), JsonKey: fmt.Sprint(logAgentKey)}, + }, + "WithMetricsKey": { + input: map[string]interface{}{ + "metrics": map[string]interface{}{}, + }, + wantErr: &common.MissingKeyError{ID: tt.ID(), JsonKey: fmt.Sprint(logAgentKey)}, + }, + "WithTracesKey": { + input: map[string]interface{}{ + "traces": map[string]interface{}{}, + }, + wantErr: &common.MissingKeyError{ID: tt.ID(), JsonKey: fmt.Sprint(logAgentKey)}, + }, + "WithEMFKey": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "metrics_collected": map[string]interface{}{}, + }, + }, + wantErr: &common.MissingKeyError{ID: tt.ID(), JsonKey: fmt.Sprint(logAgentKey)}, + }, + "WithLogsKey": { + input: map[string]interface{}{ + "logs": map[string]interface{}{ + "logs_collected": map[string]interface{}{ + "files": nil, + }, + }, + }, + want: &want{ + receivers: []string{"nop"}, + processors: []string{}, + exporters: []string{"nop"}, + extensions: []string{}, + }, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + conf := confmap.NewFromStringMap(testCase.input) + got, err := tt.Translate(conf) + assert.Equal(t, testCase.wantErr, err) + if testCase.want == 
nil { + assert.Nil(t, got) + } else { + require.NotNil(t, got) + assert.Equal(t, testCase.want.receivers, collections.MapSlice(got.Receivers.Keys(), component.ID.String)) + assert.Equal(t, testCase.want.processors, collections.MapSlice(got.Processors.Keys(), component.ID.String)) + assert.Equal(t, testCase.want.exporters, collections.MapSlice(got.Exporters.Keys(), component.ID.String)) + assert.Equal(t, testCase.want.extensions, collections.MapSlice(got.Extensions.Keys(), component.ID.String)) + } + }) + } +} From a090dd351cca4c44c4dd3d2a499cb81644f405bc Mon Sep 17 00:00:00 2001 From: zhihonl <61301537+zhihonl@users.noreply.github.com> Date: Mon, 28 Oct 2024 12:03:04 -0400 Subject: [PATCH 41/47] Fix empty entity metrics not being properly set as regular metric data (#863) --- plugins/outputs/cloudwatch/util.go | 14 +++++++------ plugins/outputs/cloudwatch/util_test.go | 28 +++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 6 deletions(-) diff --git a/plugins/outputs/cloudwatch/util.go b/plugins/outputs/cloudwatch/util.go index 939428a76c..6a2149365c 100644 --- a/plugins/outputs/cloudwatch/util.go +++ b/plugins/outputs/cloudwatch/util.go @@ -124,7 +124,7 @@ func payload(datum *cloudwatch.MetricDatum) (size int) { } func entityToString(entity cloudwatch.Entity) string { - var attributes, keyAttributes string + var attributes, keyAttributes, data string if entity.Attributes != nil { attributes = entityAttributesToString(entity.Attributes) } @@ -132,11 +132,13 @@ func entityToString(entity cloudwatch.Entity) string { keyAttributes = entityAttributesToString(entity.KeyAttributes) } - data := fmt.Sprintf( - "%s|%s", - attributes, - keyAttributes, - ) + if attributes != "" || keyAttributes != "" { + data = fmt.Sprintf( + "%s|%s", + attributes, + keyAttributes, + ) + } return data } diff --git a/plugins/outputs/cloudwatch/util_test.go b/plugins/outputs/cloudwatch/util_test.go index aca977b9f2..962f2cf1b2 100644 --- a/plugins/outputs/cloudwatch/util_test.go +++ 
b/plugins/outputs/cloudwatch/util_test.go @@ -173,6 +173,11 @@ func TestEntityToString_StringToEntity(t *testing.T) { }, entityString: "|Environment:Environment;Service:Service", }, + { + name: "Empty Entity", + entity: cloudwatch.Entity{}, + entityString: "", + }, } for _, tc := range testCases { @@ -182,3 +187,26 @@ func TestEntityToString_StringToEntity(t *testing.T) { }) } } + +func TestEntityToString(t *testing.T) { + testCases := []struct { + name string + entity cloudwatch.Entity + entityString string + }{ + { + name: "EmptyEntityMaps", + entity: cloudwatch.Entity{ + KeyAttributes: map[string]*string{}, + Attributes: map[string]*string{}, + }, + entityString: "", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.entityString, entityToString(tc.entity)) + }) + } +} From ee0868b2faa6753e17c8529b4a00ddb6bb5b14a7 Mon Sep 17 00:00:00 2001 From: Varun <48163435+varunch77@users.noreply.github.com> Date: Mon, 28 Oct 2024 10:55:45 -0400 Subject: [PATCH 42/47] Add unit test to check awsentity logs for sensitive information (#855) --- .../processors/awsentity/processor_test.go | 137 ++++++++++++++++++ 1 file changed, 137 insertions(+) diff --git a/plugins/processors/awsentity/processor_test.go b/plugins/processors/awsentity/processor_test.go index 4d2f9216ce..bd263e0a4f 100644 --- a/plugins/processors/awsentity/processor_test.go +++ b/plugins/processors/awsentity/processor_test.go @@ -4,13 +4,16 @@ package awsentity import ( + "bytes" "context" "testing" "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" semconv "go.opentelemetry.io/collector/semconv/v1.22.0" "go.uber.org/zap" + "go.uber.org/zap/zapcore" "github.com/aws/amazon-cloudwatch-agent/extension/entitystore" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/entityattributes" @@ -80,6 +83,19 @@ func newMockGetEC2InfoFromEntityStore(instance, accountId, asg 
string) func() en } } +// This helper function creates a test logger +// so that it can send the log messages into a +// temporary buffer for pattern matching +func CreateTestLogger(buf *bytes.Buffer) *zap.Logger { + writer := zapcore.AddSync(buf) + + // Create a custom zapcore.Core that writes to the buffer + encoder := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()) + core := zapcore.NewCore(encoder, writer, zapcore.DebugLevel) + logger := zap.New(core) + return logger +} + func TestProcessMetricsLogGroupAssociation(t *testing.T) { logger, _ := zap.NewDevelopment() p := newAwsEntityProcessor(&Config{ @@ -467,6 +483,127 @@ func TestProcessMetricsResourceEntityProcessing(t *testing.T) { } } +func TestAWSEntityProcessorNoSensitiveInfoInLogs(t *testing.T) { + // Create a buffer to capture log output + var buf bytes.Buffer + logger := CreateTestLogger(&buf) + + configs := []struct { + name string + config *Config + }{ + { + name: "EC2Service", + config: &Config{ + EntityType: entityattributes.Service, + Platform: config.ModeEC2, + }, + }, + { + name: "EKSService", + config: &Config{ + EntityType: entityattributes.Service, + Platform: config.ModeEC2, + KubernetesMode: config.ModeEKS, + ClusterName: "test-cluster", + }, + }, + { + name: "EC2Resource", + config: &Config{ + EntityType: entityattributes.Resource, + Platform: config.ModeEC2, + }, + }, + { + name: "K8sOnPremService", + config: &Config{ + EntityType: entityattributes.Service, + Platform: config.ModeOnPrem, + KubernetesMode: config.ModeK8sOnPrem, + ClusterName: "test-cluster", + }, + }, + } + + for _, cfg := range configs { + t.Run(cfg.name, func(t *testing.T) { + buf.Reset() + processor := newAwsEntityProcessor(cfg.config, logger) + + resetServiceNameSource := getServiceNameSource + getServiceNameSource = newMockGetServiceNameAndSource("test-service", "UserConfiguration") + defer func() { getServiceNameSource = resetServiceNameSource }() + + resetGetEC2InfoFromEntityStore := getEC2InfoFromEntityStore 
+ asgName := "test-asg" + getEC2InfoFromEntityStore = newMockGetEC2InfoFromEntityStore("i-1234567890abcdef0", "123456789012", asgName) + defer func() { getEC2InfoFromEntityStore = resetGetEC2InfoFromEntityStore }() + + md := generateTestMetrics() + _, err := processor.processMetrics(context.Background(), md) + assert.NoError(t, err) + + logOutput := buf.String() + assertNoSensitiveInfo(t, logOutput, md, asgName) + }) + } +} + +func generateTestMetrics() pmetric.Metrics { + md := pmetric.NewMetrics() + rm := md.ResourceMetrics().AppendEmpty() + + attrs := rm.Resource().Attributes() + attrs.PutStr(attributeAwsLogGroupNames, "test-log-group") + attrs.PutStr(attributeServiceName, "test-service") + attrs.PutStr(attributeDeploymentEnvironment, "test-environment") + attrs.PutStr(semconv.AttributeK8SPodName, "test-pod") + attrs.PutStr(semconv.AttributeK8SNamespaceName, "test-namespace") + attrs.PutStr(semconv.AttributeK8SDeploymentName, "test-deployment") + attrs.PutStr(semconv.AttributeK8SNodeName, "test-node") + + metric := rm.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() + metric.SetName("test-metric") + dp := metric.SetEmptyGauge().DataPoints().AppendEmpty() + dp.Attributes().PutStr(attributeServiceName, "datapoint-service-name") + dp.Attributes().PutStr(attributeDeploymentEnvironment, "datapoint-environment") + + return md +} + +func assertNoSensitiveInfo(t *testing.T, logOutput string, md pmetric.Metrics, asgName string) { + rm := md.ResourceMetrics().At(0) + attrs := rm.Resource().Attributes() + dp := rm.ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0) + + getStringOrEmpty := func(val pcommon.Value, exists bool) string { + if !exists { + return "" + } + return val.AsString() + } + + sensitivePatterns := []string{ + `i-[0-9a-f]{17}`, // EC2 Instance ID regex pattern + `\d{12}`, // AWS Account ID regex pattern + asgName, // Auto Scaling Group name + getStringOrEmpty(attrs.Get(attributeAwsLogGroupNames)), + 
getStringOrEmpty(attrs.Get(attributeServiceName)), + getStringOrEmpty(attrs.Get(attributeDeploymentEnvironment)), + getStringOrEmpty(attrs.Get(semconv.AttributeK8SPodName)), + getStringOrEmpty(attrs.Get(semconv.AttributeK8SNamespaceName)), + getStringOrEmpty(attrs.Get(semconv.AttributeK8SDeploymentName)), + getStringOrEmpty(attrs.Get(semconv.AttributeK8SNodeName)), + getStringOrEmpty(dp.Attributes().Get(attributeServiceName)), + getStringOrEmpty(dp.Attributes().Get(attributeDeploymentEnvironment)), + } + + for _, pattern := range sensitivePatterns { + assert.NotRegexp(t, pattern, logOutput) + } +} + func TestProcessMetricsDatapointAttributeScraping(t *testing.T) { logger, _ := zap.NewDevelopment() ctx := context.Background() From f7df406f8a5240db3b3fbd160a235363fbd6a183 Mon Sep 17 00:00:00 2001 From: Pooja Reddy Nathala Date: Mon, 28 Oct 2024 13:37:08 -0400 Subject: [PATCH 43/47] removed beta release workflow --- .github/workflows/compass-beta-release.yml | 29 ---------------------- 1 file changed, 29 deletions(-) delete mode 100644 .github/workflows/compass-beta-release.yml diff --git a/.github/workflows/compass-beta-release.yml b/.github/workflows/compass-beta-release.yml deleted file mode 100644 index bc8d74df36..0000000000 --- a/.github/workflows/compass-beta-release.yml +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: MIT - -name: Compass Beta Release -on: - workflow_dispatch: -jobs: - BuildAndUpload: - uses: ./.github/workflows/test-build.yml - secrets: inherit - permissions: - id-token: write - contents: read - with: - BucketKey: "compass-beta-release" - PackageBucketKey: "compass-beta-release" - TerraformAWSAssumeRole: ${{ vars.TERRAFORM_AWS_ASSUME_ROLE }} - Bucket: "private-cloudwatch-agent-integration-test" - - BuildAndUploadContainer: - uses: ./.github/workflows/test-build-docker.yml - secrets: inherit - permissions: - id-token: write - contents: read - with: - ContainerRepositoryNameAndTag: "cwagent-compass-beta-release:latest" - BucketKey: "compass-beta-release" - PackageBucketKey: "compass-beta-release" \ No newline at end of file From 161cd5ba722e09ab0aa3b0a6d3f40e3a06e6aa1d Mon Sep 17 00:00:00 2001 From: Zhihong Lin Date: Tue, 29 Oct 2024 16:52:11 -0400 Subject: [PATCH 44/47] Fixing CloudWatch entity components bugs --- extension/entitystore/ec2Info.go | 45 ++- extension/entitystore/ec2Info_test.go | 113 +++--- extension/entitystore/eksInfo.go | 10 +- extension/entitystore/eksInfo_test.go | 19 + extension/entitystore/extension.go | 23 +- extension/entitystore/extension_test.go | 26 +- extension/entitystore/retryer_test.go | 4 +- extension/entitystore/serviceprovider.go | 40 +- extension/entitystore/serviceprovider_test.go | 17 +- go.mod | 7 +- internal/tls/certWatcher.go | 7 +- plugins/outputs/cloudwatch/convert_otel.go | 7 +- .../outputs/cloudwatch/convert_otel_test.go | 101 ++++-- plugins/outputs/cloudwatchlogs/pusher.go | 4 +- .../entityattributes/entityattributes.go | 25 +- plugins/processors/awsentity/processor.go | 26 +- test/README.md | 11 - test/compass/compass_test.go | 342 ------------------ .../resources/compass_default_log.json | 22 -- .../resources/compass_service_in_config.json | 23 -- test/go.mod | 58 --- test/go.sum | 119 ------ 22 files changed, 283 insertions(+), 766 deletions(-) delete mode 100644 test/README.md delete 
mode 100644 test/compass/compass_test.go delete mode 100644 test/compass/resources/compass_default_log.json delete mode 100644 test/compass/resources/compass_service_in_config.json delete mode 100644 test/go.mod delete mode 100644 test/go.sum diff --git a/extension/entitystore/ec2Info.go b/extension/entitystore/ec2Info.go index 703ff7db1d..8309c209b5 100644 --- a/extension/entitystore/ec2Info.go +++ b/extension/entitystore/ec2Info.go @@ -7,6 +7,7 @@ import ( "context" "errors" "strings" + "sync" "time" "go.uber.org/zap" @@ -36,6 +37,7 @@ type EC2Info struct { metadataProvider ec2metadataprovider.MetadataProvider logger *zap.Logger done chan struct{} + mutex sync.RWMutex } func (ei *EC2Info) initEc2Info() { @@ -47,7 +49,24 @@ func (ei *EC2Info) initEc2Info() { return } ei.logger.Debug("Finished initializing EC2Info") - ei.ignoreInvalidFields() +} + +func (ei *EC2Info) GetInstanceID() string { + ei.mutex.RLock() + defer ei.mutex.RUnlock() + return ei.InstanceID +} + +func (ei *EC2Info) GetAccountID() string { + ei.mutex.RLock() + defer ei.mutex.RUnlock() + return ei.AccountID +} + +func (ei *EC2Info) GetAutoScalingGroup() string { + ei.mutex.RLock() + defer ei.mutex.RUnlock() + return ei.AutoScalingGroup } func (ei *EC2Info) setInstanceIDAccountID() error { @@ -65,8 +84,14 @@ func (ei *EC2Info) setInstanceIDAccountID() error { } } ei.logger.Debug("Successfully retrieved Instance ID and Account ID") + ei.mutex.Lock() ei.InstanceID = metadataDoc.InstanceID + if idLength := len(ei.InstanceID); idLength > instanceIdSizeMax { + ei.logger.Warn("InstanceId length exceeds characters limit and will be ignored", zap.Int("length", idLength), zap.Int("character limit", instanceIdSizeMax)) + ei.InstanceID = "" + } ei.AccountID = metadataDoc.AccountID + ei.mutex.Unlock() return nil } } @@ -116,7 +141,13 @@ func (ei *EC2Info) retrieveAsgName() error { ei.logger.Error("Failed to get AutoScalingGroup through metadata provider", zap.Error(err)) } else { 
ei.logger.Debug("AutoScalingGroup retrieved through IMDS") + ei.mutex.Lock() ei.AutoScalingGroup = asg + if asgLength := len(ei.AutoScalingGroup); asgLength > autoScalingGroupSizeMax { + ei.logger.Warn("AutoScalingGroup length exceeds characters limit and will be ignored", zap.Int("length", asgLength), zap.Int("character limit", autoScalingGroupSizeMax)) + ei.AutoScalingGroup = "" + } + ei.mutex.Unlock() } } return nil @@ -130,15 +161,3 @@ func newEC2Info(metadataProvider ec2metadataprovider.MetadataProvider, done chan logger: logger, } } - -func (ei *EC2Info) ignoreInvalidFields() { - if idLength := len(ei.InstanceID); idLength > instanceIdSizeMax { - ei.logger.Warn("InstanceId length exceeds characters limit and will be ignored", zap.Int("length", idLength), zap.Int("character limit", instanceIdSizeMax)) - ei.InstanceID = "" - } - - if asgLength := len(ei.AutoScalingGroup); asgLength > autoScalingGroupSizeMax { - ei.logger.Warn("AutoScalingGroup length exceeds characters limit and will be ignored", zap.Int("length", asgLength), zap.Int("character limit", autoScalingGroupSizeMax)) - ei.AutoScalingGroup = "" - } -} diff --git a/extension/entitystore/ec2Info_test.go b/extension/entitystore/ec2Info_test.go index 13b2674861..21fc8d148c 100644 --- a/extension/entitystore/ec2Info_test.go +++ b/extension/entitystore/ec2Info_test.go @@ -25,11 +25,19 @@ var mockedInstanceIdentityDoc = &ec2metadata.EC2InstanceIdentityDocument{ ImageID: "ami-09edd32d9b0990d49", } +var mockedInstanceIdentityDocWithLargeInstanceId = &ec2metadata.EC2InstanceIdentityDocument{ + InstanceID: "i-01d2417c27a396e44394824728", + AccountID: "874389809020", + Region: "us-east-1", + InstanceType: "m5ad.large", + ImageID: "ami-09edd32d9b0990d49", +} + var ( tagVal3 = "ASG-1" ) -func TestSetInstanceIdAndRegion(t *testing.T) { +func TestSetInstanceIDAccountID(t *testing.T) { type args struct { metadataProvider ec2metadataprovider.MetadataProvider } @@ -50,6 +58,17 @@ func TestSetInstanceIdAndRegion(t 
*testing.T) { AccountID: mockedInstanceIdentityDoc.AccountID, }, }, + { + name: "InstanceId too large", + args: args{ + metadataProvider: &mockMetadataProvider{InstanceIdentityDocument: mockedInstanceIdentityDocWithLargeInstanceId}, + }, + wantErr: false, + want: EC2Info{ + InstanceID: "", + AccountID: mockedInstanceIdentityDocWithLargeInstanceId.AccountID, + }, + }, } for _, tt := range tests { logger, _ := zap.NewDevelopment() @@ -61,8 +80,8 @@ func TestSetInstanceIdAndRegion(t *testing.T) { if err := ei.setInstanceIDAccountID(); (err != nil) != tt.wantErr { t.Errorf("setInstanceIDAccountID() error = %v, wantErr %v", err, tt.wantErr) } - assert.Equal(t, tt.want.InstanceID, ei.InstanceID) - assert.Equal(t, tt.want.AccountID, ei.AccountID) + assert.Equal(t, tt.want.InstanceID, ei.GetInstanceID()) + assert.Equal(t, tt.want.AccountID, ei.GetAccountID()) }) } } @@ -104,6 +123,23 @@ func TestRetrieveASGName(t *testing.T) { AutoScalingGroup: tagVal3, }, }, + { + name: "AutoScalingGroup too large", + args: args{ + metadataProvider: &mockMetadataProvider{ + InstanceIdentityDocument: mockedInstanceIdentityDoc, + Tags: map[string]string{ + "aws:autoscaling:groupName": strings.Repeat("a", 256), + "env": "test-env", + "name": "test-name", + }}, + }, + + wantErr: false, + want: EC2Info{ + AutoScalingGroup: "", + }, + }, { name: "Success IMDS tags call but no ASG", args: args{ @@ -122,72 +158,7 @@ func TestRetrieveASGName(t *testing.T) { if err := ei.retrieveAsgName(); (err != nil) != tt.wantErr { t.Errorf("retrieveAsgName() error = %v, wantErr %v", err, tt.wantErr) } - assert.Equal(t, tt.want.AutoScalingGroup, ei.AutoScalingGroup) - }) - } -} - -func TestIgnoreInvalidFields(t *testing.T) { - logger, _ := zap.NewDevelopment() - type want struct { - instanceId string - accountId string - autoScalingGroup string - } - tests := []struct { - name string - args *EC2Info - want want - }{ - { - name: "Happy path", - args: &EC2Info{ - InstanceID: "i-01d2417c27a396e44", - AccountID: 
"0123456789012", - AutoScalingGroup: "asg", - logger: logger, - }, - want: want{ - instanceId: "i-01d2417c27a396e44", - accountId: "0123456789012", - autoScalingGroup: "asg", - }, - }, - { - name: "InstanceId too large", - args: &EC2Info{ - InstanceID: strings.Repeat("a", 20), - AccountID: "0123456789012", - AutoScalingGroup: "asg", - logger: logger, - }, - want: want{ - instanceId: "", - accountId: "0123456789012", - autoScalingGroup: "asg", - }, - }, - { - name: "AutoScalingGroup too large", - args: &EC2Info{ - InstanceID: "i-01d2417c27a396e44", - AccountID: "0123456789012", - AutoScalingGroup: strings.Repeat("a", 256), - logger: logger, - }, - want: want{ - instanceId: "i-01d2417c27a396e44", - accountId: "0123456789012", - autoScalingGroup: "", - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt.args.ignoreInvalidFields() - assert.Equal(t, tt.want.instanceId, tt.args.InstanceID) - assert.Equal(t, tt.want.accountId, tt.args.AccountID) - assert.Equal(t, tt.want.autoScalingGroup, tt.args.AutoScalingGroup) + assert.Equal(t, tt.want.AutoScalingGroup, ei.GetAutoScalingGroup()) }) } } @@ -229,8 +200,8 @@ func TestLogMessageDoesNotIncludeResourceInfo(t *testing.T) { logOutput := buf.String() log.Println(logOutput) - assert.NotContains(t, logOutput, ei.InstanceID) - assert.NotContains(t, logOutput, ei.AutoScalingGroup) + assert.NotContains(t, logOutput, ei.GetInstanceID()) + assert.NotContains(t, logOutput, ei.GetAutoScalingGroup()) }) } } diff --git a/extension/entitystore/eksInfo.go b/extension/entitystore/eksInfo.go index 3c885e032f..9b6786d744 100644 --- a/extension/entitystore/eksInfo.go +++ b/extension/entitystore/eksInfo.go @@ -10,7 +10,14 @@ import ( "go.uber.org/zap" ) -const ttlDuration = 5 * time.Minute +const ( + ttlDuration = 5 * time.Minute + + // Agent server extension is mainly opened for FluentBit to + // consume data and FluentBit only caches 256 pods in memory + // so we will follow the same pattern + 
maxPodAssociationMapCapacity = 256 +) type ServiceEnvironment struct { ServiceName string @@ -28,6 +35,7 @@ func newEKSInfo(logger *zap.Logger) *eksInfo { logger: logger, podToServiceEnvMap: ttlcache.New[string, ServiceEnvironment]( ttlcache.WithTTL[string, ServiceEnvironment](ttlDuration), + ttlcache.WithCapacity[string, ServiceEnvironment](maxPodAssociationMapCapacity), ), } } diff --git a/extension/entitystore/eksInfo_test.go b/extension/entitystore/eksInfo_test.go index 42b66a116e..c5644a77e3 100644 --- a/extension/entitystore/eksInfo_test.go +++ b/extension/entitystore/eksInfo_test.go @@ -4,6 +4,7 @@ package entitystore import ( + "strconv" "testing" "time" @@ -104,6 +105,24 @@ func TestAddPodServiceEnvironmentMapping_TtlRefresh(t *testing.T) { assert.Equal(t, 1, ei.podToServiceEnvMap.Len()) } +func TestAddPodServiceEnvironmentMapping_MaxCapacity(t *testing.T) { + logger := zap.NewNop() + ei := newEKSInfo(logger) + + //adds new pod to service environment mapping + for i := 0; i < 300; i++ { + ei.AddPodServiceEnvironmentMapping("test-pod-"+strconv.Itoa(i), "test-service", "test-environment", "Instrumentation") + } + assert.Equal(t, maxPodAssociationMapCapacity, ei.podToServiceEnvMap.Len()) + itemIndex := 299 + ei.podToServiceEnvMap.Range(func(item *ttlcache.Item[string, ServiceEnvironment]) bool { + // Check if the item's value equals the target string + assert.Equal(t, item.Key(), "test-pod-"+strconv.Itoa(itemIndex)) + itemIndex-- + return true + }) +} + func TestGetPodServiceEnvironmentMapping(t *testing.T) { tests := []struct { name string diff --git a/extension/entitystore/extension.go b/extension/entitystore/extension.go index afbd1bf7a2..d9058f9f78 100644 --- a/extension/entitystore/extension.go +++ b/extension/entitystore/extension.go @@ -94,23 +94,27 @@ func (e *EntityStore) Start(ctx context.Context, host component.Host) error { Profile: e.config.Profile, Filename: e.config.Filename, } + e.serviceprovider = newServiceProvider(e.mode, e.config.Region, 
&e.ec2Info, e.metadataprovider, getEC2Provider, ec2CredentialConfig, e.done, e.logger) switch e.mode { case config.ModeEC2: e.ec2Info = *newEC2Info(e.metadataprovider, e.done, e.config.Region, e.logger) go e.ec2Info.initEc2Info() + go e.serviceprovider.startServiceProvider() } if e.kubernetesMode != "" { e.eksInfo = newEKSInfo(e.logger) // Starting the ttl cache will automatically evict all expired pods from the map - go e.StartPodToServiceEnvironmentMappingTtlCache(e.done) + go e.StartPodToServiceEnvironmentMappingTtlCache() } - e.serviceprovider = newServiceProvider(e.mode, e.config.Region, &e.ec2Info, e.metadataprovider, getEC2Provider, ec2CredentialConfig, e.done, e.logger) - go e.serviceprovider.startServiceProvider() return nil } func (e *EntityStore) Shutdown(_ context.Context) error { close(e.done) + if e.eksInfo != nil && e.eksInfo.podToServiceEnvMap != nil { + e.eksInfo.podToServiceEnvMap.Stop() + } + e.logger.Info("Pod to Service Environment Mapping TTL Cache stopped") return nil } @@ -189,16 +193,9 @@ func (e *EntityStore) AddPodServiceEnvironmentMapping(podName string, serviceNam } } -func (e *EntityStore) StartPodToServiceEnvironmentMappingTtlCache(done chan struct{}) { +func (e *EntityStore) StartPodToServiceEnvironmentMappingTtlCache() { if e.eksInfo != nil { e.eksInfo.podToServiceEnvMap.Start() - - // Start a goroutine to stop the cache when done channel is closed - go func() { - <-done - e.eksInfo.podToServiceEnvMap.Stop() - e.logger.Info("Pod to Service Environment Mapping TTL Cache stopped") - }() } } @@ -215,8 +212,8 @@ func (e *EntityStore) createAttributeMap() map[string]*string { attributeMap := make(map[string]*string) if e.mode == config.ModeEC2 { - addNonEmptyToMap(attributeMap, InstanceIDKey, e.ec2Info.InstanceID) - addNonEmptyToMap(attributeMap, ASGKey, e.ec2Info.AutoScalingGroup) + addNonEmptyToMap(attributeMap, InstanceIDKey, e.ec2Info.GetInstanceID()) + addNonEmptyToMap(attributeMap, ASGKey, e.ec2Info.GetAutoScalingGroup()) } switch 
e.mode { case config.ModeEC2: diff --git a/extension/entitystore/extension_test.go b/extension/entitystore/extension_test.go index 816d6470f1..668c8bd1a4 100644 --- a/extension/entitystore/extension_test.go +++ b/extension/entitystore/extension_test.go @@ -503,7 +503,7 @@ func TestEntityStore_StartPodToServiceEnvironmentMappingTtlCache(t *testing.T) { e.done = make(chan struct{}) e.eksInfo.podToServiceEnvMap = setupTTLCacheForTesting(map[string]ServiceEnvironment{}, time.Microsecond) - go e.StartPodToServiceEnvironmentMappingTtlCache(e.done) + go e.StartPodToServiceEnvironmentMappingTtlCache() assert.Equal(t, 0, e.GetPodServiceEnvironmentMapping().Len()) e.AddPodServiceEnvironmentMapping("pod", "service", "env", "Instrumentation") assert.Equal(t, 1, e.GetPodServiceEnvironmentMapping().Len()) @@ -516,6 +516,24 @@ func TestEntityStore_StartPodToServiceEnvironmentMappingTtlCache(t *testing.T) { } +func TestEntityStore_StopPodToServiceEnvironmentMappingTtlCache(t *testing.T) { + e := EntityStore{eksInfo: newEKSInfo(zap.NewExample())} + e.done = make(chan struct{}) + e.eksInfo.podToServiceEnvMap = setupTTLCacheForTesting(map[string]ServiceEnvironment{}, time.Second) + e.logger = zap.NewNop() + + go e.StartPodToServiceEnvironmentMappingTtlCache() + assert.Equal(t, 0, e.GetPodServiceEnvironmentMapping().Len()) + e.AddPodServiceEnvironmentMapping("pod", "service", "env", "Instrumentation") + assert.Equal(t, 1, e.GetPodServiceEnvironmentMapping().Len()) + + time.Sleep(time.Millisecond) + assert.NoError(t, e.Shutdown(nil)) + //cache should be cleared + time.Sleep(time.Second) + assert.Equal(t, 1, e.GetPodServiceEnvironmentMapping().Len()) +} + func TestEntityStore_GetMetricServiceNameSource(t *testing.T) { instanceId := "i-abcd1234" accountId := "123456789012" @@ -581,9 +599,9 @@ func TestEntityStore_LogMessageDoesNotIncludeResourceInfo(t *testing.T) { logOutput := buf.String() log.Println(logOutput) - assertIfNonEmpty(t, logOutput, es.ec2Info.InstanceID) - 
assertIfNonEmpty(t, logOutput, es.ec2Info.AutoScalingGroup) - assertIfNonEmpty(t, logOutput, es.ec2Info.AccountID) + assertIfNonEmpty(t, logOutput, es.ec2Info.GetInstanceID()) + assertIfNonEmpty(t, logOutput, es.ec2Info.GetAutoScalingGroup()) + assertIfNonEmpty(t, logOutput, es.ec2Info.GetAccountID()) }) } diff --git a/extension/entitystore/retryer_test.go b/extension/entitystore/retryer_test.go index 35883cf695..9c8c88951e 100644 --- a/extension/entitystore/retryer_test.go +++ b/extension/entitystore/retryer_test.go @@ -48,10 +48,10 @@ func TestRetryer_refreshLoop(t *testing.T) { done: done, } unlimitedRetryer := NewRetryer(tt.fields.oneTime, true, defaultJitterMin, defaultJitterMax, ec2tagger.BackoffSleepArray, infRetry, s.done, logger) - go unlimitedRetryer.refreshLoop(s.getIAMRole) + go unlimitedRetryer.refreshLoop(s.scrapeIAMRole) time.Sleep(time.Second) close(done) - assert.Equal(t, tt.wantIamRole, s.iamRole) + assert.Equal(t, tt.wantIamRole, s.GetIAMRole()) }) } } diff --git a/extension/entitystore/serviceprovider.go b/extension/entitystore/serviceprovider.go index d9a32fc83c..d30040c697 100644 --- a/extension/entitystore/serviceprovider.go +++ b/extension/entitystore/serviceprovider.go @@ -7,6 +7,7 @@ import ( "context" "fmt" "strings" + "sync" "github.com/aws/aws-sdk-go/aws/arn" "go.uber.org/zap" @@ -64,6 +65,7 @@ type serviceprovider struct { region string done chan struct{} logger *zap.Logger + mutex sync.RWMutex // logFiles stores the service attributes that were configured for log files in CloudWatch Agent configuration. 
// Example: // "/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log": {ServiceName: "cloudwatch-agent"} @@ -78,8 +80,20 @@ type serviceprovider struct { func (s *serviceprovider) startServiceProvider() { unlimitedRetryer := NewRetryer(false, true, defaultJitterMin, defaultJitterMax, ec2tagger.BackoffSleepArray, infRetry, s.done, s.logger) limitedRetryer := NewRetryer(false, false, describeTagsJitterMin, describeTagsJitterMax, ec2tagger.ThrottleBackOffArray, maxRetry, s.done, s.logger) - go unlimitedRetryer.refreshLoop(s.getIAMRole) - go limitedRetryer.refreshLoop(s.getImdsServiceName) + go unlimitedRetryer.refreshLoop(s.scrapeIAMRole) + go limitedRetryer.refreshLoop(s.scrapeImdsServiceName) +} + +func (s *serviceprovider) GetIAMRole() string { + s.mutex.RLock() + defer s.mutex.RUnlock() + return s.iamRole +} + +func (s *serviceprovider) GetIMDSServiceName() string { + s.mutex.RLock() + defer s.mutex.RUnlock() + return s.imdsServiceName } // addEntryForLogFile adds an association between a log file glob and a service attribute, as configured in the @@ -164,34 +178,34 @@ func (s *serviceprovider) serviceAttributeForLogFile(logFile LogFileGlob) Servic } func (s *serviceprovider) serviceAttributeFromImdsTags() ServiceAttribute { - if s.imdsServiceName == "" { + if s.GetIMDSServiceName() == "" { return ServiceAttribute{} } return ServiceAttribute{ - ServiceName: s.imdsServiceName, + ServiceName: s.GetIMDSServiceName(), ServiceNameSource: ServiceNameSourceResourceTags, } } func (s *serviceprovider) serviceAttributeFromIamRole() ServiceAttribute { - if s.iamRole == "" { + if s.GetIAMRole() == "" { return ServiceAttribute{} } return ServiceAttribute{ - ServiceName: s.iamRole, + ServiceName: s.GetIAMRole(), ServiceNameSource: ServiceNameSourceClientIamRole, } } func (s *serviceprovider) serviceAttributeFromAsg() ServiceAttribute { - if s.ec2Info == nil || s.ec2Info.AutoScalingGroup == "" { + if s.ec2Info == nil || s.ec2Info.GetAutoScalingGroup() == "" { return 
ServiceAttribute{} } return ServiceAttribute{ - Environment: "ec2:" + s.ec2Info.AutoScalingGroup, + Environment: "ec2:" + s.ec2Info.GetAutoScalingGroup(), } } @@ -207,7 +221,7 @@ func (s *serviceprovider) serviceAttributeFallback() ServiceAttribute { return attr } -func (s *serviceprovider) getIAMRole() error { +func (s *serviceprovider) scrapeIAMRole() error { iamRole, err := s.metadataProvider.InstanceProfileIAMRole() if err != nil { return err @@ -219,13 +233,15 @@ func (s *serviceprovider) getIAMRole() error { iamRoleResource := iamRoleArn.Resource if strings.HasPrefix(iamRoleResource, INSTANCE_PROFILE) { roleName := strings.TrimPrefix(iamRoleResource, INSTANCE_PROFILE) + s.mutex.Lock() s.iamRole = roleName + s.mutex.Unlock() } else { return fmt.Errorf("IAM Role resource does not follow the expected pattern. Should be instance-profile/") } return nil } -func (s *serviceprovider) getImdsServiceName() error { +func (s *serviceprovider) scrapeImdsServiceName() error { tags, err := s.metadataProvider.InstanceTags(context.Background()) if err != nil { s.logger.Debug("Failed to get tags through metadata provider", zap.Error(err)) @@ -238,12 +254,14 @@ func (s *serviceprovider) getImdsServiceName() error { if err != nil { continue } else { + s.mutex.Lock() s.imdsServiceName = serviceName + s.mutex.Unlock() } break } } - if s.imdsServiceName == "" { + if s.GetIMDSServiceName() == "" { s.logger.Debug("Service name not found through IMDS") } return nil diff --git a/extension/entitystore/serviceprovider_test.go b/extension/entitystore/serviceprovider_test.go index a05f87f755..011a522c96 100644 --- a/extension/entitystore/serviceprovider_test.go +++ b/extension/entitystore/serviceprovider_test.go @@ -45,9 +45,8 @@ func Test_serviceprovider_startServiceProvider(t *testing.T) { go s.startServiceProvider() time.Sleep(3 * time.Second) close(done) - - assert.Equal(t, tt.wantIAM, s.iamRole) - assert.Equal(t, tt.wantTag, s.imdsServiceName) + assert.Equal(t, tt.wantIAM, 
s.GetIAMRole()) + assert.Equal(t, tt.wantTag, s.GetIMDSServiceName()) }) } } @@ -229,12 +228,12 @@ func Test_serviceprovider_getServiceNameSource(t *testing.T) { s.iamRole = "test-role" serviceName, serviceNameSource = s.getServiceNameAndSource() - assert.Equal(t, s.iamRole, serviceName) + assert.Equal(t, s.GetIAMRole(), serviceName) assert.Equal(t, ServiceNameSourceClientIamRole, serviceNameSource) s.imdsServiceName = "test-service-from-tags" serviceName, serviceNameSource = s.getServiceNameAndSource() - assert.Equal(t, s.imdsServiceName, serviceName) + assert.Equal(t, s.GetIMDSServiceName(), serviceName) assert.Equal(t, ServiceNameSourceResourceTags, serviceNameSource) } @@ -256,8 +255,8 @@ func Test_serviceprovider_getIAMRole(t *testing.T) { s := serviceprovider{ metadataProvider: tt.metadataProvider, } - s.getIAMRole() - assert.Equal(t, tt.want, s.iamRole) + s.scrapeIAMRole() + assert.Equal(t, tt.want, s.GetIAMRole()) }) } } @@ -306,8 +305,8 @@ func Test_serviceprovider_getImdsServiceName(t *testing.T) { logger: zap.NewExample(), metadataProvider: tt.metadataProvider, } - s.getImdsServiceName() - assert.Equal(t, tt.wantTagServiceName, s.imdsServiceName) + s.scrapeImdsServiceName() + assert.Equal(t, tt.wantTagServiceName, s.GetIMDSServiceName()) }) } } diff --git a/go.mod b/go.mod index 9f9f62d8e6..5d52962fa3 100644 --- a/go.mod +++ b/go.mod @@ -96,8 +96,10 @@ require ( github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.30.2 github.com/bigkevmcd/go-configparser v0.0.0-20200217161103-d137835d2579 github.com/deckarep/golang-set/v2 v2.3.1 + github.com/fsnotify/fsnotify v1.7.0 github.com/gin-gonic/gin v1.10.0 github.com/go-kit/log v0.2.1 + github.com/go-playground/validator/v10 v10.20.0 github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31 github.com/gobwas/glob v0.2.3 github.com/google/go-cmp v0.6.0 @@ -205,11 +207,6 @@ require ( k8s.io/klog/v2 v2.120.1 ) -require ( - github.com/fsnotify/fsnotify v1.7.0 - github.com/go-playground/validator/v10 v10.20.0 -) - 
require ( cloud.google.com/go v0.112.1 // indirect cloud.google.com/go/compute/metadata v0.3.0 // indirect diff --git a/internal/tls/certWatcher.go b/internal/tls/certWatcher.go index e5666b5ead..1016917811 100644 --- a/internal/tls/certWatcher.go +++ b/internal/tls/certWatcher.go @@ -72,8 +72,8 @@ func (cw *CertWatcher) RegisterCallback(callback func()) { // GetTLSConfig fetches the currently loaded tls Config, which may be nil. func (cw *CertWatcher) GetTLSConfig() *tls.Config { - cw.Lock() - defer cw.Unlock() + cw.RLock() + defer cw.RUnlock() return cw.currentTLSConfig } @@ -84,7 +84,6 @@ func (cw *CertWatcher) ReadTlsConfig() error { TLSKey: cw.keyPath, TLSAllowedCACerts: []string{cw.caPath}, } - //cw.printCerts(serverConfig) tlsConfig, err := serverConfig.TLSConfig() if err != nil { cw.logger.Error("failed to read certificate", zap.Error(err)) @@ -93,7 +92,9 @@ func (cw *CertWatcher) ReadTlsConfig() error { if tlsConfig != cw.currentTLSConfig { cw.logger.Debug("TLS certificate changed") + cw.Lock() cw.currentTLSConfig = tlsConfig + cw.Unlock() // If a callback is registered, invoke it with the new certificate. 
if cw.callback != nil { diff --git a/plugins/outputs/cloudwatch/convert_otel.go b/plugins/outputs/cloudwatch/convert_otel.go index 97c3a2e217..d91ce92c61 100644 --- a/plugins/outputs/cloudwatch/convert_otel.go +++ b/plugins/outputs/cloudwatch/convert_otel.go @@ -188,9 +188,12 @@ func ConvertOtelMetrics(m pmetric.Metrics) []*aggregationDatum { func fetchEntityFields(resourceAttributes pcommon.Map) cloudwatch.Entity { keyAttributesMap := map[string]*string{} attributeMap := map[string]*string{} - + platformType := "" + if platformTypeValue, ok := resourceAttributes.Get(entityattributes.AttributeEntityPlatformType); ok { + platformType = platformTypeValue.Str() + } processEntityAttributes(entityattributes.GetKeyAttributeEntityShortNameMap(), keyAttributesMap, resourceAttributes) - processEntityAttributes(entityattributes.GetAttributeEntityShortNameMap(), attributeMap, resourceAttributes) + processEntityAttributes(entityattributes.GetAttributeEntityShortNameMap(platformType), attributeMap, resourceAttributes) removeEntityFields(resourceAttributes) return cloudwatch.Entity{ diff --git a/plugins/outputs/cloudwatch/convert_otel_test.go b/plugins/outputs/cloudwatch/convert_otel_test.go index d9d6b5cd38..ae4a825f5b 100644 --- a/plugins/outputs/cloudwatch/convert_otel_test.go +++ b/plugins/outputs/cloudwatch/convert_otel_test.go @@ -18,8 +18,6 @@ import ( "github.com/aws/amazon-cloudwatch-agent/metric/distribution/regular" "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/entityattributes" "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatch" - "github.com/aws/amazon-cloudwatch-agent/translator/config" - "github.com/aws/amazon-cloudwatch-agent/translator/context" ) const ( @@ -250,7 +248,6 @@ func TestProcessAndRemoveEntityAttributes(t *testing.T) { resourceAttributes map[string]any wantedAttributes map[string]*string leftoverAttributes map[string]any - kubernetesMode string }{ { name: "key_attributes", @@ -267,19 +264,20 @@ func 
TestProcessAndRemoveEntityAttributes(t *testing.T) { { name: "non-key_attributes", resourceAttributes: map[string]any{ - entityattributes.AttributeEntityCluster: "my-cluster", - entityattributes.AttributeEntityNamespace: "my-namespace", - entityattributes.AttributeEntityNode: "my-node", - entityattributes.AttributeEntityWorkload: "my-workload", + entityattributes.AttributeEntityCluster: "my-cluster", + entityattributes.AttributeEntityNamespace: "my-namespace", + entityattributes.AttributeEntityNode: "my-node", + entityattributes.AttributeEntityWorkload: "my-workload", + entityattributes.AttributeEntityPlatformType: "AWS::EKS", }, wantedAttributes: map[string]*string{ entityattributes.EksCluster: aws.String("my-cluster"), entityattributes.NamespaceField: aws.String("my-namespace"), entityattributes.Node: aws.String("my-node"), entityattributes.Workload: aws.String("my-workload"), + entityattributes.Platform: aws.String("AWS::EKS"), }, leftoverAttributes: make(map[string]any), - kubernetesMode: config.ModeEKS, }, { name: "key_and_non_key_attributes", @@ -290,6 +288,7 @@ func TestProcessAndRemoveEntityAttributes(t *testing.T) { entityattributes.AttributeEntityNamespace: "my-namespace", entityattributes.AttributeEntityNode: "my-node", entityattributes.AttributeEntityWorkload: "my-workload", + entityattributes.AttributeEntityPlatformType: "K8s", }, wantedAttributes: map[string]*string{ entityattributes.ServiceName: aws.String("my-service"), @@ -298,9 +297,9 @@ func TestProcessAndRemoveEntityAttributes(t *testing.T) { entityattributes.NamespaceField: aws.String("my-namespace"), entityattributes.Node: aws.String("my-node"), entityattributes.Workload: aws.String("my-workload"), + entityattributes.Platform: aws.String("K8s"), }, leftoverAttributes: make(map[string]any), - kubernetesMode: config.ModeK8sEC2, }, { name: "key_and_non_key_attributes_plus_extras", @@ -312,6 +311,7 @@ func TestProcessAndRemoveEntityAttributes(t *testing.T) { 
entityattributes.AttributeEntityNamespace: "my-namespace", entityattributes.AttributeEntityNode: "my-node", entityattributes.AttributeEntityWorkload: "my-workload", + entityattributes.AttributeEntityPlatformType: "K8s", }, wantedAttributes: map[string]*string{ entityattributes.ServiceName: aws.String("my-service"), @@ -320,11 +320,11 @@ func TestProcessAndRemoveEntityAttributes(t *testing.T) { entityattributes.NamespaceField: aws.String("my-namespace"), entityattributes.Node: aws.String("my-node"), entityattributes.Workload: aws.String("my-workload"), + entityattributes.Platform: aws.String("K8s"), }, leftoverAttributes: map[string]any{ "extra_attribute": "extra_value", }, - kubernetesMode: config.ModeK8sOnPrem, }, { name: "key_and_non_key_attributes_plus_unsupported_entity_field", @@ -336,6 +336,7 @@ func TestProcessAndRemoveEntityAttributes(t *testing.T) { entityattributes.AttributeEntityNamespace: "my-namespace", entityattributes.AttributeEntityNode: "my-node", entityattributes.AttributeEntityWorkload: "my-workload", + entityattributes.AttributeEntityPlatformType: "AWS::EKS", }, wantedAttributes: map[string]*string{ entityattributes.ServiceName: aws.String("my-service"), @@ -344,25 +345,27 @@ func TestProcessAndRemoveEntityAttributes(t *testing.T) { entityattributes.NamespaceField: aws.String("my-namespace"), entityattributes.Node: aws.String("my-node"), entityattributes.Workload: aws.String("my-workload"), + entityattributes.Platform: aws.String("AWS::EKS"), }, leftoverAttributes: map[string]any{}, - kubernetesMode: config.ModeEKS, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { + attrs := pcommon.NewMap() + err := attrs.FromRaw(tc.resourceAttributes) + // resetting fields for current test case - ctx := context.CurrentContext() - ctx.SetKubernetesMode(tc.kubernetesMode) entityAttrMap := []map[string]string{entityattributes.GetKeyAttributeEntityShortNameMap()} - if tc.kubernetesMode != "" { - 
delete(entityattributes.GetAttributeEntityShortNameMap(), entityattributes.AttributeEntityCluster) - entityAttrMap = append(entityAttrMap, entityattributes.GetAttributeEntityShortNameMap()) + platformType := "" + if platformTypeValue, ok := attrs.Get(entityattributes.AttributeEntityPlatformType); ok { + platformType = platformTypeValue.Str() + } + if platformType != "" { + delete(entityattributes.GetAttributeEntityShortNameMap(platformType), entityattributes.AttributeEntityCluster) + entityAttrMap = append(entityAttrMap, entityattributes.GetAttributeEntityShortNameMap(platformType)) } - - attrs := pcommon.NewMap() - err := attrs.FromRaw(tc.resourceAttributes) assert.Nil(t, err) targetMap := make(map[string]*string) for _, entityMap := range entityAttrMap { @@ -384,8 +387,8 @@ func TestFetchEntityFields(t *testing.T) { resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityCluster, "my-cluster") resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityNamespace, "my-namespace") resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityWorkload, "my-workload") - assert.Equal(t, 7, resourceMetrics.Resource().Attributes().Len()) - context.CurrentContext().SetKubernetesMode(config.ModeEKS) + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityPlatformType, "AWS::EKS") + assert.Equal(t, 8, resourceMetrics.Resource().Attributes().Len()) expectedEntity := cloudwatch.Entity{ KeyAttributes: map[string]*string{ @@ -398,12 +401,68 @@ func TestFetchEntityFields(t *testing.T) { entityattributes.EksCluster: aws.String("my-cluster"), entityattributes.NamespaceField: aws.String("my-namespace"), entityattributes.Workload: aws.String("my-workload"), + entityattributes.Platform: aws.String("AWS::EKS"), }, } entity := fetchEntityFields(resourceMetrics.Resource().Attributes()) assert.Equal(t, 0, resourceMetrics.Resource().Attributes().Len()) assert.Equal(t, expectedEntity, entity) +} + +func 
TestFetchEntityFieldsOnK8s(t *testing.T) { + entityMap := entityattributes.GetAttributeEntityShortNameMap("") + delete(entityMap, entityattributes.AttributeEntityCluster) + resourceMetrics := pmetric.NewResourceMetrics() + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityType, "Service") + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityDeploymentEnvironment, "my-environment") + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityServiceName, "my-service") + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityNode, "my-node") + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityCluster, "my-cluster") + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityNamespace, "my-namespace") + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityWorkload, "my-workload") + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityPlatformType, "K8s") + assert.Equal(t, 8, resourceMetrics.Resource().Attributes().Len()) + expectedEntity := cloudwatch.Entity{ + KeyAttributes: map[string]*string{ + entityattributes.EntityType: aws.String(entityattributes.Service), + entityattributes.ServiceName: aws.String("my-service"), + entityattributes.DeploymentEnvironment: aws.String("my-environment"), + }, + Attributes: map[string]*string{ + entityattributes.Node: aws.String("my-node"), + entityattributes.K8sCluster: aws.String("my-cluster"), + entityattributes.NamespaceField: aws.String("my-namespace"), + entityattributes.Workload: aws.String("my-workload"), + entityattributes.Platform: aws.String("K8s"), + }, + } + entity := fetchEntityFields(resourceMetrics.Resource().Attributes()) + assert.Equal(t, 0, resourceMetrics.Resource().Attributes().Len()) + assert.Equal(t, expectedEntity, entity) +} + +func TestFetchEntityFieldsOnEc2(t *testing.T) { + resourceMetrics := 
pmetric.NewResourceMetrics() + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityType, "Service") + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityDeploymentEnvironment, "my-environment") + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityServiceName, "my-service") + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityPlatformType, "AWS::EC2") + assert.Equal(t, 4, resourceMetrics.Resource().Attributes().Len()) + + expectedEntity := cloudwatch.Entity{ + KeyAttributes: map[string]*string{ + entityattributes.EntityType: aws.String(entityattributes.Service), + entityattributes.ServiceName: aws.String("my-service"), + entityattributes.DeploymentEnvironment: aws.String("my-environment"), + }, + Attributes: map[string]*string{ + entityattributes.Platform: aws.String("AWS::EC2"), + }, + } + entity := fetchEntityFields(resourceMetrics.Resource().Attributes()) + assert.Equal(t, 0, resourceMetrics.Resource().Attributes().Len()) + assert.Equal(t, expectedEntity, entity) } func TestInvalidMetric(t *testing.T) { diff --git a/plugins/outputs/cloudwatchlogs/pusher.go b/plugins/outputs/cloudwatchlogs/pusher.go index f0a8c560d6..2c1036f1e8 100644 --- a/plugins/outputs/cloudwatchlogs/pusher.go +++ b/plugins/outputs/cloudwatchlogs/pusher.go @@ -233,9 +233,7 @@ func (p *pusher) send() { LogStreamName: &p.Stream, SequenceToken: p.sequenceToken, } - if p.region == "us-east-1" { - input.Entity = entity - } + input.Entity = entity startTime := time.Now() diff --git a/plugins/processors/awsentity/entityattributes/entityattributes.go b/plugins/processors/awsentity/entityattributes/entityattributes.go index c95b44d75d..9638af83a5 100644 --- a/plugins/processors/awsentity/entityattributes/entityattributes.go +++ b/plugins/processors/awsentity/entityattributes/entityattributes.go @@ -3,11 +3,6 @@ package entityattributes -import ( - 
"github.com/aws/amazon-cloudwatch-agent/translator/config" - "github.com/aws/amazon-cloudwatch-agent/translator/context" -) - const ( // The following are the possible values for EntityType config options @@ -90,27 +85,17 @@ func GetKeyAttributeEntityShortNameMap() map[string]string { } // Cluster attribute prefix could be either EKS or K8s. We set the field once at runtime. -func GetAttributeEntityShortNameMap() map[string]string { +func GetAttributeEntityShortNameMap(platformType string) map[string]string { if _, ok := attributeEntityToShortNameMap[AttributeEntityCluster]; !ok { - attributeEntityToShortNameMap[AttributeEntityCluster] = clusterType() + attributeEntityToShortNameMap[AttributeEntityCluster] = clusterType(platformType) } return attributeEntityToShortNameMap } -// Container Insights attributes used for scraping EKS related information -const ( - NodeName = "NodeName" - Namespace = "Namespace" - // PodName in Container Insights is the workload(Deployment, Daemonset, etc) name - PodName = "PodName" -) - -func clusterType() string { - ctx := context.CurrentContext() - mode := ctx.KubernetesMode() - if mode == config.ModeEKS { +func clusterType(platformType string) string { + if platformType == AttributeEntityEKSPlatform { return EksCluster - } else if mode == config.ModeK8sEC2 || mode == config.ModeK8sOnPrem { + } else if platformType == AttributeEntityK8sPlatform { return K8sCluster } return "" diff --git a/plugins/processors/awsentity/processor.go b/plugins/processors/awsentity/processor.go index 526230077b..7492050cf7 100644 --- a/plugins/processors/awsentity/processor.go +++ b/plugins/processors/awsentity/processor.go @@ -113,13 +113,13 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric switch p.config.EntityType { case entityattributes.Resource: if p.config.Platform == config.ModeEC2 { - ec2Info := getEC2InfoFromEntityStore() - if ec2Info.InstanceID != EMPTY { + ec2Info = getEC2InfoFromEntityStore() + if 
ec2Info.GetInstanceID() != EMPTY { resourceAttrs.PutStr(entityattributes.AttributeEntityType, entityattributes.AttributeEntityAWSResource) resourceAttrs.PutStr(entityattributes.AttributeEntityResourceType, entityattributes.AttributeEntityEC2InstanceResource) - resourceAttrs.PutStr(entityattributes.AttributeEntityIdentifier, ec2Info.InstanceID) + resourceAttrs.PutStr(entityattributes.AttributeEntityIdentifier, ec2Info.GetInstanceID()) } - AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityAwsAccountId, ec2Info.AccountID) + AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityAwsAccountId, ec2Info.GetAccountID()) } case entityattributes.Service: if logGroupNamesAttr, ok := resourceAttrs.Get(attributeAwsLogGroupNames); ok { @@ -160,7 +160,7 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric podInfo, ok := p.k8sscraper.(*k8sattributescraper.K8sAttributeScraper) // Perform fallback mechanism for service and environment name if they // are empty - if entityServiceName == EMPTY && podInfo.Workload != EMPTY { + if entityServiceName == EMPTY && ok && podInfo != nil && podInfo.Workload != EMPTY { entityServiceName = podInfo.Workload entityServiceNameSource = entitystore.ServiceNameSourceK8sWorkload } @@ -186,7 +186,7 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric Namespace: podInfo.Namespace, Workload: podInfo.Workload, Node: podInfo.Node, - InstanceId: ec2Info.InstanceID, + InstanceId: ec2Info.GetInstanceID(), ServiceNameSource: entityServiceNameSource, } AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityType, entityattributes.Service) @@ -199,8 +199,8 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric resourceAttrs.PutStr(entityattributes.AttributeEntityNamespace, eksAttributes.Namespace) resourceAttrs.PutStr(entityattributes.AttributeEntityWorkload, eksAttributes.Workload) 
resourceAttrs.PutStr(entityattributes.AttributeEntityNode, eksAttributes.Node) - AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityInstanceID, ec2Info.InstanceID) - AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityAwsAccountId, ec2Info.AccountID) + AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityInstanceID, ec2Info.GetInstanceID()) + AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityAwsAccountId, ec2Info.GetAccountID()) AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityServiceNameSource, entityServiceNameSource) } p.k8sscraper.Reset() @@ -220,8 +220,8 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric ec2Info = getEC2InfoFromEntityStore() if entityEnvironmentName == EMPTY { - if ec2Info.AutoScalingGroup != EMPTY { - entityEnvironmentName = entityattributes.DeploymentEnvironmentFallbackPrefix + ec2Info.AutoScalingGroup + if ec2Info.GetAutoScalingGroup() != EMPTY { + entityEnvironmentName = entityattributes.DeploymentEnvironmentFallbackPrefix + ec2Info.GetAutoScalingGroup() } else { entityEnvironmentName = entityattributes.DeploymentEnvironmentDefault } @@ -230,11 +230,11 @@ func (p *awsEntityProcessor) processMetrics(_ context.Context, md pmetric.Metric AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityType, entityattributes.Service) AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityServiceName, entityServiceName) AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityDeploymentEnvironment, entityEnvironmentName) - AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityAwsAccountId, ec2Info.AccountID) + AddAttributeIfNonEmpty(resourceAttrs, entityattributes.AttributeEntityAwsAccountId, ec2Info.GetAccountID()) ec2Attributes := EC2ServiceAttributes{ - InstanceId: ec2Info.InstanceID, - AutoScalingGroup: ec2Info.AutoScalingGroup, + InstanceId: ec2Info.GetInstanceID(), 
+ AutoScalingGroup: ec2Info.GetAutoScalingGroup(), ServiceNameSource: entityServiceNameSource, } if err := validate.Struct(ec2Attributes); err == nil { diff --git a/test/README.md b/test/README.md deleted file mode 100644 index 8676abc5db..0000000000 --- a/test/README.md +++ /dev/null @@ -1,11 +0,0 @@ -## Private Integration Tests -The `test` module is meant to serve as a place for integration tests that cannot be placed in the external `amazon-cloudwatch-agent-test` repo. -These follow the pattern established by the external test repo and import dependencies from it to reuse as much as possible. Therefore, there are -a few requirements that are needed before running the tests. - -### Base Requirements -- GoLang 1.22+ -- A built and installed version of the agent from this repo - -### Compass -The compass integration tests. Verifies that PutLogEvents calls are attached with entities by the agent. \ No newline at end of file diff --git a/test/compass/compass_test.go b/test/compass/compass_test.go deleted file mode 100644 index 6343ef05e7..0000000000 --- a/test/compass/compass_test.go +++ /dev/null @@ -1,342 +0,0 @@ -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-// SPDX-License-Identifier: MIT - -package compass - -import ( - "context" - "errors" - "fmt" - "log" - "os" - "path/filepath" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs" - cwlTypes "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types" - "github.com/aws/aws-sdk-go-v2/service/ec2" - ec2Types "github.com/aws/aws-sdk-go-v2/service/ec2/types" - "github.com/google/uuid" - "github.com/stretchr/testify/assert" - - "github.com/aws/amazon-cloudwatch-agent-test/environment" - "github.com/aws/amazon-cloudwatch-agent-test/util/awsservice" - "github.com/aws/amazon-cloudwatch-agent-test/util/common" -) - -const ( - configOutputPath = "/opt/aws/amazon-cloudwatch-agent/bin/config.json" - logLineId1 = "foo" - logLineId2 = "bar" - logFilePath = "/tmp/cwagent_log_test.log" - sleepForFlush = 60 * time.Second - retryWaitTime = 30 * time.Second - cwlPerfEndpoint = "https://logs-perf.us-east-1.amazonaws.com" - iadRegionalCode = "us-east-1" - - entityType = "@entity.KeyAttributes.Type" - entityName = "@entity.KeyAttributes.Name" - entityEnvironment = "@entity.KeyAttributes.Environment" - - entityPlatform = "@entity.Attributes.PlatformType" - entityInstanceId = "@entity.Attributes.EC2.InstanceId" -) - -var ( - logLineIds = []string{logLineId1, logLineId2} - rnf *cwlTypes.ResourceNotFoundException - cwlClient *cloudwatchlogs.Client - ec2Client *ec2.Client -) - -type expectedEntity struct { - entityType string - name string - environment string - platformType string - instanceId string -} - -func init() { - environment.RegisterEnvironmentMetaDataFlags() - awsCfg, err := config.LoadDefaultConfig( - context.Background(), - config.WithRegion(iadRegionalCode), - ) - if err != nil { - // handle error - fmt.Println("There was an error trying to load default config: ", err) - return - } - - cwlClient = cloudwatchlogs.NewFromConfig(awsCfg, func(o *cloudwatchlogs.Options) { - 
o.BaseEndpoint = aws.String(cwlPerfEndpoint) - }) - ec2Client = ec2.NewFromConfig(awsCfg) - -} - -// TestWriteLogsToCloudWatch writes N number of logs, and then validates that the -// log events are associated with entities from CloudWatch Logs -func TestWriteLogsToCloudWatch(t *testing.T) { - // this uses the {instance_id} placeholder in the agent configuration, - // so we need to determine the host's instance ID for validation - instanceId := awsservice.GetInstanceId() - log.Printf("Found instance id %s", instanceId) - - defer awsservice.DeleteLogGroupAndStream(instanceId, instanceId) - - testCases := map[string]struct { - agentConfigPath string - iterations int - useEC2Tag bool - expectedEntity expectedEntity - }{ - "Compass/IAMRole": { - agentConfigPath: filepath.Join("resources", "compass_default_log.json"), - iterations: 1000, - expectedEntity: expectedEntity{ - entityType: "Service", - name: "cwa-e2e-iam-instance-profile", - environment: "ec2:default", - platformType: "AWS::EC2", - instanceId: instanceId, - }, - }, - "Compass/EC2Tags": { - agentConfigPath: filepath.Join("resources", "compass_default_log.json"), - iterations: 1000, - useEC2Tag: true, - expectedEntity: expectedEntity{ - entityType: "Service", - name: "compass-service-test", - environment: "ec2:default", - platformType: "AWS::EC2", - instanceId: instanceId, - }, - }, - "Compass/ServiceInConfig": { - agentConfigPath: filepath.Join("resources", "compass_service_in_config.json"), - iterations: 1000, - expectedEntity: expectedEntity{ - entityType: "Service", - name: "compass-service", - environment: "compass-environment", - platformType: "AWS::EC2", - instanceId: instanceId, - }, - }, - } - for name, testCase := range testCases { - t.Run(name, func(t *testing.T) { - if testCase.useEC2Tag { - input := &ec2.CreateTagsInput{ - Resources: []string{instanceId}, - Tags: []ec2Types.Tag{ - { - Key: aws.String("service"), - Value: aws.String("compass-service-test"), - }, - }, - } - _, err := 
ec2Client.CreateTags(context.TODO(), input) - assert.NoError(t, err) - } - id := uuid.New() - f, err := os.Create(logFilePath + "-" + id.String()) - if err != nil { - t.Fatalf("Error occurred creating log file for writing: %v", err) - } - common.DeleteFile(common.AgentLogFile) - common.TouchFile(common.AgentLogFile) - - common.CopyFile(testCase.agentConfigPath, configOutputPath) - - common.StartAgent(configOutputPath, true, false) - - // ensure that there is enough time from the "start" time and the first log line, - // so we don't miss it in the GetLogEvents call - writeLogLines(t, f, testCase.iterations) - time.Sleep(sleepForFlush) - common.StopAgent() - end := time.Now() - - // check CWL to ensure we got the expected entities in the log group - ValidateEntity(t, instanceId, instanceId, &end, testCase.expectedEntity) - - f.Close() - os.Remove(logFilePath + "-" + id.String()) - }) - } -} - -func writeLogLines(t *testing.T, f *os.File, iterations int) { - log.Printf("Writing %d lines to %s", iterations*len(logLineIds), f.Name()) - - for i := 0; i < iterations; i++ { - ts := time.Now() - for _, id := range logLineIds { - _, err := f.WriteString(fmt.Sprintf("%s - [%s] #%d This is a log line.\n", ts.Format(time.StampMilli), id, i)) - if err != nil { - // don't need to fatal error here. if a log line doesn't get written, the count - // when validating the log stream should be incorrect and fail there. - t.Logf("Error occurred writing log line: %v", err) - } - } - time.Sleep(30 * time.Millisecond) - } -} - -// ValidateLogs queries a given LogGroup/LogStream combination given the start and end times, and executes an -// arbitrary validator function on the found logs. 
-func ValidateEntity(t *testing.T, logGroup, logStream string, end *time.Time, expectedEntity expectedEntity) { - log.Printf("Checking log group/stream: %s/%s", logGroup, logStream) - - logGroupInfo, err := getLogGroup() - for _, lg := range logGroupInfo { - if *lg.LogGroupName == logGroup { - log.Println("Log group " + *lg.LogGroupName + " exists") - break - } - } - assert.NoError(t, err) - begin := end.Add(-sleepForFlush * 2) - log.Printf("Start time is " + begin.String() + " and end time is " + end.String()) - queryId, err := getLogQueryId(logGroup, &begin, end) - assert.NoError(t, err) - log.Printf("queryId is " + *queryId) - result, err := getQueryResult(queryId) - assert.NoError(t, err) - if !assert.NotZero(t, len(result)) { - return - } - requiredEntityFields := map[string]bool{ - entityType: false, - entityName: false, - entityEnvironment: false, - entityPlatform: false, - entityInstanceId: false, - } - for _, field := range result[0] { - switch aws.ToString(field.Field) { - case entityType: - requiredEntityFields[entityType] = true - assert.Equal(t, expectedEntity.entityType, aws.ToString(field.Value)) - case entityName: - requiredEntityFields[entityName] = true - assert.Equal(t, expectedEntity.name, aws.ToString(field.Value)) - case entityEnvironment: - requiredEntityFields[entityEnvironment] = true - assert.Equal(t, expectedEntity.environment, aws.ToString(field.Value)) - case entityPlatform: - requiredEntityFields[entityPlatform] = true - assert.Equal(t, expectedEntity.platformType, aws.ToString(field.Value)) - case entityInstanceId: - requiredEntityFields[entityInstanceId] = true - assert.Equal(t, expectedEntity.instanceId, aws.ToString(field.Value)) - - } - fmt.Printf("%s: %s\n", aws.ToString(field.Field), aws.ToString(field.Value)) - } - allEntityFieldsFound := true - for _, value := range requiredEntityFields { - if !value { - allEntityFieldsFound = false - } - } - assert.True(t, allEntityFieldsFound) -} - -func getLogQueryId(logGroup string, since, 
until *time.Time) (*string, error) { - var queryId *string - params := &cloudwatchlogs.StartQueryInput{ - QueryString: aws.String("fields @message, @entity.KeyAttributes.Type, @entity.KeyAttributes.Name, @entity.KeyAttributes.Environment, @entity.Attributes.PlatformType, @entity.Attributes.EC2.InstanceId"), - LogGroupName: aws.String(logGroup), - } - if since != nil { - params.StartTime = aws.Int64(since.UnixMilli()) - } - if until != nil { - params.EndTime = aws.Int64(until.UnixMilli()) - } - attempts := 0 - - for { - output, err := cwlClient.StartQuery(context.Background(), params) - attempts += 1 - - if err != nil { - if errors.As(err, &rnf) && attempts <= awsservice.StandardRetries { - // The log group/stream hasn't been created yet, so wait and retry - time.Sleep(retryWaitTime) - continue - } - - // if the error is not a ResourceNotFoundException, we should fail here. - return queryId, err - } - queryId = output.QueryId - return queryId, err - } -} - -func getQueryResult(queryId *string) ([][]cwlTypes.ResultField, error) { - attempts := 0 - var results [][]cwlTypes.ResultField - params := &cloudwatchlogs.GetQueryResultsInput{ - QueryId: aws.String(*queryId), - } - for { - if attempts > awsservice.StandardRetries { - return results, errors.New("exceeded retry count") - } - result, err := cwlClient.GetQueryResults(context.Background(), params) - log.Printf("GetQueryResult status is: %v", result.Status) - attempts += 1 - if result.Status != cwlTypes.QueryStatusComplete { - log.Printf("GetQueryResult: sleeping for 5 seconds until status is complete") - time.Sleep(5 * time.Second) - continue - } - log.Printf("GetQueryResult: result length is %d", len(result.Results)) - if err != nil { - if errors.As(err, &rnf) { - // The log group/stream hasn't been created yet, so wait and retry - time.Sleep(retryWaitTime) - continue - } - - // if the error is not a ResourceNotFoundException, we should fail here. 
- return results, err - } - results = result.Results - return results, err - } -} - -func getLogGroup() ([]cwlTypes.LogGroup, error) { - attempts := 0 - var logGroups []cwlTypes.LogGroup - params := &cloudwatchlogs.DescribeLogGroupsInput{} - for { - output, err := cwlClient.DescribeLogGroups(context.Background(), params) - - attempts += 1 - - if err != nil { - if errors.As(err, &rnf) && attempts <= awsservice.StandardRetries { - // The log group/stream hasn't been created yet, so wait and retry - time.Sleep(retryWaitTime) - continue - } - - // if the error is not a ResourceNotFoundException, we should fail here. - return logGroups, err - } - logGroups = output.LogGroups - return logGroups, err - } -} diff --git a/test/compass/resources/compass_default_log.json b/test/compass/resources/compass_default_log.json deleted file mode 100644 index a4b3c40c35..0000000000 --- a/test/compass/resources/compass_default_log.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "agent": { - "run_as_user": "root", - "debug": true - }, - "logs": { - "endpoint_override": "https://logs-perf.us-east-1.amazonaws.com", - "force_flush_interval": 1, - "logs_collected": { - "files": { - "collect_list": [ - { - "file_path": "/tmp/cwagent_log_test.log*", - "log_group_name": "{instance_id}", - "log_stream_name": "{instance_id}", - "timezone": "UTC" - } - ] - } - } - } -} diff --git a/test/compass/resources/compass_service_in_config.json b/test/compass/resources/compass_service_in_config.json deleted file mode 100644 index e66a91f9a3..0000000000 --- a/test/compass/resources/compass_service_in_config.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "agent": { - "run_as_user": "root", - "debug": true - }, - "logs": { - "endpoint_override": "https://logs-perf.us-east-1.amazonaws.com", - "logs_collected": { - "files": { - "collect_list": [ - { - "file_path": "/tmp/cwagent_log_test.log*", - "log_group_name": "{instance_id}", - "log_stream_name": "{instance_id}", - "timezone": "UTC", - "service.name": "compass-service", 
- "deployment.environment": "compass-environment" - } - ] - } - } - } -} diff --git a/test/go.mod b/test/go.mod deleted file mode 100644 index fc4978f595..0000000000 --- a/test/go.mod +++ /dev/null @@ -1,58 +0,0 @@ -module github.com/aws/private-amazon-cloudwatch-agent-staging/test - -go 1.22.4 - -require ( - github.com/aws/amazon-cloudwatch-agent-test v0.0.0-20240613210401-2cd967b759dc - github.com/aws/aws-sdk-go-v2 v1.23.5 - github.com/aws/aws-sdk-go-v2/config v1.25.11 - github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.29.2 - github.com/stretchr/testify v1.8.4 -) - -require ( - collectd.org v0.5.0 // indirect - github.com/DataDog/datadog-go v4.8.3+incompatible // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.3 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.16.9 // indirect - github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.12.9 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.9 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.4 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.8 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.8 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.8 // indirect - github.com/aws/aws-sdk-go-v2/service/cloudformation v1.42.0 // indirect - github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.31.2 // indirect - github.com/aws/aws-sdk-go-v2/service/dynamodb v1.26.3 // indirect - github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.18.2 // indirect - github.com/aws/aws-sdk-go-v2/service/ec2 v1.138.2 // indirect - github.com/aws/aws-sdk-go-v2/service/ecs v1.35.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.8 // indirect - 
github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.8.9 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.8 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.8 // indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.47.2 // indirect - github.com/aws/aws-sdk-go-v2/service/ssm v1.44.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.18.2 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.26.2 // indirect - github.com/aws/aws-sdk-go-v2/service/xray v1.23.2 // indirect - github.com/aws/smithy-go v1.18.1 // indirect - github.com/cenkalti/backoff/v4 v4.2.1 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/google/uuid v1.4.0 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prozz/aws-embedded-metrics-golang v1.2.0 // indirect - github.com/qri-io/jsonpointer v0.1.1 // indirect - github.com/qri-io/jsonschema v0.2.1 // indirect - go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20231127185646-65229373498e // indirect - golang.org/x/mod v0.14.0 // indirect - golang.org/x/sys v0.15.0 // indirect - golang.org/x/tools v0.16.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/test/go.sum b/test/go.sum deleted file mode 100644 index fa1e4997a1..0000000000 --- a/test/go.sum +++ /dev/null @@ -1,119 +0,0 @@ -collectd.org v0.5.0 h1:mRTLdljvxJNXPMMO9RSxf0PANDAqu/Tz+I6Dt6OjB28= -collectd.org v0.5.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= -github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bpDIRRV4/gUtIBjh8Q= -github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod 
h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/aws/amazon-cloudwatch-agent-test v0.0.0-20240613210401-2cd967b759dc h1:oC0cgVlspqNbwRKk9Zk9zweYKZcjnW48Hwp0isLh1Co= -github.com/aws/amazon-cloudwatch-agent-test v0.0.0-20240613210401-2cd967b759dc/go.mod h1:E/w/idAjJTY+laomuWIO8wCE8Rtq3hSA2sVeNeV+YGA= -github.com/aws/aws-sdk-go-v2 v1.23.5 h1:xK6C4udTyDMd82RFvNkDQxtAd00xlzFUtX4fF2nMZyg= -github.com/aws/aws-sdk-go-v2 v1.23.5/go.mod h1:t3szzKfP0NeRU27uBFczDivYJjsmSnqI8kIvKyWb9ds= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.3 h1:Zx9+31KyB8wQna6SXFWOewlgoY5uGdDAu6PTOEU3OQI= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.3/go.mod h1:zxbEJhRdKTH1nqS2qu6UJ7zGe25xaHxZXaC2CvuQFnA= -github.com/aws/aws-sdk-go-v2/config v1.25.11 h1:RWzp7jhPRliIcACefGkKp03L0Yofmd2p8M25kbiyvno= -github.com/aws/aws-sdk-go-v2/config v1.25.11/go.mod h1:BVUs0chMdygHsQtvaMyEOpW2GIW+ubrxJLgIz/JU29s= -github.com/aws/aws-sdk-go-v2/credentials v1.16.9 h1:LQo3MUIOzod9JdUK+wxmSdgzLVYUbII3jXn3S/HJZU0= -github.com/aws/aws-sdk-go-v2/credentials v1.16.9/go.mod h1:R7mDuIJoCjH6TxGUc/cylE7Lp/o0bhKVoxdBThsjqCM= -github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.12.9 h1:/KXnrU9g/RzJwJKuZ7G635w9segJCpg9OIwkjPYZs7g= -github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.12.9/go.mod h1:i6u5850nH0SFslKYMUVLW8Uc+JgEdpx4XHNA7T1S2C0= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.9 h1:FZVFahMyZle6WcogZCOxo6D/lkDA2lqKIn4/ueUmVXw= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.9/go.mod h1:kjq7REMIkxdtcEC9/4BVXjOsNY5isz6jQbEgk6osRTU= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.4 h1:TUCNKBd4/JEefsZDxo5deRmrRRPZHqGyBYiUAeBKOWU= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.4/go.mod h1:egDkcl+zsgFqS6VO142bKboip5Pe1sNMwN55Xy38QsM= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.8 h1:8GVZIR0y6JRIUNSYI1xAMF4HDfV8H/bOsZ/8AD/uY5Q= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.8/go.mod 
h1:rwBfu0SoUkBUZndVgPZKAD9Y2JigaZtRP68unRiYToQ= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.8 h1:ZE2ds/qeBkhk3yqYvS3CDCFNvd9ir5hMjlVStLZWrvM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.8/go.mod h1:/lAPPymDYL023+TS6DJmjuL42nxix2AvEvfjqOBRODk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 h1:uR9lXYjdPX0xY+NhvaJ4dD8rpSRz5VY81ccIIoNG+lw= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.8 h1:abKT+RuM1sdCNZIGIfZpLkvxEX3Rpsto019XG/rkYG8= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.8/go.mod h1:Owc4ysUE71JSruVTTa3h4f2pp3E4hlcAtmeNXxDmjj4= -github.com/aws/aws-sdk-go-v2/service/cloudformation v1.42.0 h1:Eub6qmSRH5ahS1zhVLa1i1qT3raC9Sxrn2kgtG19J3I= -github.com/aws/aws-sdk-go-v2/service/cloudformation v1.42.0/go.mod h1:ehWDbgXo5Zy6eLjP+xX+Vf8wXaSyLGeRf6KlvoVAaXk= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.31.2 h1:HWB+RXvOQQkhEp8QCpTlgullbCiysRQlo6ulVZRBBtM= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.31.2/go.mod h1:YHhAfr9Qd5xd0fLT2B7LxDFWbIZ6RbaI81Hu2ASCiTY= -github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.29.2 h1:pq1AgSc6YRDkT3/iuXgPUPL0ArmdEmjPoAl0YEJZ4d4= -github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.29.2/go.mod h1:ZGxc+lOwUVsyeKrneIf8/hhowNgyqvCcwmLU/Hrscbk= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.26.3 h1:Ytz7+VR04GK7wF1C+yQScMZ4Q01xeL4EbQ4kOQ8HY1c= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.26.3/go.mod h1:qqiIi0EbEEovHG/nQXYGAXcVvHPaUg7KMwh3VARzQz4= -github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.18.2 h1:/zmckWK6/SL9MTnCD8p2vOEmOT+LFQtXeoo/bTRBa3c= -github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.18.2/go.mod h1:Wkk+2ZcFVCqnuf/yXjvSlySsoy5l2RSFfv/ikosEv3M= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.138.2 h1:e3Imv1oXz+W3Tfclflkh72t5TUPUwWdkHP7ctQGk8Dc= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.138.2/go.mod h1:d1hAqgLDOPaSO1Piy/0bBmj6oAplFwv6p0cquHntNHM= 
-github.com/aws/aws-sdk-go-v2/service/ecs v1.35.2 h1:yIr1T8uPhZT2cKCBeO39utfzG/RKJn3SxbuBOdj18Nc= -github.com/aws/aws-sdk-go-v2/service/ecs v1.35.2/go.mod h1:MvDz+yXfa2sSEfHB57rdf83deKJIeKEopqHFhVmaRlk= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.3 h1:e3PCNeEaev/ZF01cQyNZgmYE9oYYePIMJs2mWSKG514= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.3/go.mod h1:gIeeNyaL8tIEqZrzAnTeyhHcE0yysCtcaP+N9kxLZ+E= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.8 h1:xyfOAYV/ujzZOo01H9+OnyeiRKmTEp6EsITTsmq332Q= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.8/go.mod h1:coLeQEoKzW9ViTL2bn0YUlU7K0RYjivKudG74gtd+sI= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.8.9 h1:Vn/qqsXxe3JEALfoU6ypVt86fb811wKqv4kdxvAUk/Q= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.8.9/go.mod h1:TQYzeHkuQrsz/AsxxK96CYJO4KRd4E6QozqktOR2h3w= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.8 h1:EamsKe+ZjkOQjDdHd86/JCEucjFKQ9T0atWKO4s2Lgs= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.8/go.mod h1:Q0vV3/csTpbkfKLI5Sb56cJQTCTtJ0ixdb7P+Wedqiw= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.8 h1:ip5ia3JOXl4OAsqeTdrOOmqKgoWiu+t9XSOnRzBwmRs= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.8/go.mod h1:kE+aERnK9VQIw1vrk7ElAvhCsgLNzGyCPNg2Qe4Eq4c= -github.com/aws/aws-sdk-go-v2/service/s3 v1.47.2 h1:DLSAG8zpJV2pYsU+UPkj1IEZghyBnnUsvIRs6UuXSDU= -github.com/aws/aws-sdk-go-v2/service/s3 v1.47.2/go.mod h1:thjZng67jGsvMyVZnSxlcqKyLwB0XTG8bHIRZPTJ+Bs= -github.com/aws/aws-sdk-go-v2/service/ssm v1.44.2 h1:lmdmYCvG1EJKGLEsUsYDNO6MwZyBZROrRg04Vrb5TwA= -github.com/aws/aws-sdk-go-v2/service/ssm v1.44.2/go.mod h1:pHJ1md/3F3WkYfZ4JKOllPfXQi4NiWk7NxbeOD53HQc= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.2 h1:xJPydhNm0Hiqct5TVKEuHG7weC0+sOs4MUnd7A5n5F4= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.2/go.mod 
h1:zxk6y1X2KXThESWMS5CrKRvISD8mbIMab6nZrCGxDG0= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.2 h1:8dU9zqA77C5egbU6yd4hFLaiIdPv3rU+6cp7sz5FjCU= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.2/go.mod h1:7Lt5mjQ8x5rVdKqg+sKKDeuwoszDJIIPmkd8BVsEdS0= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.2 h1:fFrLsy08wEbAisqW3KDl/cPHrF43GmV79zXB9EwJiZw= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.2/go.mod h1:7Ld9eTqocTvJqqJ5K/orbSDwmGcpRdlDiLjz2DO+SL8= -github.com/aws/aws-sdk-go-v2/service/xray v1.23.2 h1:mFHM/R2FYnCkmUB52SqJncU5TWDCfI55uXlNTp96g3Y= -github.com/aws/aws-sdk-go-v2/service/xray v1.23.2/go.mod h1:zz5H6SRVFHj93yt3lxA8Ql63c/pY90YjNvvalulrCTk= -github.com/aws/smithy-go v1.18.1 h1:pOdBTUfXNazOlxLrgeYalVnuTpKreACHtc62xLwIB3c= -github.com/aws/smithy-go v1.18.1/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod 
h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/kinbiko/jsonassert v1.0.1 h1:8gdLmUaPWuxk2TzQSofKRqatFH6zwTF6AsUH4bugJYY= -github.com/kinbiko/jsonassert v1.0.1/go.mod h1:QRwBwiAsrcJpjw+L+Q4WS8psLxuUY+HylVZS/4j74TM= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prozz/aws-embedded-metrics-golang v1.2.0 h1:b/LFb8J9LbgANow/9nYZE3M3bkb457/dj0zAB3hPyvo= -github.com/prozz/aws-embedded-metrics-golang v1.2.0/go.mod h1:MXOqF9cJCEHjj77LWq7NWK44/AOyaFzwmcAYqR3057M= -github.com/qri-io/jsonpointer v0.1.1 h1:prVZBZLL6TW5vsSB9fFHFAMBLI4b0ri5vribQlTJiBA= -github.com/qri-io/jsonpointer v0.1.1/go.mod h1:DnJPaYgiKu56EuDp8TU5wFLdZIcAnb/uH9v37ZaMV64= -github.com/qri-io/jsonschema v0.2.1 h1:NNFoKms+kut6ABPf6xiKNM5214jzxAhDBrPHCJ97Wg0= -github.com/qri-io/jsonschema v0.2.1/go.mod h1:g7DPkiOsK1xv6T/Ao5scXRkd+yTFygcANPBaaqW+VrI= -github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -golang.org/x/exp v0.0.0-20231127185646-65229373498e h1:Gvh4YaCaXNs6dKTlfgismwWZKyjVZXwOPfIyUaqU3No= -golang.org/x/exp v0.0.0-20231127185646-65229373498e/go.mod 
h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= -golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= From 7d43ef9cc2feb063e42537faa5d3ca28ee410c94 Mon Sep 17 00:00:00 2001 From: Zhihong Lin Date: Tue, 29 Oct 2024 16:52:41 -0400 Subject: [PATCH 45/47] Fix AppSignal and infra metrics pipeline incorrectly scraping entity at datapoint level --- .../sampleConfig/advanced_config_darwin.yaml | 1 - .../sampleConfig/advanced_config_linux.yaml | 1 - .../sampleConfig/advanced_config_windows.yaml | 1 - .../sampleConfig/amp_config_linux.yaml | 1 - .../appsignals_and_eks_config.yaml | 4 ++-- .../appsignals_and_k8s_config.yaml | 4 ++-- .../appsignals_fallback_and_eks_config.yaml | 4 ++-- .../appsignals_over_fallback_config.yaml | 4 ++-- 
.../sampleConfig/basic_config_linux.yaml | 1 - .../sampleConfig/basic_config_windows.yaml | 1 - .../sampleConfig/collectd_config_linux.yaml | 4 ++-- .../sampleConfig/compass_linux_config.yaml | 4 ++-- .../sampleConfig/complete_darwin_config.yaml | 5 ++-- .../sampleConfig/complete_linux_config.yaml | 5 ++-- .../sampleConfig/complete_windows_config.yaml | 5 ++-- .../sampleConfig/delta_config_linux.yaml | 1 - .../sampleConfig/delta_net_config_linux.yaml | 1 - .../sampleConfig/drop_origin_linux.yaml | 1 - .../ignore_append_dimensions.yaml | 1 - .../sampleConfig/invalid_input_linux.yaml | 1 - .../sampleConfig/jmx_config_linux.yaml | 1 - .../sampleConfig/standard_config_linux.yaml | 1 - ...ndard_config_linux_with_common_config.yaml | 1 - .../sampleConfig/standard_config_windows.yaml | 1 - ...ard_config_windows_with_common_config.yaml | 1 - .../sampleConfig/statsd_config_linux.yaml | 4 ++-- .../sampleConfig/statsd_config_windows.yaml | 4 ++-- translator/translate/otel/common/common.go | 18 --------------- .../pipeline/applicationsignals/translator.go | 2 +- .../applicationsignals/translator_test.go | 6 ++--- .../otel/pipeline/host/translator.go | 4 ++-- .../otel/pipeline/host/translator_test.go | 2 +- .../otel/processor/awsentity/translator.go | 23 ++++++++++++------- 33 files changed, 44 insertions(+), 74 deletions(-) diff --git a/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml b/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml index 185303f269..64d81aaa17 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_darwin.yaml @@ -23,7 +23,6 @@ extensions: processors: awsentity/resource: entity_type: Resource - scrape_datapoint_attribute: true platform: ec2 cumulativetodelta/hostDeltaMetrics: exclude: diff --git a/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml b/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml index c8bfcc520f..b202e49fd9 
100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_linux.yaml @@ -23,7 +23,6 @@ extensions: processors: awsentity/resource: entity_type: Resource - scrape_datapoint_attribute: true platform: ec2 cumulativetodelta/hostDeltaMetrics: exclude: diff --git a/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml b/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml index 30a234e297..0b7068462e 100644 --- a/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/advanced_config_windows.yaml @@ -23,7 +23,6 @@ extensions: processors: awsentity/resource: entity_type: Resource - scrape_datapoint_attribute: true platform: ec2 ec2tagger: ec2_instance_tag_keys: diff --git a/translator/tocwconfig/sampleConfig/amp_config_linux.yaml b/translator/tocwconfig/sampleConfig/amp_config_linux.yaml index ab334e047d..d32905f7ca 100644 --- a/translator/tocwconfig/sampleConfig/amp_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/amp_config_linux.yaml @@ -81,7 +81,6 @@ extensions: processors: awsentity/resource: entity_type: Resource - scrape_datapoint_attribute: true platform: ec2 batch/host/amp: metadata_cardinality_limit: 1000 diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml index 072c1dded6..06d6e36b8d 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_eks_config.yaml @@ -305,7 +305,7 @@ processors: resolvers: - name: TestCluster platform: eks - awsentity/service: + awsentity/service/application_signals: cluster_name: TestCluster entity_type: Service kubernetes_mode: EKS @@ -682,7 +682,7 @@ service: exporters: - awsemf/application_signals processors: - - awsentity/service + - awsentity/service/application_signals - resourcedetection - 
awsapplicationsignals receivers: diff --git a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml index f72551debe..6bf6b6a7d9 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_and_k8s_config.yaml @@ -306,7 +306,7 @@ processors: resolvers: - name: TestCluster platform: k8s - awsentity/service: + awsentity/service/application_signals: cluster_name: TestCluster entity_type: Service kubernetes_mode: K8sEC2 @@ -663,7 +663,7 @@ service: exporters: - awsemf/application_signals processors: - - awsentity/service + - awsentity/service/application_signals - resourcedetection - awsapplicationsignals receivers: diff --git a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml index 5e4598ecdf..ed0f4d5b50 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_fallback_and_eks_config.yaml @@ -305,7 +305,7 @@ processors: resolvers: - name: TestCluster platform: eks - awsentity/service: + awsentity/service/application_signals: cluster_name: TestCluster entity_type: Service kubernetes_mode: EKS @@ -682,7 +682,7 @@ service: exporters: - awsemf/application_signals processors: - - awsentity/service + - awsentity/service/application_signals - resourcedetection - awsapplicationsignals receivers: diff --git a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml index 43b3268f7b..f2e00bc52b 100644 --- a/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml +++ b/translator/tocwconfig/sampleConfig/appsignals_over_fallback_config.yaml @@ -305,7 +305,7 @@ processors: resolvers: - name: TestCluster platform: eks - awsentity/service: + 
awsentity/service/application_signals: cluster_name: TestCluster entity_type: Service kubernetes_mode: EKS @@ -682,7 +682,7 @@ service: exporters: - awsemf/application_signals processors: - - awsentity/service + - awsentity/service/application_signals - resourcedetection - awsapplicationsignals receivers: diff --git a/translator/tocwconfig/sampleConfig/basic_config_linux.yaml b/translator/tocwconfig/sampleConfig/basic_config_linux.yaml index 046ae18f0b..9a349c682e 100644 --- a/translator/tocwconfig/sampleConfig/basic_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/basic_config_linux.yaml @@ -23,7 +23,6 @@ extensions: processors: awsentity/resource: entity_type: Resource - scrape_datapoint_attribute: true platform: ec2 ec2tagger: ec2_instance_tag_keys: diff --git a/translator/tocwconfig/sampleConfig/basic_config_windows.yaml b/translator/tocwconfig/sampleConfig/basic_config_windows.yaml index af56afd903..31c7c5229b 100644 --- a/translator/tocwconfig/sampleConfig/basic_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/basic_config_windows.yaml @@ -23,7 +23,6 @@ extensions: processors: awsentity/resource: entity_type: Resource - scrape_datapoint_attribute: true platform: ec2 ec2tagger: ec2_instance_tag_keys: diff --git a/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml b/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml index b9506b7381..a1ecf22c1e 100644 --- a/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/collectd_config_linux.yaml @@ -21,7 +21,7 @@ extensions: mode: ec2 region: us-west-2 processors: - awsentity/service: + awsentity/service/telegraf: entity_type: Service scrape_datapoint_attribute: true platform: ec2 @@ -39,7 +39,7 @@ service: exporters: - awscloudwatch processors: - - awsentity/service + - awsentity/service/telegraf receivers: - telegraf_socket_listener telemetry: diff --git a/translator/tocwconfig/sampleConfig/compass_linux_config.yaml 
b/translator/tocwconfig/sampleConfig/compass_linux_config.yaml index 95bab71eeb..ab934cba89 100644 --- a/translator/tocwconfig/sampleConfig/compass_linux_config.yaml +++ b/translator/tocwconfig/sampleConfig/compass_linux_config.yaml @@ -32,7 +32,7 @@ extensions: mode: ec2 region: us-west-2 processors: - awsentity/service: + awsentity/service/telegraf: entity_type: Service scrape_datapoint_attribute: true platform: ec2 @@ -63,7 +63,7 @@ service: exporters: - awscloudwatch processors: - - awsentity/service + - awsentity/service/telegraf - ec2tagger receivers: - telegraf_socket_listener diff --git a/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml b/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml index a50514557a..ec2a2d8994 100644 --- a/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_darwin_config.yaml @@ -97,9 +97,8 @@ extensions: processors: awsentity/resource: entity_type: Resource - scrape_datapoint_attribute: true platform: ec2 - awsentity/service: + awsentity/service/telegraf: entity_type: Service scrape_datapoint_attribute: true platform: ec2 @@ -287,7 +286,7 @@ service: exporters: - awscloudwatch processors: - - awsentity/service + - awsentity/service/telegraf - ec2tagger - transform receivers: diff --git a/translator/tocwconfig/sampleConfig/complete_linux_config.yaml b/translator/tocwconfig/sampleConfig/complete_linux_config.yaml index 3448a34d8b..e54d859883 100644 --- a/translator/tocwconfig/sampleConfig/complete_linux_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_linux_config.yaml @@ -102,9 +102,8 @@ extensions: processors: awsentity/resource: entity_type: Resource - scrape_datapoint_attribute: true platform: ec2 - awsentity/service: + awsentity/service/telegraf: entity_type: Service scrape_datapoint_attribute: true platform: ec2 @@ -393,7 +392,7 @@ service: exporters: - awscloudwatch processors: - - awsentity/service + - awsentity/service/telegraf 
- ec2tagger - transform receivers: diff --git a/translator/tocwconfig/sampleConfig/complete_windows_config.yaml b/translator/tocwconfig/sampleConfig/complete_windows_config.yaml index 918a9f5d70..d41982d0cb 100644 --- a/translator/tocwconfig/sampleConfig/complete_windows_config.yaml +++ b/translator/tocwconfig/sampleConfig/complete_windows_config.yaml @@ -97,9 +97,8 @@ extensions: processors: awsentity/resource: entity_type: Resource - scrape_datapoint_attribute: true platform: ec2 - awsentity/service: + awsentity/service/telegraf: entity_type: Service scrape_datapoint_attribute: true platform: ec2 @@ -275,7 +274,7 @@ service: exporters: - awscloudwatch processors: - - awsentity/service + - awsentity/service/telegraf - ec2tagger - transform receivers: diff --git a/translator/tocwconfig/sampleConfig/delta_config_linux.yaml b/translator/tocwconfig/sampleConfig/delta_config_linux.yaml index 63eecf2382..6a27ffc90d 100644 --- a/translator/tocwconfig/sampleConfig/delta_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/delta_config_linux.yaml @@ -23,7 +23,6 @@ extensions: processors: awsentity/resource: entity_type: Resource - scrape_datapoint_attribute: true platform: ec2 cumulativetodelta/hostDeltaMetrics: exclude: diff --git a/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml b/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml index d7eeffb216..f6601c168d 100644 --- a/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/delta_net_config_linux.yaml @@ -23,7 +23,6 @@ extensions: processors: awsentity/resource: entity_type: Resource - scrape_datapoint_attribute: true platform: ec2 cumulativetodelta/hostDeltaMetrics: exclude: diff --git a/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml b/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml index 1a73d782ca..a458852b8e 100644 --- a/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml +++ 
b/translator/tocwconfig/sampleConfig/drop_origin_linux.yaml @@ -28,7 +28,6 @@ extensions: processors: awsentity/resource: entity_type: Resource - scrape_datapoint_attribute: true platform: ec2 ec2tagger: ec2_instance_tag_keys: diff --git a/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml b/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml index b484ce567a..d38f02f2cd 100644 --- a/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml +++ b/translator/tocwconfig/sampleConfig/ignore_append_dimensions.yaml @@ -23,7 +23,6 @@ extensions: processors: awsentity/resource: entity_type: Resource - scrape_datapoint_attribute: true platform: ec2 ec2tagger: imds_retries: 1 diff --git a/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml b/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml index 3387b901d5..373ba8e154 100644 --- a/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml +++ b/translator/tocwconfig/sampleConfig/invalid_input_linux.yaml @@ -23,7 +23,6 @@ extensions: processors: awsentity/resource: entity_type: Resource - scrape_datapoint_attribute: true platform: ec2 ec2tagger: ec2_instance_tag_keys: diff --git a/translator/tocwconfig/sampleConfig/jmx_config_linux.yaml b/translator/tocwconfig/sampleConfig/jmx_config_linux.yaml index 30c711d562..2bd2541d3e 100644 --- a/translator/tocwconfig/sampleConfig/jmx_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/jmx_config_linux.yaml @@ -75,7 +75,6 @@ extensions: processors: awsentity/resource: entity_type: Resource - scrape_datapoint_attribute: true platform: ec2 batch/host/amp: metadata_cardinality_limit: 1000 diff --git a/translator/tocwconfig/sampleConfig/standard_config_linux.yaml b/translator/tocwconfig/sampleConfig/standard_config_linux.yaml index 616d7589dc..99724c7d28 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_linux.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_linux.yaml @@ -23,7 +23,6 @@ extensions: 
processors: awsentity/resource: entity_type: Resource - scrape_datapoint_attribute: true platform: ec2 cumulativetodelta/hostDeltaMetrics: exclude: diff --git a/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml b/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml index ff82b2f699..13bac887c2 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_linux_with_common_config.yaml @@ -27,7 +27,6 @@ extensions: processors: awsentity/resource: entity_type: Resource - scrape_datapoint_attribute: true platform: ec2 cumulativetodelta/hostDeltaMetrics: exclude: diff --git a/translator/tocwconfig/sampleConfig/standard_config_windows.yaml b/translator/tocwconfig/sampleConfig/standard_config_windows.yaml index 682831e467..8df619d970 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_windows.yaml @@ -23,7 +23,6 @@ extensions: processors: awsentity/resource: entity_type: Resource - scrape_datapoint_attribute: true platform: ec2 ec2tagger: ec2_instance_tag_keys: diff --git a/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml b/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml index fd788a5c17..2914d1bb74 100644 --- a/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml +++ b/translator/tocwconfig/sampleConfig/standard_config_windows_with_common_config.yaml @@ -27,7 +27,6 @@ extensions: processors: awsentity/resource: entity_type: Resource - scrape_datapoint_attribute: true platform: ec2 ec2tagger: ec2_instance_tag_keys: diff --git a/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml b/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml index 88980baa0d..0034301d02 100644 --- a/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml 
+++ b/translator/tocwconfig/sampleConfig/statsd_config_linux.yaml @@ -21,7 +21,7 @@ extensions: mode: ec2 region: us-west-2 processors: - awsentity/service: + awsentity/service/telegraf: entity_type: Service scrape_datapoint_attribute: true platform: ec2 @@ -39,7 +39,7 @@ service: exporters: - awscloudwatch processors: - - awsentity/service + - awsentity/service/telegraf receivers: - telegraf_statsd telemetry: diff --git a/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml b/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml index 906f053a31..6aeeae4dd6 100644 --- a/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml +++ b/translator/tocwconfig/sampleConfig/statsd_config_windows.yaml @@ -21,7 +21,7 @@ extensions: mode: ec2 region: us-west-2 processors: - awsentity/service: + awsentity/service/telegraf: entity_type: Service scrape_datapoint_attribute: true platform: ec2 @@ -39,7 +39,7 @@ service: exporters: - awscloudwatch processors: - - awsentity/service + - awsentity/service/telegraf receivers: - telegraf_statsd telemetry: diff --git a/translator/translate/otel/common/common.go b/translator/translate/otel/common/common.go index fac3c819b7..f7637150d2 100644 --- a/translator/translate/otel/common/common.go +++ b/translator/translate/otel/common/common.go @@ -14,8 +14,6 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" "gopkg.in/yaml.v3" - - "github.com/aws/amazon-cloudwatch-agent/internal/util/collections" ) const ( @@ -136,12 +134,6 @@ var ( AgentDebugConfigKey = ConfigKey(AgentKey, DebugKey) MetricsAggregationDimensionsKey = ConfigKey(MetricsKey, AggregationDimensionsKey) - - TelegrafPlugins = collections.NewSet[string](CollectDMetricKey, CPUMetricKey, DiskMetricKey, DiskIoMetricKey, - StatsDMetricKey, SwapMetricKey, MemMetricKey, NetMetricKey, NetStatMetricKey, ProcessMetricKey, ProcStatMetricKey, - //Windows Plugins - MemMetricKeyWindows, LogicalDiskMetricKeyWindows, 
NetworkMetricKeyWindows, PagingMetricKeyWindows, PhysicalDiskMetricKeyWindows, - ProcessorMetricKeyWindows, SystemMetricKeyWindows, TCPv4MetricKeyWindows, TCPv6MetricKeyWindows) ) // Translator is used to translate the JSON config into an @@ -455,13 +447,3 @@ func IsAnySet(conf *confmap.Conf, keys []string) bool { } return false } - -// TelegrafMetricsEnabled checks if any telegraf plugin is present in the configuration. -func TelegrafMetricsEnabled(conf *confmap.Conf) bool { - for plugin := range TelegrafPlugins { - if conf.IsSet(ConfigKey(MetricsKey, MetricsCollectedKey, plugin)) { - return true - } - } - return false -} diff --git a/translator/translate/otel/pipeline/applicationsignals/translator.go b/translator/translate/otel/pipeline/applicationsignals/translator.go index 61bfb47964..bc20a570c8 100644 --- a/translator/translate/otel/pipeline/applicationsignals/translator.go +++ b/translator/translate/otel/pipeline/applicationsignals/translator.go @@ -56,7 +56,7 @@ func (t *translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators mode := context.CurrentContext().KubernetesMode() if t.dataType == component.DataTypeMetrics && mode != "" { - translators.Processors.Set(awsentity.NewTranslatorWithEntityType(awsentity.Service)) + translators.Processors.Set(awsentity.NewTranslatorWithEntityType(awsentity.Service, common.AppSignals, false)) } translators.Processors.Set(resourcedetection.NewTranslator(resourcedetection.WithDataType(t.dataType))) translators.Processors.Set(awsapplicationsignals.NewTranslator(awsapplicationsignals.WithDataType(t.dataType))) diff --git a/translator/translate/otel/pipeline/applicationsignals/translator_test.go b/translator/translate/otel/pipeline/applicationsignals/translator_test.go index da566e57bd..9572dfd1d2 100644 --- a/translator/translate/otel/pipeline/applicationsignals/translator_test.go +++ b/translator/translate/otel/pipeline/applicationsignals/translator_test.go @@ -126,7 +126,7 @@ func 
TestTranslatorMetricsForKubernetes(t *testing.T) { }, want: &want{ receivers: []string{"otlp/application_signals"}, - processors: []string{"awsentity/service", "resourcedetection", "awsapplicationsignals"}, + processors: []string{"awsentity/service/application_signals", "resourcedetection", "awsapplicationsignals"}, exporters: []string{"awsemf/application_signals"}, extensions: []string{"agenthealth/logs"}, }, @@ -147,7 +147,7 @@ func TestTranslatorMetricsForKubernetes(t *testing.T) { }, want: &want{ receivers: []string{"otlp/application_signals"}, - processors: []string{"awsentity/service", "resourcedetection", "awsapplicationsignals"}, + processors: []string{"awsentity/service/application_signals", "resourcedetection", "awsapplicationsignals"}, exporters: []string{"debug/application_signals", "awsemf/application_signals"}, extensions: []string{"agenthealth/logs"}, }, @@ -165,7 +165,7 @@ func TestTranslatorMetricsForKubernetes(t *testing.T) { }, want: &want{ receivers: []string{"otlp/application_signals"}, - processors: []string{"awsentity/service", "resourcedetection", "awsapplicationsignals"}, + processors: []string{"awsentity/service/application_signals", "resourcedetection", "awsapplicationsignals"}, exporters: []string{"awsemf/application_signals"}, extensions: []string{"agenthealth/logs"}, }, diff --git a/translator/translate/otel/pipeline/host/translator.go b/translator/translate/otel/pipeline/host/translator.go index 3e6b8795f9..641ed43e93 100644 --- a/translator/translate/otel/pipeline/host/translator.go +++ b/translator/translate/otel/pipeline/host/translator.go @@ -66,9 +66,9 @@ func (t translator) Translate(conf *confmap.Conf) (*common.ComponentTranslators, if strings.HasPrefix(t.name, common.PipelineNameHostOtlpMetrics) { entityProcessor = nil } else if strings.HasPrefix(t.name, common.PipelineNameHostCustomMetrics) { - entityProcessor = awsentity.NewTranslatorWithEntityType(awsentity.Service) + entityProcessor = 
awsentity.NewTranslatorWithEntityType(awsentity.Service, "telegraf", true) } else if strings.HasPrefix(t.name, common.PipelineNameHost) || strings.HasPrefix(t.name, common.PipelineNameHostDeltaMetrics) { - entityProcessor = awsentity.NewTranslatorWithEntityType(awsentity.Resource) + entityProcessor = awsentity.NewTranslatorWithEntityType(awsentity.Resource, "", false) } translators := common.ComponentTranslators{ diff --git a/translator/translate/otel/pipeline/host/translator_test.go b/translator/translate/otel/pipeline/host/translator_test.go index a3b855e1e4..85f7f762c2 100644 --- a/translator/translate/otel/pipeline/host/translator_test.go +++ b/translator/translate/otel/pipeline/host/translator_test.go @@ -93,7 +93,7 @@ func TestTranslator(t *testing.T) { want: &want{ pipelineID: "metrics/hostCustomMetrics", receivers: []string{"nop", "other"}, - processors: []string{"awsentity/service"}, + processors: []string{"awsentity/service/telegraf"}, exporters: []string{"awscloudwatch"}, extensions: []string{"agenthealth/metrics"}, }, diff --git a/translator/translate/otel/processor/awsentity/translator.go b/translator/translate/otel/processor/awsentity/translator.go index e057a65d17..ed81c702d0 100644 --- a/translator/translate/otel/processor/awsentity/translator.go +++ b/translator/translate/otel/processor/awsentity/translator.go @@ -24,9 +24,10 @@ const ( ) type translator struct { - factory processor.Factory - entityType string - name string + factory processor.Factory + entityType string + name string + scrapeDatapointAttribute bool } func NewTranslator() common.Translator[component.Config] { @@ -35,11 +36,17 @@ func NewTranslator() common.Translator[component.Config] { } } -func NewTranslatorWithEntityType(entityType string) common.Translator[component.Config] { +func NewTranslatorWithEntityType(entityType string, name string, scrapeDatapointAttribute bool) common.Translator[component.Config] { + pipelineName := strings.ToLower(entityType) + if name != "" { + 
pipelineName = pipelineName + "/" + name + } + return &translator{ - factory: awsentity.NewFactory(), - entityType: entityType, - name: strings.ToLower(entityType), + factory: awsentity.NewFactory(), + entityType: entityType, + name: pipelineName, + scrapeDatapointAttribute: scrapeDatapointAttribute, } } @@ -59,7 +66,7 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { cfg.EntityType = t.entityType } - if common.TelegrafMetricsEnabled(conf) { + if t.scrapeDatapointAttribute { cfg.ScrapeDatapointAttribute = true } From b3c03e7a21b0b1ee9fa8818b379f4b8da1b72e3f Mon Sep 17 00:00:00 2001 From: Zhihong Lin Date: Tue, 29 Oct 2024 18:24:34 -0400 Subject: [PATCH 46/47] Increase retry duration for service tag retrieval --- extension/entitystore/serviceprovider.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/extension/entitystore/serviceprovider.go b/extension/entitystore/serviceprovider.go index d30040c697..41942028cd 100644 --- a/extension/entitystore/serviceprovider.go +++ b/extension/entitystore/serviceprovider.go @@ -37,8 +37,8 @@ const ( describeTagsJitterMax = 3600 describeTagsJitterMin = 3000 - defaultJitterMin = 60 - defaultJitterMax = 180 + defaultJitterMin = 480 + defaultJitterMax = 600 maxRetry = 3 ) @@ -79,7 +79,7 @@ type serviceprovider struct { func (s *serviceprovider) startServiceProvider() { unlimitedRetryer := NewRetryer(false, true, defaultJitterMin, defaultJitterMax, ec2tagger.BackoffSleepArray, infRetry, s.done, s.logger) - limitedRetryer := NewRetryer(false, false, describeTagsJitterMin, describeTagsJitterMax, ec2tagger.ThrottleBackOffArray, maxRetry, s.done, s.logger) + limitedRetryer := NewRetryer(false, true, describeTagsJitterMin, describeTagsJitterMax, ec2tagger.ThrottleBackOffArray, maxRetry, s.done, s.logger) go unlimitedRetryer.refreshLoop(s.scrapeIAMRole) go limitedRetryer.refreshLoop(s.scrapeImdsServiceName) } From 5c6298d46df11d8e15ec4a270c67417bed3ea8fd Mon Sep 17 00:00:00 2001 From: 
Zhihong Lin Date: Tue, 29 Oct 2024 19:58:13 -0400 Subject: [PATCH 47/47] Drop entity if account ID is not present --- extension/entitystore/extension.go | 18 +++--- extension/entitystore/extension_test.go | 38 ++++-------- plugins/outputs/cloudwatch/convert_otel.go | 4 +- .../outputs/cloudwatch/convert_otel_test.go | 33 +++++++++- plugins/outputs/cloudwatchlogs/pusher.go | 9 +-- plugins/outputs/cloudwatchlogs/pusher_test.go | 61 +++++++++++++++++-- 6 files changed, 112 insertions(+), 51 deletions(-) diff --git a/extension/entitystore/extension.go b/extension/entitystore/extension.go index d9058f9f78..bd27012a3c 100644 --- a/extension/entitystore/extension.go +++ b/extension/entitystore/extension.go @@ -11,7 +11,6 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2/ec2iface" - "github.com/aws/aws-sdk-go/service/sts/stsiface" "github.com/jellydator/ttlcache/v3" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/extension" @@ -20,6 +19,7 @@ import ( configaws "github.com/aws/amazon-cloudwatch-agent/cfg/aws" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" "github.com/aws/amazon-cloudwatch-agent/internal/retryer" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/entityattributes" "github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatchlogs" "github.com/aws/amazon-cloudwatch-agent/translator/config" ) @@ -31,9 +31,6 @@ const ( ServiceNameSourceKey = "AWS.ServiceNameSource" PlatformType = "PlatformType" EC2PlatForm = "AWS::EC2" - Type = "Type" - Name = "Name" - Environment = "Environment" podTerminationCheckInterval = 5 * time.Minute ) @@ -74,8 +71,6 @@ type EntityStore struct { metadataprovider ec2metadataprovider.MetadataProvider - stsClient stsiface.STSAPI - podTerminationCheckInterval time.Duration } @@ -149,7 +144,9 @@ func (e *EntityStore) CreateLogFileEntity(logFileGlob LogFileGlob, logGroupName keyAttributes := 
e.createServiceKeyAttributes(serviceAttr) attributeMap := e.createAttributeMap() addNonEmptyToMap(attributeMap, ServiceNameSourceKey, serviceAttr.ServiceNameSource) - + if _, ok := keyAttributes[entityattributes.AwsAccountId]; !ok { + return nil + } return &cloudwatchlogs.Entity{ KeyAttributes: keyAttributes, Attributes: attributeMap, @@ -225,10 +222,11 @@ func (e *EntityStore) createAttributeMap() map[string]*string { // createServiceKeyAttribute creates KeyAttributes for Service entities func (e *EntityStore) createServiceKeyAttributes(serviceAttr ServiceAttribute) map[string]*string { serviceKeyAttr := map[string]*string{ - Type: aws.String(Service), + entityattributes.EntityType: aws.String(Service), } - addNonEmptyToMap(serviceKeyAttr, Name, serviceAttr.ServiceName) - addNonEmptyToMap(serviceKeyAttr, Environment, serviceAttr.Environment) + addNonEmptyToMap(serviceKeyAttr, entityattributes.ServiceName, serviceAttr.ServiceName) + addNonEmptyToMap(serviceKeyAttr, entityattributes.DeploymentEnvironment, serviceAttr.Environment) + addNonEmptyToMap(serviceKeyAttr, entityattributes.AwsAccountId, e.ec2Info.GetAccountID()) return serviceKeyAttr } diff --git a/extension/entitystore/extension_test.go b/extension/entitystore/extension_test.go index 668c8bd1a4..fea148847a 100644 --- a/extension/entitystore/extension_test.go +++ b/extension/entitystore/extension_test.go @@ -15,8 +15,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/sts" - "github.com/aws/aws-sdk-go/service/sts/stsiface" "github.com/jellydator/ttlcache/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -24,6 +22,7 @@ import ( "go.uber.org/zap/zapcore" "github.com/aws/amazon-cloudwatch-agent/internal/ec2metadataprovider" + "github.com/aws/amazon-cloudwatch-agent/plugins/processors/awsentity/entityattributes" 
"github.com/aws/amazon-cloudwatch-agent/sdk/service/cloudwatchlogs" "github.com/aws/amazon-cloudwatch-agent/translator/config" ) @@ -64,15 +63,6 @@ func (s *mockServiceProvider) getServiceNameAndSource() (string, string) { return "test-service-name", "UserConfiguration" } -type mockSTSClient struct { - stsiface.STSAPI - accountId string -} - -func (ms *mockSTSClient) GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error) { - return &sts.GetCallerIdentityOutput{Account: aws.String(ms.accountId)}, nil -} - type mockMetadataProvider struct { InstanceIdentityDocument *ec2metadata.EC2InstanceIdentityDocument Tags map[string]string @@ -288,25 +278,25 @@ func TestEntityStore_createServiceKeyAttributes(t *testing.T) { name: "NameAndEnvironmentSet", serviceAttr: ServiceAttribute{ServiceName: "test-service", Environment: "test-environment"}, want: map[string]*string{ - Environment: aws.String("test-environment"), - Name: aws.String("test-service"), - Type: aws.String(Service), + entityattributes.DeploymentEnvironment: aws.String("test-environment"), + entityattributes.ServiceName: aws.String("test-service"), + entityattributes.EntityType: aws.String(Service), }, }, { name: "OnlyNameSet", serviceAttr: ServiceAttribute{ServiceName: "test-service"}, want: map[string]*string{ - Name: aws.String("test-service"), - Type: aws.String(Service), + entityattributes.ServiceName: aws.String("test-service"), + entityattributes.EntityType: aws.String(Service), }, }, { name: "OnlyEnvironmentSet", serviceAttr: ServiceAttribute{Environment: "test-environment"}, want: map[string]*string{ - Environment: aws.String("test-environment"), - Type: aws.String(Service), + entityattributes.DeploymentEnvironment: aws.String("test-environment"), + entityattributes.EntityType: aws.String(Service), }, }, } @@ -332,10 +322,8 @@ func TestEntityStore_createLogFileRID(t *testing.T) { sp.On("logFileServiceAttribute", glob, group).Return(serviceAttr) e := EntityStore{ mode: 
config.ModeEC2, - ec2Info: EC2Info{InstanceID: instanceId}, + ec2Info: EC2Info{InstanceID: instanceId, AccountID: accountId}, serviceprovider: sp, - metadataprovider: mockMetadataProviderWithAccountId(accountId), - stsClient: &mockSTSClient{accountId: accountId}, nativeCredential: &session.Session{}, } @@ -343,9 +331,10 @@ func TestEntityStore_createLogFileRID(t *testing.T) { expectedEntity := cloudwatchlogs.Entity{ KeyAttributes: map[string]*string{ - Environment: aws.String("test-environment"), - Name: aws.String("test-service"), - Type: aws.String(Service), + entityattributes.DeploymentEnvironment: aws.String("test-environment"), + entityattributes.ServiceName: aws.String("test-service"), + entityattributes.EntityType: aws.String(Service), + entityattributes.AwsAccountId: aws.String(accountId), }, Attributes: map[string]*string{ InstanceIDKey: aws.String(instanceId), @@ -543,7 +532,6 @@ func TestEntityStore_GetMetricServiceNameSource(t *testing.T) { ec2Info: EC2Info{InstanceID: instanceId}, serviceprovider: sp, metadataprovider: mockMetadataProviderWithAccountId(accountId), - stsClient: &mockSTSClient{accountId: accountId}, nativeCredential: &session.Session{}, } diff --git a/plugins/outputs/cloudwatch/convert_otel.go b/plugins/outputs/cloudwatch/convert_otel.go index d91ce92c61..a318eaa265 100644 --- a/plugins/outputs/cloudwatch/convert_otel.go +++ b/plugins/outputs/cloudwatch/convert_otel.go @@ -195,7 +195,9 @@ func fetchEntityFields(resourceAttributes pcommon.Map) cloudwatch.Entity { processEntityAttributes(entityattributes.GetKeyAttributeEntityShortNameMap(), keyAttributesMap, resourceAttributes) processEntityAttributes(entityattributes.GetAttributeEntityShortNameMap(platformType), attributeMap, resourceAttributes) removeEntityFields(resourceAttributes) - + if _, ok := keyAttributesMap[entityattributes.AwsAccountId]; !ok { + return cloudwatch.Entity{} + } return cloudwatch.Entity{ KeyAttributes: keyAttributesMap, Attributes: attributeMap, diff --git 
a/plugins/outputs/cloudwatch/convert_otel_test.go b/plugins/outputs/cloudwatch/convert_otel_test.go index ae4a825f5b..458d72de3a 100644 --- a/plugins/outputs/cloudwatch/convert_otel_test.go +++ b/plugins/outputs/cloudwatch/convert_otel_test.go @@ -378,7 +378,7 @@ func TestProcessAndRemoveEntityAttributes(t *testing.T) { } } -func TestFetchEntityFields(t *testing.T) { +func TestFetchEntityFields_WithoutAccountID(t *testing.T) { resourceMetrics := pmetric.NewResourceMetrics() resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityType, "Service") resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityDeploymentEnvironment, "my-environment") @@ -390,11 +390,34 @@ func TestFetchEntityFields(t *testing.T) { resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityPlatformType, "AWS::EKS") assert.Equal(t, 8, resourceMetrics.Resource().Attributes().Len()) + expectedEntity := cloudwatch.Entity{ + KeyAttributes: nil, + Attributes: nil, + } + entity := fetchEntityFields(resourceMetrics.Resource().Attributes()) + assert.Equal(t, 0, resourceMetrics.Resource().Attributes().Len()) + assert.Equal(t, expectedEntity, entity) +} + +func TestFetchEntityFields_WithAccountID(t *testing.T) { + resourceMetrics := pmetric.NewResourceMetrics() + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityType, "Service") + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityDeploymentEnvironment, "my-environment") + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityServiceName, "my-service") + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityNode, "my-node") + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityCluster, "my-cluster") + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityNamespace, "my-namespace") + 
resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityWorkload, "my-workload") + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityPlatformType, "AWS::EKS") + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityAwsAccountId, "123456789") + assert.Equal(t, 9, resourceMetrics.Resource().Attributes().Len()) + expectedEntity := cloudwatch.Entity{ KeyAttributes: map[string]*string{ entityattributes.EntityType: aws.String(entityattributes.Service), entityattributes.ServiceName: aws.String("my-service"), entityattributes.DeploymentEnvironment: aws.String("my-environment"), + entityattributes.AwsAccountId: aws.String("123456789"), }, Attributes: map[string]*string{ entityattributes.Node: aws.String("my-node"), @@ -421,13 +444,15 @@ func TestFetchEntityFieldsOnK8s(t *testing.T) { resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityNamespace, "my-namespace") resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityWorkload, "my-workload") resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityPlatformType, "K8s") - assert.Equal(t, 8, resourceMetrics.Resource().Attributes().Len()) + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityAwsAccountId, "123456789") + assert.Equal(t, 9, resourceMetrics.Resource().Attributes().Len()) expectedEntity := cloudwatch.Entity{ KeyAttributes: map[string]*string{ entityattributes.EntityType: aws.String(entityattributes.Service), entityattributes.ServiceName: aws.String("my-service"), entityattributes.DeploymentEnvironment: aws.String("my-environment"), + entityattributes.AwsAccountId: aws.String("123456789"), }, Attributes: map[string]*string{ entityattributes.Node: aws.String("my-node"), @@ -448,13 +473,15 @@ func TestFetchEntityFieldsOnEc2(t *testing.T) { resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityDeploymentEnvironment, 
"my-environment") resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityServiceName, "my-service") resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityPlatformType, "AWS::EC2") - assert.Equal(t, 4, resourceMetrics.Resource().Attributes().Len()) + resourceMetrics.Resource().Attributes().PutStr(entityattributes.AttributeEntityAwsAccountId, "123456789") + assert.Equal(t, 5, resourceMetrics.Resource().Attributes().Len()) expectedEntity := cloudwatch.Entity{ KeyAttributes: map[string]*string{ entityattributes.EntityType: aws.String(entityattributes.Service), entityattributes.ServiceName: aws.String("my-service"), entityattributes.DeploymentEnvironment: aws.String("my-environment"), + entityattributes.AwsAccountId: aws.String("123456789"), }, Attributes: map[string]*string{ entityattributes.Platform: aws.String("AWS::EC2"), diff --git a/plugins/outputs/cloudwatchlogs/pusher.go b/plugins/outputs/cloudwatchlogs/pusher.go index 2c1036f1e8..ff0bb1dd0e 100644 --- a/plugins/outputs/cloudwatchlogs/pusher.go +++ b/plugins/outputs/cloudwatchlogs/pusher.go @@ -222,18 +222,15 @@ func (p *pusher) send() { if p.needSort { sort.Stable(ByTimestamp(p.events)) } - var entity *cloudwatchlogs.Entity - if p.logSrc != nil { - entity = p.logSrc.Entity() - } - input := &cloudwatchlogs.PutLogEventsInput{ LogEvents: p.events, LogGroupName: &p.Group, LogStreamName: &p.Stream, SequenceToken: p.sequenceToken, } - input.Entity = entity + if p.logSrc != nil { + input.Entity = p.logSrc.Entity() + } startTime := time.Now() diff --git a/plugins/outputs/cloudwatchlogs/pusher_test.go b/plugins/outputs/cloudwatchlogs/pusher_test.go index f97b9f22ec..fc3957e5cc 100644 --- a/plugins/outputs/cloudwatchlogs/pusher_test.go +++ b/plugins/outputs/cloudwatchlogs/pusher_test.go @@ -27,20 +27,26 @@ import ( type mockLogSrc struct { logs.LogSrc + returnEmpty bool } func (m *mockLogSrc) Entity() *cloudwatchlogs.Entity { - return &cloudwatchlogs.Entity{ + entity := 
&cloudwatchlogs.Entity{ Attributes: map[string]*string{ "PlatformType": aws.String("AWS::EC2"), "EC2.InstanceId": aws.String("i-123456789"), "EC2.AutoScalingGroup": aws.String("test-group"), }, KeyAttributes: map[string]*string{ - "Name": aws.String("myService"), - "Environment": aws.String("myEnvironment"), + "Name": aws.String("myService"), + "Environment": aws.String("myEnvironment"), + "AwsAccountId": aws.String("123456789"), }, } + if m.returnEmpty { + return nil + } + return entity } var wg sync.WaitGroup @@ -103,7 +109,7 @@ func (e evtMock) Done() { } } -func TestAddSingleEvent(t *testing.T) { +func TestAddSingleEvent_WithAccountId(t *testing.T) { var s svcMock called := false nst := "NEXT_SEQ_TOKEN" @@ -114,8 +120,9 @@ func TestAddSingleEvent(t *testing.T) { "EC2.AutoScalingGroup": aws.String("test-group"), }, KeyAttributes: map[string]*string{ - "Name": aws.String("myService"), - "Environment": aws.String("myEnvironment"), + "Name": aws.String("myService"), + "Environment": aws.String("myEnvironment"), + "AwsAccountId": aws.String("123456789"), }, } @@ -155,6 +162,48 @@ func TestAddSingleEvent(t *testing.T) { wg.Wait() } +func TestAddSingleEvent_WithoutAccountId(t *testing.T) { + var s svcMock + called := false + nst := "NEXT_SEQ_TOKEN" + + s.ple = func(in *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { + called = true + + if in.SequenceToken != nil { + t.Errorf("PutLogEvents called with wrong sequenceToken, first call should not provide any token") + } + + if *in.LogGroupName != "G" || *in.LogStreamName != "S" { + t.Errorf("PutLogEvents called with wrong group and stream: %v/%v", *in.LogGroupName, *in.LogStreamName) + } + + if len(in.LogEvents) != 1 || *in.LogEvents[0].Message != "MSG" { + t.Errorf("PutLogEvents called with incorrect message, got: '%v'", *in.LogEvents[0].Message) + } + require.Nil(t, in.Entity) + return &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: &nst, + }, nil + } + + stop, p := 
testPreparation(-1, &s, 1*time.Hour, maxRetryTimeout) + p.logSrc = &mockLogSrc{returnEmpty: true} + + p.AddEvent(evtMock{"MSG", time.Now(), nil}) + require.False(t, called, "PutLogEvents has been called too fast, it should wait until FlushTimeout.") + + p.FlushTimeout = 10 * time.Millisecond + p.resetFlushTimer() + + time.Sleep(3 * time.Second) + require.True(t, called, "PutLogEvents has not been called after FlushTimeout has been reached.") + require.NotNil(t, nst, *p.sequenceToken, "Pusher did not capture the NextSequenceToken") + + close(stop) + wg.Wait() +} + func TestStopPusherWouldDoFinalSend(t *testing.T) { var s svcMock called := false