From 2ffaac23839e28b9d2befd95f95c4e702826ff54 Mon Sep 17 00:00:00 2001 From: Christoph Hartmann Date: Mon, 12 Aug 2024 11:23:31 +0200 Subject: [PATCH] =?UTF-8?q?=E2=AD=90=EF=B8=8F=20AWS=20TimeStream=20LifeAna?= =?UTF-8?q?lytics=20resources=20(#4532)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .github/actions/spelling/expect.txt | 2 + providers/aws/connection/clients.go | 56 +++ providers/aws/go.mod | 3 + providers/aws/go.sum | 6 + providers/aws/resources/aws.lr | 48 ++- providers/aws/resources/aws.lr.go | 374 +++++++++++++++++++ providers/aws/resources/aws.lr.manifest.yaml | 79 ++++ providers/aws/resources/aws_timestream.go | 210 +++++++++++ 8 files changed, 777 insertions(+), 1 deletion(-) create mode 100644 providers/aws/resources/aws_timestream.go diff --git a/.github/actions/spelling/expect.txt b/.github/actions/spelling/expect.txt index 6ba7543316..0d1aca1d46 100644 --- a/.github/actions/spelling/expect.txt +++ b/.github/actions/spelling/expect.txt @@ -48,6 +48,7 @@ istio jira jsonbody labelmatchstatement +lifeanalytics loggingservice managedrulegroupstatement managedzone @@ -91,6 +92,7 @@ sqlserver targetgroup tde testutils +timestream toplevel tpu vdcs diff --git a/providers/aws/connection/clients.go b/providers/aws/connection/clients.go index 505051e178..ea25e0a4f9 100644 --- a/providers/aws/connection/clients.go +++ b/providers/aws/connection/clients.go @@ -48,6 +48,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/sns" "github.com/aws/aws-sdk-go-v2/service/sqs" "github.com/aws/aws-sdk-go-v2/service/ssm" + "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb" + "github.com/aws/aws-sdk-go-v2/service/timestreamwrite" "github.com/aws/aws-sdk-go-v2/service/wafv2" "github.com/rs/zerolog/log" ) @@ -776,6 +778,60 @@ func (t *AwsConnection) Neptune(region string) *neptune.Client { return client } +// TimestreamLifeAnalytics returns a Timestream client for Life Analytics +func (t *AwsConnection) 
TimestreamLifeAnalytics(region string) *timestreamwrite.Client { + // if no region value is sent in, use the configured region + if len(region) == 0 { + region = t.cfg.Region + } + cacheVal := "_timestreamwrite_" + region + + // check for cached client and return it if it exists + c, ok := t.clientcache.Load(cacheVal) + if ok { + log.Debug().Msg("use cached timestreamwrite client") + return c.Data.(*timestreamwrite.Client) + } + + // create the client + cfg := t.cfg.Copy() + cfg.Region = region + + // Create a Timestream Write client from just a session. + client := timestreamwrite.NewFromConfig(cfg) + + // cache it + t.clientcache.Store(cacheVal, &CacheEntry{Data: client}) + return client +} + +// TimestreamInfluxDB returns a Timestream client for InfluxDB +func (t *AwsConnection) TimestreamInfluxDB(region string) *timestreaminfluxdb.Client { + // if no region value is sent in, use the configured region + if len(region) == 0 { + region = t.cfg.Region + } + cacheVal := "_timestreaminfluxdb_" + region + + // check for cached client and return it if it exists + c, ok := t.clientcache.Load(cacheVal) + if ok { + log.Debug().Msg("use cached timestreaminfluxdb client") + return c.Data.(*timestreaminfluxdb.Client) + } + + // create the client + cfg := t.cfg.Copy() + cfg.Region = region + + // Create a Timestream InfluxDB client from just a session. 
+ client := timestreaminfluxdb.NewFromConfig(cfg) + + // cache it + t.clientcache.Store(cacheVal, &CacheEntry{Data: client}) + return client +} + func (t *AwsConnection) AccessAnalyzer(region string) *accessanalyzer.Client { // if no region value is sent in, use the configured region if len(region) == 0 { diff --git a/providers/aws/go.mod b/providers/aws/go.mod index 8fa57989c9..959a4a22c2 100644 --- a/providers/aws/go.mod +++ b/providers/aws/go.mod @@ -54,6 +54,8 @@ require ( github.com/aws/aws-sdk-go-v2/service/sqs v1.34.3 github.com/aws/aws-sdk-go-v2/service/ssm v1.52.4 github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 + github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.2.3 + github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.27.3 github.com/aws/aws-sdk-go-v2/service/wafv2 v1.51.4 github.com/aws/smithy-go v1.20.3 github.com/cockroachdb/errors v1.11.3 @@ -63,6 +65,7 @@ require ( github.com/spf13/afero v1.11.0 github.com/stretchr/testify v1.9.0 go.mondoo.com/cnquery/v11 v11.16.1 + golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa k8s.io/client-go v0.30.3 ) diff --git a/providers/aws/go.sum b/providers/aws/go.sum index c2c5033a17..a2c5a2234f 100644 --- a/providers/aws/go.sum +++ b/providers/aws/go.sum @@ -173,6 +173,10 @@ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 h1:yiwVzJW2ZxZTurVbYWA7QOrA github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4/go.mod h1:0oxfLkpz3rQ/CHlx5hB7H69YUpFiI1tql6Q6Ne+1bCw= github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 h1:ZsDKRLXGWHk8WdtyYMoGNO7bTudrvuKpDKgMVRlepGE= github.com/aws/aws-sdk-go-v2/service/sts v1.30.3/go.mod h1:zwySh8fpFyXp9yOr/KVzxOl8SRqgf/IDw5aUt9UKFcQ= +github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.2.3 h1:Qbimk+9ZyMxjyunIkdvaDeA/LLbeSV0NqurwC2D/gKg= +github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.2.3/go.mod h1:2AEQ9klGEJdMIg+bC1gnGGiJqKebIkhfwJyNYBYh9dg= +github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.27.3 h1:GbbpHIz5tBazjVOunsf6xcgruWFvj1DT+jUNyKDwK2s= 
+github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.27.3/go.mod h1:sXSJhu0vub083lif2S+g7fPocwVuqu9D9Bp1FEIYqOE= github.com/aws/aws-sdk-go-v2/service/wafv2 v1.51.4 h1:1khBA5uryBRJoCb4G2iR5RT06BkfPEjjDCHAiRb8P3Q= github.com/aws/aws-sdk-go-v2/service/wafv2 v1.51.4/go.mod h1:QpFImaPGKNwa+MiZ+oo6LbV1PVQBapc0CnrAMRScoxM= github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE= @@ -540,6 +544,8 @@ golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDf golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= diff --git a/providers/aws/resources/aws.lr b/providers/aws/resources/aws.lr index 0d50eb4786..52cd65dd6a 100644 --- a/providers/aws/resources/aws.lr +++ b/providers/aws/resources/aws.lr @@ -3428,4 +3428,50 @@ private aws.neptune.instance @defaults("arn name status"){ storageType string // Key store with which the instance is associated for TDE encryption tdeCredentialArn string -} \ No newline at end of file +} + +// Amazon Timestream for LifeAnalytics +aws.timestream.lifeanalytics @defaults("databases") { + // List of databases + databases() []aws.timestream.lifeanalytics.database + // List of database tables + tables() []aws.timestream.lifeanalytics.table +} + +// Amazon Timestream 
for LifeAnalytics database +private aws.timestream.lifeanalytics.database @defaults("name region") { + // ARN for the database + arn string + // Name of the database + name string + // KMS key used to encrypt the data stored in the database + kmsKeyId string + // Region where the database exists + region string + // Time when the database was created + createdAt time + // Time when the database was last updated + updatedAt time + // Total number of tables in database + tableCount int +} + +// Amazon Timestream for LifeAnalytics table +private aws.timestream.lifeanalytics.table @defaults("name region") { + // ARN for the table + arn string + // Name of the table + name string + // Name of the database + databaseName string + // Region where the table exists + region string + // Time when the table was created + createdAt time + // Time when the table was last updated + updatedAt time + // magnetic store properties for the table + magneticStoreWriteProperties dict + // retention duration properties for the table + retentionProperties dict +} diff --git a/providers/aws/resources/aws.lr.go b/providers/aws/resources/aws.lr.go index 82d437309f..869a4f2aba 100644 --- a/providers/aws/resources/aws.lr.go +++ b/providers/aws/resources/aws.lr.go @@ -754,6 +754,18 @@ func init() { // to override args, implement: initAwsNeptuneInstance(runtime *plugin.Runtime, args map[string]*llx.RawData) (map[string]*llx.RawData, plugin.Resource, error) Create: createAwsNeptuneInstance, }, + "aws.timestream.lifeanalytics": { + // to override args, implement: initAwsTimestreamLifeanalytics(runtime *plugin.Runtime, args map[string]*llx.RawData) (map[string]*llx.RawData, plugin.Resource, error) + Create: createAwsTimestreamLifeanalytics, + }, + "aws.timestream.lifeanalytics.database": { + // to override args, implement: initAwsTimestreamLifeanalyticsDatabase(runtime *plugin.Runtime, args map[string]*llx.RawData) (map[string]*llx.RawData, plugin.Resource, error) + Create: 
createAwsTimestreamLifeanalyticsDatabase, + }, + "aws.timestream.lifeanalytics.table": { + // to override args, implement: initAwsTimestreamLifeanalyticsTable(runtime *plugin.Runtime, args map[string]*llx.RawData) (map[string]*llx.RawData, plugin.Resource, error) + Create: createAwsTimestreamLifeanalyticsTable, + }, } } @@ -4827,6 +4839,57 @@ var getDataFields = map[string]func(r plugin.Resource) *plugin.DataRes{ "aws.neptune.instance.tdeCredentialArn": func(r plugin.Resource) *plugin.DataRes { return (r.(*mqlAwsNeptuneInstance).GetTdeCredentialArn()).ToDataRes(types.String) }, + "aws.timestream.lifeanalytics.databases": func(r plugin.Resource) *plugin.DataRes { + return (r.(*mqlAwsTimestreamLifeanalytics).GetDatabases()).ToDataRes(types.Array(types.Resource("aws.timestream.lifeanalytics.database"))) + }, + "aws.timestream.lifeanalytics.tables": func(r plugin.Resource) *plugin.DataRes { + return (r.(*mqlAwsTimestreamLifeanalytics).GetTables()).ToDataRes(types.Array(types.Resource("aws.timestream.lifeanalytics.table"))) + }, + "aws.timestream.lifeanalytics.database.arn": func(r plugin.Resource) *plugin.DataRes { + return (r.(*mqlAwsTimestreamLifeanalyticsDatabase).GetArn()).ToDataRes(types.String) + }, + "aws.timestream.lifeanalytics.database.name": func(r plugin.Resource) *plugin.DataRes { + return (r.(*mqlAwsTimestreamLifeanalyticsDatabase).GetName()).ToDataRes(types.String) + }, + "aws.timestream.lifeanalytics.database.kmsKeyId": func(r plugin.Resource) *plugin.DataRes { + return (r.(*mqlAwsTimestreamLifeanalyticsDatabase).GetKmsKeyId()).ToDataRes(types.String) + }, + "aws.timestream.lifeanalytics.database.region": func(r plugin.Resource) *plugin.DataRes { + return (r.(*mqlAwsTimestreamLifeanalyticsDatabase).GetRegion()).ToDataRes(types.String) + }, + "aws.timestream.lifeanalytics.database.createdAt": func(r plugin.Resource) *plugin.DataRes { + return (r.(*mqlAwsTimestreamLifeanalyticsDatabase).GetCreatedAt()).ToDataRes(types.Time) + }, + 
"aws.timestream.lifeanalytics.database.updatedAt": func(r plugin.Resource) *plugin.DataRes { + return (r.(*mqlAwsTimestreamLifeanalyticsDatabase).GetUpdatedAt()).ToDataRes(types.Time) + }, + "aws.timestream.lifeanalytics.database.tableCount": func(r plugin.Resource) *plugin.DataRes { + return (r.(*mqlAwsTimestreamLifeanalyticsDatabase).GetTableCount()).ToDataRes(types.Int) + }, + "aws.timestream.lifeanalytics.table.arn": func(r plugin.Resource) *plugin.DataRes { + return (r.(*mqlAwsTimestreamLifeanalyticsTable).GetArn()).ToDataRes(types.String) + }, + "aws.timestream.lifeanalytics.table.name": func(r plugin.Resource) *plugin.DataRes { + return (r.(*mqlAwsTimestreamLifeanalyticsTable).GetName()).ToDataRes(types.String) + }, + "aws.timestream.lifeanalytics.table.databaseName": func(r plugin.Resource) *plugin.DataRes { + return (r.(*mqlAwsTimestreamLifeanalyticsTable).GetDatabaseName()).ToDataRes(types.String) + }, + "aws.timestream.lifeanalytics.table.region": func(r plugin.Resource) *plugin.DataRes { + return (r.(*mqlAwsTimestreamLifeanalyticsTable).GetRegion()).ToDataRes(types.String) + }, + "aws.timestream.lifeanalytics.table.createdAt": func(r plugin.Resource) *plugin.DataRes { + return (r.(*mqlAwsTimestreamLifeanalyticsTable).GetCreatedAt()).ToDataRes(types.Time) + }, + "aws.timestream.lifeanalytics.table.updatedAt": func(r plugin.Resource) *plugin.DataRes { + return (r.(*mqlAwsTimestreamLifeanalyticsTable).GetUpdatedAt()).ToDataRes(types.Time) + }, + "aws.timestream.lifeanalytics.table.magneticStoreWriteProperties": func(r plugin.Resource) *plugin.DataRes { + return (r.(*mqlAwsTimestreamLifeanalyticsTable).GetMagneticStoreWriteProperties()).ToDataRes(types.Dict) + }, + "aws.timestream.lifeanalytics.table.retentionProperties": func(r plugin.Resource) *plugin.DataRes { + return (r.(*mqlAwsTimestreamLifeanalyticsTable).GetRetentionProperties()).ToDataRes(types.Dict) + }, } func GetData(resource plugin.Resource, field string, args map[string]*llx.RawData) 
*plugin.DataRes { @@ -10915,6 +10978,86 @@ var setDataFields = map[string]func(r plugin.Resource, v *llx.RawData) bool { r.(*mqlAwsNeptuneInstance).TdeCredentialArn, ok = plugin.RawToTValue[string](v.Value, v.Error) return }, + "aws.timestream.lifeanalytics.__id": func(r plugin.Resource, v *llx.RawData) (ok bool) { + r.(*mqlAwsTimestreamLifeanalytics).__id, ok = v.Value.(string) + return + }, + "aws.timestream.lifeanalytics.databases": func(r plugin.Resource, v *llx.RawData) (ok bool) { + r.(*mqlAwsTimestreamLifeanalytics).Databases, ok = plugin.RawToTValue[[]interface{}](v.Value, v.Error) + return + }, + "aws.timestream.lifeanalytics.tables": func(r plugin.Resource, v *llx.RawData) (ok bool) { + r.(*mqlAwsTimestreamLifeanalytics).Tables, ok = plugin.RawToTValue[[]interface{}](v.Value, v.Error) + return + }, + "aws.timestream.lifeanalytics.database.__id": func(r plugin.Resource, v *llx.RawData) (ok bool) { + r.(*mqlAwsTimestreamLifeanalyticsDatabase).__id, ok = v.Value.(string) + return + }, + "aws.timestream.lifeanalytics.database.arn": func(r plugin.Resource, v *llx.RawData) (ok bool) { + r.(*mqlAwsTimestreamLifeanalyticsDatabase).Arn, ok = plugin.RawToTValue[string](v.Value, v.Error) + return + }, + "aws.timestream.lifeanalytics.database.name": func(r plugin.Resource, v *llx.RawData) (ok bool) { + r.(*mqlAwsTimestreamLifeanalyticsDatabase).Name, ok = plugin.RawToTValue[string](v.Value, v.Error) + return + }, + "aws.timestream.lifeanalytics.database.kmsKeyId": func(r plugin.Resource, v *llx.RawData) (ok bool) { + r.(*mqlAwsTimestreamLifeanalyticsDatabase).KmsKeyId, ok = plugin.RawToTValue[string](v.Value, v.Error) + return + }, + "aws.timestream.lifeanalytics.database.region": func(r plugin.Resource, v *llx.RawData) (ok bool) { + r.(*mqlAwsTimestreamLifeanalyticsDatabase).Region, ok = plugin.RawToTValue[string](v.Value, v.Error) + return + }, + "aws.timestream.lifeanalytics.database.createdAt": func(r plugin.Resource, v *llx.RawData) (ok bool) { + 
r.(*mqlAwsTimestreamLifeanalyticsDatabase).CreatedAt, ok = plugin.RawToTValue[*time.Time](v.Value, v.Error) + return + }, + "aws.timestream.lifeanalytics.database.updatedAt": func(r plugin.Resource, v *llx.RawData) (ok bool) { + r.(*mqlAwsTimestreamLifeanalyticsDatabase).UpdatedAt, ok = plugin.RawToTValue[*time.Time](v.Value, v.Error) + return + }, + "aws.timestream.lifeanalytics.database.tableCount": func(r plugin.Resource, v *llx.RawData) (ok bool) { + r.(*mqlAwsTimestreamLifeanalyticsDatabase).TableCount, ok = plugin.RawToTValue[int64](v.Value, v.Error) + return + }, + "aws.timestream.lifeanalytics.table.__id": func(r plugin.Resource, v *llx.RawData) (ok bool) { + r.(*mqlAwsTimestreamLifeanalyticsTable).__id, ok = v.Value.(string) + return + }, + "aws.timestream.lifeanalytics.table.arn": func(r plugin.Resource, v *llx.RawData) (ok bool) { + r.(*mqlAwsTimestreamLifeanalyticsTable).Arn, ok = plugin.RawToTValue[string](v.Value, v.Error) + return + }, + "aws.timestream.lifeanalytics.table.name": func(r plugin.Resource, v *llx.RawData) (ok bool) { + r.(*mqlAwsTimestreamLifeanalyticsTable).Name, ok = plugin.RawToTValue[string](v.Value, v.Error) + return + }, + "aws.timestream.lifeanalytics.table.databaseName": func(r plugin.Resource, v *llx.RawData) (ok bool) { + r.(*mqlAwsTimestreamLifeanalyticsTable).DatabaseName, ok = plugin.RawToTValue[string](v.Value, v.Error) + return + }, + "aws.timestream.lifeanalytics.table.region": func(r plugin.Resource, v *llx.RawData) (ok bool) { + r.(*mqlAwsTimestreamLifeanalyticsTable).Region, ok = plugin.RawToTValue[string](v.Value, v.Error) + return + }, + "aws.timestream.lifeanalytics.table.createdAt": func(r plugin.Resource, v *llx.RawData) (ok bool) { + r.(*mqlAwsTimestreamLifeanalyticsTable).CreatedAt, ok = plugin.RawToTValue[*time.Time](v.Value, v.Error) + return + }, + "aws.timestream.lifeanalytics.table.updatedAt": func(r plugin.Resource, v *llx.RawData) (ok bool) { + r.(*mqlAwsTimestreamLifeanalyticsTable).UpdatedAt, ok = 
plugin.RawToTValue[*time.Time](v.Value, v.Error) + return + }, + "aws.timestream.lifeanalytics.table.magneticStoreWriteProperties": func(r plugin.Resource, v *llx.RawData) (ok bool) { + r.(*mqlAwsTimestreamLifeanalyticsTable).MagneticStoreWriteProperties, ok = plugin.RawToTValue[interface{}](v.Value, v.Error) + return + }, + "aws.timestream.lifeanalytics.table.retentionProperties": func(r plugin.Resource, v *llx.RawData) (ok bool) { + r.(*mqlAwsTimestreamLifeanalyticsTable).RetentionProperties, ok = plugin.RawToTValue[interface{}](v.Value, v.Error) + return + }, } func SetData(resource plugin.Resource, field string, val *llx.RawData) error { @@ -27873,3 +28016,234 @@ func (c *mqlAwsNeptuneInstance) GetStorageType() *plugin.TValue[string] { func (c *mqlAwsNeptuneInstance) GetTdeCredentialArn() *plugin.TValue[string] { return &c.TdeCredentialArn } + +// mqlAwsTimestreamLifeanalytics for the aws.timestream.lifeanalytics resource +type mqlAwsTimestreamLifeanalytics struct { + MqlRuntime *plugin.Runtime + __id string + // optional: if you define mqlAwsTimestreamLifeanalyticsInternal it will be used here + Databases plugin.TValue[[]interface{}] + Tables plugin.TValue[[]interface{}] +} + +// createAwsTimestreamLifeanalytics creates a new instance of this resource +func createAwsTimestreamLifeanalytics(runtime *plugin.Runtime, args map[string]*llx.RawData) (plugin.Resource, error) { + res := &mqlAwsTimestreamLifeanalytics{ + MqlRuntime: runtime, + } + + err := SetAllData(res, args) + if err != nil { + return res, err + } + + if res.__id == "" { + res.__id, err = res.id() + if err != nil { + return nil, err + } + } + + if runtime.HasRecording { + args, err = runtime.ResourceFromRecording("aws.timestream.lifeanalytics", res.__id) + if err != nil || args == nil { + return res, err + } + return res, SetAllData(res, args) + } + + return res, nil +} + +func (c *mqlAwsTimestreamLifeanalytics) MqlName() string { + return "aws.timestream.lifeanalytics" +} + +func (c 
*mqlAwsTimestreamLifeanalytics) MqlID() string { + return c.__id +} + +func (c *mqlAwsTimestreamLifeanalytics) GetDatabases() *plugin.TValue[[]interface{}] { + return plugin.GetOrCompute[[]interface{}](&c.Databases, func() ([]interface{}, error) { + if c.MqlRuntime.HasRecording { + d, err := c.MqlRuntime.FieldResourceFromRecording("aws.timestream.lifeanalytics", c.__id, "databases") + if err != nil { + return nil, err + } + if d != nil { + return d.Value.([]interface{}), nil + } + } + + return c.databases() + }) +} + +func (c *mqlAwsTimestreamLifeanalytics) GetTables() *plugin.TValue[[]interface{}] { + return plugin.GetOrCompute[[]interface{}](&c.Tables, func() ([]interface{}, error) { + if c.MqlRuntime.HasRecording { + d, err := c.MqlRuntime.FieldResourceFromRecording("aws.timestream.lifeanalytics", c.__id, "tables") + if err != nil { + return nil, err + } + if d != nil { + return d.Value.([]interface{}), nil + } + } + + return c.tables() + }) +} + +// mqlAwsTimestreamLifeanalyticsDatabase for the aws.timestream.lifeanalytics.database resource +type mqlAwsTimestreamLifeanalyticsDatabase struct { + MqlRuntime *plugin.Runtime + __id string + // optional: if you define mqlAwsTimestreamLifeanalyticsDatabaseInternal it will be used here + Arn plugin.TValue[string] + Name plugin.TValue[string] + KmsKeyId plugin.TValue[string] + Region plugin.TValue[string] + CreatedAt plugin.TValue[*time.Time] + UpdatedAt plugin.TValue[*time.Time] + TableCount plugin.TValue[int64] +} + +// createAwsTimestreamLifeanalyticsDatabase creates a new instance of this resource +func createAwsTimestreamLifeanalyticsDatabase(runtime *plugin.Runtime, args map[string]*llx.RawData) (plugin.Resource, error) { + res := &mqlAwsTimestreamLifeanalyticsDatabase{ + MqlRuntime: runtime, + } + + err := SetAllData(res, args) + if err != nil { + return res, err + } + + // to override __id implement: id() (string, error) + + if runtime.HasRecording { + args, err = 
runtime.ResourceFromRecording("aws.timestream.lifeanalytics.database", res.__id) + if err != nil || args == nil { + return res, err + } + return res, SetAllData(res, args) + } + + return res, nil +} + +func (c *mqlAwsTimestreamLifeanalyticsDatabase) MqlName() string { + return "aws.timestream.lifeanalytics.database" +} + +func (c *mqlAwsTimestreamLifeanalyticsDatabase) MqlID() string { + return c.__id +} + +func (c *mqlAwsTimestreamLifeanalyticsDatabase) GetArn() *plugin.TValue[string] { + return &c.Arn +} + +func (c *mqlAwsTimestreamLifeanalyticsDatabase) GetName() *plugin.TValue[string] { + return &c.Name +} + +func (c *mqlAwsTimestreamLifeanalyticsDatabase) GetKmsKeyId() *plugin.TValue[string] { + return &c.KmsKeyId +} + +func (c *mqlAwsTimestreamLifeanalyticsDatabase) GetRegion() *plugin.TValue[string] { + return &c.Region +} + +func (c *mqlAwsTimestreamLifeanalyticsDatabase) GetCreatedAt() *plugin.TValue[*time.Time] { + return &c.CreatedAt +} + +func (c *mqlAwsTimestreamLifeanalyticsDatabase) GetUpdatedAt() *plugin.TValue[*time.Time] { + return &c.UpdatedAt +} + +func (c *mqlAwsTimestreamLifeanalyticsDatabase) GetTableCount() *plugin.TValue[int64] { + return &c.TableCount +} + +// mqlAwsTimestreamLifeanalyticsTable for the aws.timestream.lifeanalytics.table resource +type mqlAwsTimestreamLifeanalyticsTable struct { + MqlRuntime *plugin.Runtime + __id string + // optional: if you define mqlAwsTimestreamLifeanalyticsTableInternal it will be used here + Arn plugin.TValue[string] + Name plugin.TValue[string] + DatabaseName plugin.TValue[string] + Region plugin.TValue[string] + CreatedAt plugin.TValue[*time.Time] + UpdatedAt plugin.TValue[*time.Time] + MagneticStoreWriteProperties plugin.TValue[interface{}] + RetentionProperties plugin.TValue[interface{}] +} + +// createAwsTimestreamLifeanalyticsTable creates a new instance of this resource +func createAwsTimestreamLifeanalyticsTable(runtime *plugin.Runtime, args map[string]*llx.RawData) (plugin.Resource, error) { 
+ res := &mqlAwsTimestreamLifeanalyticsTable{ + MqlRuntime: runtime, + } + + err := SetAllData(res, args) + if err != nil { + return res, err + } + + // to override __id implement: id() (string, error) + + if runtime.HasRecording { + args, err = runtime.ResourceFromRecording("aws.timestream.lifeanalytics.table", res.__id) + if err != nil || args == nil { + return res, err + } + return res, SetAllData(res, args) + } + + return res, nil +} + +func (c *mqlAwsTimestreamLifeanalyticsTable) MqlName() string { + return "aws.timestream.lifeanalytics.table" +} + +func (c *mqlAwsTimestreamLifeanalyticsTable) MqlID() string { + return c.__id +} + +func (c *mqlAwsTimestreamLifeanalyticsTable) GetArn() *plugin.TValue[string] { + return &c.Arn +} + +func (c *mqlAwsTimestreamLifeanalyticsTable) GetName() *plugin.TValue[string] { + return &c.Name +} + +func (c *mqlAwsTimestreamLifeanalyticsTable) GetDatabaseName() *plugin.TValue[string] { + return &c.DatabaseName +} + +func (c *mqlAwsTimestreamLifeanalyticsTable) GetRegion() *plugin.TValue[string] { + return &c.Region +} + +func (c *mqlAwsTimestreamLifeanalyticsTable) GetCreatedAt() *plugin.TValue[*time.Time] { + return &c.CreatedAt +} + +func (c *mqlAwsTimestreamLifeanalyticsTable) GetUpdatedAt() *plugin.TValue[*time.Time] { + return &c.UpdatedAt +} + +func (c *mqlAwsTimestreamLifeanalyticsTable) GetMagneticStoreWriteProperties() *plugin.TValue[interface{}] { + return &c.MagneticStoreWriteProperties +} + +func (c *mqlAwsTimestreamLifeanalyticsTable) GetRetentionProperties() *plugin.TValue[interface{}] { + return &c.RetentionProperties +} diff --git a/providers/aws/resources/aws.lr.manifest.yaml b/providers/aws/resources/aws.lr.manifest.yaml index 89c42bf907..8b68e2a666 100755 --- a/providers/aws/resources/aws.lr.manifest.yaml +++ b/providers/aws/resources/aws.lr.manifest.yaml @@ -2989,6 +2989,85 @@ resources: platform: name: - aws + aws.timestream: + fields: + backups: {} + databases: {} + tables: {} + min_mondoo_version: 9.0.0 + 
platform: + name: + - aws + aws.timestream.backup: + fields: + arn: {} + createdAt: {} + name: {} + region: {} + updatedAt: {} + min_mondoo_version: 9.0.0 + platform: + name: + - aws + aws.timestream.database: + fields: + arn: {} + createdAt: {} + name: {} + region: {} + updatedAt: {} + min_mondoo_version: 9.0.0 + platform: + name: + - aws + aws.timestream.lifeanalytics: + fields: + databases: {} + tables: {} + min_mondoo_version: 9.0.0 + platform: + name: + - aws + aws.timestream.lifeanalytics.database: + fields: + arn: {} + createdAt: {} + kmsKeyId: {} + name: {} + region: {} + tableCount: {} + updatedAt: {} + is_private: true + min_mondoo_version: 9.0.0 + platform: + name: + - aws + aws.timestream.lifeanalytics.table: + fields: + arn: {} + createdAt: {} + databaseName: {} + magneticStoreWriteProperties: {} + name: {} + region: {} + retentionProperties: {} + updatedAt: {} + is_private: true + min_mondoo_version: 9.0.0 + platform: + name: + - aws + aws.timestream.table: + fields: + arn: {} + createdAt: {} + name: {} + region: {} + updatedAt: {} + min_mondoo_version: 9.0.0 + platform: + name: + - aws aws.vpc: fields: arn: {} diff --git a/providers/aws/resources/aws_timestream.go b/providers/aws/resources/aws_timestream.go new file mode 100644 index 0000000000..4efb75578b --- /dev/null +++ b/providers/aws/resources/aws_timestream.go @@ -0,0 +1,210 @@ +// Copyright (c) Mondoo, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package resources + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/service/timestreamwrite" + "github.com/rs/zerolog/log" + "go.mondoo.com/cnquery/v11/llx" + "go.mondoo.com/cnquery/v11/providers-sdk/v1/util/convert" + "go.mondoo.com/cnquery/v11/providers-sdk/v1/util/jobpool" + "go.mondoo.com/cnquery/v11/providers/aws/connection" + "golang.org/x/exp/slices" +) + +// AWS TimeStream LifeAnalytics is not available in all regions +var timeStreamLifeRegions = []string{ + "us-gov-west-1", + "ap-south-1", + "ap-northeast-1", + // "ap-southeast-1", // only InfluxDB is available + "ap-southeast-2", + "eu-central-1", + "eu-west-1", + // "eu-north-1", // only InfluxDB is available + "us-east-1", + "us-east-2", + "us-west-2", +} + +func (a *mqlAwsTimestreamLifeanalytics) id() (string, error) { + return "aws.timestream.lifeanalytics", nil +} + +func (a *mqlAwsTimestreamLifeanalytics) databases() ([]interface{}, error) { + conn := a.MqlRuntime.Connection.(*connection.AwsConnection) + res := []interface{}{} + poolOfJobs := jobpool.CreatePool(a.getDatabases(conn), 5) + poolOfJobs.Run() + + // check for errors + if poolOfJobs.HasErrors() { + return nil, poolOfJobs.GetErrors() + } + // get all the results + for i := range poolOfJobs.Jobs { + if poolOfJobs.Jobs[i].Result != nil { + res = append(res, poolOfJobs.Jobs[i].Result.([]interface{})...) 
+ } + } + + return res, nil +} + +func (a *mqlAwsTimestreamLifeanalytics) getDatabases(conn *connection.AwsConnection) []*jobpool.Job { + tasks := make([]*jobpool.Job, 0) + regions, err := conn.Regions() + if err != nil { + return []*jobpool.Job{{Err: err}} + } + + for _, region := range regions { + regionVal := region + if !slices.Contains(timeStreamLifeRegions, regionVal) { + log.Debug().Str("region", regionVal).Msg("skipping region since timestream is not available in this region") + continue + } + f := func() (jobpool.JobResult, error) { + log.Debug().Msgf("timestream>getDatabases>calling aws with region %s", regionVal) + + svc := conn.TimestreamLifeAnalytics(regionVal) + ctx := context.Background() + res := []interface{}{} + + var marker *string + for { + resp, err := svc.ListDatabases(ctx, &timestreamwrite.ListDatabasesInput{ + NextToken: marker, + }) + if err != nil { + if Is400AccessDeniedError(err) { + log.Warn().Str("region", regionVal).Msg("error accessing region for AWS API") + return res, nil + } + return nil, err + } + if len(resp.Databases) == 0 { + break + } + for i := range resp.Databases { + database := resp.Databases[i] + + mqlDatabase, err := CreateResource(a.MqlRuntime, "aws.timestream.lifeanalytics.database", + map[string]*llx.RawData{ + "__id": llx.StringDataPtr(database.Arn), + "arn": llx.StringDataPtr(database.Arn), + "name": llx.StringDataPtr(database.DatabaseName), + "kmsKeyId": llx.StringDataPtr(database.KmsKeyId), + "region": llx.StringData(regionVal), + "createdAt": llx.TimeDataPtr(database.CreationTime), + "updatedAt": llx.TimeDataPtr(database.LastUpdatedTime), + "tableCount": llx.IntData(database.TableCount), + }) + if err != nil { + return nil, err + } + res = append(res, mqlDatabase) + } + if resp.NextToken == nil || *resp.NextToken == "" { + break + } + marker = resp.NextToken + } + return jobpool.JobResult(res), nil + } + tasks = append(tasks, jobpool.NewJob(f)) + } + return tasks +} + +func (a *mqlAwsTimestreamLifeanalytics) 
tables() ([]interface{}, error) { + conn := a.MqlRuntime.Connection.(*connection.AwsConnection) + res := []interface{}{} + poolOfJobs := jobpool.CreatePool(a.getTables(conn), 5) + poolOfJobs.Run() + + // check for errors + if poolOfJobs.HasErrors() { + return nil, poolOfJobs.GetErrors() + } + // get all the results + for i := range poolOfJobs.Jobs { + if poolOfJobs.Jobs[i].Result != nil { + res = append(res, poolOfJobs.Jobs[i].Result.([]interface{})...) + } + } + + return res, nil +} + +func (a *mqlAwsTimestreamLifeanalytics) getTables(conn *connection.AwsConnection) []*jobpool.Job { + tasks := make([]*jobpool.Job, 0) + regions, err := conn.Regions() + if err != nil { + return []*jobpool.Job{{Err: err}} + } + + for _, region := range regions { + regionVal := region + if !slices.Contains(timeStreamLifeRegions, regionVal) { + log.Debug().Str("region", regionVal).Msg("skipping region since timestream is not available in this region") + continue + } + f := func() (jobpool.JobResult, error) { + log.Debug().Msgf("timestream>getTables>calling aws with region %s", regionVal) + + svc := conn.TimestreamLifeAnalytics(regionVal) + ctx := context.Background() + res := []interface{}{} + + var marker *string + for { + resp, err := svc.ListTables(ctx, &timestreamwrite.ListTablesInput{ + NextToken: marker, + }) + if err != nil { + if Is400AccessDeniedError(err) { + log.Warn().Str("region", regionVal).Msg("error accessing region for AWS API") + return res, nil + } + return nil, err + } + if len(resp.Tables) == 0 { + break + } + for i := range resp.Tables { + table := resp.Tables[i] + + magneticStoreProperties, _ := convert.JsonToDictSlice(table.MagneticStoreWriteProperties) + retentionProperties, _ := convert.JsonToDictSlice(table.RetentionProperties) + + mqlTable, err := CreateResource(a.MqlRuntime, "aws.timestream.lifeanalytics.table", + map[string]*llx.RawData{ + "__id": llx.StringDataPtr(table.Arn), + "arn": llx.StringDataPtr(table.Arn), + "databaseName": 
llx.StringDataPtr(table.DatabaseName), + "name": llx.StringDataPtr(table.TableName), + "createdAt": llx.TimeDataPtr(table.CreationTime), + "updatedAt": llx.TimeDataPtr(table.LastUpdatedTime), + "magneticStoreWriteProperties": llx.DictData(magneticStoreProperties), + "retentionProperties": llx.DictData(retentionProperties), + "region": llx.StringData(regionVal), + }) + if err != nil { + return nil, err + } + res = append(res, mqlTable) + } + if resp.NextToken == nil || *resp.NextToken == "" { + break + } + marker = resp.NextToken + } + return jobpool.JobResult(res), nil + } + tasks = append(tasks, jobpool.NewJob(f)) + } + return tasks +}