From 0922e489c964a4f48c506eee2a5808de561eb75f Mon Sep 17 00:00:00 2001 From: OwnerBe Date: Fri, 14 Jun 2024 17:30:41 +1200 Subject: [PATCH 1/2] add mwaa_environment Signed-off-by: OwnerBe --- .../v1beta1/zz_environment_terraformed.go | 129 ++ apis/mwaa/v1beta1/zz_environment_types.go | 676 ++++++++ .../v1beta1/zz_generated.conversion_hubs.go | 10 + apis/mwaa/v1beta1/zz_generated.deepcopy.go | 1440 ++++++++++++++++ apis/mwaa/v1beta1/zz_generated.managed.go | 68 + apis/mwaa/v1beta1/zz_generated.managedlist.go | 17 + apis/mwaa/v1beta1/zz_generated.resolvers.go | 145 ++ apis/mwaa/v1beta1/zz_groupversion_info.go | 32 + apis/zz_register.go | 2 + cmd/provider/mwaa/zz_main.go | 223 +++ config/externalname.go | 5 + config/generated.lst | 1 + config/mwaa/config.go | 35 + config/registry.go | 6 +- .../mwaa/v1beta1/environment.yaml | 22 + examples/mwaa/environment.yaml | 272 +++ .../mwaa/environment/zz_controller.go | 95 ++ internal/controller/zz_monolith_setup.go | 2 + internal/controller/zz_mwaa_setup.go | 26 + .../mwaa.aws.upbound.io_environments.yaml | 1492 +++++++++++++++++ 20 files changed, 4695 insertions(+), 3 deletions(-) create mode 100755 apis/mwaa/v1beta1/zz_environment_terraformed.go create mode 100755 apis/mwaa/v1beta1/zz_environment_types.go create mode 100755 apis/mwaa/v1beta1/zz_generated.conversion_hubs.go create mode 100644 apis/mwaa/v1beta1/zz_generated.deepcopy.go create mode 100644 apis/mwaa/v1beta1/zz_generated.managed.go create mode 100644 apis/mwaa/v1beta1/zz_generated.managedlist.go create mode 100644 apis/mwaa/v1beta1/zz_generated.resolvers.go create mode 100755 apis/mwaa/v1beta1/zz_groupversion_info.go create mode 100644 cmd/provider/mwaa/zz_main.go create mode 100644 config/mwaa/config.go create mode 100644 examples-generated/mwaa/v1beta1/environment.yaml create mode 100644 examples/mwaa/environment.yaml create mode 100755 internal/controller/mwaa/environment/zz_controller.go create mode 100755 internal/controller/zz_mwaa_setup.go create mode 100644 package/crds/mwaa.aws.upbound.io_environments.yaml diff --git a/apis/mwaa/v1beta1/zz_environment_terraformed.go b/apis/mwaa/v1beta1/zz_environment_terraformed.go new file mode 100755 index 0000000000..5139a35661 --- /dev/null +++ b/apis/mwaa/v1beta1/zz_environment_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+
+package v1beta1
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this Environment
+func (mg *Environment) GetTerraformResourceType() string {
+	return "aws_mwaa_environment"
+}
+
+// GetConnectionDetailsMapping for this Environment
+func (tr *Environment) GetConnectionDetailsMapping() map[string]string {
+	return map[string]string{"airflow_configuration_options": "airflowConfigurationOptionsSecretRef"}
+}
+
+// GetObservation of this Environment
+func (tr *Environment) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this Environment
+func (tr *Environment) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this Environment
+func (tr *Environment) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Environment
+func (tr *Environment) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Environment
+func (tr *Environment) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Environment
+func (tr *Environment) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Environment
+func (tr *Environment) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, since we
+	// don't want to overwrite the forProvider fields with the
+	// initProvider fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Environment using its observed tfState.
+// Returns true if there are any spec changes for the resource.
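+//
+// A minimal usage sketch (editor's note, not part of the generated API; the
+// attribute payload below is hypothetical, though "max_workers" is a real
+// attribute of aws_mwaa_environment):
+//
+//	env := &Environment{}
+//	changed, err := env.LateInitialize([]byte(`{"max_workers": 10}`))
+//	// changed == true when a previously unset spec.forProvider field
+//	// (here maxWorkers) was filled in from the observed state.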
+func (tr *Environment) LateInitialize(attrs []byte) (bool, error) { + params := &EnvironmentParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Environment) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/mwaa/v1beta1/zz_environment_types.go b/apis/mwaa/v1beta1/zz_environment_types.go new file mode 100755 index 0000000000..316211bd8f --- /dev/null +++ b/apis/mwaa/v1beta1/zz_environment_types.go @@ -0,0 +1,676 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DagProcessingLogsInitParameters struct { + + // Enabling or disabling the collection of logs + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Logging level. Valid values: CRITICAL, ERROR, WARNING, INFO, DEBUG. Will be INFO by default. + LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"` +} + +type DagProcessingLogsObservation struct { + + // Provides the ARN for the CloudWatch group where the logs will be published + CloudWatchLogGroupArn *string `json:"cloudWatchLogGroupArn,omitempty" tf:"cloud_watch_log_group_arn,omitempty"` + + // Enabling or disabling the collection of logs + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Logging level. Valid values: CRITICAL, ERROR, WARNING, INFO, DEBUG. Will be INFO by default. + LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"` +} + +type DagProcessingLogsParameters struct { + + // Enabling or disabling the collection of logs + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Logging level. Valid values: CRITICAL, ERROR, WARNING, INFO, DEBUG. Will be INFO by default. + // +kubebuilder:validation:Optional + LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"` +} + +type EnvironmentInitParameters struct { + AirflowConfigurationOptions map[string]*string `json:"airflowConfigurationOptionsSecretRef,omitempty" tf:"-"` + + // Airflow version of your environment, will be set by default to the latest version that MWAA supports. + AirflowVersion *string `json:"airflowVersion,omitempty" tf:"airflow_version,omitempty"` + + // The relative path to the DAG folder on your Amazon S3 storage bucket. For example, dags. For more information, see Importing DAGs on Amazon MWAA. + DagS3Path *string `json:"dagS3Path,omitempty" tf:"dag_s3_path,omitempty"` + + EndpointManagement *string `json:"endpointManagement,omitempty" tf:"endpoint_management,omitempty"` + + // Environment class for the cluster. Possible options are mw1.small, mw1.medium, mw1.large. Will be set by default to mw1.small. Please check the AWS Pricing for more information about the environment classes. 
+ EnvironmentClass *string `json:"environmentClass,omitempty" tf:"environment_class,omitempty"` + + // The Amazon Resource Name (ARN) of the task execution role that the Amazon MWAA and its environment can assume. Check the official AWS documentation for the detailed role specification. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + ExecutionRoleArn *string `json:"executionRoleArn,omitempty" tf:"execution_role_arn,omitempty"` + + // Reference to a Role in iam to populate executionRoleArn. + // +kubebuilder:validation:Optional + ExecutionRoleArnRef *v1.Reference `json:"executionRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate executionRoleArn. + // +kubebuilder:validation:Optional + ExecutionRoleArnSelector *v1.Selector `json:"executionRoleArnSelector,omitempty" tf:"-"` + + // The Amazon Resource Name (ARN) of your KMS key that you want to use for encryption. Will be set to the ARN of the managed KMS key aws/airflow by default. Please check the Official Documentation for more information. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + KMSKey *string `json:"kmsKey,omitempty" tf:"kms_key,omitempty"` + + // Reference to a Key in kms to populate kmsKey. + // +kubebuilder:validation:Optional + KMSKeyRef *v1.Reference `json:"kmsKeyRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKey. + // +kubebuilder:validation:Optional + KMSKeySelector *v1.Selector `json:"kmsKeySelector,omitempty" tf:"-"` + + // The Apache Airflow logs you want to send to Amazon CloudWatch Logs. + LoggingConfiguration []LoggingConfigurationInitParameters `json:"loggingConfiguration,omitempty" tf:"logging_configuration,omitempty"` + + // The maximum number of workers that can be automatically scaled up. Value need to be between 1 and 25. Will be 10 by default. + MaxWorkers *float64 `json:"maxWorkers,omitempty" tf:"max_workers,omitempty"` + + // The minimum number of workers that you want to run in your environment. Will be 1 by default. + MinWorkers *float64 `json:"minWorkers,omitempty" tf:"min_workers,omitempty"` + + // Specifies the network configuration for your Apache Airflow Environment. This includes two private subnets as well as security groups for the Airflow environment. Each subnet requires internet connection, otherwise the deployment will fail. See Network configuration below for details. + NetworkConfiguration []NetworkConfigurationInitParameters `json:"networkConfiguration,omitempty" tf:"network_configuration,omitempty"` + + // The plugins.zip file version you want to use. + PluginsS3ObjectVersion *string `json:"pluginsS3ObjectVersion,omitempty" tf:"plugins_s3_object_version,omitempty"` + + // The relative path to the plugins.zip file on your Amazon S3 storage bucket. For example, plugins.zip. If a relative path is provided in the request, then plugins_s3_object_version is required. For more information, see Importing DAGs on Amazon MWAA. + PluginsS3Path *string `json:"pluginsS3Path,omitempty" tf:"plugins_s3_path,omitempty"` + + // The requirements.txt file version you want to use. + RequirementsS3ObjectVersion *string `json:"requirementsS3ObjectVersion,omitempty" tf:"requirements_s3_object_version,omitempty"` + + // The relative path to the requirements.txt file on your Amazon S3 storage bucket. For example, requirements.txt. 
If a relative path is provided in the request, then requirements_s3_object_version is required. For more information, see Importing DAGs on Amazon MWAA. + RequirementsS3Path *string `json:"requirementsS3Path,omitempty" tf:"requirements_s3_path,omitempty"` + + // The number of schedulers that you want to run in your environment. v2.0.2 and above accepts 2 - 5, default 2. v1.10.12 accepts 1. + Schedulers *float64 `json:"schedulers,omitempty" tf:"schedulers,omitempty"` + + // The Amazon Resource Name (ARN) of your Amazon S3 storage bucket. For example, arn:aws:s3:::airflow-mybucketname. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + SourceBucketArn *string `json:"sourceBucketArn,omitempty" tf:"source_bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate sourceBucketArn. + // +kubebuilder:validation:Optional + SourceBucketArnRef *v1.Reference `json:"sourceBucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate sourceBucketArn. + // +kubebuilder:validation:Optional + SourceBucketArnSelector *v1.Selector `json:"sourceBucketArnSelector,omitempty" tf:"-"` + + // The version of the startup shell script you want to use. You must specify the version ID that Amazon S3 assigns to the file every time you update the script. + StartupScriptS3ObjectVersion *string `json:"startupScriptS3ObjectVersion,omitempty" tf:"startup_script_s3_object_version,omitempty"` + + // The relative path to the script hosted in your bucket. The script runs as your environment starts before starting the Apache Airflow process. Use this script to install dependencies, modify configuration options, and set environment variables. See Using a startup script. Supported for environment versions 2.x and later. + StartupScriptS3Path *string `json:"startupScriptS3Path,omitempty" tf:"startup_script_s3_path,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies whether the webserver should be accessible over the internet or via your specified VPC. Possible options: PRIVATE_ONLY (default) and PUBLIC_ONLY. + WebserverAccessMode *string `json:"webserverAccessMode,omitempty" tf:"webserver_access_mode,omitempty"` + + // Specifies the start date for the weekly maintenance window. + WeeklyMaintenanceWindowStart *string `json:"weeklyMaintenanceWindowStart,omitempty" tf:"weekly_maintenance_window_start,omitempty"` +} + +type EnvironmentObservation struct { + + // Airflow version of your environment, will be set by default to the latest version that MWAA supports. + AirflowVersion *string `json:"airflowVersion,omitempty" tf:"airflow_version,omitempty"` + + // The ARN of the MWAA Environment + Arn *string `json:"arn,omitempty" tf:"arn,omitempty"` + + // The Created At date of the MWAA Environment + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + // The relative path to the DAG folder on your Amazon S3 storage bucket. For example, dags. For more information, see Importing DAGs on Amazon MWAA. 
+ DagS3Path *string `json:"dagS3Path,omitempty" tf:"dag_s3_path,omitempty"` + + // The VPC endpoint for the environment's Amazon RDS database + DatabaseVPCEndpointService *string `json:"databaseVpcEndpointService,omitempty" tf:"database_vpc_endpoint_service,omitempty"` + + EndpointManagement *string `json:"endpointManagement,omitempty" tf:"endpoint_management,omitempty"` + + // Environment class for the cluster. Possible options are mw1.small, mw1.medium, mw1.large. Will be set by default to mw1.small. Please check the AWS Pricing for more information about the environment classes. + EnvironmentClass *string `json:"environmentClass,omitempty" tf:"environment_class,omitempty"` + + // The Amazon Resource Name (ARN) of the task execution role that the Amazon MWAA and its environment can assume. Check the official AWS documentation for the detailed role specification. + ExecutionRoleArn *string `json:"executionRoleArn,omitempty" tf:"execution_role_arn,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Amazon Resource Name (ARN) of your KMS key that you want to use for encryption. Will be set to the ARN of the managed KMS key aws/airflow by default. Please check the Official Documentation for more information. + KMSKey *string `json:"kmsKey,omitempty" tf:"kms_key,omitempty"` + + LastUpdated []LastUpdatedObservation `json:"lastUpdated,omitempty" tf:"last_updated,omitempty"` + + // The Apache Airflow logs you want to send to Amazon CloudWatch Logs. + LoggingConfiguration []LoggingConfigurationObservation `json:"loggingConfiguration,omitempty" tf:"logging_configuration,omitempty"` + + // The maximum number of workers that can be automatically scaled up. Value need to be between 1 and 25. Will be 10 by default. + MaxWorkers *float64 `json:"maxWorkers,omitempty" tf:"max_workers,omitempty"` + + // The minimum number of workers that you want to run in your environment. Will be 1 by default. + MinWorkers *float64 `json:"minWorkers,omitempty" tf:"min_workers,omitempty"` + + // Specifies the network configuration for your Apache Airflow Environment. This includes two private subnets as well as security groups for the Airflow environment. Each subnet requires internet connection, otherwise the deployment will fail. See Network configuration below for details. + NetworkConfiguration []NetworkConfigurationObservation `json:"networkConfiguration,omitempty" tf:"network_configuration,omitempty"` + + // The plugins.zip file version you want to use. + PluginsS3ObjectVersion *string `json:"pluginsS3ObjectVersion,omitempty" tf:"plugins_s3_object_version,omitempty"` + + // The relative path to the plugins.zip file on your Amazon S3 storage bucket. For example, plugins.zip. If a relative path is provided in the request, then plugins_s3_object_version is required. For more information, see Importing DAGs on Amazon MWAA. + PluginsS3Path *string `json:"pluginsS3Path,omitempty" tf:"plugins_s3_path,omitempty"` + + // The requirements.txt file version you want to use. + RequirementsS3ObjectVersion *string `json:"requirementsS3ObjectVersion,omitempty" tf:"requirements_s3_object_version,omitempty"` + + // The relative path to the requirements.txt file on your Amazon S3 storage bucket. For example, requirements.txt. If a relative path is provided in the request, then requirements_s3_object_version is required. For more information, see Importing DAGs on Amazon MWAA. 
+ RequirementsS3Path *string `json:"requirementsS3Path,omitempty" tf:"requirements_s3_path,omitempty"` + + // The number of schedulers that you want to run in your environment. v2.0.2 and above accepts 2 - 5, default 2. v1.10.12 accepts 1. + Schedulers *float64 `json:"schedulers,omitempty" tf:"schedulers,omitempty"` + + // The Service Role ARN of the Amazon MWAA Environment + ServiceRoleArn *string `json:"serviceRoleArn,omitempty" tf:"service_role_arn,omitempty"` + + // The Amazon Resource Name (ARN) of your Amazon S3 storage bucket. For example, arn:aws:s3:::airflow-mybucketname. + SourceBucketArn *string `json:"sourceBucketArn,omitempty" tf:"source_bucket_arn,omitempty"` + + // The version of the startup shell script you want to use. You must specify the version ID that Amazon S3 assigns to the file every time you update the script. + StartupScriptS3ObjectVersion *string `json:"startupScriptS3ObjectVersion,omitempty" tf:"startup_script_s3_object_version,omitempty"` + + // The relative path to the script hosted in your bucket. The script runs as your environment starts before starting the Apache Airflow process. Use this script to install dependencies, modify configuration options, and set environment variables. See Using a startup script. Supported for environment versions 2.x and later. + StartupScriptS3Path *string `json:"startupScriptS3Path,omitempty" tf:"startup_script_s3_path,omitempty"` + + // The status of the Amazon MWAA Environment + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Key-value map of resource tags. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block. + // +mapType=granular + TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"` + + // Specifies whether the webserver should be accessible over the internet or via your specified VPC. Possible options: PRIVATE_ONLY (default) and PUBLIC_ONLY. + WebserverAccessMode *string `json:"webserverAccessMode,omitempty" tf:"webserver_access_mode,omitempty"` + + // The webserver URL of the MWAA Environment + WebserverURL *string `json:"webserverUrl,omitempty" tf:"webserver_url,omitempty"` + + // The VPC endpoint for the environment's web server + WebserverVPCEndpointService *string `json:"webserverVpcEndpointService,omitempty" tf:"webserver_vpc_endpoint_service,omitempty"` + + // Specifies the start date for the weekly maintenance window. + WeeklyMaintenanceWindowStart *string `json:"weeklyMaintenanceWindowStart,omitempty" tf:"weekly_maintenance_window_start,omitempty"` +} + +type EnvironmentParameters struct { + + // The airflow_configuration_options parameter specifies airflow override options. Check the Official documentation for all possible configuration options. + // +kubebuilder:validation:Optional + AirflowConfigurationOptionsSecretRef *v1.SecretReference `json:"airflowConfigurationOptionsSecretRef,omitempty" tf:"-"` + + // Airflow version of your environment, will be set by default to the latest version that MWAA supports. + // +kubebuilder:validation:Optional + AirflowVersion *string `json:"airflowVersion,omitempty" tf:"airflow_version,omitempty"` + + // The relative path to the DAG folder on your Amazon S3 storage bucket. For example, dags. For more information, see Importing DAGs on Amazon MWAA. 
+ // +kubebuilder:validation:Optional + DagS3Path *string `json:"dagS3Path,omitempty" tf:"dag_s3_path,omitempty"` + + // +kubebuilder:validation:Optional + EndpointManagement *string `json:"endpointManagement,omitempty" tf:"endpoint_management,omitempty"` + + // Environment class for the cluster. Possible options are mw1.small, mw1.medium, mw1.large. Will be set by default to mw1.small. Please check the AWS Pricing for more information about the environment classes. + // +kubebuilder:validation:Optional + EnvironmentClass *string `json:"environmentClass,omitempty" tf:"environment_class,omitempty"` + + // The Amazon Resource Name (ARN) of the task execution role that the Amazon MWAA and its environment can assume. Check the official AWS documentation for the detailed role specification. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role + // +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor() + // +kubebuilder:validation:Optional + ExecutionRoleArn *string `json:"executionRoleArn,omitempty" tf:"execution_role_arn,omitempty"` + + // Reference to a Role in iam to populate executionRoleArn. + // +kubebuilder:validation:Optional + ExecutionRoleArnRef *v1.Reference `json:"executionRoleArnRef,omitempty" tf:"-"` + + // Selector for a Role in iam to populate executionRoleArn. + // +kubebuilder:validation:Optional + ExecutionRoleArnSelector *v1.Selector `json:"executionRoleArnSelector,omitempty" tf:"-"` + + // The Amazon Resource Name (ARN) of your KMS key that you want to use for encryption. Will be set to the ARN of the managed KMS key aws/airflow by default. Please check the Official Documentation for more information. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key + // +kubebuilder:validation:Optional + KMSKey *string `json:"kmsKey,omitempty" tf:"kms_key,omitempty"` + + // Reference to a Key in kms to populate kmsKey. + // +kubebuilder:validation:Optional + KMSKeyRef *v1.Reference `json:"kmsKeyRef,omitempty" tf:"-"` + + // Selector for a Key in kms to populate kmsKey. + // +kubebuilder:validation:Optional + KMSKeySelector *v1.Selector `json:"kmsKeySelector,omitempty" tf:"-"` + + // The Apache Airflow logs you want to send to Amazon CloudWatch Logs. + // +kubebuilder:validation:Optional + LoggingConfiguration []LoggingConfigurationParameters `json:"loggingConfiguration,omitempty" tf:"logging_configuration,omitempty"` + + // The maximum number of workers that can be automatically scaled up. Value need to be between 1 and 25. Will be 10 by default. + // +kubebuilder:validation:Optional + MaxWorkers *float64 `json:"maxWorkers,omitempty" tf:"max_workers,omitempty"` + + // The minimum number of workers that you want to run in your environment. Will be 1 by default. + // +kubebuilder:validation:Optional + MinWorkers *float64 `json:"minWorkers,omitempty" tf:"min_workers,omitempty"` + + // Specifies the network configuration for your Apache Airflow Environment. This includes two private subnets as well as security groups for the Airflow environment. Each subnet requires internet connection, otherwise the deployment will fail. See Network configuration below for details. + // +kubebuilder:validation:Optional + NetworkConfiguration []NetworkConfigurationParameters `json:"networkConfiguration,omitempty" tf:"network_configuration,omitempty"` + + // The plugins.zip file version you want to use. 
+ // +kubebuilder:validation:Optional + PluginsS3ObjectVersion *string `json:"pluginsS3ObjectVersion,omitempty" tf:"plugins_s3_object_version,omitempty"` + + // The relative path to the plugins.zip file on your Amazon S3 storage bucket. For example, plugins.zip. If a relative path is provided in the request, then plugins_s3_object_version is required. For more information, see Importing DAGs on Amazon MWAA. + // +kubebuilder:validation:Optional + PluginsS3Path *string `json:"pluginsS3Path,omitempty" tf:"plugins_s3_path,omitempty"` + + // Region is the region you'd like your resource to be created in. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Required + Region *string `json:"region" tf:"-"` + + // The requirements.txt file version you want to use. + // +kubebuilder:validation:Optional + RequirementsS3ObjectVersion *string `json:"requirementsS3ObjectVersion,omitempty" tf:"requirements_s3_object_version,omitempty"` + + // The relative path to the requirements.txt file on your Amazon S3 storage bucket. For example, requirements.txt. If a relative path is provided in the request, then requirements_s3_object_version is required. For more information, see Importing DAGs on Amazon MWAA. + // +kubebuilder:validation:Optional + RequirementsS3Path *string `json:"requirementsS3Path,omitempty" tf:"requirements_s3_path,omitempty"` + + // The number of schedulers that you want to run in your environment. v2.0.2 and above accepts 2 - 5, default 2. v1.10.12 accepts 1. + // +kubebuilder:validation:Optional + Schedulers *float64 `json:"schedulers,omitempty" tf:"schedulers,omitempty"` + + // The Amazon Resource Name (ARN) of your Amazon S3 storage bucket. For example, arn:aws:s3:::airflow-mybucketname. + // +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/s3/v1beta1.Bucket + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("arn",true) + // +kubebuilder:validation:Optional + SourceBucketArn *string `json:"sourceBucketArn,omitempty" tf:"source_bucket_arn,omitempty"` + + // Reference to a Bucket in s3 to populate sourceBucketArn. + // +kubebuilder:validation:Optional + SourceBucketArnRef *v1.Reference `json:"sourceBucketArnRef,omitempty" tf:"-"` + + // Selector for a Bucket in s3 to populate sourceBucketArn. + // +kubebuilder:validation:Optional + SourceBucketArnSelector *v1.Selector `json:"sourceBucketArnSelector,omitempty" tf:"-"` + + // The version of the startup shell script you want to use. You must specify the version ID that Amazon S3 assigns to the file every time you update the script. + // +kubebuilder:validation:Optional + StartupScriptS3ObjectVersion *string `json:"startupScriptS3ObjectVersion,omitempty" tf:"startup_script_s3_object_version,omitempty"` + + // The relative path to the script hosted in your bucket. The script runs as your environment starts before starting the Apache Airflow process. Use this script to install dependencies, modify configuration options, and set environment variables. See Using a startup script. Supported for environment versions 2.x and later. + // +kubebuilder:validation:Optional + StartupScriptS3Path *string `json:"startupScriptS3Path,omitempty" tf:"startup_script_s3_path,omitempty"` + + // Key-value map of resource tags. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies whether the webserver should be accessible over the internet or via your specified VPC. 
Possible options: PRIVATE_ONLY (default) and PUBLIC_ONLY. + // +kubebuilder:validation:Optional + WebserverAccessMode *string `json:"webserverAccessMode,omitempty" tf:"webserver_access_mode,omitempty"` + + // Specifies the start date for the weekly maintenance window. + // +kubebuilder:validation:Optional + WeeklyMaintenanceWindowStart *string `json:"weeklyMaintenanceWindowStart,omitempty" tf:"weekly_maintenance_window_start,omitempty"` +} + +type ErrorInitParameters struct { +} + +type ErrorObservation struct { + ErrorCode *string `json:"errorCode,omitempty" tf:"error_code,omitempty"` + + ErrorMessage *string `json:"errorMessage,omitempty" tf:"error_message,omitempty"` +} + +type ErrorParameters struct { +} + +type LastUpdatedInitParameters struct { +} + +type LastUpdatedObservation struct { + + // The Created At date of the MWAA Environment + CreatedAt *string `json:"createdAt,omitempty" tf:"created_at,omitempty"` + + Error []ErrorObservation `json:"error,omitempty" tf:"error,omitempty"` + + // The status of the Amazon MWAA Environment + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type LastUpdatedParameters struct { +} + +type LoggingConfigurationInitParameters struct { + + // Log configuration options for processing DAGs. See Module logging configuration for more information. Disabled by default. + DagProcessingLogs []DagProcessingLogsInitParameters `json:"dagProcessingLogs,omitempty" tf:"dag_processing_logs,omitempty"` + + // Log configuration options for the schedulers. See Module logging configuration for more information. Disabled by default. + SchedulerLogs []SchedulerLogsInitParameters `json:"schedulerLogs,omitempty" tf:"scheduler_logs,omitempty"` + + // Log configuration options for DAG tasks. See Module logging configuration for more information. Enabled by default with INFO log level. + TaskLogs []TaskLogsInitParameters `json:"taskLogs,omitempty" tf:"task_logs,omitempty"` + + // Log configuration options for the webservers. See Module logging configuration for more information. Disabled by default. + WebserverLogs []WebserverLogsInitParameters `json:"webserverLogs,omitempty" tf:"webserver_logs,omitempty"` + + // Log configuration options for the workers. See Module logging configuration for more information. Disabled by default. + WorkerLogs []WorkerLogsInitParameters `json:"workerLogs,omitempty" tf:"worker_logs,omitempty"` +} + +type LoggingConfigurationObservation struct { + + // Log configuration options for processing DAGs. See Module logging configuration for more information. Disabled by default. + DagProcessingLogs []DagProcessingLogsObservation `json:"dagProcessingLogs,omitempty" tf:"dag_processing_logs,omitempty"` + + // Log configuration options for the schedulers. See Module logging configuration for more information. Disabled by default. + SchedulerLogs []SchedulerLogsObservation `json:"schedulerLogs,omitempty" tf:"scheduler_logs,omitempty"` + + // Log configuration options for DAG tasks. See Module logging configuration for more information. Enabled by default with INFO log level. + TaskLogs []TaskLogsObservation `json:"taskLogs,omitempty" tf:"task_logs,omitempty"` + + // Log configuration options for the webservers. See Module logging configuration for more information. Disabled by default. + WebserverLogs []WebserverLogsObservation `json:"webserverLogs,omitempty" tf:"webserver_logs,omitempty"` + + // Log configuration options for the workers. See Module logging configuration for more information. Disabled by default. 
+	WorkerLogs []WorkerLogsObservation `json:"workerLogs,omitempty" tf:"worker_logs,omitempty"`
+}
+
+type LoggingConfigurationParameters struct {
+
+	// Log configuration options for processing DAGs. See Module logging configuration for more information. Disabled by default.
+	// +kubebuilder:validation:Optional
+	DagProcessingLogs []DagProcessingLogsParameters `json:"dagProcessingLogs,omitempty" tf:"dag_processing_logs,omitempty"`
+
+	// Log configuration options for the schedulers. See Module logging configuration for more information. Disabled by default.
+	// +kubebuilder:validation:Optional
+	SchedulerLogs []SchedulerLogsParameters `json:"schedulerLogs,omitempty" tf:"scheduler_logs,omitempty"`
+
+	// Log configuration options for DAG tasks. See Module logging configuration for more information. Enabled by default with INFO log level.
+	// +kubebuilder:validation:Optional
+	TaskLogs []TaskLogsParameters `json:"taskLogs,omitempty" tf:"task_logs,omitempty"`
+
+	// Log configuration options for the webservers. See Module logging configuration for more information. Disabled by default.
+	// +kubebuilder:validation:Optional
+	WebserverLogs []WebserverLogsParameters `json:"webserverLogs,omitempty" tf:"webserver_logs,omitempty"`
+
+	// Log configuration options for the workers. See Module logging configuration for more information. Disabled by default.
+	// +kubebuilder:validation:Optional
+	WorkerLogs []WorkerLogsParameters `json:"workerLogs,omitempty" tf:"worker_logs,omitempty"`
+}
+
+type NetworkConfigurationInitParameters struct {
+
+	// Security group IDs for the environment. At least one of the security groups needs to allow MWAA resources to talk to each other, otherwise MWAA cannot be provisioned.
+	// +listType=set
+	SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+
+	// The private subnet IDs in which the environment should be created. MWAA requires two subnets.
+	// +listType=set
+	SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"`
+}
+
+type NetworkConfigurationObservation struct {
+
+	// Security group IDs for the environment. At least one of the security groups needs to allow MWAA resources to talk to each other, otherwise MWAA cannot be provisioned.
+	// +listType=set
+	SecurityGroupIds []*string `json:"securityGroupIds,omitempty" tf:"security_group_ids,omitempty"`
+
+	// The private subnet IDs in which the environment should be created. MWAA requires two subnets.
+	// +listType=set
+	SubnetIds []*string `json:"subnetIds,omitempty" tf:"subnet_ids,omitempty"`
+}
+
+type NetworkConfigurationParameters struct {
+
+	// Security group IDs for the environment. At least one of the security groups needs to allow MWAA resources to talk to each other, otherwise MWAA cannot be provisioned.
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	SecurityGroupIds []*string `json:"securityGroupIds" tf:"security_group_ids,omitempty"`
+
+	// The private subnet IDs in which the environment should be created. MWAA requires two subnets.
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	SubnetIds []*string `json:"subnetIds" tf:"subnet_ids,omitempty"`
+}
+
+type SchedulerLogsInitParameters struct {
+
+	// Enabling or disabling the collection of logs
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// Logging level. Valid values: CRITICAL, ERROR, WARNING, INFO, DEBUG. Will be INFO by default.
+ LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"` +} + +type SchedulerLogsObservation struct { + + // Provides the ARN for the CloudWatch group where the logs will be published + CloudWatchLogGroupArn *string `json:"cloudWatchLogGroupArn,omitempty" tf:"cloud_watch_log_group_arn,omitempty"` + + // Enabling or disabling the collection of logs + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Logging level. Valid values: CRITICAL, ERROR, WARNING, INFO, DEBUG. Will be INFO by default. + LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"` +} + +type SchedulerLogsParameters struct { + + // Enabling or disabling the collection of logs + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Logging level. Valid values: CRITICAL, ERROR, WARNING, INFO, DEBUG. Will be INFO by default. + // +kubebuilder:validation:Optional + LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"` +} + +type TaskLogsInitParameters struct { + + // Enabling or disabling the collection of logs + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Logging level. Valid values: CRITICAL, ERROR, WARNING, INFO, DEBUG. Will be INFO by default. + LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"` +} + +type TaskLogsObservation struct { + + // Provides the ARN for the CloudWatch group where the logs will be published + CloudWatchLogGroupArn *string `json:"cloudWatchLogGroupArn,omitempty" tf:"cloud_watch_log_group_arn,omitempty"` + + // Enabling or disabling the collection of logs + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Logging level. Valid values: CRITICAL, ERROR, WARNING, INFO, DEBUG. Will be INFO by default. + LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"` +} + +type TaskLogsParameters struct { + + // Enabling or disabling the collection of logs + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Logging level. Valid values: CRITICAL, ERROR, WARNING, INFO, DEBUG. Will be INFO by default. + // +kubebuilder:validation:Optional + LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"` +} + +type WebserverLogsInitParameters struct { + + // Enabling or disabling the collection of logs + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Logging level. Valid values: CRITICAL, ERROR, WARNING, INFO, DEBUG. Will be INFO by default. + LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"` +} + +type WebserverLogsObservation struct { + + // Provides the ARN for the CloudWatch group where the logs will be published + CloudWatchLogGroupArn *string `json:"cloudWatchLogGroupArn,omitempty" tf:"cloud_watch_log_group_arn,omitempty"` + + // Enabling or disabling the collection of logs + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Logging level. Valid values: CRITICAL, ERROR, WARNING, INFO, DEBUG. Will be INFO by default. + LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"` +} + +type WebserverLogsParameters struct { + + // Enabling or disabling the collection of logs + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Logging level. Valid values: CRITICAL, ERROR, WARNING, INFO, DEBUG. Will be INFO by default. 
+	// +kubebuilder:validation:Optional
+	LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"`
+}
+
+type WorkerLogsInitParameters struct {
+
+	// Enabling or disabling the collection of logs
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// Logging level. Valid values: CRITICAL, ERROR, WARNING, INFO, DEBUG. Will be INFO by default.
+	LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"`
+}
+
+type WorkerLogsObservation struct {
+
+	// Provides the ARN for the CloudWatch group where the logs will be published
+	CloudWatchLogGroupArn *string `json:"cloudWatchLogGroupArn,omitempty" tf:"cloud_watch_log_group_arn,omitempty"`
+
+	// Enabling or disabling the collection of logs
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// Logging level. Valid values: CRITICAL, ERROR, WARNING, INFO, DEBUG. Will be INFO by default.
+	LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"`
+}
+
+type WorkerLogsParameters struct {
+
+	// Enabling or disabling the collection of logs
+	// +kubebuilder:validation:Optional
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// Logging level. Valid values: CRITICAL, ERROR, WARNING, INFO, DEBUG. Will be INFO by default.
+	// +kubebuilder:validation:Optional
+	LogLevel *string `json:"logLevel,omitempty" tf:"log_level,omitempty"`
+}
+
+// EnvironmentSpec defines the desired state of Environment
+type EnvironmentSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider     EnvironmentParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not want to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider EnvironmentInitParameters `json:"initProvider,omitempty"`
+}
+
+// EnvironmentStatus defines the observed state of Environment.
+type EnvironmentStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider        EnvironmentObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// Environment is the Schema for the Environments API.
Creates a MWAA Environment +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws} +type Environment struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.dagS3Path) || (has(self.initProvider) && has(self.initProvider.dagS3Path))",message="spec.forProvider.dagS3Path is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.networkConfiguration) || (has(self.initProvider) && has(self.initProvider.networkConfiguration))",message="spec.forProvider.networkConfiguration is a required parameter" + Spec EnvironmentSpec `json:"spec"` + Status EnvironmentStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// EnvironmentList contains a list of Environments +type EnvironmentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Environment `json:"items"` +} + +// Repository type metadata. +var ( + Environment_Kind = "Environment" + Environment_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Environment_Kind}.String() + Environment_KindAPIVersion = Environment_Kind + "." + CRDGroupVersion.String() + Environment_GroupVersionKind = CRDGroupVersion.WithKind(Environment_Kind) +) + +func init() { + SchemeBuilder.Register(&Environment{}, &EnvironmentList{}) +} diff --git a/apis/mwaa/v1beta1/zz_generated.conversion_hubs.go b/apis/mwaa/v1beta1/zz_generated.conversion_hubs.go new file mode 100755 index 0000000000..e7ab6a185b --- /dev/null +++ b/apis/mwaa/v1beta1/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +// Hub marks this type as a conversion hub. +func (tr *Environment) Hub() {} diff --git a/apis/mwaa/v1beta1/zz_generated.deepcopy.go b/apis/mwaa/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..f91d581b68 --- /dev/null +++ b/apis/mwaa/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,1440 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DagProcessingLogsInitParameters) DeepCopyInto(out *DagProcessingLogsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DagProcessingLogsInitParameters. +func (in *DagProcessingLogsInitParameters) DeepCopy() *DagProcessingLogsInitParameters { + if in == nil { + return nil + } + out := new(DagProcessingLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DagProcessingLogsObservation) DeepCopyInto(out *DagProcessingLogsObservation) { + *out = *in + if in.CloudWatchLogGroupArn != nil { + in, out := &in.CloudWatchLogGroupArn, &out.CloudWatchLogGroupArn + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DagProcessingLogsObservation. +func (in *DagProcessingLogsObservation) DeepCopy() *DagProcessingLogsObservation { + if in == nil { + return nil + } + out := new(DagProcessingLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DagProcessingLogsParameters) DeepCopyInto(out *DagProcessingLogsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DagProcessingLogsParameters. +func (in *DagProcessingLogsParameters) DeepCopy() *DagProcessingLogsParameters { + if in == nil { + return nil + } + out := new(DagProcessingLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Environment) DeepCopyInto(out *Environment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Environment. +func (in *Environment) DeepCopy() *Environment { + if in == nil { + return nil + } + out := new(Environment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Environment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EnvironmentInitParameters) DeepCopyInto(out *EnvironmentInitParameters) { + *out = *in + if in.AirflowConfigurationOptions != nil { + in, out := &in.AirflowConfigurationOptions, &out.AirflowConfigurationOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AirflowVersion != nil { + in, out := &in.AirflowVersion, &out.AirflowVersion + *out = new(string) + **out = **in + } + if in.DagS3Path != nil { + in, out := &in.DagS3Path, &out.DagS3Path + *out = new(string) + **out = **in + } + if in.EndpointManagement != nil { + in, out := &in.EndpointManagement, &out.EndpointManagement + *out = new(string) + **out = **in + } + if in.EnvironmentClass != nil { + in, out := &in.EnvironmentClass, &out.EnvironmentClass + *out = new(string) + **out = **in + } + if in.ExecutionRoleArn != nil { + in, out := &in.ExecutionRoleArn, &out.ExecutionRoleArn + *out = new(string) + **out = **in + } + if in.ExecutionRoleArnRef != nil { + in, out := &in.ExecutionRoleArnRef, &out.ExecutionRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ExecutionRoleArnSelector != nil { + in, out := &in.ExecutionRoleArnSelector, &out.ExecutionRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(string) + **out = **in + } + if in.KMSKeyRef != nil { + in, out := &in.KMSKeyRef, &out.KMSKeyRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeySelector != nil { + in, out := &in.KMSKeySelector, &out.KMSKeySelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LoggingConfiguration != nil { + in, out := &in.LoggingConfiguration, &out.LoggingConfiguration + *out = make([]LoggingConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxWorkers != nil { + in, out := &in.MaxWorkers, &out.MaxWorkers + *out = new(float64) + **out = **in + } + if in.MinWorkers != nil { + in, out := &in.MinWorkers, &out.MinWorkers + *out = new(float64) + **out = **in + } + if in.NetworkConfiguration != nil { + in, out := &in.NetworkConfiguration, &out.NetworkConfiguration + *out = make([]NetworkConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PluginsS3ObjectVersion != nil { + in, out := &in.PluginsS3ObjectVersion, &out.PluginsS3ObjectVersion + *out = new(string) + **out = **in + } + if in.PluginsS3Path != nil { + in, out := &in.PluginsS3Path, &out.PluginsS3Path + *out = new(string) + **out = **in + } + if in.RequirementsS3ObjectVersion != nil { + in, out := &in.RequirementsS3ObjectVersion, &out.RequirementsS3ObjectVersion + *out = new(string) + **out = **in + } + if in.RequirementsS3Path != nil { + in, out := &in.RequirementsS3Path, &out.RequirementsS3Path + *out = new(string) + **out = **in + } + if in.Schedulers != nil { + in, out := &in.Schedulers, &out.Schedulers + *out = new(float64) + **out = **in + } + if in.SourceBucketArn != nil { + in, out := &in.SourceBucketArn, &out.SourceBucketArn + *out = new(string) + **out = **in + } + if in.SourceBucketArnRef != nil { + in, out := &in.SourceBucketArnRef, &out.SourceBucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SourceBucketArnSelector != nil { + in, out := 
&in.SourceBucketArnSelector, &out.SourceBucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StartupScriptS3ObjectVersion != nil { + in, out := &in.StartupScriptS3ObjectVersion, &out.StartupScriptS3ObjectVersion + *out = new(string) + **out = **in + } + if in.StartupScriptS3Path != nil { + in, out := &in.StartupScriptS3Path, &out.StartupScriptS3Path + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WebserverAccessMode != nil { + in, out := &in.WebserverAccessMode, &out.WebserverAccessMode + *out = new(string) + **out = **in + } + if in.WeeklyMaintenanceWindowStart != nil { + in, out := &in.WeeklyMaintenanceWindowStart, &out.WeeklyMaintenanceWindowStart + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentInitParameters. +func (in *EnvironmentInitParameters) DeepCopy() *EnvironmentInitParameters { + if in == nil { + return nil + } + out := new(EnvironmentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentList) DeepCopyInto(out *EnvironmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Environment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentList. +func (in *EnvironmentList) DeepCopy() *EnvironmentList { + if in == nil { + return nil + } + out := new(EnvironmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EnvironmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EnvironmentObservation) DeepCopyInto(out *EnvironmentObservation) { + *out = *in + if in.AirflowVersion != nil { + in, out := &in.AirflowVersion, &out.AirflowVersion + *out = new(string) + **out = **in + } + if in.Arn != nil { + in, out := &in.Arn, &out.Arn + *out = new(string) + **out = **in + } + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.DagS3Path != nil { + in, out := &in.DagS3Path, &out.DagS3Path + *out = new(string) + **out = **in + } + if in.DatabaseVPCEndpointService != nil { + in, out := &in.DatabaseVPCEndpointService, &out.DatabaseVPCEndpointService + *out = new(string) + **out = **in + } + if in.EndpointManagement != nil { + in, out := &in.EndpointManagement, &out.EndpointManagement + *out = new(string) + **out = **in + } + if in.EnvironmentClass != nil { + in, out := &in.EnvironmentClass, &out.EnvironmentClass + *out = new(string) + **out = **in + } + if in.ExecutionRoleArn != nil { + in, out := &in.ExecutionRoleArn, &out.ExecutionRoleArn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(string) + **out = **in + } + if in.LastUpdated != nil { + in, out := &in.LastUpdated, &out.LastUpdated + *out = make([]LastUpdatedObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LoggingConfiguration != nil { + in, out := &in.LoggingConfiguration, &out.LoggingConfiguration + *out = make([]LoggingConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxWorkers != nil { + in, out := &in.MaxWorkers, &out.MaxWorkers + *out = new(float64) + **out = **in + } + if in.MinWorkers != nil { + in, out := &in.MinWorkers, &out.MinWorkers + *out = new(float64) + **out = **in + } + if in.NetworkConfiguration != nil { + in, out := &in.NetworkConfiguration, &out.NetworkConfiguration + *out = make([]NetworkConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PluginsS3ObjectVersion != nil { + in, out := &in.PluginsS3ObjectVersion, &out.PluginsS3ObjectVersion + *out = new(string) + **out = **in + } + if in.PluginsS3Path != nil { + in, out := &in.PluginsS3Path, &out.PluginsS3Path + *out = new(string) + **out = **in + } + if in.RequirementsS3ObjectVersion != nil { + in, out := &in.RequirementsS3ObjectVersion, &out.RequirementsS3ObjectVersion + *out = new(string) + **out = **in + } + if in.RequirementsS3Path != nil { + in, out := &in.RequirementsS3Path, &out.RequirementsS3Path + *out = new(string) + **out = **in + } + if in.Schedulers != nil { + in, out := &in.Schedulers, &out.Schedulers + *out = new(float64) + **out = **in + } + if in.ServiceRoleArn != nil { + in, out := &in.ServiceRoleArn, &out.ServiceRoleArn + *out = new(string) + **out = **in + } + if in.SourceBucketArn != nil { + in, out := &in.SourceBucketArn, &out.SourceBucketArn + *out = new(string) + **out = **in + } + if in.StartupScriptS3ObjectVersion != nil { + in, out := &in.StartupScriptS3ObjectVersion, &out.StartupScriptS3ObjectVersion + *out = new(string) + **out = **in + } + if in.StartupScriptS3Path != nil { + in, out := &in.StartupScriptS3Path, &out.StartupScriptS3Path + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, 
&out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TagsAll != nil { + in, out := &in.TagsAll, &out.TagsAll + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WebserverAccessMode != nil { + in, out := &in.WebserverAccessMode, &out.WebserverAccessMode + *out = new(string) + **out = **in + } + if in.WebserverURL != nil { + in, out := &in.WebserverURL, &out.WebserverURL + *out = new(string) + **out = **in + } + if in.WebserverVPCEndpointService != nil { + in, out := &in.WebserverVPCEndpointService, &out.WebserverVPCEndpointService + *out = new(string) + **out = **in + } + if in.WeeklyMaintenanceWindowStart != nil { + in, out := &in.WeeklyMaintenanceWindowStart, &out.WeeklyMaintenanceWindowStart + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentObservation. +func (in *EnvironmentObservation) DeepCopy() *EnvironmentObservation { + if in == nil { + return nil + } + out := new(EnvironmentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentParameters) DeepCopyInto(out *EnvironmentParameters) { + *out = *in + if in.AirflowConfigurationOptionsSecretRef != nil { + in, out := &in.AirflowConfigurationOptionsSecretRef, &out.AirflowConfigurationOptionsSecretRef + *out = new(v1.SecretReference) + **out = **in + } + if in.AirflowVersion != nil { + in, out := &in.AirflowVersion, &out.AirflowVersion + *out = new(string) + **out = **in + } + if in.DagS3Path != nil { + in, out := &in.DagS3Path, &out.DagS3Path + *out = new(string) + **out = **in + } + if in.EndpointManagement != nil { + in, out := &in.EndpointManagement, &out.EndpointManagement + *out = new(string) + **out = **in + } + if in.EnvironmentClass != nil { + in, out := &in.EnvironmentClass, &out.EnvironmentClass + *out = new(string) + **out = **in + } + if in.ExecutionRoleArn != nil { + in, out := &in.ExecutionRoleArn, &out.ExecutionRoleArn + *out = new(string) + **out = **in + } + if in.ExecutionRoleArnRef != nil { + in, out := &in.ExecutionRoleArnRef, &out.ExecutionRoleArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ExecutionRoleArnSelector != nil { + in, out := &in.ExecutionRoleArnSelector, &out.ExecutionRoleArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(string) + **out = **in + } + if in.KMSKeyRef != nil { + in, out := &in.KMSKeyRef, &out.KMSKeyRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KMSKeySelector != nil { + in, out := &in.KMSKeySelector, &out.KMSKeySelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LoggingConfiguration != nil { + in, out := &in.LoggingConfiguration, &out.LoggingConfiguration + *out = make([]LoggingConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxWorkers != nil { + in, out := &in.MaxWorkers, &out.MaxWorkers + *out = 
new(float64) + **out = **in + } + if in.MinWorkers != nil { + in, out := &in.MinWorkers, &out.MinWorkers + *out = new(float64) + **out = **in + } + if in.NetworkConfiguration != nil { + in, out := &in.NetworkConfiguration, &out.NetworkConfiguration + *out = make([]NetworkConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PluginsS3ObjectVersion != nil { + in, out := &in.PluginsS3ObjectVersion, &out.PluginsS3ObjectVersion + *out = new(string) + **out = **in + } + if in.PluginsS3Path != nil { + in, out := &in.PluginsS3Path, &out.PluginsS3Path + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RequirementsS3ObjectVersion != nil { + in, out := &in.RequirementsS3ObjectVersion, &out.RequirementsS3ObjectVersion + *out = new(string) + **out = **in + } + if in.RequirementsS3Path != nil { + in, out := &in.RequirementsS3Path, &out.RequirementsS3Path + *out = new(string) + **out = **in + } + if in.Schedulers != nil { + in, out := &in.Schedulers, &out.Schedulers + *out = new(float64) + **out = **in + } + if in.SourceBucketArn != nil { + in, out := &in.SourceBucketArn, &out.SourceBucketArn + *out = new(string) + **out = **in + } + if in.SourceBucketArnRef != nil { + in, out := &in.SourceBucketArnRef, &out.SourceBucketArnRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SourceBucketArnSelector != nil { + in, out := &in.SourceBucketArnSelector, &out.SourceBucketArnSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StartupScriptS3ObjectVersion != nil { + in, out := &in.StartupScriptS3ObjectVersion, &out.StartupScriptS3ObjectVersion + *out = new(string) + **out = **in + } + if in.StartupScriptS3Path != nil { + in, out := &in.StartupScriptS3Path, &out.StartupScriptS3Path + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WebserverAccessMode != nil { + in, out := &in.WebserverAccessMode, &out.WebserverAccessMode + *out = new(string) + **out = **in + } + if in.WeeklyMaintenanceWindowStart != nil { + in, out := &in.WeeklyMaintenanceWindowStart, &out.WeeklyMaintenanceWindowStart + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentParameters. +func (in *EnvironmentParameters) DeepCopy() *EnvironmentParameters { + if in == nil { + return nil + } + out := new(EnvironmentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentSpec) DeepCopyInto(out *EnvironmentSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentSpec. 
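+// Note: DeepCopy allocates a new EnvironmentSpec and delegates to
+// DeepCopyInto, so callers get a fully detached object graph. A hedged
+// usage sketch (variable names are hypothetical):
+//
+//	spec := env.Spec.DeepCopy()
+//	spec.ForProvider.DagS3Path = &newPath // env.Spec is unaffected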
+func (in *EnvironmentSpec) DeepCopy() *EnvironmentSpec { + if in == nil { + return nil + } + out := new(EnvironmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentStatus) DeepCopyInto(out *EnvironmentStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentStatus. +func (in *EnvironmentStatus) DeepCopy() *EnvironmentStatus { + if in == nil { + return nil + } + out := new(EnvironmentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ErrorInitParameters) DeepCopyInto(out *ErrorInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorInitParameters. +func (in *ErrorInitParameters) DeepCopy() *ErrorInitParameters { + if in == nil { + return nil + } + out := new(ErrorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ErrorObservation) DeepCopyInto(out *ErrorObservation) { + *out = *in + if in.ErrorCode != nil { + in, out := &in.ErrorCode, &out.ErrorCode + *out = new(string) + **out = **in + } + if in.ErrorMessage != nil { + in, out := &in.ErrorMessage, &out.ErrorMessage + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorObservation. +func (in *ErrorObservation) DeepCopy() *ErrorObservation { + if in == nil { + return nil + } + out := new(ErrorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ErrorParameters) DeepCopyInto(out *ErrorParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorParameters. +func (in *ErrorParameters) DeepCopy() *ErrorParameters { + if in == nil { + return nil + } + out := new(ErrorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LastUpdatedInitParameters) DeepCopyInto(out *LastUpdatedInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastUpdatedInitParameters. +func (in *LastUpdatedInitParameters) DeepCopy() *LastUpdatedInitParameters { + if in == nil { + return nil + } + out := new(LastUpdatedInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LastUpdatedObservation) DeepCopyInto(out *LastUpdatedObservation) { + *out = *in + if in.CreatedAt != nil { + in, out := &in.CreatedAt, &out.CreatedAt + *out = new(string) + **out = **in + } + if in.Error != nil { + in, out := &in.Error, &out.Error + *out = make([]ErrorObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastUpdatedObservation. +func (in *LastUpdatedObservation) DeepCopy() *LastUpdatedObservation { + if in == nil { + return nil + } + out := new(LastUpdatedObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LastUpdatedParameters) DeepCopyInto(out *LastUpdatedParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastUpdatedParameters. +func (in *LastUpdatedParameters) DeepCopy() *LastUpdatedParameters { + if in == nil { + return nil + } + out := new(LastUpdatedParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfigurationInitParameters) DeepCopyInto(out *LoggingConfigurationInitParameters) { + *out = *in + if in.DagProcessingLogs != nil { + in, out := &in.DagProcessingLogs, &out.DagProcessingLogs + *out = make([]DagProcessingLogsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SchedulerLogs != nil { + in, out := &in.SchedulerLogs, &out.SchedulerLogs + *out = make([]SchedulerLogsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TaskLogs != nil { + in, out := &in.TaskLogs, &out.TaskLogs + *out = make([]TaskLogsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WebserverLogs != nil { + in, out := &in.WebserverLogs, &out.WebserverLogs + *out = make([]WebserverLogsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WorkerLogs != nil { + in, out := &in.WorkerLogs, &out.WorkerLogs + *out = make([]WorkerLogsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigurationInitParameters. +func (in *LoggingConfigurationInitParameters) DeepCopy() *LoggingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(LoggingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoggingConfigurationObservation) DeepCopyInto(out *LoggingConfigurationObservation) { + *out = *in + if in.DagProcessingLogs != nil { + in, out := &in.DagProcessingLogs, &out.DagProcessingLogs + *out = make([]DagProcessingLogsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SchedulerLogs != nil { + in, out := &in.SchedulerLogs, &out.SchedulerLogs + *out = make([]SchedulerLogsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TaskLogs != nil { + in, out := &in.TaskLogs, &out.TaskLogs + *out = make([]TaskLogsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WebserverLogs != nil { + in, out := &in.WebserverLogs, &out.WebserverLogs + *out = make([]WebserverLogsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WorkerLogs != nil { + in, out := &in.WorkerLogs, &out.WorkerLogs + *out = make([]WorkerLogsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigurationObservation. +func (in *LoggingConfigurationObservation) DeepCopy() *LoggingConfigurationObservation { + if in == nil { + return nil + } + out := new(LoggingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfigurationParameters) DeepCopyInto(out *LoggingConfigurationParameters) { + *out = *in + if in.DagProcessingLogs != nil { + in, out := &in.DagProcessingLogs, &out.DagProcessingLogs + *out = make([]DagProcessingLogsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SchedulerLogs != nil { + in, out := &in.SchedulerLogs, &out.SchedulerLogs + *out = make([]SchedulerLogsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TaskLogs != nil { + in, out := &in.TaskLogs, &out.TaskLogs + *out = make([]TaskLogsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WebserverLogs != nil { + in, out := &in.WebserverLogs, &out.WebserverLogs + *out = make([]WebserverLogsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WorkerLogs != nil { + in, out := &in.WorkerLogs, &out.WorkerLogs + *out = make([]WorkerLogsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigurationParameters. +func (in *LoggingConfigurationParameters) DeepCopy() *LoggingConfigurationParameters { + if in == nil { + return nil + } + out := new(LoggingConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkConfigurationInitParameters) DeepCopyInto(out *NetworkConfigurationInitParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfigurationInitParameters. +func (in *NetworkConfigurationInitParameters) DeepCopy() *NetworkConfigurationInitParameters { + if in == nil { + return nil + } + out := new(NetworkConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkConfigurationObservation) DeepCopyInto(out *NetworkConfigurationObservation) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfigurationObservation. +func (in *NetworkConfigurationObservation) DeepCopy() *NetworkConfigurationObservation { + if in == nil { + return nil + } + out := new(NetworkConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkConfigurationParameters) DeepCopyInto(out *NetworkConfigurationParameters) { + *out = *in + if in.SecurityGroupIds != nil { + in, out := &in.SecurityGroupIds, &out.SecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetIds != nil { + in, out := &in.SubnetIds, &out.SubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfigurationParameters. +func (in *NetworkConfigurationParameters) DeepCopy() *NetworkConfigurationParameters { + if in == nil { + return nil + } + out := new(NetworkConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchedulerLogsInitParameters) DeepCopyInto(out *SchedulerLogsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerLogsInitParameters. +func (in *SchedulerLogsInitParameters) DeepCopy() *SchedulerLogsInitParameters { + if in == nil { + return nil + } + out := new(SchedulerLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulerLogsObservation) DeepCopyInto(out *SchedulerLogsObservation) { + *out = *in + if in.CloudWatchLogGroupArn != nil { + in, out := &in.CloudWatchLogGroupArn, &out.CloudWatchLogGroupArn + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerLogsObservation. +func (in *SchedulerLogsObservation) DeepCopy() *SchedulerLogsObservation { + if in == nil { + return nil + } + out := new(SchedulerLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulerLogsParameters) DeepCopyInto(out *SchedulerLogsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerLogsParameters. +func (in *SchedulerLogsParameters) DeepCopy() *SchedulerLogsParameters { + if in == nil { + return nil + } + out := new(SchedulerLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskLogsInitParameters) DeepCopyInto(out *TaskLogsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskLogsInitParameters. +func (in *TaskLogsInitParameters) DeepCopy() *TaskLogsInitParameters { + if in == nil { + return nil + } + out := new(TaskLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TaskLogsObservation) DeepCopyInto(out *TaskLogsObservation) { + *out = *in + if in.CloudWatchLogGroupArn != nil { + in, out := &in.CloudWatchLogGroupArn, &out.CloudWatchLogGroupArn + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskLogsObservation. +func (in *TaskLogsObservation) DeepCopy() *TaskLogsObservation { + if in == nil { + return nil + } + out := new(TaskLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskLogsParameters) DeepCopyInto(out *TaskLogsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskLogsParameters. +func (in *TaskLogsParameters) DeepCopy() *TaskLogsParameters { + if in == nil { + return nil + } + out := new(TaskLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebserverLogsInitParameters) DeepCopyInto(out *WebserverLogsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebserverLogsInitParameters. +func (in *WebserverLogsInitParameters) DeepCopy() *WebserverLogsInitParameters { + if in == nil { + return nil + } + out := new(WebserverLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebserverLogsObservation) DeepCopyInto(out *WebserverLogsObservation) { + *out = *in + if in.CloudWatchLogGroupArn != nil { + in, out := &in.CloudWatchLogGroupArn, &out.CloudWatchLogGroupArn + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebserverLogsObservation. +func (in *WebserverLogsObservation) DeepCopy() *WebserverLogsObservation { + if in == nil { + return nil + } + out := new(WebserverLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebserverLogsParameters) DeepCopyInto(out *WebserverLogsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebserverLogsParameters. +func (in *WebserverLogsParameters) DeepCopy() *WebserverLogsParameters { + if in == nil { + return nil + } + out := new(WebserverLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerLogsInitParameters) DeepCopyInto(out *WorkerLogsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerLogsInitParameters. +func (in *WorkerLogsInitParameters) DeepCopy() *WorkerLogsInitParameters { + if in == nil { + return nil + } + out := new(WorkerLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerLogsObservation) DeepCopyInto(out *WorkerLogsObservation) { + *out = *in + if in.CloudWatchLogGroupArn != nil { + in, out := &in.CloudWatchLogGroupArn, &out.CloudWatchLogGroupArn + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerLogsObservation. +func (in *WorkerLogsObservation) DeepCopy() *WorkerLogsObservation { + if in == nil { + return nil + } + out := new(WorkerLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerLogsParameters) DeepCopyInto(out *WorkerLogsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerLogsParameters. +func (in *WorkerLogsParameters) DeepCopy() *WorkerLogsParameters { + if in == nil { + return nil + } + out := new(WorkerLogsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/mwaa/v1beta1/zz_generated.managed.go b/apis/mwaa/v1beta1/zz_generated.managed.go new file mode 100644 index 0000000000..fb4c761e3b --- /dev/null +++ b/apis/mwaa/v1beta1/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Environment. 
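+// Together with the setters below, these accessors satisfy crossplane-
+// runtime's resource.Managed interface; the generic managed reconciler
+// drives the resource only through them, roughly (illustrative sketch):
+//
+//	mg.SetConditions(xpv1.Available())
+//	ready := mg.GetCondition(xpv1.TypeReady)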
+func (mg *Environment) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Environment. +func (mg *Environment) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Environment. +func (mg *Environment) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Environment. +func (mg *Environment) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Environment. +func (mg *Environment) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Environment. +func (mg *Environment) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Environment. +func (mg *Environment) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Environment. +func (mg *Environment) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Environment. +func (mg *Environment) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Environment. +func (mg *Environment) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Environment. +func (mg *Environment) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Environment. +func (mg *Environment) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/mwaa/v1beta1/zz_generated.managedlist.go b/apis/mwaa/v1beta1/zz_generated.managedlist.go new file mode 100644 index 0000000000..d1114b20bd --- /dev/null +++ b/apis/mwaa/v1beta1/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this EnvironmentList. +func (l *EnvironmentList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/mwaa/v1beta1/zz_generated.resolvers.go b/apis/mwaa/v1beta1/zz_generated.resolvers.go new file mode 100644 index 0000000000..5512384c26 --- /dev/null +++ b/apis/mwaa/v1beta1/zz_generated.resolvers.go @@ -0,0 +1,145 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
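+// Note: ResolveReferences below runs before each reconcile and fills in
+// executionRoleArn, kmsKey and sourceBucketArn from other managed
+// resources. An illustrative YAML sketch of the two ways a user can
+// trigger resolution (names are hypothetical):
+//
+//	spec:
+//	  forProvider:
+//	    executionRoleArnRef:     # resolve from a specific Role
+//	      name: my-airflow-role
+//	    sourceBucketArnSelector: # or resolve by label selection
+//	      matchLabels:
+//	        team: data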
+ +package v1beta1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + common "github.com/upbound/provider-aws/config/common" + apisresolver "github.com/upbound/provider-aws/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *Environment) ResolveReferences( // ResolveReferences of this Environment. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ExecutionRoleArn), + Extract: common.ARNExtractor(), + Reference: mg.Spec.ForProvider.ExecutionRoleArnRef, + Selector: mg.Spec.ForProvider.ExecutionRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ExecutionRoleArn") + } + mg.Spec.ForProvider.ExecutionRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ExecutionRoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KMSKey), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KMSKeyRef, + Selector: mg.Spec.ForProvider.KMSKeySelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KMSKey") + } + mg.Spec.ForProvider.KMSKey = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KMSKeyRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SourceBucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.ForProvider.SourceBucketArnRef, + Selector: mg.Spec.ForProvider.SourceBucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SourceBucketArn") + } + mg.Spec.ForProvider.SourceBucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SourceBucketArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("iam.aws.upbound.io", "v1beta1", "Role", "RoleList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ExecutionRoleArn), + Extract: common.ARNExtractor(), + Reference: 
mg.Spec.InitProvider.ExecutionRoleArnRef, + Selector: mg.Spec.InitProvider.ExecutionRoleArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ExecutionRoleArn") + } + mg.Spec.InitProvider.ExecutionRoleArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ExecutionRoleArnRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kms.aws.upbound.io", "v1beta1", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KMSKey), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.KMSKeyRef, + Selector: mg.Spec.InitProvider.KMSKeySelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KMSKey") + } + mg.Spec.InitProvider.KMSKey = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KMSKeyRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("s3.aws.upbound.io", "v1beta1", "Bucket", "BucketList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SourceBucketArn), + Extract: resource.ExtractParamPath("arn", true), + Reference: mg.Spec.InitProvider.SourceBucketArnRef, + Selector: mg.Spec.InitProvider.SourceBucketArnSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SourceBucketArn") + } + mg.Spec.InitProvider.SourceBucketArn = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SourceBucketArnRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/mwaa/v1beta1/zz_groupversion_info.go b/apis/mwaa/v1beta1/zz_groupversion_info.go new file mode 100755 index 0000000000..0265dcc953 --- /dev/null +++ b/apis/mwaa/v1beta1/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=mwaa.aws.upbound.io +// +versionName=v1beta1 +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "mwaa.aws.upbound.io" + CRDVersion = "v1beta1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/zz_register.go b/apis/zz_register.go index 5f1163f003..6461ed951c 100755 --- a/apis/zz_register.go +++ b/apis/zz_register.go @@ -126,6 +126,7 @@ import ( v1beta1memorydb "github.com/upbound/provider-aws/apis/memorydb/v1beta1" v1alpha1 "github.com/upbound/provider-aws/apis/mq/v1alpha1" v1beta1mq "github.com/upbound/provider-aws/apis/mq/v1beta1" + v1beta1mwaa "github.com/upbound/provider-aws/apis/mwaa/v1beta1" v1beta1neptune "github.com/upbound/provider-aws/apis/neptune/v1beta1" v1beta1networkfirewall "github.com/upbound/provider-aws/apis/networkfirewall/v1beta1" v1beta1networkmanager "github.com/upbound/provider-aws/apis/networkmanager/v1beta1" @@ -301,6 +302,7 @@ func init() { v1beta1memorydb.SchemeBuilder.AddToScheme, v1alpha1.SchemeBuilder.AddToScheme, v1beta1mq.SchemeBuilder.AddToScheme, + v1beta1mwaa.SchemeBuilder.AddToScheme, v1beta1neptune.SchemeBuilder.AddToScheme, v1beta1networkfirewall.SchemeBuilder.AddToScheme, v1beta1networkmanager.SchemeBuilder.AddToScheme, diff --git a/cmd/provider/mwaa/zz_main.go b/cmd/provider/mwaa/zz_main.go new file mode 100644 index 0000000000..252518c4e5 --- /dev/null +++ b/cmd/provider/mwaa/zz_main.go @@ -0,0 +1,223 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +package main + +import ( + "context" + "fmt" + "io" + "log" + "os" + "path/filepath" + "time" + + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + "github.com/crossplane/crossplane-runtime/pkg/certificates" + xpcontroller "github.com/crossplane/crossplane-runtime/pkg/controller" + "github.com/crossplane/crossplane-runtime/pkg/feature" + "github.com/crossplane/crossplane-runtime/pkg/logging" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" + "gopkg.in/alecthomas/kingpin.v2" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/leaderelection/resourcelock" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/metrics" + "sigs.k8s.io/controller-runtime/pkg/webhook" + + "github.com/upbound/provider-aws/apis" + "github.com/upbound/provider-aws/apis/v1alpha1" + "github.com/upbound/provider-aws/config" + resolverapis "github.com/upbound/provider-aws/internal/apis" + "github.com/upbound/provider-aws/internal/clients" + "github.com/upbound/provider-aws/internal/controller" + "github.com/upbound/provider-aws/internal/features" +) + +const ( + webhookTLSCertDirEnvVar = "WEBHOOK_TLS_CERT_DIR" + tlsServerCertDirEnvVar = "TLS_SERVER_CERTS_DIR" + certsDirEnvVar = "CERTS_DIR" + tlsServerCertDir = "/tls/server" +) + +func deprecationAction(flagName string) kingpin.Action { + return func(c *kingpin.ParseContext) error { + _, err := fmt.Fprintf(os.Stderr, "warning: Command-line flag %q is deprecated and no longer used. It will be removed in a future release. 
Please remove it from all of your configurations (ControllerConfigs, etc.).\n", flagName) + kingpin.FatalIfError(err, "Failed to print the deprecation notice.") + return nil + } +} + +func main() { + var ( + app = kingpin.New(filepath.Base(os.Args[0]), "AWS support for Crossplane.").DefaultEnvars() + debug = app.Flag("debug", "Run with debug logging.").Short('d').Bool() + syncInterval = app.Flag("sync", "Sync interval controls how often all resources will be double checked for drift.").Short('s').Default("1h").Duration() + pollInterval = app.Flag("poll", "Poll interval controls how often an individual resource should be checked for drift.").Default("10m").Duration() + pollStateMetricInterval = app.Flag("poll-state-metric", "State metric recording interval").Default("5s").Duration() + leaderElection = app.Flag("leader-election", "Use leader election for the controller manager.").Short('l').Default("false").OverrideDefaultFromEnvar("LEADER_ELECTION").Bool() + maxReconcileRate = app.Flag("max-reconcile-rate", "The global maximum rate per second at which resources may be checked for drift from the desired state.").Default("100").Int() + + namespace = app.Flag("namespace", "Namespace used to set as default scope in default secret store config.").Default("crossplane-system").Envar("POD_NAMESPACE").String() + enableExternalSecretStores = app.Flag("enable-external-secret-stores", "Enable support for ExternalSecretStores.").Default("false").Envar("ENABLE_EXTERNAL_SECRET_STORES").Bool() + essTLSCertsPath = app.Flag("ess-tls-cert-dir", "Path of ESS TLS certificates.").Envar("ESS_TLS_CERTS_DIR").String() + enableManagementPolicies = app.Flag("enable-management-policies", "Enable support for Management Policies.").Default("true").Envar("ENABLE_MANAGEMENT_POLICIES").Bool() + + certsDirSet = false + // we record whether the command-line option "--certs-dir" was supplied + // in the registered PreAction for the flag. + certsDir = app.Flag("certs-dir", "The directory that contains the server key and certificate.").Default(tlsServerCertDir).Envar(certsDirEnvVar).PreAction(func(_ *kingpin.ParseContext) error { + certsDirSet = true + return nil + }).String() + + // now deprecated command-line arguments with the Terraform SDK-based upjet architecture + _ = app.Flag("provider-ttl", "[DEPRECATED: This option is no longer used and it will be removed in a future release.] TTL for the native plugin processes before they are replaced. Changing the default may increase memory consumption.").Hidden().Action(deprecationAction("provider-ttl")).Int() + _ = app.Flag("terraform-version", "[DEPRECATED: This option is no longer used and it will be removed in a future release.] Terraform version.").Envar("TERRAFORM_VERSION").Hidden().Action(deprecationAction("terraform-version")).String() + _ = app.Flag("terraform-provider-version", "[DEPRECATED: This option is no longer used and it will be removed in a future release.] Terraform provider version.").Envar("TERRAFORM_PROVIDER_VERSION").Hidden().Action(deprecationAction("terraform-provider-version")).String() + _ = app.Flag("terraform-native-provider-path", "[DEPRECATED: This option is no longer used and it will be removed in a future release.] Terraform native provider path for shared execution.").Envar("TERRAFORM_NATIVE_PROVIDER_PATH").Hidden().Action(deprecationAction("terraform-native-provider-path")).String() + _ = app.Flag("terraform-provider-source", "[DEPRECATED: This option is no longer used and it will be removed in a future release.] 
Terraform provider source.").Envar("TERRAFORM_PROVIDER_SOURCE").Hidden().Action(deprecationAction("terraform-provider-source")).String() + ) + kingpin.MustParse(app.Parse(os.Args[1:])) + log.Default().SetOutput(io.Discard) + ctrl.SetLogger(zap.New(zap.WriteTo(io.Discard))) + + zl := zap.New(zap.UseDevMode(*debug)) + logr := logging.NewLogrLogger(zl.WithName("provider-aws")) + if *debug { + // The controller-runtime runs with a no-op logger by default. It is + // *very* verbose even at info level, so we only provide it a real + // logger when we're running in debug mode. + ctrl.SetLogger(zl) + } + + // currently, we configure the jitter to be the 5% of the poll interval + pollJitter := time.Duration(float64(*pollInterval) * 0.05) + logr.Debug("Starting", "sync-interval", syncInterval.String(), + "poll-interval", pollInterval.String(), "poll-jitter", pollJitter, "max-reconcile-rate", *maxReconcileRate) + + cfg, err := ctrl.GetConfig() + kingpin.FatalIfError(err, "Cannot get API server rest config") + + // Get the TLS certs directory from the environment variables set by + // Crossplane if they're available. + // In older XP versions we used WEBHOOK_TLS_CERT_DIR, in newer versions + // we use TLS_SERVER_CERTS_DIR. If an explicit certs dir is not supplied + // via the command-line options, then these environment variables are used + // instead. + if !certsDirSet { + // backwards-compatibility concerns + xpCertsDir := os.Getenv(certsDirEnvVar) + if xpCertsDir == "" { + xpCertsDir = os.Getenv(tlsServerCertDirEnvVar) + } + if xpCertsDir == "" { + xpCertsDir = os.Getenv(webhookTLSCertDirEnvVar) + } + // we probably don't need this condition but just to be on the + // safe side, if we are missing any kingpin machinery details... + if xpCertsDir != "" { + *certsDir = xpCertsDir + } + } + + mgr, err := ctrl.NewManager(ratelimiter.LimitRESTConfig(cfg, *maxReconcileRate), ctrl.Options{ + LeaderElection: *leaderElection, + LeaderElectionID: "crossplane-leader-election-provider-aws-mwaa", + Cache: cache.Options{ + SyncPeriod: syncInterval, + }, + WebhookServer: webhook.NewServer( + webhook.Options{ + CertDir: *certsDir, + }), + LeaderElectionResourceLock: resourcelock.LeasesResourceLock, + LeaseDuration: func() *time.Duration { d := 60 * time.Second; return &d }(), + RenewDeadline: func() *time.Duration { d := 50 * time.Second; return &d }(), + }) + kingpin.FatalIfError(err, "Cannot create controller manager") + kingpin.FatalIfError(apis.AddToScheme(mgr.GetScheme()), "Cannot add AWS APIs to scheme") + kingpin.FatalIfError(resolverapis.BuildScheme(apis.AddToSchemes), "Cannot register the AWS APIs with the API resolver's runtime scheme") + + metricRecorder := managed.NewMRMetricRecorder() + stateMetrics := statemetrics.NewMRStateMetrics() + + metrics.Registry.MustRegister(metricRecorder) + metrics.Registry.MustRegister(stateMetrics) + + ctx := context.Background() + provider, err := config.GetProvider(ctx, false) + kingpin.FatalIfError(err, "Cannot initialize the provider configuration") + setupConfig := &clients.SetupConfig{ + Logger: logr, + TerraformProvider: provider.TerraformProvider, + } + o := tjcontroller.Options{ + Options: xpcontroller.Options{ + Logger: logr, + GlobalRateLimiter: ratelimiter.NewGlobal(*maxReconcileRate), + PollInterval: *pollInterval, + MaxConcurrentReconciles: *maxReconcileRate, + Features: &feature.Flags{}, + MetricOptions: &xpcontroller.MetricOptions{ + PollStateMetricInterval: *pollStateMetricInterval, + MRMetrics: metricRecorder, + MRStateMetrics: stateMetrics, + }, + }, + 
+			Provider:              provider,
+			SetupFn:               clients.SelectTerraformSetup(setupConfig),
+			PollJitter:            pollJitter,
+			OperationTrackerStore: tjcontroller.NewOperationStore(logr),
+			StartWebhooks:         *certsDir != "",
+	}
+
+	if *enableManagementPolicies {
+		o.Features.Enable(features.EnableBetaManagementPolicies)
+		logr.Info("Beta feature enabled", "flag", features.EnableBetaManagementPolicies)
+	}
+
+	if *enableExternalSecretStores {
+		o.SecretStoreConfigGVK = &v1alpha1.StoreConfigGroupVersionKind
+		logr.Info("Alpha feature enabled", "flag", features.EnableAlphaExternalSecretStores)
+
+		o.ESSOptions = &tjcontroller.ESSOptions{}
+		if *essTLSCertsPath != "" {
+			logr.Info("ESS TLS certificates path is set. Loading mTLS configuration.")
+			tCfg, err := certificates.LoadMTLSConfig(filepath.Join(*essTLSCertsPath, "ca.crt"), filepath.Join(*essTLSCertsPath, "tls.crt"), filepath.Join(*essTLSCertsPath, "tls.key"), false)
+			kingpin.FatalIfError(err, "Cannot load ESS TLS config.")
+
+			o.ESSOptions.TLSConfig = tCfg
+		}
+
+		// Ensure default store config exists.
+		kingpin.FatalIfError(resource.Ignore(kerrors.IsAlreadyExists, mgr.GetClient().Create(ctx, &v1alpha1.StoreConfig{
+			TypeMeta: metav1.TypeMeta{},
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "default",
+			},
+			Spec: v1alpha1.StoreConfigSpec{
+				// NOTE(turkenh): We only set required spec and expect optional
+				// ones to properly be initialized with CRD level default values.
+				SecretStoreConfig: xpv1.SecretStoreConfig{
+					DefaultScope: *namespace,
+				},
+			},
+			Status: v1alpha1.StoreConfigStatus{},
+		})), "cannot create default store config")
+	}
+
+	kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry")
+	kingpin.FatalIfError(controller.Setup_mwaa(mgr, o), "Cannot setup AWS controllers")
+	kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager")
+}
diff --git a/config/externalname.go b/config/externalname.go
index 80abdf2e54..7522df4f44 100644
--- a/config/externalname.go
+++ b/config/externalname.go
@@ -77,6 +77,11 @@ var TerraformPluginFrameworkExternalNameConfigs = map[string]config.ExternalName
 // belonging to Terraform Plugin SDKv2 resources to be reconciled
 // under the no-fork architecture for this provider.
 var TerraformPluginSDKExternalNameConfigs = map[string]config.ExternalName{
+	// mwaa
+	//
+	// mwaa_environment can be imported using the name
+	"aws_mwaa_environment": config.NameAsIdentifier,
+
 	// ACM
 	// Imported using ARN that has a random substring:
 	// arn:aws:acm:eu-central-1:123456789012:certificate/7e7a28d2-163f-4b8f-b9cd-822f96c08d6a
diff --git a/config/generated.lst b/config/generated.lst
index 4fb69ab319..fa5a8082f1 100644
--- a/config/generated.lst
+++ b/config/generated.lst
@@ -588,6 +588,7 @@
 "aws_mskconnect_connector",
 "aws_mskconnect_custom_plugin",
 "aws_mskconnect_worker_configuration",
+"aws_mwaa_environment",
 "aws_nat_gateway",
 "aws_neptune_cluster",
 "aws_neptune_cluster_endpoint",
diff --git a/config/mwaa/config.go b/config/mwaa/config.go
new file mode 100644
index 0000000000..f9ce6065a9
--- /dev/null
+++ b/config/mwaa/config.go
@@ -0,0 +1,35 @@
+package mwaa
+
+import (
+	"github.com/crossplane/upjet/pkg/config"
+	"github.com/upbound/provider-aws/config/common"
+)
+
+// Configure adds configurations for the mwaa group.
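+//
+// Each entry in r.References makes upjet generate companion Ref and
+// Selector fields on the CRD, and r.UseAsync switches the resource to
+// asynchronous reconciliation, which suits MWAA environments that can
+// take tens of minutes to provision. An illustrative sketch of the spec
+// fields this configuration yields (values are hypothetical):
+//
+//	forProvider:
+//	  executionRoleArnRef:
+//	    name: sample-role
+//	  networkConfiguration:
+//	    - subnetIdRefs:
+//	        - name: subnet-a
+//	        - name: subnet-b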
+func Configure(p *config.Provider) {
+	p.AddResourceConfigurator("aws_mwaa_environment", func(r *config.Resource) {
+		r.References = config.References{
+			"execution_role_arn": config.Reference{
+				TerraformName: "aws_iam_role",
+				Extractor:     common.PathARNExtractor,
+			},
+			"network_configuration.subnet_ids": config.Reference{
+				TerraformName:     "aws_subnet",
+				RefFieldName:      "SubnetIDRefs",
+				SelectorFieldName: "SubnetIDSelector",
+			},
+			"network_configuration.security_group_ids": config.Reference{
+				TerraformName:     "aws_security_group",
+				RefFieldName:      "SecurityGroupIDRefs",
+				SelectorFieldName: "SecurityGroupIDSelector",
+			},
+			"source_bucket_arn": config.Reference{
+				TerraformName: "aws_s3_bucket",
+			},
+		}
+		r.UseAsync = true
+	})
+}
diff --git a/config/registry.go b/config/registry.go
index 6c6b5bae86..d55e26d829 100644
--- a/config/registry.go
+++ b/config/registry.go
@@ -33,9 +33,9 @@ var (
 )
 
 var skipList = []string{
-	"aws_waf_rule_group$",         // Too big CRD schema
-	"aws_wafregional_rule_group$", // Too big CRD schema
-	"aws_mwaa_environment$",       // See https://github.com/crossplane-contrib/terrajet/issues/100
+	"aws_waf_rule_group$",         // Too big CRD schema
+	"aws_wafregional_rule_group$", // Too big CRD schema
+	"aws_ecs_tag$",                // tags are already managed by ecs resources.
 	"aws_alb$",                         // identical with aws_lb
 	"aws_alb_target_group_attachment$", // identical with aws_lb_target_group_attachment
diff --git a/examples-generated/mwaa/v1beta1/environment.yaml b/examples-generated/mwaa/v1beta1/environment.yaml
new file mode 100644
index 0000000000..b9451b8bc6
--- /dev/null
+++ b/examples-generated/mwaa/v1beta1/environment.yaml
@@ -0,0 +1,22 @@
+apiVersion: mwaa.aws.upbound.io/v1beta1
+kind: Environment
+metadata:
+  annotations:
+    meta.upbound.io/example-id: mwaa/v1beta1/environment
+  labels:
+    testing.upbound.io/example-name: example
+  name: example
+spec:
+  forProvider:
+    dagS3Path: dags/
+    executionRoleArnSelector:
+      matchLabels:
+        testing.upbound.io/example-name: example
+    networkConfiguration:
+    - securityGroupIds:
+      - ${aws_security_group.example.id}
+      subnetIds: ${aws_subnet.private[*].id}
+    region: us-west-1
+    sourceBucketArnSelector:
+      matchLabels:
+        testing.upbound.io/example-name: example
diff --git a/examples/mwaa/environment.yaml b/examples/mwaa/environment.yaml
new file mode 100644
index 0000000000..7f7fe2edef
--- /dev/null
+++ b/examples/mwaa/environment.yaml
@@ -0,0 +1,272 @@
+apiVersion: mwaa.aws.upbound.io/v1beta1
+kind: Environment
+metadata:
+  annotations:
+    meta.upbound.io/example-id: mwaa/v1beta1/environment
+  labels:
+    testing.upbound.io/example-name: example
+  name: example
+spec:
+  forProvider:
+    dagS3Path: dags/
+    executionRoleArnSelector:
+      matchLabels:
+        testing.upbound.io/example-name: role
+    networkConfiguration:
+    - securityGroupIdRefs:
+      - name: example
+      subnetIdRefs:
+      - name: sample-subnet1
+      - name: sample-subnet2
+    region: us-west-1
+    sourceBucketArnSelector:
+      matchLabels:
+        testing.upbound.io/example-name: example
+
+---
+
+apiVersion: iam.aws.upbound.io/v1beta1
+kind: Role
+metadata:
+  annotations:
+    meta.upbound.io/example-id: iam/v1beta1/role
+  labels:
+    testing.upbound.io/example-name: role
+  name: sample-role
+spec:
+  forProvider:
+    assumeRolePolicy: |
+      {
+        "Version": "2012-10-17",
+        "Statement": [
+          {
+            "Effect": "Allow",
+            "Principal": {
+              "Service": [
+                "airflow.amazonaws.com",
+                "airflow-env.amazonaws.com"
+              ]
+            },
+            "Action": "sts:AssumeRole"
+          }
+        ]
+      }
+---
+apiVersion: iam.aws.upbound.io/v1beta1
+kind: RolePolicyAttachment
+metadata:
+  annotations:
meta.upbound.io/example-id: iam/v1beta1/role + labels: + testing.upbound.io/example-name: role + name: sample-policy-attachment +spec: + forProvider: + policyArnSelector: + matchLabels: + testing.upbound.io/example-name: role + roleSelector: + matchLabels: + testing.upbound.io/example-name: role +--- +apiVersion: iam.aws.upbound.io/v1beta1 +kind: Policy +metadata: + annotations: + meta.upbound.io/example-id: iam/v1beta1/role + labels: + testing.upbound.io/example-name: role + name: sample-user-policy +spec: + forProvider: + policy: | + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "airflow:PublishMetrics", + "Resource": "arn:aws:airflow:us-west-1:${data.aws_account_id}:environment/example" + }, + { + "Effect": "Deny", + "Action": "s3:ListAllMyBuckets", + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "s3:GetObject*", + "s3:GetBucket*", + "s3:List*" + ], + "Resource": [ + "arn:aws:s3:::versioning-example", + "arn:aws:s3:::versioning-example/*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "logs:CreateLogStream", + "logs:CreateLogGroup", + "logs:PutLogEvents", + "logs:GetLogEvents", + "logs:GetLogRecord", + "logs:GetLogGroupFields", + "logs:GetQueryResults" + ], + "Resource": [ + "arn:aws:logs:us-west-1:${data.aws_account_id}:log-group:airflow-example-*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "logs:DescribeLogGroups" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "s3:GetAccountPublicAccessBlock" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": "cloudwatch:PutMetricData", + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "sqs:ChangeMessageVisibility", + "sqs:DeleteMessage", + "sqs:GetQueueAttributes", + "sqs:GetQueueUrl", + "sqs:ReceiveMessage", + "sqs:SendMessage" + ], + "Resource": "arn:aws:sqs:us-west-1:*:airflow-celery-*" + }, + { + "Effect": "Allow", + "Action": [ + "kms:Decrypt", + "kms:DescribeKey", + "kms:GenerateDataKey*", + "kms:Encrypt" + ], + "NotResource": "arn:aws:kms:*:${data.aws_account_id}:key/*", + "Condition": { + "StringLike": { + "kms:ViaService": [ + "sqs.us-west-1.amazonaws.com" + ] + } + } + } + ] + } + +--- + +apiVersion: s3.aws.upbound.io/v1beta1 +kind: BucketVersioning +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta1/bucketversioning + labels: + testing.upbound.io/example-name: versioning_example + name: versioning-example +spec: + forProvider: + bucketSelector: + matchLabels: + testing.upbound.io/example-name: example + region: us-west-1 + versioningConfiguration: + status: Enabled + +--- + +apiVersion: s3.aws.upbound.io/v1beta1 +kind: Bucket +metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta1/bucket + labels: + testing.upbound.io/example-name: example + name: example-${Rand.RFC1123Subdomain} +spec: + forProvider: + region: us-west-1 + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: SecurityGroup +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta1/securitygroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + region: us-west-1 + description: Allow TLS inbound traffic + name: allow_tls + tags: + Name: allow_tls + vpcIdRef: + name: sample-vpc + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: Subnet +metadata: + name: sample-subnet1 +spec: + forProvider: + region: us-west-1 + availabilityZone: us-west-1b + vpcIdRef: + name: sample-vpc + cidrBlock: 172.16.10.0/24 + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: Subnet 
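+# MWAA requires two private subnets in two different availability zones,
+# which is why this example defines sample-subnet1 (us-west-1b) above and
+# sample-subnet2 (us-west-1c) below in the same VPC.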
+metadata: + name: sample-subnet2 +spec: + forProvider: + region: us-west-1 + availabilityZone: us-west-1c + vpcIdRef: + name: sample-vpc + cidrBlock: 172.16.11.0/24 + +--- + +apiVersion: ec2.aws.upbound.io/v1beta1 +kind: VPC +metadata: + annotations: + meta.upbound.io/example-id: ec2/v1beta1/vpc + labels: + testing.upbound.io/example-name: example + name: sample-vpc +spec: + forProvider: + region: us-west-1 + cidrBlock: 172.16.0.0/16 + tags: + Name: DemoVpc diff --git a/internal/controller/mwaa/environment/zz_controller.go b/internal/controller/mwaa/environment/zz_controller.go new file mode 100755 index 0000000000..9284448c64 --- /dev/null +++ b/internal/controller/mwaa/environment/zz_controller.go @@ -0,0 +1,95 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package environment + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1beta1 "github.com/upbound/provider-aws/apis/mwaa/v1beta1" + features "github.com/upbound/provider-aws/internal/features" +) + +// Setup adds a controller that reconciles Environment managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1beta1.Environment_GroupVersionKind.String()) + var initializers managed.InitializerChain + for _, i := range o.Provider.Resources["aws_mwaa_environment"].InitializerFns { + initializers = append(initializers, i(mgr.GetClient())) + } + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1beta1.Environment_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1beta1.Environment_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["aws_mwaa_environment"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1beta1.Environment_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + 
managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1beta1.Environment + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1beta1.Environment{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1beta1.Environment") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1beta1.EnvironmentList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1beta1.EnvironmentList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1beta1.Environment_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1beta1.Environment{}, eventHandler). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/zz_monolith_setup.go b/internal/controller/zz_monolith_setup.go index ce1dfa73e5..6e00d782ec 100755 --- a/internal/controller/zz_monolith_setup.go +++ b/internal/controller/zz_monolith_setup.go @@ -633,6 +633,7 @@ import ( broker "github.com/upbound/provider-aws/internal/controller/mq/broker" configurationmq "github.com/upbound/provider-aws/internal/controller/mq/configuration" usermq "github.com/upbound/provider-aws/internal/controller/mq/user" + environmentmwaa "github.com/upbound/provider-aws/internal/controller/mwaa/environment" clusterneptune "github.com/upbound/provider-aws/internal/controller/neptune/cluster" clusterendpoint "github.com/upbound/provider-aws/internal/controller/neptune/clusterendpoint" clusterinstanceneptune "github.com/upbound/provider-aws/internal/controller/neptune/clusterinstance" @@ -1579,6 +1580,7 @@ func Setup_monolith(mgr ctrl.Manager, o controller.Options) error { broker.Setup, configurationmq.Setup, usermq.Setup, + environmentmwaa.Setup, clusterneptune.Setup, clusterendpoint.Setup, clusterinstanceneptune.Setup, diff --git a/internal/controller/zz_mwaa_setup.go b/internal/controller/zz_mwaa_setup.go new file mode 100755 index 0000000000..390ac8a913 --- /dev/null +++ b/internal/controller/zz_mwaa_setup.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +package controller + +import ( + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/crossplane/upjet/pkg/controller" + + environment "github.com/upbound/provider-aws/internal/controller/mwaa/environment" +) + +// Setup_mwaa creates all controllers with the supplied logger and adds them to +// the supplied manager. +func Setup_mwaa(mgr ctrl.Manager, o controller.Options) error { + for _, setup := range []func(ctrl.Manager, controller.Options) error{ + environment.Setup, + } { + if err := setup(mgr, o); err != nil { + return err + } + } + return nil +} diff --git a/package/crds/mwaa.aws.upbound.io_environments.yaml b/package/crds/mwaa.aws.upbound.io_environments.yaml new file mode 100644 index 0000000000..76f1522c5f --- /dev/null +++ b/package/crds/mwaa.aws.upbound.io_environments.yaml @@ -0,0 +1,1492 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: environments.mwaa.aws.upbound.io +spec: + group: mwaa.aws.upbound.io + names: + categories: + - crossplane + - managed + - aws + kind: Environment + listKind: EnvironmentList + plural: environments + singular: environment + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: Environment is the Schema for the Environments API. Creates a + MWAA Environment + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: EnvironmentSpec defines the desired state of Environment + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + airflowConfigurationOptionsSecretRef: + description: The airflow_configuration_options parameter specifies + airflow override options. Check the Official documentation for + all possible configuration options. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + airflowVersion: + description: Airflow version of your environment, will be set + by default to the latest version that MWAA supports. + type: string + dagS3Path: + description: The relative path to the DAG folder on your Amazon + S3 storage bucket. For example, dags. For more information, + see Importing DAGs on Amazon MWAA. + type: string + endpointManagement: + type: string + environmentClass: + description: Environment class for the cluster. Possible options + are mw1.small, mw1.medium, mw1.large. Will be set by default + to mw1.small. Please check the AWS Pricing for more information + about the environment classes. + type: string + executionRoleArn: + description: The Amazon Resource Name (ARN) of the task execution + role that the Amazon MWAA and its environment can assume. Check + the official AWS documentation for the detailed role specification. + type: string + executionRoleArnRef: + description: Reference to a Role in iam to populate executionRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + executionRoleArnSelector: + description: Selector for a Role in iam to populate executionRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + kmsKey: + description: The Amazon Resource Name (ARN) of your KMS key that + you want to use for encryption. Will be set to the ARN of the + managed KMS key aws/airflow by default. Please check the Official + Documentation for more information. + type: string + kmsKeyRef: + description: Reference to a Key in kms to populate kmsKey. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeySelector: + description: Selector for a Key in kms to populate kmsKey. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + loggingConfiguration: + description: The Apache Airflow logs you want to send to Amazon + CloudWatch Logs. + items: + properties: + dagProcessingLogs: + description: Log configuration options for processing DAGs. + See Module logging configuration for more information. + Disabled by default. + items: + properties: + enabled: + description: Enabling or disabling the collection + of logs + type: boolean + logLevel: + description: 'Logging level. Valid values: CRITICAL, + ERROR, WARNING, INFO, DEBUG. Will be INFO by default.' + type: string + type: object + type: array + schedulerLogs: + description: Log configuration options for the schedulers. + See Module logging configuration for more information. + Disabled by default. + items: + properties: + enabled: + description: Enabling or disabling the collection + of logs + type: boolean + logLevel: + description: 'Logging level. Valid values: CRITICAL, + ERROR, WARNING, INFO, DEBUG. Will be INFO by default.' + type: string + type: object + type: array + taskLogs: + description: Log configuration options for DAG tasks. See + Module logging configuration for more information. Enabled + by default with INFO log level. + items: + properties: + enabled: + description: Enabling or disabling the collection + of logs + type: boolean + logLevel: + description: 'Logging level. Valid values: CRITICAL, + ERROR, WARNING, INFO, DEBUG. Will be INFO by default.' + type: string + type: object + type: array + webserverLogs: + description: Log configuration options for the webservers. + See Module logging configuration for more information. + Disabled by default. + items: + properties: + enabled: + description: Enabling or disabling the collection + of logs + type: boolean + logLevel: + description: 'Logging level. Valid values: CRITICAL, + ERROR, WARNING, INFO, DEBUG. Will be INFO by default.' + type: string + type: object + type: array + workerLogs: + description: Log configuration options for the workers. + See Module logging configuration for more information. + Disabled by default. + items: + properties: + enabled: + description: Enabling or disabling the collection + of logs + type: boolean + logLevel: + description: 'Logging level. Valid values: CRITICAL, + ERROR, WARNING, INFO, DEBUG. Will be INFO by default.' + type: string + type: object + type: array + type: object + type: array + maxWorkers: + description: The maximum number of workers that can be automatically + scaled up. Value need to be between 1 and 25. Will be 10 by + default. + type: number + minWorkers: + description: The minimum number of workers that you want to run + in your environment. Will be 1 by default. + type: number + networkConfiguration: + description: Specifies the network configuration for your Apache + Airflow Environment. This includes two private subnets as well + as security groups for the Airflow environment. Each subnet + requires internet connection, otherwise the deployment will + fail. See Network configuration below for details. + items: + properties: + securityGroupIds: + description: Security groups IDs for the environment. At + least one of the security group needs to allow MWAA resources + to talk to each other, otherwise MWAA cannot be provisioned. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: The private subnet IDs in which the environment + should be created. MWAA requires two subnets. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + pluginsS3ObjectVersion: + description: The plugins.zip file version you want to use. + type: string + pluginsS3Path: + description: The relative path to the plugins.zip file on your + Amazon S3 storage bucket. For example, plugins.zip. If a relative + path is provided in the request, then plugins_s3_object_version + is required. For more information, see Importing DAGs on Amazon + MWAA. + type: string + region: + description: Region is the region you'd like your resource to + be created in. + type: string + requirementsS3ObjectVersion: + description: The requirements.txt file version you want to use. + type: string + requirementsS3Path: + description: The relative path to the requirements.txt file on + your Amazon S3 storage bucket. For example, requirements.txt. + If a relative path is provided in the request, then requirements_s3_object_version + is required. For more information, see Importing DAGs on Amazon + MWAA. + type: string + schedulers: + description: The number of schedulers that you want to run in + your environment. v2.0.2 and above accepts 2 - 5, default 2. + v1.10.12 accepts 1. + type: number + sourceBucketArn: + description: The Amazon Resource Name (ARN) of your Amazon S3 + storage bucket. For example, arn:aws:s3:::airflow-mybucketname. + type: string + sourceBucketArnRef: + description: Reference to a Bucket in s3 to populate sourceBucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sourceBucketArnSelector: + description: Selector for a Bucket in s3 to populate sourceBucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + startupScriptS3ObjectVersion: + description: The version of the startup shell script you want + to use. You must specify the version ID that Amazon S3 assigns + to the file every time you update the script. + type: string + startupScriptS3Path: + description: The relative path to the script hosted in your bucket. + The script runs as your environment starts before starting the + Apache Airflow process. Use this script to install dependencies, + modify configuration options, and set environment variables. + See Using a startup script. Supported for environment versions + 2.x and later. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + webserverAccessMode: + description: 'Specifies whether the webserver should be accessible + over the internet or via your specified VPC. Possible options: + PRIVATE_ONLY (default) and PUBLIC_ONLY.' + type: string + weeklyMaintenanceWindowStart: + description: Specifies the start date for the weekly maintenance + window. + type: string + required: + - region + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + airflowConfigurationOptionsSecretRef: + additionalProperties: + type: string + type: object + airflowVersion: + description: Airflow version of your environment, will be set + by default to the latest version that MWAA supports. + type: string + dagS3Path: + description: The relative path to the DAG folder on your Amazon + S3 storage bucket. For example, dags. For more information, + see Importing DAGs on Amazon MWAA. + type: string + endpointManagement: + type: string + environmentClass: + description: Environment class for the cluster. Possible options + are mw1.small, mw1.medium, mw1.large. Will be set by default + to mw1.small. Please check the AWS Pricing for more information + about the environment classes. + type: string + executionRoleArn: + description: The Amazon Resource Name (ARN) of the task execution + role that the Amazon MWAA and its environment can assume. Check + the official AWS documentation for the detailed role specification. + type: string + executionRoleArnRef: + description: Reference to a Role in iam to populate executionRoleArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + executionRoleArnSelector: + description: Selector for a Role in iam to populate executionRoleArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + kmsKey: + description: The Amazon Resource Name (ARN) of your KMS key that + you want to use for encryption. Will be set to the ARN of the + managed KMS key aws/airflow by default. Please check the Official + Documentation for more information. + type: string + kmsKeyRef: + description: Reference to a Key in kms to populate kmsKey. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kmsKeySelector: + description: Selector for a Key in kms to populate kmsKey. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + loggingConfiguration: + description: The Apache Airflow logs you want to send to Amazon + CloudWatch Logs. + items: + properties: + dagProcessingLogs: + description: Log configuration options for processing DAGs. + See Module logging configuration for more information. + Disabled by default. + items: + properties: + enabled: + description: Enabling or disabling the collection + of logs + type: boolean + logLevel: + description: 'Logging level. Valid values: CRITICAL, + ERROR, WARNING, INFO, DEBUG. Will be INFO by default.' + type: string + type: object + type: array + schedulerLogs: + description: Log configuration options for the schedulers. + See Module logging configuration for more information. + Disabled by default. + items: + properties: + enabled: + description: Enabling or disabling the collection + of logs + type: boolean + logLevel: + description: 'Logging level. Valid values: CRITICAL, + ERROR, WARNING, INFO, DEBUG. Will be INFO by default.' + type: string + type: object + type: array + taskLogs: + description: Log configuration options for DAG tasks. See + Module logging configuration for more information. Enabled + by default with INFO log level. + items: + properties: + enabled: + description: Enabling or disabling the collection + of logs + type: boolean + logLevel: + description: 'Logging level. Valid values: CRITICAL, + ERROR, WARNING, INFO, DEBUG. Will be INFO by default.' + type: string + type: object + type: array + webserverLogs: + description: Log configuration options for the webservers. + See Module logging configuration for more information. + Disabled by default. + items: + properties: + enabled: + description: Enabling or disabling the collection + of logs + type: boolean + logLevel: + description: 'Logging level. Valid values: CRITICAL, + ERROR, WARNING, INFO, DEBUG. Will be INFO by default.' + type: string + type: object + type: array + workerLogs: + description: Log configuration options for the workers. + See Module logging configuration for more information. + Disabled by default. + items: + properties: + enabled: + description: Enabling or disabling the collection + of logs + type: boolean + logLevel: + description: 'Logging level. Valid values: CRITICAL, + ERROR, WARNING, INFO, DEBUG. Will be INFO by default.' + type: string + type: object + type: array + type: object + type: array + maxWorkers: + description: The maximum number of workers that can be automatically + scaled up. Value need to be between 1 and 25. Will be 10 by + default. + type: number + minWorkers: + description: The minimum number of workers that you want to run + in your environment. Will be 1 by default. + type: number + networkConfiguration: + description: Specifies the network configuration for your Apache + Airflow Environment. This includes two private subnets as well + as security groups for the Airflow environment. Each subnet + requires internet connection, otherwise the deployment will + fail. 
See Network configuration below for details. + items: + properties: + securityGroupIds: + description: Security groups IDs for the environment. At + least one of the security group needs to allow MWAA resources + to talk to each other, otherwise MWAA cannot be provisioned. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: The private subnet IDs in which the environment + should be created. MWAA requires two subnets. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + pluginsS3ObjectVersion: + description: The plugins.zip file version you want to use. + type: string + pluginsS3Path: + description: The relative path to the plugins.zip file on your + Amazon S3 storage bucket. For example, plugins.zip. If a relative + path is provided in the request, then plugins_s3_object_version + is required. For more information, see Importing DAGs on Amazon + MWAA. + type: string + requirementsS3ObjectVersion: + description: The requirements.txt file version you want to use. + type: string + requirementsS3Path: + description: The relative path to the requirements.txt file on + your Amazon S3 storage bucket. For example, requirements.txt. + If a relative path is provided in the request, then requirements_s3_object_version + is required. For more information, see Importing DAGs on Amazon + MWAA. + type: string + schedulers: + description: The number of schedulers that you want to run in + your environment. v2.0.2 and above accepts 2 - 5, default 2. + v1.10.12 accepts 1. + type: number + sourceBucketArn: + description: The Amazon Resource Name (ARN) of your Amazon S3 + storage bucket. For example, arn:aws:s3:::airflow-mybucketname. + type: string + sourceBucketArnRef: + description: Reference to a Bucket in s3 to populate sourceBucketArn. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sourceBucketArnSelector: + description: Selector for a Bucket in s3 to populate sourceBucketArn. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + startupScriptS3ObjectVersion: + description: The version of the startup shell script you want + to use. You must specify the version ID that Amazon S3 assigns + to the file every time you update the script. + type: string + startupScriptS3Path: + description: The relative path to the script hosted in your bucket. + The script runs as your environment starts before starting the + Apache Airflow process. Use this script to install dependencies, + modify configuration options, and set environment variables. + See Using a startup script. Supported for environment versions + 2.x and later. + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + webserverAccessMode: + description: 'Specifies whether the webserver should be accessible + over the internet or via your specified VPC. Possible options: + PRIVATE_ONLY (default) and PUBLIC_ONLY.' + type: string + weeklyMaintenanceWindowStart: + description: Specifies the start date for the weekly maintenance + window. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.dagS3Path is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.dagS3Path) + || (has(self.initProvider) && has(self.initProvider.dagS3Path))' + - message: spec.forProvider.networkConfiguration is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.networkConfiguration) + || (has(self.initProvider) && has(self.initProvider.networkConfiguration))' + status: + description: EnvironmentStatus defines the observed state of Environment. + properties: + atProvider: + properties: + airflowVersion: + description: Airflow version of your environment, will be set + by default to the latest version that MWAA supports. + type: string + arn: + description: The ARN of the MWAA Environment + type: string + createdAt: + description: The Created At date of the MWAA Environment + type: string + dagS3Path: + description: The relative path to the DAG folder on your Amazon + S3 storage bucket. For example, dags. For more information, + see Importing DAGs on Amazon MWAA. + type: string + databaseVpcEndpointService: + description: The VPC endpoint for the environment's Amazon RDS + database + type: string + endpointManagement: + type: string + environmentClass: + description: Environment class for the cluster. Possible options + are mw1.small, mw1.medium, mw1.large. Will be set by default + to mw1.small. Please check the AWS Pricing for more information + about the environment classes. + type: string + executionRoleArn: + description: The Amazon Resource Name (ARN) of the task execution + role that the Amazon MWAA and its environment can assume. Check + the official AWS documentation for the detailed role specification. + type: string + id: + type: string + kmsKey: + description: The Amazon Resource Name (ARN) of your KMS key that + you want to use for encryption. Will be set to the ARN of the + managed KMS key aws/airflow by default. Please check the Official + Documentation for more information. + type: string + lastUpdated: + items: + properties: + createdAt: + description: The Created At date of the MWAA Environment + type: string + error: + items: + properties: + errorCode: + type: string + errorMessage: + type: string + type: object + type: array + status: + description: The status of the Amazon MWAA Environment + type: string + type: object + type: array + loggingConfiguration: + description: The Apache Airflow logs you want to send to Amazon + CloudWatch Logs. + items: + properties: + dagProcessingLogs: + description: Log configuration options for processing DAGs. + See Module logging configuration for more information. + Disabled by default. + items: + properties: + cloudWatchLogGroupArn: + description: Provides the ARN for the CloudWatch group + where the logs will be published + type: string + enabled: + description: Enabling or disabling the collection + of logs + type: boolean + logLevel: + description: 'Logging level. Valid values: CRITICAL, + ERROR, WARNING, INFO, DEBUG. Will be INFO by default.' + type: string + type: object + type: array + schedulerLogs: + description: Log configuration options for the schedulers. + See Module logging configuration for more information. + Disabled by default. 
+ items: + properties: + cloudWatchLogGroupArn: + description: Provides the ARN for the CloudWatch group + where the logs will be published + type: string + enabled: + description: Enabling or disabling the collection + of logs + type: boolean + logLevel: + description: 'Logging level. Valid values: CRITICAL, + ERROR, WARNING, INFO, DEBUG. Will be INFO by default.' + type: string + type: object + type: array + taskLogs: + description: Log configuration options for DAG tasks. See + Module logging configuration for more information. Enabled + by default with INFO log level. + items: + properties: + cloudWatchLogGroupArn: + description: Provides the ARN for the CloudWatch group + where the logs will be published + type: string + enabled: + description: Enabling or disabling the collection + of logs + type: boolean + logLevel: + description: 'Logging level. Valid values: CRITICAL, + ERROR, WARNING, INFO, DEBUG. Will be INFO by default.' + type: string + type: object + type: array + webserverLogs: + description: Log configuration options for the webservers. + See Module logging configuration for more information. + Disabled by default. + items: + properties: + cloudWatchLogGroupArn: + description: Provides the ARN for the CloudWatch group + where the logs will be published + type: string + enabled: + description: Enabling or disabling the collection + of logs + type: boolean + logLevel: + description: 'Logging level. Valid values: CRITICAL, + ERROR, WARNING, INFO, DEBUG. Will be INFO by default.' + type: string + type: object + type: array + workerLogs: + description: Log configuration options for the workers. + See Module logging configuration for more information. + Disabled by default. + items: + properties: + cloudWatchLogGroupArn: + description: Provides the ARN for the CloudWatch group + where the logs will be published + type: string + enabled: + description: Enabling or disabling the collection + of logs + type: boolean + logLevel: + description: 'Logging level. Valid values: CRITICAL, + ERROR, WARNING, INFO, DEBUG. Will be INFO by default.' + type: string + type: object + type: array + type: object + type: array + maxWorkers: + description: The maximum number of workers that can be automatically + scaled up. Value need to be between 1 and 25. Will be 10 by + default. + type: number + minWorkers: + description: The minimum number of workers that you want to run + in your environment. Will be 1 by default. + type: number + networkConfiguration: + description: Specifies the network configuration for your Apache + Airflow Environment. This includes two private subnets as well + as security groups for the Airflow environment. Each subnet + requires internet connection, otherwise the deployment will + fail. See Network configuration below for details. + items: + properties: + securityGroupIds: + description: Security groups IDs for the environment. At + least one of the security group needs to allow MWAA resources + to talk to each other, otherwise MWAA cannot be provisioned. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetIds: + description: The private subnet IDs in which the environment + should be created. MWAA requires two subnets. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + pluginsS3ObjectVersion: + description: The plugins.zip file version you want to use. + type: string + pluginsS3Path: + description: The relative path to the plugins.zip file on your + Amazon S3 storage bucket. 
For example, plugins.zip. If a relative + path is provided in the request, then plugins_s3_object_version + is required. For more information, see Importing DAGs on Amazon + MWAA. + type: string + requirementsS3ObjectVersion: + description: The requirements.txt file version you want to use. + type: string + requirementsS3Path: + description: The relative path to the requirements.txt file on + your Amazon S3 storage bucket. For example, requirements.txt. + If a relative path is provided in the request, then requirements_s3_object_version + is required. For more information, see Importing DAGs on Amazon + MWAA. + type: string + schedulers: + description: The number of schedulers that you want to run in + your environment. v2.0.2 and above accepts 2 - 5, default 2. + v1.10.12 accepts 1. + type: number + serviceRoleArn: + description: The Service Role ARN of the Amazon MWAA Environment + type: string + sourceBucketArn: + description: The Amazon Resource Name (ARN) of your Amazon S3 + storage bucket. For example, arn:aws:s3:::airflow-mybucketname. + type: string + startupScriptS3ObjectVersion: + description: The version of the startup shell script you want + to use. You must specify the version ID that Amazon S3 assigns + to the file every time you update the script. + type: string + startupScriptS3Path: + description: The relative path to the script hosted in your bucket. + The script runs as your environment starts before starting the + Apache Airflow process. Use this script to install dependencies, + modify configuration options, and set environment variables. + See Using a startup script. Supported for environment versions + 2.x and later. + type: string + status: + description: The status of the Amazon MWAA Environment + type: string + tags: + additionalProperties: + type: string + description: Key-value map of resource tags. + type: object + x-kubernetes-map-type: granular + tagsAll: + additionalProperties: + type: string + description: A map of tags assigned to the resource, including + those inherited from the provider default_tags configuration + block. + type: object + x-kubernetes-map-type: granular + webserverAccessMode: + description: 'Specifies whether the webserver should be accessible + over the internet or via your specified VPC. Possible options: + PRIVATE_ONLY (default) and PUBLIC_ONLY.' + type: string + webserverUrl: + description: The webserver URL of the MWAA Environment + type: string + webserverVpcEndpointService: + description: The VPC endpoint for the environment's web server + type: string + weeklyMaintenanceWindowStart: + description: Specifies the start date for the weekly maintenance + window. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} From 0bafaf3e711851002234d1eab94e7691d524d8ac Mon Sep 17 00:00:00 2001 From: OwnerBe Date: Fri, 14 Jun 2024 18:17:35 +1200 Subject: [PATCH 2/2] after running make reviewable Signed-off-by: OwnerBe --- config/mwaa/config.go | 1 + 1 file changed, 1 insertion(+) diff --git a/config/mwaa/config.go b/config/mwaa/config.go index f9ce6065a9..a207fcdf1e 100644 --- a/config/mwaa/config.go +++ b/config/mwaa/config.go @@ -2,6 +2,7 @@ package mwaa import ( "github.com/crossplane/upjet/pkg/config" + "github.com/upbound/provider-aws/config/common" )
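
A note on the CEL validation rules in the CRD above: dagS3Path and networkConfiguration are only enforced when the managementPolicies array permits Create or Update. A minimal sketch of an observe-only import that relies on this, assuming an environment named existing-airflow-env already exists in AWS (the resource name and external name here are hypothetical):

apiVersion: mwaa.aws.upbound.io/v1beta1
kind: Environment
metadata:
  annotations:
    # Hypothetical: the external name of an environment that already exists in AWS.
    crossplane.io/external-name: existing-airflow-env
  name: observe-existing
spec:
  managementPolicies: ["Observe"]
  forProvider:
    # region stays required by the schema even for observe-only resources.
    region: us-west-1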
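
Because airflow_configuration_options is sensitive, the schema takes the Airflow overrides through airflowConfigurationOptionsSecretRef (a name/namespace Secret reference) rather than as plain spec values. A sketch of how this could be wired up, assuming a Secret named airflow-config in the upbound-system namespace (both hypothetical), with each data key expected to become one configuration override:

apiVersion: v1
kind: Secret
metadata:
  name: airflow-config        # hypothetical name
  namespace: upbound-system   # hypothetical namespace
type: Opaque
stringData:
  core.default_task_retries: "3"
---
apiVersion: mwaa.aws.upbound.io/v1beta1
kind: Environment
metadata:
  name: example-with-overrides   # hypothetical
spec:
  forProvider:
    airflowConfigurationOptionsSecretRef:
      name: airflow-config
      namespace: upbound-system
    dagS3Path: dags/
    region: us-west-1
    # executionRoleArn, networkConfiguration, and sourceBucketArn
    # would be set as in examples/mwaa/environment.yaml above.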
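
Per the schema, loggingConfiguration is a single-element list whose entry carries one block per Airflow component, each with enabled and logLevel. A partial sketch (log levels chosen purely for illustration) that raises scheduler logs to WARNING while keeping task logs at INFO:

spec:
  forProvider:
    loggingConfiguration:
      - schedulerLogs:
          - enabled: true
            logLevel: WARNING
        taskLogs:
          - enabled: true
            logLevel: INFO
        workerLogs:
          - enabled: false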