diff --git a/.secrets.baseline b/.secrets.baseline
index 8ede85939..7a459b129 100644
--- a/.secrets.baseline
+++ b/.secrets.baseline
@@ -3,7 +3,7 @@
     "files": "^.secrets.baseline$",
     "lines": null
   },
-  "generated_at": "2022-06-21T21:12:27Z",
+  "generated_at": "2022-07-29T15:31:31Z",
   "plugins_used": [
     {
       "name": "AWSKeyDetector"
@@ -2235,12 +2235,21 @@
         "type": "Secret Keyword"
       }
     ],
+    "tf_files/aws/eks/sample.tfvars": [
+      {
+        "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884",
+        "is_secret": false,
+        "is_verified": false,
+        "line_number": 107,
+        "type": "Hex High Entropy String"
+      }
+    ],
     "tf_files/aws/eks/variables.tf": [
       {
         "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884",
         "is_secret": false,
         "is_verified": false,
-        "line_number": 135,
+        "line_number": 133,
         "type": "Hex High Entropy String"
       }
     ],
@@ -2412,15 +2421,6 @@
         "type": "Hex High Entropy String"
       }
     ],
-    "tf_files/aws/rds/sample.tfvars": [
-      {
-        "hashed_secret": "76c3c4836dee37d8d0642949f84092a9a24bbf46",
-        "is_secret": false,
-        "is_verified": false,
-        "line_number": 7,
-        "type": "Secret Keyword"
-      }
-    ],
     "tf_files/aws/slurm/README.md": [
       {
         "hashed_secret": "fd85d792fa56981cf6a8d2a5c0857c74af86e99d",
diff --git a/tf_files/aws/access/sample.tfvars b/tf_files/aws/access/sample.tfvars
new file mode 100644
index 000000000..5e7b9b853
--- /dev/null
+++ b/tf_files/aws/access/sample.tfvars
@@ -0,0 +1,5 @@
+#The URL to an S3 bucket we want to work with
+access_url = ""
+
+#The ARN to an Amazon ACM-managed certificate
+access_cert = ""
\ No newline at end of file
diff --git a/tf_files/aws/account-policies/sample.tfvars b/tf_files/aws/account-policies/sample.tfvars
index 7a6d09a0d..2147c1e2e 100644
--- a/tf_files/aws/account-policies/sample.tfvars
+++ b/tf_files/aws/account-policies/sample.tfvars
@@ -1 +1,6 @@
-# defaults shold usually be ok - check variables.tf
+#The AWS region we are working in
+region = "us-east-1"
+
+
+#The IAM roles to be created
+roles = ["devopsdirector", "bsdisocyber", "projectmanagerplanx", "devopsplanx", "devplanx"]
\ No newline at end of file
diff --git a/tf_files/aws/account_management-logs/sample.tfvars b/tf_files/aws/account_management-logs/sample.tfvars
new file mode 100644
index 000000000..8b6cd3bd9
--- /dev/null
+++ b/tf_files/aws/account_management-logs/sample.tfvars
@@ -0,0 +1,9 @@
+#ID of AWS account that owns the public AMIs
+#TODO clarification
+csoc_account_id = "433568766270"
+
+#TODO check what these are used for. This module seems to use csoc_common_logging,
+#which seems to use modules/common-logging. Neither of those appear to have these two
+account_name = ""
+
+alarm_actions = ""
diff --git a/tf_files/aws/batch/sample.tfvars b/tf_files/aws/batch/sample.tfvars
new file mode 100644
index 000000000..a129bf0fa
--- /dev/null
+++ b/tf_files/aws/batch/sample.tfvars
@@ -0,0 +1,67 @@
+#A tag used to identify resources associated with this job.
+job_id = ""
+
+#This is a prefix that will be applied to resources generated as part of this deployment. It is for tracking purposes.
+#This is generally the long name of the job, which is the hostname + job type + job ID.
+prefix = ""
+
+#The name of the AWS batch job definition
+batch_job_definition_name = ""
+
+#This is the location of a JSON file that contains an AWS Batch job definition, containing information such as
+#the name of the container to use and resources to allocate.
+#More information can be found here: https://docs.aws.amazon.com/batch/latest/userguide/job_definitions.html
+container_properties = ""
+
+#The name of the IAM instance role to be attached to the machines running this batch job. An instance role is a limited role
+#applied to EC2 instances to allow them to access designated resources.
+#More information can be found at: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+iam_instance_role = ""
+
+#The instance profile to attach to EC2 machines. The instance profile is associated with a role, and is the
+#resource that is associated with a specific EC2 instance to give it access to desired resources. More information can be
+#found at: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html
+iam_instance_profile_role = ""
+
+#The role that allows AWS Batch itself (not the EC2 instances) to access needed resources. More information can be found at:
+#https://docs.aws.amazon.com/batch/latest/userguide/service_IAM_role.html
+aws_batch_service_role = ""
+
+#The name of the security group associated with this batch job
+aws_batch_compute_environment_sg = ""
+
+#The name of the batch compute environment to run the jobs in. A compute environment consists of ECS container instances that can
+#run the job.
+compute_environment_name = ""
+
+#What type of EC2 instance to use in order to handle the job.
+instance_type = ["c4.large"]
+
+priority = 10
+
+#The maximum number of EC2 vCPUs that an environment can use.
+max_vcpus = 256
+
+#The minimum number of EC2 vCPUs that an environment should maintain.
+min_vcpus = 0
+
+#What type of compute environment to use. Valid selections are [EC2, SPOT]
+compute_env_type = "EC2"
+
+#Valid options are [MANAGED, UNMANAGED]
+#This controls whether AWS manages spinning up the resources for us, or if we bring our own environment.
+#DO NOT USE UNMANAGED unless you know what you're doing.
+compute_type = "MANAGED"
+
+#The EC2 key pair that is used for instances launched in the compute environment.
+ec2_key_pair = "giangb"
+
+#The name of the job queue to create as part of this deployment.
+batch_job_queue_name = ""
+
+#The name of the SQS queue that will be created as a part of this deployment. The queue is the primary way that different nodes
+#communicate that they have completed a part of the batch job, and pass their completed parts to the next stage of the pipeline.
+sqs_queue_name = ""
+
+#The name of the bucket the results should be output to.
+output_bucket_name = ""
diff --git a/tf_files/aws/bucket_manifest_utils/sample.tfvars b/tf_files/aws/bucket_manifest_utils/sample.tfvars
new file mode 100644
index 000000000..63d5e434f
--- /dev/null
+++ b/tf_files/aws/bucket_manifest_utils/sample.tfvars
@@ -0,0 +1,44 @@
+#Path to the function file
+lambda_function_file = ""
+
+#Name of the function you are creating
+lambda_function_name = ""
+
+#Description of the function
+lambda_function_description = ""
+
+#IAM role ARN to attach to the function
+lambda_function_iam_role_arn = ""
+
+#The handler within your code that Lambda invokes to run the function.
+#For a Python-focused example, see here: https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html
+lambda_function_handler = "lambda_function.handler"
+
+#Language and version to use to run the lambda function.
+#For more information, see: https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html
+lambda_function_runtime = "python3.7"
+
+
+#Timeout of the function in seconds
+lambda_function_timeout = 3
+
+#How much RAM in MB will be used
+lambda_function_memory_size = 128
+
+#A map containing key-value pairs that define environment variables for the function
+lambda_function_env = {}
+
+#A map containing key-value pairs used in AWS to filter and search for resources
+lambda_function_tags = {}
+
+#Whether the function will be attached to a VPC. Valid options are [true, false]
+lambda_function_with_vpc = false
+
+#List of security groups for the lambda function with a vpc
+lambda_function_security_groups = []
+
+#List of subnets for the lambda function with a vpc
+lambda_function_subnets_id = []
+
+
+
diff --git a/tf_files/aws/cognito/sample.tfvars b/tf_files/aws/cognito/sample.tfvars
index 05ebe2548..bf480e475 100644
--- a/tf_files/aws/cognito/sample.tfvars
+++ b/tf_files/aws/cognito/sample.tfvars
@@ -1,10 +1,44 @@
-vpc_name = "INSERT VPC NAME HERE"
-cognito_provider_name = "federation name"
-cognito_domain_name = "subname for .auth.us-east-1.amazoncognito.com"
-cognito_callback_urls = ["https://url1"]
-cognito_provider_details = {"MetadataURL"="https://someurl"}
-tags = {
-  "Organization" = "PlanX"
-  "Environment" = "CSOC"
-}
+#A list of allowed OAuth Flows
+cognito_oauth_flows = ["code", "implicit"]
+
+#A user directory for Amazon Cognito, which handles sign-on for users. This is generally given the same name as the
+#name of the app using the service.
+cognito_user_pool_name = "fence"
+
+#The identity provider types that Cognito will use. An identity provider is a service that stores and manages
+#identities. See: https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_CreateIdentityProvider.html#CognitoUserPools-CreateIdentityProvider-request-ProviderType
+cognito_provider_type = "SAML"
+
+#The attribute mapping is how Cognito translates the information about a user received from an identity provider into
+#the attributes that Cognito expects from a user.
+#For more information, see: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-specifying-attribute-mapping.html
+cognito_attribute_mapping = {
+    "email" = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress"
+  }
+
+#The OAuth scopes specify what information from a user's account Cognito is able to access. Scopes are provider-specific, and
+#you will need to consult the documentation for your identity provider to determine what scopes are necessary and valid.
+cognito_oauth_scopes = ["email", "openid"]
+
+#Details about the auth provider, for this module most likely the MetadataURL or MetadataFILE
+cognito_provider_details = {}
+
+#The name of the VPC that the Cognito pool will be created in
+vpc_name = ""
+
+#The address of the sign-in and sign-up pages
+cognito_domain_name = ""
+
+#The URL(s) that can be redirected to after a successful sign-in
+cognito_callback_urls = []
+
+#The name of the identity provider. This is the name used within AWS.
+cognito_provider_name = ""
+
+#A map containing key-value pairs used in AWS to filter and search for resources
+tags = {
+  "Organization" = "PlanX"
+  "Environment" = "CSOC"
+  }
+
diff --git a/tf_files/aws/commons/sample.tfvars b/tf_files/aws/commons/sample.tfvars
new file mode 100644
index 000000000..b73e57a6c
--- /dev/null
+++ b/tf_files/aws/commons/sample.tfvars
@@ -0,0 +1,288 @@
+#Automatically generated from a corresponding variables.tf on 2022-07-28 12:08:31.473975
+
+#The name of the VPC for this commons
+vpc_name = "Commons1"
+
+#The CIDR block to allocate to the VPC for this commons
+vpc_cidr_block = "172.24.17.0/20"
+
+#A secondary CIDR block to allocate to the VPC for this commons, in case of network expansion
+secondary_cidr_block = false
+
+#The type(s) of traffic covered by flow logs
+vpc_flow_traffic = "ALL"
+
+#The region to bring up this commons in
+aws_region = "us-east-1"
+
+#An AWS ARN for the certificate to use on the Load Balancer in front of the commons. Because all access to a commons is through HTTPS, this is required
+aws_cert_name = "AWS-CERTIFICATE-NAME"
+
+#
+#TODO Figure out how to explain this
+csoc_account_id = "433568766270"
+
+#The CIDR of the VPC from which the commands to bring up this commons are being run; this will enable access
+peering_cidr = "10.128.0.0/20"
+
+#The size of the fence DB, in GiB
+fence_db_size = 10
+
+#The size of the sheepdog DB, in GiB
+sheepdog_db_size = 10
+
+#The size of the indexd DB, in GiB
+indexd_db_size = 10
+
+#The password for the fence DB
+db_password_fence = ""
+
+#The password for the gdcapi DB
+db_password_gdcapi = ""
+
+#This indexd guid prefix should come from Trevar/ZAC
+indexd_prefix = "dg.XXXX/"
+
+#The password for the peregrine DB
+db_password_peregrine = ""
+
+#The password for the sheepdog DB
+db_password_sheepdog = ""
+
+#The password for the indexd DB
+db_password_indexd = ""
+
+#The URL for the data dictionary schema. It must be in JSON format. For more info, see: https://gen3.org/resources/user/dictionary/
+dictionary_url = ""
+
+#A configuration to specify a customization profile for the commons' front-end
+portal_app = "dev"
+
+#If you wish to start fence pre-populated with data, this is the RDS snapshot that fence will start off of
+fence_snapshot = ""
+
+#If you wish to start gdcapi pre-populated with data, this is the RDS snapshot that gdcapi will start off of
+gdcapi_snapshot = ""
+
+#If you wish to start peregrine pre-populated with data, this is the RDS snapshot that peregrine will start off of
+peregrine_snapshot = ""
+
+#If you wish to start sheepdog pre-populated with data, this is the RDS snapshot that it will start off of
+sheepdog_snapshot = ""
+
+#If you wish to start indexd pre-populated with data, this is the RDS snapshot that it will start off of
+indexd_snapshot = ""
+
+#Instance type to use for fence. For more information on DB instance types, see:
+#https://aws.amazon.com/rds/instance-types/
+fence_db_instance = "db.t3.small"
+
+#Instance type to use for sheepdog. For more information on DB instance types, see:
+#https://aws.amazon.com/rds/instance-types/
+sheepdog_db_instance = "db.t3.small"
+
+#Instance type to use for indexd. For more information on DB instance types, see:
+#https://aws.amazon.com/rds/instance-types/
+indexd_db_instance = "db.t3.small"
+
+#Hostname that the commons will use for access; i.e. the URL that people will use to access the commons over the internet
+hostname = "dev.bionimbus.org"
+
+#A list of SSH keys that will be added to compute resources deployed by this module, including Squid proxy instances
+kube_ssh_key = ""
+
+#Google client ID for authentication purposes. If you don't want to enable Google sign in, leave blank
+google_client_id = ""
+
+#Secret for the above client ID. Set this to blank as well if you do not want Google sign in
+google_client_secret = ""
+
+#GDCAPI secret key
+gdcapi_secret_key = ""
+
+#Search criteria for squid AMI lookup
+squid_image_search_criteria = "ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"
+
+#The ID of the VPC that the commands to bring this commons up are run in, for access purposes
+peering_vpc_id = "vpc-e2b51d99"
+
+#The name of the NLB service endpoint for Squid
+squid-nlb-endpointservice-name = "com.amazonaws.vpce.us-east-1.vpce-svc-0ce2261f708539011"
+
+#A webhook used to send alerts in a Slack channel https://api.slack.com/messaging/webhooks
+slack_webhook = ""
+
+#A webhook used to send alerts in a secondary Slack channel https://api.slack.com/messaging/webhooks
+secondary_slack_webhook = ""
+
+#Threshold for database storage utilization. Represents a percentage; if this limit is reached, the Slack webhooks are used to send an alert
+alarm_threshold = "85"
+
+#The name of the organization, for tagging the resources for easier tracking
+organization_name = "Basic Service"
+
+#NOT CURRENTLY IN USE
+mailgun_smtp_host = "smtp.mailgun.org"
+
+#NOT CURRENTLY IN USE
+mailgun_api_url = "https://api.mailgun.net/v3/"
+
+#Whether or not fence should be deployed in a highly-available configuration
+fence_ha = false
+
+#Whether or not sheepdog should be deployed in a highly-available configuration
+sheepdog_ha = false
+
+#Whether or not indexd should be deployed in a highly-available configuration
+indexd_ha = false
+
+#A maintenance window for fence
+fence_maintenance_window = "SAT:09:00-SAT:09:59"
+
+#A maintenance window for sheepdog
+sheepdog_maintenance_window = "SAT:10:00-SAT:10:59"
+
+#A maintenance window for indexd
+indexd_maintenance_window = "SAT:11:00-SAT:11:59"
+
+#How many snapshots should be kept for fence
+fence_backup_retention_period = "4"
+
+#How many snapshots should be kept for sheepdog
+sheepdog_backup_retention_period = "4"
+
+#How many snapshots should be kept for indexd
+indexd_backup_retention_period = "4"
+
+#A backup window for fence
+fence_backup_window = "06:00-06:59"
+
+#A backup window for sheepdog
+sheepdog_backup_window = "07:00-07:59"
+
+#A backup window for indexd
+indexd_backup_window = "08:00-08:59"
+
+#The version of the fence engine to run (by default postgres)
+fence_engine_version = "13.3"
+
+#The version of the sheepdog engine to run
+sheepdog_engine_version = "13.3"
+
+#The version of the indexd engine to run
+indexd_engine_version = "13.3"
+
+#Whether or not to enable automatic upgrades of minor version for fence
+fence_auto_minor_version_upgrade = "true"
+
+#Whether or not to enable automatic upgrades of minor versions for indexd
+indexd_auto_minor_version_upgrade = "true"
+
+#Whether or not to enable automatic upgrades of minor versions for sheepdog
+sheepdog_auto_minor_version_upgrade = "true"
+
+#Name of the bucket from which to pull users.yaml for permissions
+users_bucket_name = "cdis-gen3-users"
+
+#Name of fence database. Not the same as instance identifier
+fence_database_name = "fence"
+
+#Name of sheepdog database. Not the same as instance identifier
+sheepdog_database_name = "gdcapi"
+
+#Name of indexd database. Not the same as instance identifier
+indexd_database_name = "indexd"
+
+#Username for fence DB
+fence_db_username = "fence_user"
+
+#Username for sheepdog DB
+sheepdog_db_username = "sheepdog"
+
+#Username for indexd DB
+indexd_db_username = "indexd_user"
+
+#Whether or not fence can automatically upgrade major versions
+fence_allow_major_version_upgrade = "true"
+
+#Whether or not sheepdog can automatically upgrade major versions
+sheepdog_allow_major_version_upgrade = "true"
+
+#Whether or not indexd can automatically upgrade major versions
+indexd_allow_major_version_upgrade = "true"
+
+#Instance type for HA squid
+ha-squid_instance_type = "t3.medium"
+
+#Volume size for HA squid instances
+ha-squid_instance_drive_size = 8
+
+#Bootstrap script for ha-squid instances
+ha-squid_bootstrap_script = "squid_running_on_docker.sh"
+
+#Additional variables to pass along with the bootstrap script
+ha-squid_extra_vars = ["squid_image=master"]
+
+#For testing purposes, when deploying something other than the master branch
+branch = "master"
+
+#When the fence bot has to access another bucket that wasn't created by the VPC module
+fence-bot_bucket_access_arns = []
+
+#Should you want to deploy HA-squid
+deploy_ha_squid = false
+
+#If ha squid is enabled and you want to set your own capacity
+ha-squid_cluster_desired_capasity = 2
+
+#If ha squid is enabled and you want to set your own min size
+ha-squid_cluster_min_size = 1
+
+#If ha squid is enabled and you want to set your own max size
+ha-squid_cluster_max_size = 3
+
+#Whether or not to deploy the database instance
+deploy_sheepdog_db = true
+
+#Whether or not to deploy the database instance
+deploy_fence_db = true
+
+#Whether or not to deploy the database instance
+deploy_indexd_db = true
+
+#Engine to deploy the db instance
+sheepdog_engine = "postgres"
+
+#Engine to deploy the db instance
+fence_engine = "postgres"
+
+#Engine to deploy the db instance
+indexd_engine = "postgres"
+
+#Instance type for the single proxy instance
+single_squid_instance_type = "t2.micro"
+
+#Let k8s workers be on a /22 subnet per AZ
+network_expansion = false
+
+#Whether or not the storage for the RDS instances should be encrypted
+rds_instance_storage_encrypted = true
+
+#Maximum allocated storage for autoscaling
+fence_max_allocated_storage = 0
+
+#Maximum allocated storage for autoscaling
+sheepdog_max_allocated_storage = 0
+
+#Maximum allocated storage for autoscaling
+indexd_max_allocated_storage = 0
+
+#Used to authenticate with Qualys, which is used for security scanning. Optional
+activation_id = ""
+
+#Used to authenticate with Qualys as well. Also optional
+customer_id = ""
+
+#Whether or not to set up the commons in accordance with FIPS, a federal information standard
+fips = false
+
diff --git a/tf_files/aws/commons_sns/sample.tfvars b/tf_files/aws/commons_sns/sample.tfvars
new file mode 100644
index 000000000..c56256579
--- /dev/null
+++ b/tf_files/aws/commons_sns/sample.tfvars
@@ -0,0 +1,12 @@
+#Automatically generated from a corresponding variables.tf on 2022-07-12 10:48:11.054601
+
+#The type of cluster that the jobs are running in. kube-aws is deprecated, so it should mostly be EKS clusters
+#Acceptable values are: "EKS", "kube-aws"
+cluster_type = "EKS"
+
+#The email addresses that notifications from this instance should be sent to
+emails = ["someone@uchicago.edu","otherone@uchicago.edu"]
+
+#The subject of the emails sent to the addresses enumerated previously
+topic_display = "cronjob monitor"
+
diff --git a/tf_files/aws/commons_vpc_es/sample.tfvars b/tf_files/aws/commons_vpc_es/sample.tfvars
new file mode 100644
index 000000000..cc601d123
--- /dev/null
+++ b/tf_files/aws/commons_vpc_es/sample.tfvars
@@ -0,0 +1,32 @@
+#Automatically generated from a corresponding variables.tf on 2022-07-12 11:33:44.445657
+
+#Slack webhook to send alerts to a Slack channel. Slack webhooks are deprecated, so this may need to change at some point
+#See: https://api.slack.com/legacy/custom-integrations/messaging/webhooks
+slack_webhook = ""
+
+#A Slack webhook to send alerts to a secondary channel
+secondary_slack_webhook = ""
+
+#The instance type for ElasticSearch. More information on instance types can be found here:
+#https://docs.aws.amazon.com/opensearch-service/latest/developerguide/supported-instance-types.html
+instance_type = "m4.large.elasticsearch"
+
+#The size of the attached Elastic Block Store volume, in GB
+ebs_volume_size_gb = 20
+
+#Boolean to control whether or not this cluster should be encrypted
+encryption = "true"
+
+#How many instances to have in this ElasticSearch cluster
+instance_count = 3
+
+#For tagging purposes
+organization_name = "Basic Service"
+
+#What version to use when deploying ES
+es_version = "6.8"
+
+#Whether or not to deploy a linked role for ES. A linked role is a role that allows for easier management of ES, by automatically
+#granting it the access it needs. For more information, see: https://docs.aws.amazon.com/opensearch-service/latest/developerguide/slr.html
+es_linked_role = true
+
diff --git a/tf_files/aws/commons_vpc_es/variables.tf b/tf_files/aws/commons_vpc_es/variables.tf
index 85f035213..b6e41cf03 100644
--- a/tf_files/aws/commons_vpc_es/variables.tf
+++ b/tf_files/aws/commons_vpc_es/variables.tf
@@ -4,6 +4,7 @@ variable "vpc_name" {}
 variable "slack_webhook" {
   default = ""
 }
+
 variable "secondary_slack_webhook" {
   default = ""
 }
diff --git a/tf_files/aws/csoc_admin_vm/sample.tfvars b/tf_files/aws/csoc_admin_vm/sample.tfvars
new file mode 100644
index 000000000..500c1a75f
--- /dev/null
+++ b/tf_files/aws/csoc_admin_vm/sample.tfvars
@@ -0,0 +1,37 @@
+#Automatically generated from a corresponding variables.tf on 2022-07-12 11:45:02.625524
+
+#ID of AWS account that owns the public AMIs
+#TODO Figure out what this means
+ami_account_id = "707767160287"
+
+#
+#TODO Figure out how to phrase this, I believe it's been used before
+csoc_account_id = "433568766270"
+
+#The region in which to spin up this infrastructure.
+aws_region = "us-east-1"
+
+#The ID of the VPC on which to bring up this VM
+csoc_vpc_id = "vpc-e2b51d99"
+
+#The ID of the subnet on which to bring up this VM
+csoc_subnet_id = "subnet-6127013c"
+
+#The ID of the child account.
+child_account_id = "707767160287"
+
+#The region for the child account
+child_account_region = "us-east-1"
+
+#NOT CURRENTLY USED
+child_name = "cdistest"
+
+#The name of the Elasticsearch cluster
+elasticsearch_domain = "commons-logs"
+
+#A list of VPC CIDR blocks that are allowed egress from the security group created by this module
+vpc_cidr_list = ""
+
+#The name of an AWS SSH key pair to attach to EC2 instances. For more information,
+#see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html
+ssh_key_name = ""
\ No newline at end of file
diff --git a/tf_files/aws/csoc_admin_vm/variables.tf b/tf_files/aws/csoc_admin_vm/variables.tf
index dae2f64fa..c0c846943 100644
--- a/tf_files/aws/csoc_admin_vm/variables.tf
+++ b/tf_files/aws/csoc_admin_vm/variables.tf
@@ -1,4 +1,5 @@
 # id of AWS account that owns the public AMI's
+
 variable "ami_account_id" {
   # cdis-test
   default = "707767160287"
diff --git a/tf_files/aws/csoc_common_logging/sample.tfvars b/tf_files/aws/csoc_common_logging/sample.tfvars
new file mode 100644
index 000000000..d99b428f0
--- /dev/null
+++ b/tf_files/aws/csoc_common_logging/sample.tfvars
@@ -0,0 +1,35 @@
+#Automatically generated from a corresponding variables.tf on 2022-07-12 12:00:53.938872
+
+#ID of the AWS account that owns the public AMIs
+csoc_account_id = "433568766270"
+
+#The AWS region this infrastructure will be spun up in
+aws_region = "us-east-1"
+
+#The child account that will be set as the owner of the resources created by this module
+child_account_id = "707767160287"
+
+#The region in which the child account exists
+child_account_region = "us-east-1"
+
+#The name of the environment that this will run in, for example, kidsfirst, cdistest
+common_name = "cdistest"
+
+#The name of the Elasticsearch cluster
+elasticsearch_domain = "commons-logs"
+
+#A cutoff for how long of a response time is accepted, in milliseconds
+threshold = "65.0"
+
+#A webhook to send alerts to a Slack channel
+slack_webhook = ""
+
+#The ARN of a lambda function to send logs to logDNA
+log_dna_function = "arn:aws:lambda:us-east-1:433568766270:function:logdna_cloudwatch"
+
+#Timeout threshold for the Lambda function to wait before exiting
+timeout = 300
+
+#Memory allocation for the Lambda function, in MB
+memory_size = 512
+
diff --git a/tf_files/aws/csoc_management-logs/sample.tfvars b/tf_files/aws/csoc_management-logs/sample.tfvars
new file mode 100644
index 000000000..3d83cceca
--- /dev/null
+++ b/tf_files/aws/csoc_management-logs/sample.tfvars
@@ -0,0 +1,12 @@
+#Automatically generated from a corresponding variables.tf on 2022-07-12 12:15:45.245756
+
+#A list of account IDs that are allowed to use the PutSubscriptionFilter action. For more information, see:
+#https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutSubscriptionFilter.html
+accounts_id = ["830067555646", "474789003679", "655886864976", "663707118480", "728066667777", "433568766270", "733512436101", "584476192960", "236835632492", "662843554732", "803291393429", "446046036926", "980870151884", "562749638216", "707767160287", "302170346065", "636151780898", "895962626746", "222487244010", "369384647397", "547481746681"]
+
+#The name of the Elasticsearch cluster
+elasticsearch_domain = "commons-logs"
+
+#The S3 bucket used to store logs
+log_bucket_name = "management-logs-remote-accounts"
+
diff --git a/tf_files/aws/csoc_management-logs/variables.tf b/tf_files/aws/csoc_management-logs/variables.tf
index 382240b57..93bbd1838 100644
--- a/tf_files/aws/csoc_management-logs/variables.tf
+++ b/tf_files/aws/csoc_management-logs/variables.tf
@@ -1,4 +1,3 @@
-
 variable "accounts_id" {
   type = "list"
   default = ["830067555646", "474789003679", "655886864976", "663707118480", "728066667777", "433568766270", "733512436101", "584476192960", "236835632492", "662843554732", "803291393429", "446046036926", "980870151884", "562749638216", "707767160287", "302170346065", "636151780898", "895962626746", "222487244010", "369384647397", "547481746681"]
diff --git a/tf_files/aws/csoc_qualys_vm/sample.tfvars b/tf_files/aws/csoc_qualys_vm/sample.tfvars
new file mode 100644
index 000000000..8c0602fec
--- /dev/null
+++ b/tf_files/aws/csoc_qualys_vm/sample.tfvars
@@ -0,0 +1,43 @@
+#Automatically generated from a corresponding variables.tf on 2022-07-12 12:32:59.347063
+
+#The name to use for the Qualys VM. This field is mandatory. This VM will be used
+#to run Qualys, a security application.
+vm_name = "qualys_scanner_prod"
+
+#The ID of the VPC to spin up this VM
+vpc_id = "vpc-e2b51d99"
+
+#The CIDR block for the VPC subnet the VM will be placed in
+env_vpc_subnet = "10.128.3.0/24"
+
+#Route table the VM will be associated with
+qualys_pub_subnet_routetable_id = "rtb-7ee06301"
+
+#The name of an AWS SSH key pair to attach to EC2 instances. For more information,
+#see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html
+ssh_key_name = "rarya_id_rsa"
+
+#The code used to register with Qualys. This field is mandatory
+user_perscode = "20079167409920"
+
+#A filter to apply against the names of AMIs when searching. We search, rather than specifying a specific image,
+#to ensure that all of the latest security updates are present.
+image_name_search_criteria = "a04e299c-fb8e-4ee2-9a75-94b76cf20fb2"
+
+#A filter to apply against the descriptions of AMIs when searching. We search, rather than specifying a specific image,
+#to ensure that all of the latest security updates are present.
+image_desc_search_criteria = ""
+
+#Account ID of the AMI owner, which is used to further filter the search for an AMI
+ami_account_id = "679593333241"
+
+#Organization for tagging purposes
+organization = "PlanX"
+
+#Environment for tagging purposes
+environment = "CSOC"
+
+#The EC2 instance type to use for VM(s) spun up from this module. For more information on EC2 instance types, see:
+#https://aws.amazon.com/ec2/instance-types/
+instance_type = "t3.medium"
+
diff --git a/tf_files/aws/csoc_qualys_vm/variables.tf b/tf_files/aws/csoc_qualys_vm/variables.tf
index 1899cf3f8..f289a9195 100644
--- a/tf_files/aws/csoc_qualys_vm/variables.tf
+++ b/tf_files/aws/csoc_qualys_vm/variables.tf
@@ -15,6 +15,7 @@ variable "qualys_pub_subnet_routetable_id"{
 }
 
 # name of aws_key_pair ssh key to attach to VM's
+
 variable "ssh_key_name" {
   default = "rarya_id_rsa"
 }
diff --git a/tf_files/aws/data_bucket/sample.tfvars b/tf_files/aws/data_bucket/sample.tfvars
new file mode 100644
index 000000000..3887b7ba9
--- /dev/null
+++ b/tf_files/aws/data_bucket/sample.tfvars
@@ -0,0 +1,13 @@
+#Automatically generated from a corresponding variables.tf on 2022-07-12 12:55:22.764041
+
+#The name of the bucket to be created
+bucket_name = ""
+
+#Value for 'Environment' key to tag the new resources with
+environment = ""
+
+#This variable is used to conditionally create a cloud trail.
+#Using this module to create another bucket in the same "environment" with a nonzero count for this variable will
+#result in an error because aspects of the cloud trail will already exist.
+cloud_trail_count = "1"
+
diff --git a/tf_files/aws/data_bucket/variables.tf b/tf_files/aws/data_bucket/variables.tf
index 22134e193..db8710a6c 100644
--- a/tf_files/aws/data_bucket/variables.tf
+++ b/tf_files/aws/data_bucket/variables.tf
@@ -1,7 +1,9 @@
 variable "bucket_name" {}
+
 variable "environment" {
   # value for 'Environment' key to tag the new resources with
 }
+
 variable "cloud_trail_count" {
   # this variable is used to conditionally create a cloud trail
   # Using this module to create another bucket in the same "environment" with nonzero
diff --git a/tf_files/aws/data_bucket_queue/sample.tfvars b/tf_files/aws/data_bucket_queue/sample.tfvars
index ed55578f4..f2756707e 100644
--- a/tf_files/aws/data_bucket_queue/sample.tfvars
+++ b/tf_files/aws/data_bucket_queue/sample.tfvars
@@ -1,2 +1,3 @@
+#This bucket is required by config.tf
 bucket_name=WHATEVER
 
diff --git a/tf_files/aws/demolab/sample.tfvars b/tf_files/aws/demolab/sample.tfvars
new file mode 100644
index 000000000..54a885258
--- /dev/null
+++ b/tf_files/aws/demolab/sample.tfvars
@@ -0,0 +1,16 @@
+#Automatically generated from a corresponding variables.tf on 2022-07-12 13:08:48.948730
+
+#The name of the VPC this demo lab will be located in
+vpc_name = ""
+
+#The EC2 instance type to use for VM(s) spun up from this module. For more information on EC2 instance types, see:
+#https://aws.amazon.com/ec2/instance-types/
+instance_type = "t3.small"
+
+#The number of instances in the demo lab
+instance_count = 5
+
+#The name of an AWS SSH key pair to attach to EC2 instances. For more information,
+#see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html
+ssh_public_key = ""
+
diff --git a/tf_files/aws/eks/sample.tfvars b/tf_files/aws/eks/sample.tfvars
new file mode 100644
index 000000000..da176e73e
--- /dev/null
+++ b/tf_files/aws/eks/sample.tfvars
@@ -0,0 +1,129 @@
+#Automatically generated from a corresponding variables.tf on 2022-07-12 13:47:23.877126
+
+#The VPC this EKS cluster should be spun up in
+vpc_name = ""
+
+#The name of an AWS SSH key pair to attach to EC2 instances. For more information,
+#see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html
+ec2_keyname = "someone@uchicago.edu"
+
+#The EC2 instance type to use for VM(s) spun up from this module. For more information on EC2 instance types, see:
+#https://aws.amazon.com/ec2/instance-types/
+instance_type = "t3.large"
+
+#The type of instance to use for nodes running jupyter
+jupyter_instance_type = "t3.large"
+
+#The type of instance to use for nodes running workflows
+workflow_instance_type = "t3.2xlarge"
+
+#This is the CIDR of the network your adminVM is on. Since the commons creates its own VPC, you need to peer them to allow communication between them later.
+peering_cidr = "10.128.0.0/20"
+
+#A CIDR block, if needed to expand available addresses for workflows
+secondary_cidr_block = ""
+
+#The ID of the VPC this cluster is to be peered with
+peering_vpc_id = "vpc-e2b51d99"
+
+#This is the policy that was created before that allows the cluster to access the users bucket in bionimbus.
+#Usually the same name as the VPC, but not always.
+users_policy = ""
+
+#The size of the volumes for the workers, in GB
+worker_drive_size = 30
+
+#The EKS version this cluster should run against
+eks_version = "1.16"
+
+#Whether you want your workers on a /24 or /23 subnet. /22 is available, but the VPC module should have been deployed
+#using the `network_expansion = true` variable, otherwise this will fail
+workers_subnet_size = 24
+
+#The script used to start up the workers
+#https://github.com/uc-cdis/cloud-automation/tree/master/flavors/eks
+bootstrap_script = "bootstrap-with-security-updates.sh"
+
+#The script used to start up Jupyter nodes
+#https://github.com/uc-cdis/cloud-automation/tree/master/flavors/eks
+jupyter_bootstrap_script = "bootstrap-with-security-updates.sh"
+
+#If your bootstrap script requires another kernel, you could point to it with this variable. Available kernels will be in the
+#`gen3-kernels` bucket.
+kernel = "N/A"
+
+#The size, in GB, of the drives to be attached to Jupyter workers
+jupyter_worker_drive_size = 30
+
+#A script used to start up a workflow
+workflow_bootstrap_script = "bootstrap.sh"
+
+#The size, in GB, of the drives to be attached to workflow workers
+workflow_worker_drive_size = 30
+
+#CIDRs you want to skip the proxy for when going out
+cidrs_to_route_to_gw = []
+
+#Organization name, for tagging purposes
+organization_name = "Basic Services"
+
+#The desired number of Jupyter workers
+jupyter_asg_desired_capacity = 0
+
+#The maximum number of Jupyter workers
+jupyter_asg_max_size = 10
+
+#The minimum number of Jupyter workers
+jupyter_asg_min_size = 0
+
+#The desired number of workflow workers
+workflow_asg_desired_capacity = 0
+
+#The maximum number of workflow workers
+workflow_asg_max_size = 50
+
+#The minimum number of workflow workers
+workflow_asg_min_size = 0
+
+#Whether to add a service account to your cluster
+iam-serviceaccount = true
+
+#URL used by the lambda function to check that the proxy is working
+domain_test = "www.google.com"
+
+#Is HA squid deployed?
+ha_squid = false
+
+#Deploy workflow nodepool?
+deploy_workflow = false
+
+#If migrating from single to ha, set to true, should not disrupt connectivity
+dual_proxy = false
+
+#Should all Jupyter notebooks exist in the same AZ?
+single_az_for_jupyter = false
+
+#Thumbprint for the AWS OIDC identity provider
+oidc_eks_thumbprint = ["9e99a48a9960b14926bb7f3b02e22da2b0ab7280"]
+
+#The ARN of an SNS topic that will be used to send alerts
+sns_topic_arn = "arn:aws:sns:us-east-1:433568766270:planx-csoc-alerts-topic"
+
+#Used for authenticating Qualys software, which is used to perform security scans
+activation_id = ""
+
+#Used for authenticating Qualys software, which is used to perform security scans
+customer_id = ""
+
+#This controls whether or not we use FIPS enabled AMIs
+fips = false
+
+#The key that was used to encrypt the FIPS enabled AMI. This is needed so ASG can decrypt the AMI
+fips_ami_kms = "arn:aws:kms:us-east-1:707767160287:key/mrk-697897f040ef45b0aa3cebf38a916f99"
+
+#This is the FIPS enabled AMI in cdistest account
+fips_enabled_ami = "ami-0de87e3680dcb13ec"
+
+#A list of AZs to be used by EKS nodes
+availability_zones = ["us-east-1a", "us-east-1c", "us-east-1d"]
+
diff --git a/tf_files/aws/eks/variables.tf b/tf_files/aws/eks/variables.tf
index b4275dc6b..0dc78a8ab 100644
--- a/tf_files/aws/eks/variables.tf
+++ b/tf_files/aws/eks/variables.tf
@@ -1,4 +1,3 @@
-
 variable "vpc_name" {}
 
 variable "ec2_keyname" {
@@ -31,7 +30,6 @@ variable "peering_vpc_id" {
 
 variable "users_policy" {}
 
-
 variable "worker_drive_size" {
   default = 30
 }
@@ -149,17 +147,20 @@ variable "customer_id" {
 }
 
 # This controls whether or not we use FIPS enabled AMI's
+
 variable "fips" {
   default = false
 }
 
 # the key that was used to encrypt the FIPS enabled AMI
 # This is needed to ASG can decrypt the ami
+
 variable "fips_ami_kms" {
   default = "arn:aws:kms:us-east-1:707767160287:key/mrk-697897f040ef45b0aa3cebf38a916f99"
 }
 
 # This is the FIPS enabled AMI in cdistest account.
+
 variable "fips_enabled_ami" {
   default = "ami-0de87e3680dcb13ec"
 }
diff --git a/tf_files/aws/encrypted-rds/sample.tfvars b/tf_files/aws/encrypted-rds/sample.tfvars
index 09468f5a7..f3e1574d3 100644
--- a/tf_files/aws/encrypted-rds/sample.tfvars
+++ b/tf_files/aws/encrypted-rds/sample.tfvars
@@ -1,2 +1,210 @@
-# Mandatory variables
-vpc_name = devplanetv1
+#Automatically generated from a corresponding variables.tf on 2022-07-12 15:15:28.628361
+
+#The name of the VPC this RDS instance will be attached to
+vpc_name = "vpcName"
+
+#The CIDR block used in the VPC
+vpc_cidr_block = "172.24.17.0/20"
+
+#The region to spin up all the resources in
+aws_region = "us-east-1"
+
+#
+#TODO Look this one up and get it right
+csoc_account_id = "433568766270"
+
+#The CIDR for the peering VPC
+peering_cidr = "10.128.0.0/20"
+
+#The size, in GB, of the Fence DB
+fence_db_size = 10
+
+#The size, in GB, of the Sheepdog DB
+sheepdog_db_size = 10
+
+#The size, in GB, of the Indexd DB
+indexd_db_size = 10
+
+#The password for the Fence DB
+db_password_fence = ""
+
+#The password for the GDCAPI DB
+db_password_gdcapi = ""
+
+#The password for the Peregrine DB
+db_password_peregrine = ""
+
+#The password for the Sheepdog DB
+db_password_sheepdog = ""
+
+#The password for the Indexd DB
+db_password_indexd = ""
+
+#A snapshot of an RDS database, used to populate this DB with data
+fence_snapshot = ""
+
+#A snapshot of an RDS database, used to populate this DB with data
+gdcapi_snapshot = ""
+
+#A snapshot of an RDS database, used to populate this DB with data
+peregrine_snapshot = ""
+
+#A snapshot of an RDS database, used to populate this DB with data
+sheepdog_snapshot = ""
+
+#A snapshot of an RDS database, used to populate this DB with data
+indexd_snapshot = ""
+
+#The instance type to run the Fence DB on
on +#https://aws.amazon.com/rds/instance-types/ +fence_db_instance = "db.t3.small" + +#The instance type to run the Sheepdog DB on +#https://aws.amazon.com/rds/instance-types/ +sheepdog_db_instance = "db.t3.small" + +#The instance type to run the Indexd DB on +#https://aws.amazon.com/rds/instance-types/ +indexd_db_instance = "db.t3.small" + +#The ID of the peered VPC +peering_vpc_id = "vpc-e2b51d99" + +#A webhook used to send alerts in a Slack channel +#https://api.slack.com/messaging/webhooks +slack_webhook = "" + +#A webhook used to send alerts in a secondary Slack channel +#https://api.slack.com/messaging/webhooks +secondary_slack_webhook = "" + +#Threshold for database storage utilization. This is a number that represents a percentage of storage used. +#Once this alarm is triggered, the webhook is used to send a notification via Slack +alarm_threshold = "85" + +#Organization used for tagging & tracking purposes +organization_name = "Basic Service" + +#Boolean that represents if Fence should be deployed in a high-availability configuration +fence_ha = false + +#Boolean that represents if Sheepdog should be deployed in a high-availability configuration +sheepdog_ha = false + +#Boolean that represents if Indexd should be deployed in a high-availabiity configuration +indexd_ha = false + +#The maintenance window for Fence +#Format is ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00" +fence_maintenance_window = "SAT:09:00-SAT:09:59" + +#Boolean that represents if the RDS instance's storage should be encrypted +rds_instance_storage_encrypted = true + +#The maintenance window for Sheepdog +#Format is ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00" +sheepdog_maintenance_window = "SAT:10:00-SAT:10:59" + +#The maintenance window for Indexd +#Format is ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00" +indexd_maintenance_window = "SAT:11:00-SAT:11:59" + +#How many snapshots of the database should be kept at a time +fence_backup_retention_period = "4" + +#How many snapshots of the database should be kept at a time +sheepdog_backup_retention_period = "4" + +#How many snapshots of the database should be kept at a time +indexd_backup_retention_period = "4" + +#The time range when Fence can be backed up +#Format is ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00" +fence_backup_window = "06:00-06:59" + +#The time range when Sheepdog can be backed up +#Format is ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00" +sheepdog_backup_window = "07:00-07:59" + +#The time range when Indexd can be backed up +#Format is ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00" +indexd_backup_window = "08:00-08:59" + +#The version of the database software used to run the database +fence_engine_version = "13.3" + +#The version of the database software used to run the database +sheepdog_engine_version = "13.3" + +#The version of the database software used to run the database +indexd_engine_version = "13.3" + +#Whether the database can automatically update minor versions +fence_auto_minor_version_upgrade = "true" + +#Whether the database can automatically update minor versions +indexd_auto_minor_version_upgrade = "true" + +#Whether the database can automatically update minor versions +sheepdog_auto_minor_version_upgrade = "true" + +#Name of the Fence database. Not the same as the instance identifier +fence_database_name = "fence" + +#Name of the Sheepdog database. Not the same as the instance identifier +sheepdog_database_name = "gdcapi" + +#Name of the Indexd database. 
+indexd_database_name = "indexd"
+
+#The username for the Fence database
+fence_db_username = "fence_user"
+
+#The username for the Sheepdog database
+sheepdog_db_username = "sheepdog"
+
+#The username for the Indexd database
+indexd_db_username = "indexd_user"
+
+#Boolean that controls if the database is allowed to automatically upgrade major versions
+fence_allow_major_version_upgrade = "true"
+
+#Boolean that controls if the database is allowed to automatically upgrade major versions
+sheepdog_allow_major_version_upgrade = "true"
+
+#Boolean that controls if the database is allowed to automatically upgrade major versions
+indexd_allow_major_version_upgrade = "true"
+
+#Whether or not to deploy the database instance
+deploy_sheepdog_db = true
+
+#Whether or not to deploy the database instance
+deploy_fence_db = true
+
+#Whether or not to deploy the database instance
+deploy_indexd_db = true
+
+#Engine to deploy the db instance
+sheepdog_engine = "postgres"
+
+#Engine to deploy the db instance
+fence_engine = "postgres"
+
+#Engine to deploy the db instance
+indexd_engine = "postgres"
+
+#The security group to add the DB instances to
+security_group_local_id = "securityGroupId"
+
+#The subnet group for databases that this DB should be spun up in
+aws_db_subnet_group_name = "subnetName"
+
+#Maximum allocated storage for autoscaling
+fence_max_allocated_storage = 0
+
+#Maximum allocated storage for autoscaling
+sheepdog_max_allocated_storage = 0
+
+#Maximum allocated storage for autoscaling
+indexd_max_allocated_storage = 0
+
diff --git a/tf_files/aws/kubecost/sample.tfvars b/tf_files/aws/kubecost/sample.tfvars
index 040e428eb..540bd88a1 100644
--- a/tf_files/aws/kubecost/sample.tfvars
+++ b/tf_files/aws/kubecost/sample.tfvars
@@ -1,2 +1,14 @@
-# Mandatory variables
-#vpc_name = devplanetv1
+#Automatically generated from a corresponding variables.tf on 2022-07-12 15:27:27.277857
+
+#The name of the VPC to bring these resources up in
+vpc_name = ""
+
+#This is used if the resource is set up as a secondary node
+parent_account_id = ""
+
+#The S3 bucket in which to store the generated Cost and Usage report
+cur_s3_bucket = ""
+
+#This is used if the resource is set up as a primary node. It specifies the account ID for the linked secondary node
+slave_account_id = ""
+
diff --git a/tf_files/aws/publicvm/sample.tfvars b/tf_files/aws/publicvm/sample.tfvars
index 60a7f61d2..9893e5b3f 100644
--- a/tf_files/aws/publicvm/sample.tfvars
+++ b/tf_files/aws/publicvm/sample.tfvars
@@ -1,17 +1,30 @@
-vpc_name = "THE_VPC_NAME - default is: vadcprod"
+#Automatically generated from a corresponding variables.tf on 2022-07-12 16:07:24.564137
 
-instance_type = "default is: t3.small"
+#The name of the VPC these resources will be spun up in
+vpc_name = "vadcprod"
 
-ssh_in_secgroup = "should already exist - default is: ssh_eks_vadcprod"
+#The EC2 instance type to use for VM(s) spun up from this module. For more information on EC2 instance types, see:
+#https://aws.amazon.com/ec2/instance-types/
+instance_type = "t3.small"
 
-egress_secgroup = "should already exist - default is: out"
+#The name of the security group for inbound SSH. This should already exist
+ssh_in_secgroup = "ssh_eks_vadcprod"
 
-subnet_name = "public subnet under vpc_name - default is: public"
+#The name of the security group for egress. This should already exist
+egress_secgroup = "out"
 
-volume_size = "for the vm - default is 500"
+#The public subnet located under vpc_name. By default this is set to "public"
+subnet_name = "public"
 
-policies = ["list of policies ARNs to attach to the role that will be attached to this VM"]
+#Volume size of the VM in GB (technically GiB, but what's a few bits among friends?)
+volume_size = 500
 
-ami = "ami to use, if empty (default) latest ubuntu available will be used"
+#List of policy ARNs to attach to the role that will be attached to this VM
+policies = []
+
+#The AMI to use for the machine; if nothing is specified, the latest available Ubuntu version will be used
+ami = ""
+
+#The name for the VM, should be unique.
+vm_name = ""
-vm_name = "Name for the vm, should be unique, there is no default value for this one, so you must set something here"
diff --git a/tf_files/aws/publicvm/variables.tf b/tf_files/aws/publicvm/variables.tf
index 4ea97a19f..2698e1940 100644
--- a/tf_files/aws/publicvm/variables.tf
+++ b/tf_files/aws/publicvm/variables.tf
@@ -6,7 +6,6 @@ variable "instance_type" {
   default = "t3.small"
 }
 
-
 variable "ssh_in_secgroup" {
   default = "ssh_eks_vadcprod"
 }
diff --git a/tf_files/aws/rds/sample.tfvars b/tf_files/aws/rds/sample.tfvars
index 88d0fc195..c58a8b209 100644
--- a/tf_files/aws/rds/sample.tfvars
+++ b/tf_files/aws/rds/sample.tfvars
@@ -1,58 +1,156 @@
+#Automatically generated from a corresponding variables.tf on 2022-07-12 16:47:21.465202
 
-# Mandatory variables
-rds_instance_allocated_storage = 20
-rds_instance_engine = "MySQL,postgres,oracle,aurora,SQL,MariaDB"
-rds_instance_engine_version = "version for your engine, basically depends on the variable above"
-rds_instance_username = "usern ame for access"
-#rds_instance_password = "password for access"
-rds_instance_port = "1433"
-rds_instance_identifier = "planx-tests-db"
-#rds_instance_db_subnet_group_name = "subnet group name"
-#rds_instance_vpc_security_group_ids = ["sg-XXXXXXXXXX"]
-
-
-# Optional variables, uncomment and change values accordingly
-
-#rds_instance_name = "what are you naming the db"
-#rds_instance_allow_major_version_upgrade = true
-#rds_instance_apply_immediately = false
-#rds_instance_auto_minor_version_upgrade = true
-#rds_instance_availability_zone = ""
-#rds_instance_backup_retention_period = 0
-#rds_instance_backup_window = "03:46-04:16"
-#rds_instance_character_set_name = ""
-#rds_instance_copy_tags_to_snapshot = false
-#rds_instance_create = true
-#rds_instance_deletion_protection = false
-#rds_instance_enabled_cloudwatch_logs_exports = []
-#rds_instance_iam_database_authentication_enabled = false
-#rds_instance_instance_class = "db.t3.micro"
-#rds_instance_iops = 0
-#rds_instance_kms_key_id = ""
-#rds_instance_license_model = false
-#rds_instance_maintenance_window = "Mon:00:00-Mon:03:00"
-#rds_instance_max_allocated_storage = 0
-#rds_instance_monitoring_interval = 0
-#rds_instance_monitoring_role_arn = ""
-#rds_instance_monitoring_role_name = "rds-monitoring-role"
-#rds_instance_multi_az = false
-#rds_instance_option_group_name = ""
-#rds_instance_parameter_group_name = ""
-#rds_instance_performance_insights_enabled = false
-#rds_instance_performance_insights_retention_period = 7
-#rds_instance_publicly_accessible = false
-#rds_instance_replicate_source_db = ""
-#rds_instance_skip_final_snapshot = false
-#rds_instance_snapshot_identifier = ""
-#rds_instance_storage_encrypted = false
-#rds_instance_storage_type = "gp2"
-#rds_instance_tags = {"something"="stuff", "Something-else"="more-stuff"}
-#rds_instance_timeouts = {create = "40m", update = "80m", delete = "40m"}
-#rds_instance_timezone = ""
-#rds_instance_final_snapshot_identifier = ""
-
-# backups
-#rds_instance_backup_enabled = false
-#rds_instance_backup_kms_key = ""
-#rds_instance_backup_bucket_name = ""
+#Whether to create this resource or not?
+rds_instance_create = true
+
+#Allocated storage in gibibytes
+rds_instance_allocated_storage = 20
+
+#What type of storage to use for the database.
+#More information can be found here: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html
+rds_instance_storage_type = "gp2"
+
+#The database engine to use. Information on types and pricing can be found here:
+#https://aws.amazon.com/rds/pricing/?pg=ln&sec=hs
+rds_instance_engine = ""
+
+#The engine version to use. If auto_minor_version_upgrade is enabled, you can provide a prefix of the
+#version such as 5.7 (for 5.7.10) and this attribute will ignore differences in the patch version automatically (e.g. 5.7.17)
+rds_instance_engine_version = ""
+
+#The instance type of the RDS instance
+#https://aws.amazon.com/rds/instance-types/
+rds_instance_instance_class = "db.t2.micro"
+
+#Name for the database to be created
+rds_instance_name = ""
+
+#The name of the RDS instance; if omitted, Terraform will assign a random, unique identifier
+rds_instance_identifier = ""
+
+#Username to use for the RDS instance
+rds_instance_username = ""
+
+#Password to use for the RDS instance
+rds_instance_password = ""
+
+#A DB parameter group is a reusable template of values for things like RAM allocation that can be associated with a DB instance.
+#For more info, see: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html
+rds_instance_parameter_group_name = ""
+
+#Indicates that major version upgrades are allowed
+rds_instance_allow_major_version_upgrade = true
+
+#Specifies whether any database modifications are applied immediately, or during the next maintenance window
+rds_instance_apply_immediately = false
+
+#Indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window
+rds_instance_auto_minor_version_upgrade = true
+
+#The number of days to retain backups for. Must be between 0 and 35
+rds_instance_backup_retention_period = 0
+
+#The daily time range (in UTC) during which automated backups are created if they are enabled. Example: '09:46-10:16'. Must not overlap with maintenance_window
+rds_instance_backup_window = "03:46-04:16"
+
+#Name of DB subnet group. DB instance will be created in the VPC associated with the DB subnet group
+rds_instance_db_subnet_group_name = ""
+
+#The window to perform maintenance in
+rds_instance_maintenance_window = "Mon:00:00-Mon:03:00"
+
+#Specifies if the RDS instance is multi-AZ
+rds_instance_multi_az = false
+
+#Name of the DB option group to associate
+rds_instance_option_group_name = ""
+
+#Bool to control if instance is publicly accessible
+rds_instance_publicly_accessible = false
+
+#Determines if a final snapshot will be taken of the database before it is deleted. False means a final snapshot
+#will be taken, and true means it will be skipped
+rds_instance_skip_final_snapshot = false
+
+#Specifies whether the DB instance is encrypted
+rds_instance_storage_encrypted = false
+
+#A list of VPC security groups to associate with the instance
+#For more information, see: https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html
+rds_instance_vpc_security_group_ids = []
+
+#Tags for the instance, used for searching and filtering
+rds_instance_tags = {}
+
+#The port on which the DB accepts connections
+rds_instance_port = ""
+
+#License model information for this DB instance
+rds_instance_license_model = ""
+
+#Specifies whether Performance Insights are enabled
+rds_instance_performance_insights_enabled = false
+
+#The amount of time in days to retain Performance Insights data. Either 7 (7 days) or 731 (2 years).
+rds_instance_performance_insights_retention_period = 7
+
+#(Optional) Updated Terraform resource management timeouts; applies to `aws_db_instance` in particular, to permit longer resource management times
+rds_instance_timeouts = { create = "40m", update = "80m", delete = "40m" }
+
+#Name of the IAM role which will be created when create_monitoring_role is enabled.
+rds_instance_monitoring_role_name = "rds-monitoring-role"
+
+#Specifies the value for Storage Autoscaling
+rds_instance_max_allocated_storage = 0
+
+#The Availability Zone of the RDS instance
+rds_instance_availability_zone = ""
+
+#The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to CloudWatch Logs. Must be specified if monitoring_interval is non-zero.
+rds_instance_monitoring_role_arn = ""
+
+#On delete, copy all Instance tags to the final snapshot (if final_snapshot_identifier is specified)
+rds_instance_copy_tags_to_snapshot = false
+
+#The ARN for the KMS encryption key. If creating an encrypted replica, set this to the destination KMS ARN. If storage_encrypted is set to true and kms_key_id is not specified, the default KMS key created in your account will be used
+rds_instance_kms_key_id = ""
+
+#List of log types to enable for exporting to CloudWatch logs. If omitted, no logs will be exported. Valid values (depending on engine): alert, audit, error, general, listener, slowquery, trace, postgresql (PostgreSQL), upgrade (PostgreSQL).
+rds_instance_enabled_cloudwatch_logs_exports = []
+
+#The amount of provisioned IOPS. Setting this implies a storage_type of 'io1'
+rds_instance_iops = 0
+
+#The database can't be deleted when this value is set to true.
+rds_instance_deletion_protection = false
+
+#Specifies whether mappings of AWS Identity and Access Management (IAM) accounts to database accounts are enabled
+rds_instance_iam_database_authentication_enabled = false
+
+#(Optional) Time zone of the DB instance. timezone is currently only supported by Microsoft SQL Server. The timezone can only be set on creation. See MSSQL User Guide for more information.
+rds_instance_timezone = ""
+
+#The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0. Valid Values: 0, 1, 5, 10, 15, 30, 60.
+rds_instance_monitoring_interval = 0
+
+#Specifies whether or not to create this database from a snapshot. This correlates to the snapshot ID you'd find in the RDS console, e.g: rds:production-2015-06-26-06-05.
+rds_instance_snapshot_identifier = ""
+
+#Specifies that this resource is a Replicate database, and to use this value as the source database. This correlates to the identifier of another Amazon RDS Database to replicate.
+rds_instance_replicate_source_db = ""
+
+#Create IAM role with a defined name that permits RDS to send enhanced monitoring metrics to CloudWatch Logs.
+rds_instance_create_monitoring_role = false
+
+#(Optional) The character set name to use for DB encoding in Oracle instances. This can't be changed. See Oracle Character Sets Supported in Amazon RDS for more information
+rds_instance_character_set_name = ""
+
+#To enable backups onto S3
+rds_instance_backup_enabled = false
+
+#The KMS key to enable backups onto S3
+rds_instance_backup_kms_key = ""
+
+#The bucket to send backups to
+rds_instance_backup_bucket_name = ""
diff --git a/tf_files/aws/rds/variables.tf b/tf_files/aws/rds/variables.tf
index f97e082d7..c887c202f 100644
--- a/tf_files/aws/rds/variables.tf
+++ b/tf_files/aws/rds/variables.tf
@@ -1,4 +1,3 @@
-
 variable "rds_instance_create" {
   description = "Whether to create this resource or not?"
 # type = bool
@@ -191,7 +190,6 @@ variable "rds_instance_availability_zone" {
   default = ""
 }
 
-
 variable "rds_instance_final_snapshot_identifier" {
   description = "The name of your final DB snapshot when this DB instance is deleted."
 # type = "string"
@@ -200,7 +198,7 @@ variable "rds_instance_final_snapshot_identifier" {
 
 variable "rds_instance_monitoring_role_arn" {
   description = "The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to CloudWatch Logs. Must be specified if monitoring_interval is non-zero."
-# type = "string"
+# type = "string"ß
   default = ""
 }
diff --git a/tf_files/aws/rds_snapshot/sample.tfvars b/tf_files/aws/rds_snapshot/sample.tfvars
new file mode 100644
index 000000000..a471c2fed
--- /dev/null
+++ b/tf_files/aws/rds_snapshot/sample.tfvars
@@ -0,0 +1,17 @@
+#Automatically generated from a corresponding variables.tf on 2022-07-12 16:51:07.398804
+
+#The AWS region this snapshot will be taken from
+aws_region = "us-east-1"
+
+#The VPC this snapshot will be taken from
+vpc_name = ""
+
+#The RDS ID that corresponds to the indexd database
+indexd_rds_id = ""
+
+#The RDS ID that corresponds to the Fence database
+fence_rds_id = ""
+
+#The RDS ID that corresponds to the Sheepdog database
+sheepdog_rds_id = ""
+
diff --git a/tf_files/aws/rds_snapshot/variables.tf b/tf_files/aws/rds_snapshot/variables.tf
index 1065a13c7..8491e8a8e 100644
--- a/tf_files/aws/rds_snapshot/variables.tf
+++ b/tf_files/aws/rds_snapshot/variables.tf
@@ -5,10 +5,13 @@ variable "aws_region" {
 variable "vpc_name" {}
 
 # rds instance id
+
 variable "indexd_rds_id" {}
 
 # rds instance id
+
 variable "fence_rds_id" {}
 
 # rds instance id
+
 variable "sheepdog_rds_id" {}
diff --git a/tf_files/aws/role/sample.tfvars b/tf_files/aws/role/sample.tfvars
index 49f6fceb0..0e2e3ff71 100644
--- a/tf_files/aws/role/sample.tfvars
+++ b/tf_files/aws/role/sample.tfvars
@@ -1,3 +1,24 @@
-rolename="rolename"
+#The name of the role
+rolename=""
+
+#A description of the role
 description="Role created with gen3 awsrole"
+
+#A path to attach to the role. For more information, see:
+#https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-friendly-names
 path="/gen3_service/"
+
+#Assume-role policy to attach to the role
+ar_policy = <