diff --git a/.gitignore b/.gitignore
index 80f066d8..112a7049 100755
--- a/.gitignore
+++ b/.gitignore
@@ -77,3 +77,4 @@ terraform.rc
 /aws/packer/todo/todo
 *.tgz
 *.DS_Store
+*.plan
diff --git a/aws/lab/global/iam/.terraform.lock.hcl b/aws/lab/global/iam/.terraform.lock.hcl
new file mode 100644
index 00000000..5ca14bb8
--- /dev/null
+++ b/aws/lab/global/iam/.terraform.lock.hcl
@@ -0,0 +1,22 @@
+# This file is maintained automatically by "terraform init".
+# Manual edits may be lost in future updates.
+
+provider "registry.terraform.io/hashicorp/aws" {
+  version     = "4.18.0"
+  constraints = "4.18.0"
+  hashes = [
+    "h1:62MWy6fGx/cVk1DnLcc8rUxCCKhi6/R9fi/Af/ph9ag=",
+    "zh:100a11324326bf849b4c85d3c40a81e485726eee99c5a229387b8485a7a8da8b",
+    "zh:2226bbf97101af90e43cd5606d8678f35d7e7b477657d9297c42a1bd2ed42750",
+    "zh:27d51694300c08c32312f8832b889c57a2821dc022d49d38f9b1e14810f8a3fb",
+    "zh:2b8792c76986facfd415f967c5d61022f7ceeaa46c158037fe8939e36d954f99",
+    "zh:3ea787967de772cc3a13469753080c8fa81be5aefc735d3753c7627f63c948e5",
+    "zh:64d58463cbb2b93d5202ef311a101890a1e083f9587f3eabb9f2e26dd0cf8f43",
+    "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
+    "zh:b10eecf4c034a229712825124e7c0b765c5904648550dc8f844f68638531d337",
+    "zh:d9a3cc46e2746c40ea69bcfb2d12e765ee6bda3e1ed8ce73f272d492ff4836bb",
+    "zh:df625e57aa3b5fb3e4562da44daf6565289818ba2a7e66f86ad968b43fdb5148",
+    "zh:eaaa3a5d2a15a87b346e521872120a3ca7f6777a04226a55f51022eaf4097963",
+    "zh:ec6f4b00ae4f9d536f2a6c2e5a5f149867194268ce9068a9c348bc3e678fbfce",
+  ]
+}
diff --git a/aws/lab/global/iam/eks-policies.tf b/aws/lab/global/iam/eks-policies.tf
new file mode 100644
index 00000000..775ea5b9
--- /dev/null
+++ b/aws/lab/global/iam/eks-policies.tf
@@ -0,0 +1,20 @@
+################################################
+#  Node Group IAM role policies & attachments  #
+################################################
+
+resource "aws_iam_role_policy_attachment" "eks_node_group_role_policy_attachment" {
+  for_each   = toset(var.node_group_role_policies)
+  policy_arn = each.value
+  role       = aws_iam_role.eks_node_group_iam_role.name
+}
+
+
+################################################
+#  Cluster IAM role policies & attachments     #
+################################################
+resource "aws_iam_role_policy_attachment" "eks_cluster_role_policy_attachment" {
+  for_each = toset(var.cluster_role_policies)
+
+  policy_arn = each.value
+  role       = aws_iam_role.cluster_iam_role.name
+}
\ No newline at end of file
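
Note: the two attachment resources above fan each list of policy ARNs out via for_each/toset, one attachment per ARN. A post-apply sanity check could look like this (a sketch; role names assume the defaults in variables.tf below):

    aws iam list-attached-role-policies --role-name lab-eks-node-group-role
    aws iam list-attached-role-policies --role-name lab-eks-cluster-role
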
diff --git a/aws/lab/global/iam/eks-roles.tf b/aws/lab/global/iam/eks-roles.tf
new file mode 100644
index 00000000..f3bd1363
--- /dev/null
+++ b/aws/lab/global/iam/eks-roles.tf
@@ -0,0 +1,69 @@
+data "aws_partition" "current" {}
+
+locals {
+  tags = {
+    account     = var.app.account
+    project     = "infra"
+    application = "eks"
+    team        = "sre"
+  }
+}
+
+################################################
+#    IAM ROLE for self managed node groups     #
+################################################
+data "aws_iam_policy_document" "eks_node_group_assume_role_policy" {
+  statement {
+    sid     = "EKSNodeAssumeRole"
+    actions = ["sts:AssumeRole"]
+
+    principals {
+      type        = "Service"
+      identifiers = ["ec2.${data.aws_partition.current.dns_suffix}"]
+    }
+  }
+}
+
+resource "aws_iam_role" "eks_node_group_iam_role" {
+  name                  = var.node_group_iam_role_name
+  description           = "eks ${var.app.account} self managed node group IAM role"
+  assume_role_policy    = data.aws_iam_policy_document.eks_node_group_assume_role_policy.json
+  force_detach_policies = true
+  tags                  = local.tags
+}
+
+resource "aws_iam_instance_profile" "eks_node_group_profile" {
+  role = aws_iam_role.eks_node_group_iam_role.name
+  name = var.node_group_iam_role_name
+  lifecycle {
+    create_before_destroy = true
+  }
+  tags = local.tags
+}
+
+################################################
+#          IAM ROLE for eks cluster            #
+################################################
+data "aws_iam_policy_document" "eks_cluster_assume_role_policy" {
+
+  statement {
+    sid     = "EKSClusterAssumeRole"
+    actions = ["sts:AssumeRole"]
+
+    principals {
+      type        = "Service"
+      identifiers = ["eks.${data.aws_partition.current.dns_suffix}"]
+    }
+  }
+}
+
+resource "aws_iam_role" "cluster_iam_role" {
+  name                  = var.cluster_iam_role_name
+  description           = "eks ${var.app.account} cluster IAM role"
+  assume_role_policy    = data.aws_iam_policy_document.eks_cluster_assume_role_policy.json
+  force_detach_policies = true
+  tags                  = local.tags
+}
+
+
+
diff --git a/aws/lab/global/iam/provider.tf b/aws/lab/global/iam/provider.tf
new file mode 100644
index 00000000..fbe1d900
--- /dev/null
+++ b/aws/lab/global/iam/provider.tf
@@ -0,0 +1,23 @@
+terraform {
+
+  backend "s3" {
+    bucket         = "todo-tf-state-lab"
+    key            = "lab/us-east-2/global/iam.tf"
+    region         = "us-east-2"
+    encrypt        = true
+    kms_key_id     = "alias/todo-tf-state-key"
+    dynamodb_table = "todo-tf-state-lab"
+  }
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = "= 4.18.0"
+    }
+  }
+
+  required_version = "= 1.2.2"
+}
+
+provider "aws" {
+  region = var.app.region
+}
\ No newline at end of file
diff --git a/aws/lab/global/iam/variables.tf b/aws/lab/global/iam/variables.tf
new file mode 100644
index 00000000..a6373bf3
--- /dev/null
+++ b/aws/lab/global/iam/variables.tf
@@ -0,0 +1,30 @@
+variable "app" {
+  default = {
+    account = "lab"
+    region  = "us-east-2"
+  }
+}
+
+variable "cluster_iam_role_name" {
+  default = "lab-eks-cluster-role"
+}
+
+variable "cluster_role_policies" {
+  default = [
+    "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
+    "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController"
+  ]
+}
+
+variable "node_group_iam_role_name" {
+  default = "lab-eks-node-group-role"
+}
+
+variable "node_group_role_policies" {
+  default = [
+    "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
+    "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
+    "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
+    "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
+  ]
+}
\ No newline at end of file
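
Note: with the provider pinned to exactly 4.18.0 and Terraform to 1.2.2, a first run of this new root module would look roughly like this (assuming the commands are run from aws/, where the lab/ tree lives):

    cd lab/global/iam
    terraform init                            # configures the S3/DynamoDB backend, writes .terraform.lock.hcl
    terraform validate
    terraform plan -out lab-global-iam.plan   # *.plan files are git-ignored per the .gitignore change above
    terraform apply lab-global-iam.plan
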
diff --git a/aws/lab/global/route53/.terraform.lock.hcl b/aws/lab/global/route53/.terraform.lock.hcl
index ff22d4f2..60b6c24c 100644
--- a/aws/lab/global/route53/.terraform.lock.hcl
+++ b/aws/lab/global/route53/.terraform.lock.hcl
@@ -2,21 +2,21 @@
 # Manual edits may be lost in future updates.
 
 provider "registry.terraform.io/hashicorp/aws" {
-  version     = "4.22.0"
-  constraints = ">= 2.49.0"
+  version     = "4.18.0"
+  constraints = ">= 2.49.0, 4.18.0"
   hashes = [
-    "h1:fmPkEDTodRW9XE0dqpTzBFUtfB3nYurbwzKy//8N93o=",
-    "zh:299efb8ba733b7742f0ef1c5c5467819e0c7bf46264f5f36ba6b6674304a5244",
-    "zh:4db198a41d248491204d4ca644662c32f748177d5cbe01f3c7adbb957d4d77f0",
-    "zh:62ebc2b05b25eafecb1a75f19d6fc5551faf521ada9df9e5682440d927f642e1",
-    "zh:636b590840095b4f817c176034cf649f543c0ce514dc051d6d0994f0a05c53ef",
-    "zh:8594bd8d442288873eee56c0b4535cbdf02cacfcf8f6ddcf8cd5f45bb1d3bc80",
-    "zh:8e18a370949799f20ba967eec07a84aaedf95b3ee5006fe5af6eae13fbf39dc3",
+    "h1:62MWy6fGx/cVk1DnLcc8rUxCCKhi6/R9fi/Af/ph9ag=",
+    "zh:100a11324326bf849b4c85d3c40a81e485726eee99c5a229387b8485a7a8da8b",
+    "zh:2226bbf97101af90e43cd5606d8678f35d7e7b477657d9297c42a1bd2ed42750",
+    "zh:27d51694300c08c32312f8832b889c57a2821dc022d49d38f9b1e14810f8a3fb",
+    "zh:2b8792c76986facfd415f967c5d61022f7ceeaa46c158037fe8939e36d954f99",
+    "zh:3ea787967de772cc3a13469753080c8fa81be5aefc735d3753c7627f63c948e5",
+    "zh:64d58463cbb2b93d5202ef311a101890a1e083f9587f3eabb9f2e26dd0cf8f43",
     "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
-    "zh:aa968514231e404fb53311d8eae2e8b6bde1fdad1f4dd5a592ab93d9cbf11af4",
-    "zh:af8e5c48bf36d4fff1a6fca760d5b85f14d657cbdf95e9cd5e898c68104bad31",
-    "zh:d8a75ba36bf8b6f2e49be5682f48eccb6c667a4484afd676ae347213ae208622",
-    "zh:dd7c419674a47e587dabe98b150a8f1f7e31c248c68e8bf5e9ca0a400b5e2c4e",
-    "zh:fdeb6314a2ce97489bbbece59511f78306955e8a23b02cbd1485bd04185a3673",
+    "zh:b10eecf4c034a229712825124e7c0b765c5904648550dc8f844f68638531d337",
+    "zh:d9a3cc46e2746c40ea69bcfb2d12e765ee6bda3e1ed8ce73f272d492ff4836bb",
+    "zh:df625e57aa3b5fb3e4562da44daf6565289818ba2a7e66f86ad968b43fdb5148",
+    "zh:eaaa3a5d2a15a87b346e521872120a3ca7f6777a04226a55f51022eaf4097963",
+    "zh:ec6f4b00ae4f9d536f2a6c2e5a5f149867194268ce9068a9c348bc3e678fbfce",
   ]
 }
diff --git a/aws/lab/global/route53/provider.tf b/aws/lab/global/route53/provider.tf
index 7e965f85..847b8424 100644
--- a/aws/lab/global/route53/provider.tf
+++ b/aws/lab/global/route53/provider.tf
@@ -2,11 +2,23 @@ terraform {
 
   backend "s3" {
     bucket         = "todo-tf-state-lab"
-    key            = "lab/global/route53.tf"
+    key            = "lab/us-east-2/global/route53.tf"
     region         = "us-east-2"
     encrypt        = true
     kms_key_id     = "alias/todo-tf-state-key"
    dynamodb_table = "todo-tf-state-lab"
   }
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = "= 4.18.0"
+    }
+  }
+  required_version = "= 1.2.2"
+}
+
+provider "aws" {
+  region = var.app.region
 }
\ No newline at end of file
diff --git a/aws/lab/global/route53/variables.tf b/aws/lab/global/route53/variables.tf
index 5c3cb5b7..a8a28703 100644
--- a/aws/lab/global/route53/variables.tf
+++ b/aws/lab/global/route53/variables.tf
@@ -2,6 +2,7 @@ variable "app" {
   default = {
     environment = "dev"
     account     = "lab"
+    region      = "us-east-2"
   }
 }
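
Note: the backend key for route53 moves from lab/global/route53.tf to lab/us-east-2/global/route53.tf, so the existing remote state has to follow the key. One plausible migration path (an assumption about the intended workflow, not part of this diff):

    cd lab/global/route53
    terraform init -migrate-state   # Terraform detects the changed backend configuration and offers to copy state
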
diff --git a/aws/lab/us-east-2/dev/eks/cluster/node_groups.tf b/aws/lab/us-east-2/dev/eks/cluster/node_groups.tf
index b5f793bd..48bdbb26 100644
--- a/aws/lab/us-east-2/dev/eks/cluster/node_groups.tf
+++ b/aws/lab/us-east-2/dev/eks/cluster/node_groups.tf
@@ -1,18 +1,15 @@
 locals {
-
-  node_group_iam_role_name = "${var.app.account}-${var.app.environment}-eks-node-group-role"
-
   self_managed_node_groups = [
     {
-      name                         = "node_group_01"
-      platform                     = "bottlerocket"
-      instance_type                = "m5.large"
-      ami_id                       = data.aws_ami.eks_default_bottlerocket.id
-      asg_min_size                 = 1
-      asg_desired_capacity         = 2
-      asg_max_size                 = 3
-      key_name                     = aws_key_pair.this.key_name
-      bootstrap_extra_args         = <<-EOT
+      name                 = "node_group_01"
+      platform             = "bottlerocket"
+      instance_type        = "m5.large"
+      ami_id               = data.aws_ami.eks_default_bottlerocket.id
+      asg_min_size         = 1
+      asg_desired_capacity = 2
+      asg_max_size         = 3
+      key_name             = aws_key_pair.this.key_name
+      bootstrap_extra_args = <<-EOT
       # The admin host container provides SSH access and runs with "superpowers".
       # It is disabled by default, but can be disabled explicitly.
       [settings.host-containers.admin]
@@ -27,8 +24,6 @@ locals {
       [settings.kubernetes.node-labels]
      ingress = "allowed"
       EOT
-      iam_role_name                = local.node_group_iam_role_name
-      iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
     }
   ]
 }
diff --git a/aws/lab/us-east-2/dev/eks/cluster/variables.tf b/aws/lab/us-east-2/dev/eks/cluster/variables.tf
index 433f3da7..5d6873d7 100644
--- a/aws/lab/us-east-2/dev/eks/cluster/variables.tf
+++ b/aws/lab/us-east-2/dev/eks/cluster/variables.tf
@@ -2,6 +2,7 @@ variable "app" {
   default = {
     environment = "dev"
     account     = "lab"
+    id          = "{ACCOUNT_ID}FIXME"
     region      = "us-east-2"
   }
 }
\ No newline at end of file
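
Note: the {ACCOUNT_ID}FIXME placeholder is deliberately left for the operator; the KMS key policy below interpolates var.app.id into a root-principal ARN, so nothing will plan cleanly until it is the real 12-digit account id, which can be looked up with:

    aws sts get-caller-identity --query Account --output text
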
diff --git a/aws/modules/eks/cluster/cloudwatch.tf b/aws/modules/eks/cluster/cloudwatch.tf
new file mode 100644
index 00000000..77849a47
--- /dev/null
+++ b/aws/modules/eks/cluster/cloudwatch.tf
@@ -0,0 +1,20 @@
+resource "aws_cloudwatch_log_group" "eks_cluster_log_group" {
+  name              = "/aws/eks/${local.cluster_name}/cluster"
+  retention_in_days = 90
+  tags              = local.tags
+}
+
+resource "aws_iam_role_policy" "eks_cluster_cloudwatch_inline_policy" {
+  name = "${local.cluster_name}_cloudwatch_inline_policy"
+  role = data.aws_iam_role.cluster_iam_role.id
+  policy = jsonencode({
+    Version = "2012-10-17"
+    Statement = [
+      {
+        Action   = ["logs:CreateLogGroup"]
+        Effect   = "Deny"
+        Resource = aws_cloudwatch_log_group.eks_cluster_log_group.arn
+      },
+    ]
+  })
+}
\ No newline at end of file
diff --git a/aws/modules/eks/cluster/data-sources.tf b/aws/modules/eks/cluster/data-sources.tf
index b78f79bd..7ee7d3b9 100644
--- a/aws/modules/eks/cluster/data-sources.tf
+++ b/aws/modules/eks/cluster/data-sources.tf
@@ -6,6 +6,14 @@ data "aws_vpc" "selected" {
   id = var.vpc_id
 }
 
+data "aws_iam_role" "cluster_iam_role" {
+  name = local.cluster_iam_role_name
+}
+
+data "aws_iam_instance_profile" "node_group_iam_instance_profile" {
+  name = local.node_group_iam_instance_profile_name
+}
+
 # This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to enc/dec/attach encrypted EBS volumes
 data "aws_iam_policy_document" "ebs" {
   # Copy of default KMS policy that lets you manage it
diff --git a/aws/modules/eks/cluster/kms.tf b/aws/modules/eks/cluster/kms.tf
index 5a502034..ecea84cd 100644
--- a/aws/modules/eks/cluster/kms.tf
+++ b/aws/modules/eks/cluster/kms.tf
@@ -1,13 +1,53 @@
 resource "aws_kms_key" "eks" {
-  description             = "EKS Secret Encryption Key"
-  deletion_window_in_days = 7
-  enable_key_rotation     = true
-  tags                    = local.tags
+  description              = "EKS Secret Encryption Key"
+  policy                   = data.aws_iam_policy_document.eks.json
+  customer_master_key_spec = "SYMMETRIC_DEFAULT"
+  enable_key_rotation      = true
+  deletion_window_in_days  = 7
+  tags                     = local.tags
 }
 
-resource "aws_kms_key" "ebs" {
-  description             = "Customer managed key to encrypt self managed node group volumes"
-  deletion_window_in_days = 7
-  policy                  = data.aws_iam_policy_document.ebs.json
-  tags                    = local.tags
+resource "aws_kms_alias" "eks" {
+  name          = "alias/${var.app.account}-${var.app.environment}-eks"
+  target_key_id = aws_kms_key.eks.key_id
+}
+
+data "aws_iam_policy_document" "eks" {
+  statement {
+    sid       = "All access to root user"
+    actions   = ["kms:*"]
+    resources = ["*"]
+
+    principals {
+      type        = "AWS"
+      identifiers = ["arn:aws:iam::${var.app.id}:root"]
+    }
+  }
+}
+
+resource "aws_iam_role_policy_attachment" "eks_cluster_encryption" {
+  policy_arn = aws_iam_policy.eks_cluster_encryption.arn
+  role       = data.aws_iam_role.cluster_iam_role.name
+}
+
+resource "aws_iam_policy" "eks_cluster_encryption" {
+  name        = "${data.aws_iam_role.cluster_iam_role.name}-ClusterEncryption"
+  description = "eks cluster ${local.cluster_name} encryption policy"
+
+  policy = jsonencode({
+    Version = "2012-10-17"
+    Statement = [
+      {
+        Action = [
+          "kms:Encrypt",
+          "kms:Decrypt",
+          "kms:ListGrants",
+          "kms:DescribeKey",
+        ]
+        Effect   = "Allow"
+        Resource = aws_kms_key.eks.arn
+      },
+    ]
+  })
+  tags = local.tags
 }
\ No newline at end of file
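
Note: the split above is deliberate; the key policy grants the account root full kms:* on the key, while the cluster role only receives the four data-plane actions via the ClusterEncryption policy. A rough spot-check after apply (alias and role names assume account=lab, environment=dev):

    aws kms describe-key --key-id alias/lab-dev-eks
    aws kms get-key-rotation-status --key-id alias/lab-dev-eks
    aws iam list-attached-role-policies --role-name lab-eks-cluster-role
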
diff --git a/aws/modules/eks/cluster/locals.tf b/aws/modules/eks/cluster/locals.tf
new file mode 100644
index 00000000..1ff4e245
--- /dev/null
+++ b/aws/modules/eks/cluster/locals.tf
@@ -0,0 +1,32 @@
+locals {
+  k8s_version                          = 1.19
+  cluster_name                         = "${var.app.account}-${var.app.environment}-eks"
+  cluster_iam_role_name                = "${var.app.account}-eks-cluster-role"
+  node_group_iam_instance_profile_name = "${var.app.account}-eks-node-group-role"
+
+  self_managed_node_groups = {
+    for config in var.self_managed_node_groups :
+    config.name => {
+      name                        = config.name
+      platform                    = config.platform
+      ami_id                      = config.ami_id
+      instance_type               = config.instance_type
+      min_size                    = config.asg_min_size
+      max_size                    = config.asg_max_size
+      desired_size                = config.asg_desired_capacity
+      key_name                    = config.key_name
+      iam_instance_profile_arn    = data.aws_iam_instance_profile.node_group_iam_instance_profile.arn
+      create_iam_instance_profile = false
+      bootstrap_extra_args        = config.bootstrap_extra_args
+    }
+  }
+
+
+  tags = merge(var.tags, {
+    account     = var.app.account
+    project     = "infra"
+    environment = var.app.environment
+    application = "eks"
+    team        = "sre"
+  })
+}
\ No newline at end of file
diff --git a/aws/modules/eks/cluster/main.tf b/aws/modules/eks/cluster/main.tf
index cc6fbf0d..8f67e543 100755
--- a/aws/modules/eks/cluster/main.tf
+++ b/aws/modules/eks/cluster/main.tf
@@ -1,35 +1,3 @@
-locals {
-  k8s_version  = 1.19
-  cluster_name = "${var.app.account}-${var.app.environment}-eks"
-
-  self_managed_node_groups = {
-    for config in var.self_managed_node_groups :
-    config.name => {
-      name                         = config.name
-      platform                     = config.platform
-      ami_id                       = config.ami_id
-      instance_type                = config.instance_type
-      min_size                     = config.asg_min_size
-      max_size                     = config.asg_max_size
-      desired_size                 = config.asg_desired_capacity
-      key_name                     = config.key_name
-      iam_role_name                = config.iam_role_name
-      iam_role_use_name_prefix     = false
-      iam_role_additional_policies = config.iam_role_additional_policies
-      bootstrap_extra_args         = config.bootstrap_extra_args
-    }
-  }
-
-
-  tags = merge(var.tags, {
-    account     = var.app.account
-    project     = "infra"
-    environment = var.app.environment
-    application = "eks"
-    team        = "sre"
-  })
-}
-
 module "eks" {
   source  = "terraform-aws-modules/eks/aws"
   version = "18.24.0"
@@ -64,9 +32,8 @@ module "eks" {
   }]
 
   # cluster IAM configuration
-  iam_role_name            = "${var.app.account}-${var.app.environment}-eks-cluster-role"
-  iam_role_use_name_prefix = false
-
+  create_iam_role = false
+  iam_role_arn    = data.aws_iam_role.cluster_iam_role.arn
 
   # Self managed node groups will not automatically create the aws-auth configmap so we need to
   create_aws_auth_configmap = true
diff --git a/aws/modules/eks/cluster/variables.tf b/aws/modules/eks/cluster/variables.tf
index 1c4b5e98..76aa715d 100755
--- a/aws/modules/eks/cluster/variables.tf
+++ b/aws/modules/eks/cluster/variables.tf
@@ -3,6 +3,7 @@ variable "app" {
   {
     environment = string # dev, uat
     account     = string # lab, prd
+    id          = string
     region      = string
   }
 )
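
Note: the module now reads the cluster role and the node-group instance profile through data sources instead of creating them, so aws/lab/global/iam must be applied before any plan of the cluster stack can succeed. A pre-flight check along these lines (names assume the iam module's variables.tf defaults):

    aws iam get-role --role-name lab-eks-cluster-role
    aws iam get-instance-profile --instance-profile-name lab-eks-node-group-role
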
diff --git a/aws/tf.sh b/aws/tf.sh
new file mode 100644
index 00000000..6d787496
--- /dev/null
+++ b/aws/tf.sh
@@ -0,0 +1,165 @@
+#!/bin/bash
+
+me="$(basename "$(test -L "$0" && readlink "$0" || echo "$0")")"
+
+fmt() {
+  find . \( -path '*/.terraform/*' \) -prune -false -o -type f -name '*.tf' | while read -r f
+  do
+    echo "$f"
+    terraform fmt "$f"
+  done
+}
+
+plan() {
+  local chdir=$1
+  local plan_file=$2
+  terraform -chdir=$chdir plan -out $plan_file
+}
+
+apply() {
+  local chdir=$1
+  local plan_file=$2
+  terraform -chdir=$chdir apply $plan_file
+}
+
+module_apply() {
+  local account=$1
+  local region=$2
+  local chdir
+  local plan_file
+  if [[ $region == "global" ]]; then
+    component=$3
+    chdir=$account/global/$component
+    plan_file=$account-global-${component////-}.plan
+  else
+    environment=$3
+    component=$4
+    chdir=$account/$region/$environment/$component
+    plan_file=$account-$region-$environment-${component////-}.plan
+  fi
+  echo -e "\n🔧 Running command[apply] for account[$account] region[$region] environment[$environment] component[$component]"
+  apply $chdir $plan_file
+}
+
+v2_build_all() {
+  local account=$1
+  local region=$2
+  local environment=$3
+  module_config=$(jq '."'"$account"'"' todo-v2-tf-modules-config.json)
+  global_modules=$(jq -r '.global[]' <<< "$module_config")
+  region_modules=$(jq -r '."'"$region"'"[]' <<< "$module_config")
+  for global_module in $global_modules; do
+    tf $account global $global_module init -reconfigure
+    tf $account global $global_module validate
+    tf $account global $global_module plan
+  done
+  for region_module in $region_modules; do
+    if [ $( jq 'has("'"${region_module}"'")' <<< "$module_config" ) == "true" ]; then
+      submodules=$( jq -r '."'"$region_module"'"[] | paths(scalars) as $p | [ ( [ $p[] | tostring ] | join("/") ) , (getpath($p)) ] | join("/")' <<< "$module_config")
+      for submodule in $submodules; do
+        component=$region_module/${submodule///[[:digit:]]/}
+        echo "Planning for region :- ${region}"
+        tf $account $region $environment $component init -reconfigure
+        tf $account $region $environment $component validate
+        tf $account $region $environment $component plan
+      done
+    fi
+  done
+}
+
+
+tf() {
+  local account=$1
+  local region=$2
+  if [[ $region == "global" ]]; then
+    local component=$3
+    local cmd=$4
+    echo -e "\n🔧 Running command[$cmd] for account[$account] component[$component]"
+    if [[ $cmd == "plan" ]]; then
+      terraform -chdir=$account/global/$component $cmd -out $account-global-${component////-}.plan
+    elif [[ $cmd == "apply" ]]; then
+      terraform -chdir=$account/global/$component $cmd $account-global-${component////-}.plan
+    else
+      terraform -chdir=$account/global/$component $cmd "${@:5}"
+    fi
+  else
+    local environment=$3
+    local component=$4
+    local cmd=$5
+    echo -e "\n🔧 Running command[$cmd] for account[$account] region[$region] environment[$environment] component[$component]"
+    if [[ $cmd == "plan" ]]; then
+      plan $account/$region/$environment/$component $account-$region-$environment-${component////-}.plan
+    elif [[ $cmd == "apply" ]]; then
+      apply $account/$region/$environment/$component $account-$region-$environment-${component////-}.plan
+    else
+      terraform -chdir=$account/$region/$environment/$component $cmd "${@:6}"
+    fi
+  fi
+}
+
+help() {
+  echo "Usage: ${me} [OPTIONS] COMMAND"
+  echo ""
+  echo "Author:"
+  echo "  TODO APP INFRA Contributors - <$(git config --get remote.origin.url)>"
+  echo ""
+  echo "Options:"
+  echo "  --debug, -D      Enable debug mode"
+  echo "  help, -h         show help"
+  echo ""
+  echo "Commands:"
+  echo "  fmt              Run terraform fmt on every *.tf file in the tree"
+  echo "  apply            Apply a previously generated plan file for one module"
+  echo "  v2-build-all     Build(init/validate/plan) all modules for todoV2"
+}
+
+
+if [ $# -lt 1 ]; then
+  help
+fi
+
+
+while test -n "$1"; do
+  case "$1" in
+    -h|help)
+      help
+      shift
+      ;;
+    fmt)
+      fmt
+      shift
+      ;;
+    apply)
+      if [[ $# -gt 5 ]]; then
+        echo "Too many args";
+        help
+        exit 1;
+      elif [[ $# -lt 4 ]]; then
+        echo "Too few args";
+        help
+        exit 1;
+      fi
+      module_apply $2 $3 $4 $5
+      exit $?
+      ;;
+    v2-build-all)
+      if [[ $# -gt 4 ]]; then
+        echo "Too many args";
+        help
+        exit 1;
+      elif [[ $# -lt 4 ]]; then
+        echo "Too few args";
+        help
+        exit 1;
+      fi
+      v2_build_all $2 $3 $4
+      exit $?
+      ;;
+    *)
+      echo "Unrecognized option : ${1}"
+      help
+      exit 1;
+  esac
+done
+
+
+
diff --git a/aws/todo-v2-tf-modules-config.json b/aws/todo-v2-tf-modules-config.json
new file mode 100644
index 00000000..36d0f352
--- /dev/null
+++ b/aws/todo-v2-tf-modules-config.json
@@ -0,0 +1,25 @@
+{
+  "lab": {
+    "global": [
+      "iam",
+      "route53"
+    ],
+    "us-east-2": [
+      "vpc",
+      "eks",
+      "apps"
+    ],
+    "apps": [
+      {
+        "todo": {
+          "v2": [
+            "kms",
+            "ecr",
+            "mongo",
+            "redis"
+          ]
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
diff --git a/k8s/setup.sh b/k8s/setup.sh
index 493095fd..2a97223b 100755
--- a/k8s/setup.sh
+++ b/k8s/setup.sh
@@ -1,4 +1,4 @@
-# #!/bin/bash
+#!/bin/bash
 
 me="$(basename "$(test -L "$0" && readlink "$0" || echo "$0")")"
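
Note: typical invocations of tf.sh as dispatched above (a sketch, assuming the script runs from aws/ so the account-relative -chdir paths resolve):

    ./tf.sh fmt                                # terraform fmt across the tree
    ./tf.sh v2-build-all lab us-east-2 dev     # init/validate/plan every module in the JSON config
    ./tf.sh apply lab global iam               # applies the previously written lab-global-iam.plan
    ./tf.sh apply lab us-east-2 dev vpc        # applies lab-us-east-2-dev-vpc.plan
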