diff --git a/.dockleconfig b/.dockleconfig new file mode 100644 index 000000000..051fd7789 --- /dev/null +++ b/.dockleconfig @@ -0,0 +1,4 @@ +# This file allows you to specify a list of files that are acceptable to Dockle +# To allow multiple files, use a list of names, example below. Make sure to remove the leading # +# DOCKLE_ACCEPT_FILES="file1,path/to/file2,file3/path,etc" +# https://github.com/goodwithtech/dockle#accept-suspicious-environment-variables--files--file-extensions diff --git a/.github/actions/configure-aws-credentials/action.yml b/.github/actions/configure-aws-credentials/action.yml new file mode 100644 index 000000000..0bbb9a471 --- /dev/null +++ b/.github/actions/configure-aws-credentials/action.yml @@ -0,0 +1,58 @@ +name: 'Configure AWS Credentials' +description: 'Configure AWS Credentials for a given application and | environment so that the GitHub Actions workflow can access AWS resources. | This is a wrapper around https://github.com/aws-actions/configure-aws-credentials | that first determines the account, role, and region based on the | account_names_by_environment configuration in app-config' +inputs: + app_name: + description: 'Name of application folder under /infra' + required: true + environment: + description: 'Name of environment (dev, staging, prod) that AWS resources live in, or "shared" for resources that are shared across environments' + required: true +runs: + using: "composite" + steps: + - name: Get AWS account authentication details (AWS account, IAM role, AWS region) + run: | + # Get AWS account authentication details (AWS account, IAM role, AWS region) + # associated with the application environment to figure out which AWS + # account to log into, which IAM role to assume, and which AWS region to use + + echo "::group::AWS account authentication details" + + terraform -chdir=infra/project-config init > /dev/null + terraform -chdir=infra/project-config refresh > /dev/null + AWS_REGION=$(terraform
-chdir=infra/project-config output -raw default_region) + echo "AWS_REGION=$AWS_REGION" + GITHUB_ACTIONS_ROLE_NAME=$(terraform -chdir=infra/project-config output -raw github_actions_role_name) + echo "GITHUB_ACTIONS_ROLE_NAME=$GITHUB_ACTIONS_ROLE_NAME" + + terraform -chdir=infra/${{ inputs.app_name }}/app-config init > /dev/null + terraform -chdir=infra/${{ inputs.app_name }}/app-config refresh > /dev/null + ACCOUNT_NAME=$(terraform -chdir=infra/${{ inputs.app_name }}/app-config output -json account_names_by_environment | jq -r .${{ inputs.environment }}) + echo "ACCOUNT_NAME=$ACCOUNT_NAME" + + # Get the account id associated with the account name extracting the + # ACCOUNT_ID part of the tfbackend file name which looks like + # <ACCOUNT_NAME>.<ACCOUNT_ID>.s3.tfbackend. + # The cut command splits the string with period as the delimiter and + # extracts the second field. + ACCOUNT_ID=$(ls infra/accounts/$ACCOUNT_NAME.*.s3.tfbackend | cut -d. -f2) + echo "ACCOUNT_ID=$ACCOUNT_ID" + + AWS_ROLE_TO_ASSUME=arn:aws:iam::$ACCOUNT_ID:role/$GITHUB_ACTIONS_ROLE_NAME + echo "AWS_ROLE_TO_ASSUME=$AWS_ROLE_TO_ASSUME" + + echo "::endgroup::" + + echo "Setting env vars AWS_ROLE_TO_ASSUME and AWS_REGION..." + echo "AWS_ROLE_TO_ASSUME=$AWS_ROLE_TO_ASSUME" >> "$GITHUB_ENV" + echo "AWS_REGION=$AWS_REGION" >> "$GITHUB_ENV" + shell: bash + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ env.AWS_ROLE_TO_ASSUME }} + aws-region: ${{ env.AWS_REGION }} diff --git a/.github/workflows/README.md b/.github/workflows/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/.github/workflows/build-and-publish.yml b/.github/workflows/build-and-publish.yml new file mode 100644 index 000000000..a5e824360 --- /dev/null +++ b/.github/workflows/build-and-publish.yml @@ -0,0 +1,41 @@ +name: Build and Publish + +on: + workflow_call: + inputs: + ref: + description: The branch, tag or SHA to checkout.
When checking out the repository that triggered a workflow, this defaults to the reference or SHA for that event. Otherwise, use branch or tag that triggered the workflow run. + required: true + type: string + workflow_dispatch: + inputs: + ref: + description: The branch, tag or SHA to checkout. When checking out the repository that triggered a workflow, this defaults to the reference or SHA for that event. Otherwise, use branch or tag that triggered the workflow run. + required: true + type: string + +jobs: + build-and-publish: + name: Build and publish + runs-on: ubuntu-latest + + permissions: + contents: read + id-token: write + + steps: + - uses: actions/checkout@v3 + with: + ref: ${{ inputs.ref }} + + - name: Build release + run: make release-build + + - name: Configure AWS credentials + uses: ./.github/actions/configure-aws-credentials + with: + app_name: app + environment: shared + + - name: Publish release + run: make release-publish diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml new file mode 100644 index 000000000..0d0fb1e6d --- /dev/null +++ b/.github/workflows/cd.yml @@ -0,0 +1,62 @@ +name: Deploy +# Need to set a default value for when the workflow is triggered from a git push, +# which bypasses the default configuration for inputs and cannot use env.ENVIRONMENT +# since env context is not accessible in this context +run-name: Deploy ${{ github.ref_name }} to ${{ inputs.environment || 'dev' }} + +on: + # !! 
Uncomment the following lines once you've set up the dev environment and ready to turn on continuous deployment + # push: + # branches: + # - 'main' + # paths: + # - 'app/**' + # - 'bin/**' + # - 'infra/**' + workflow_dispatch: + inputs: + environment: + description: "target environment" + required: true + default: "dev" + type: choice + options: + - dev + - staging + - prod + +env: + APP_NAME: app + # Need to set a default value for when the workflow is triggered from a git push, + # which bypasses the default configuration for inputs + ENVIRONMENT: ${{ inputs.environment || 'dev' }} + +# Need to repeat the expression since env.ENVIRONMENT is not accessible in this context +concurrency: cd-${{ inputs.environment || 'dev' }} + +jobs: + # Don't need to call the build-and-publish workflow since the database-migrations + # workflow already calls it + database-migrations: + name: Database migrations + uses: ./.github/workflows/database-migrations.yml + with: + environment: ${{ inputs.environment || 'dev' }} + deploy: + name: Deploy + runs-on: ubuntu-latest + needs: [database-migrations] + permissions: + contents: read + id-token: write + steps: + - uses: actions/checkout@v3 + + - name: Configure AWS credentials + uses: ./.github/actions/configure-aws-credentials + with: + app_name: ${{ env.APP_NAME }} + environment: ${{ env.ENVIRONMENT }} + + - name: Deploy release + run: make release-deploy APP_NAME=$APP_NAME ENVIRONMENT="$ENVIRONMENT" diff --git a/.github/workflows/check-infra-auth.yml b/.github/workflows/check-infra-auth.yml new file mode 100644 index 000000000..120e24006 --- /dev/null +++ b/.github/workflows/check-infra-auth.yml @@ -0,0 +1,30 @@ +name: Check GitHub Actions AWS Authentication + +on: + workflow_dispatch: + inputs: + aws_region: + description: AWS region + default: us-east-1 + required: false + role_to_assume: + description: ARN of IAM role to assume + required: true + +permissions: + contents: read + id-token: write + +jobs: + caller-identity: + 
name: Check caller identity + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v3 + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-region: ${{ inputs.aws_region }} + role-to-assume: ${{ inputs.role_to_assume }} + - run: aws sts get-caller-identity diff --git a/.github/workflows/ci-infra.yml b/.github/workflows/ci-infra.yml new file mode 100644 index 000000000..889067833 --- /dev/null +++ b/.github/workflows/ci-infra.yml @@ -0,0 +1,103 @@ +name: CI Infra Checks + +on: + push: + branches: + - main + paths: + - infra/** + - .github/workflows/ci-infra.yml + pull_request: + paths: + - infra/** + - test/** + - .github/workflows/ci-infra.yml + +jobs: + check-terraform-format: + name: Check Terraform format + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: hashicorp/setup-terraform@v2 + with: + terraform_version: 1.4.6 + terraform_wrapper: false + - name: Run infra-lint + run: | + echo "If this fails, run 'make infra-format'" + make infra-lint + validate-terraform: + name: Validate Terraform modules + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: hashicorp/setup-terraform@v2 + with: + terraform_version: 1.4.6 + terraform_wrapper: false + - name: Run infra-validate + run: make infra-validate + check-compliance-with-checkov: + name: Check compliance with checkov + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: "3.10" + - name: Run Checkov check + # Pin to specific checkov version rather than running from checkov@master + # since checkov frequently adds new checks that can cause CI checks to fail unpredictably. + # There is currently no way to specify the checkov version to pin to (See https://github.com/bridgecrewio/checkov-action/issues/41) + # so we need to pin the version of the checkov-action, which indirectly pins the checkov version. 
+ # In this case, checkov-action v12.2296.0 is mapped to checkov v2.3.194. + uses: bridgecrewio/checkov-action@v12.2296.0 + with: + directory: infra + framework: terraform + quiet: true # only displays failed checks + check-compliance-with-tfsec: + name: Check compliance with tfsec + runs-on: ubuntu-latest + + permissions: + contents: read + pull-requests: write + + steps: + - uses: actions/checkout@v3 + - name: Run tfsec check + uses: aquasecurity/tfsec-pr-commenter-action@v1.2.0 + with: + github_token: ${{ github.token }} + # !! Uncomment to trigger automated infra tests once dev environment is set up + # infra-test-e2e: + # name: End-to-end tests + # runs-on: ubuntu-latest + # + # permissions: + # contents: read + # id-token: write + # + # steps: + # - uses: actions/checkout@v3 + + # - uses: hashicorp/setup-terraform@v2 + # with: + # terraform_version: 1.2.1 + # terraform_wrapper: false + + # - uses: actions/setup-go@v3 + # with: + # go-version: ">=1.19.0" + + # - name: Configure AWS credentials + # uses: ./.github/actions/configure-aws-credentials + # with: + # app_name: app + # # Run infra CI on dev environment + # environment: dev + + # - name: Run Terratest + # run: make infra-test diff --git a/.github/workflows/ci-vulnerability-scans.yml b/.github/workflows/ci-vulnerability-scans.yml new file mode 100644 index 000000000..b95887318 --- /dev/null +++ b/.github/workflows/ci-vulnerability-scans.yml @@ -0,0 +1,137 @@ +# GitHub Actions CI workflow that runs vulnerability scans on the application's Docker image +# to ensure images built are secure before they are deployed. + +# NOTE: The workflow isn't able to pass the docker image between jobs, so each builds the image. 
+# A future PR will pass the image between the scans to reduce overhead and increase speed +name: CI Vulnerability Scans + +on: + push: + branches: + - main + paths: + - app/** + - .grype.yml + - .hadolint.yaml + - .trivyignore + - .github/workflows/ci-vulnerability-scans.yml + pull_request: + paths: + - app/** + - .grype.yml + - .hadolint.yaml + - .trivyignore + - .github/workflows/ci-vulnerability-scans.yml + +jobs: + hadolint-scan: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + + # Scans Dockerfile for any bad practices or issues + - name: Scan Dockerfile by hadolint + uses: hadolint/hadolint-action@v3.1.0 + with: + dockerfile: app/Dockerfile + format: tty + failure-threshold: warning + output-file: hadolint-results.txt + + - name: Save output to workflow summary + if: always() # Runs even if there is a failure + run: | + cat hadolint-results.txt >> $GITHUB_STEP_SUMMARY + + trivy-scan: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + + - name: Build and tag Docker image for scanning + id: build-image + run: | + make release-build + IMAGE_NAME=$(make release-image-name) + IMAGE_TAG=$(make release-image-tag) + echo "image=$IMAGE_NAME:$IMAGE_TAG" >> $GITHUB_OUTPUT + + - name: Run Trivy vulnerability scan + uses: aquasecurity/trivy-action@master + with: + scan-type: image + image-ref: ${{ steps.build-image.outputs.image }} + format: table + exit-code: 1 + ignore-unfixed: true + vuln-type: os + scanners: vuln,secret + + - name: Save output to workflow summary + if: always() # Runs even if there is a failure + run: | + echo "View results in GitHub Action logs" >> $GITHUB_STEP_SUMMARY + + anchore-scan: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + + - name: Build and tag Docker image for scanning + id: build-image + run: | + make release-build + IMAGE_NAME=$(make release-image-name) + IMAGE_TAG=$(make release-image-tag) + echo "image=$IMAGE_NAME:$IMAGE_TAG" >> $GITHUB_OUTPUT + + - name: Run Anchore 
vulnerability scan + uses: anchore/scan-action@v3 + with: + image: ${{ steps.build-image.outputs.image }} + output-format: table + + - name: Save output to workflow summary + if: always() # Runs even if there is a failure + run: | + echo "View results in GitHub Action logs" >> $GITHUB_STEP_SUMMARY + + dockle-scan: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + + - name: Build and tag Docker image for scanning + id: build-image + run: | + make release-build + IMAGE_NAME=$(make release-image-name) + IMAGE_TAG=$(make release-image-tag) + echo "image=$IMAGE_NAME:$IMAGE_TAG" >> $GITHUB_OUTPUT + + # Dockle doesn't allow you to have an ignore file for the DOCKLE_ACCEPT_FILES + # variable, this will save the variable in this file to env for Dockle + - name: Set any acceptable Dockle files + run: | + if grep -q "^DOCKLE_ACCEPT_FILES=.*" .dockleconfig; then + grep -s '^DOCKLE_ACCEPT_FILES=' .dockleconfig >> $GITHUB_ENV + fi + + - name: Run Dockle container linter + uses: erzz/dockle-action@v1.3.1 + with: + image: ${{ steps.build-image.outputs.image }} + exit-code: '1' + failure-threshold: WARN + accept-filenames: ${{ env.DOCKLE_ACCEPT_FILES }} + + - name: Save output to workflow summary + if: always() # Runs even if there is a failure + run: | + echo "```json" >> $GITHUB_STEP_SUMMARY + cat dockle-report.json >> $GITHUB_STEP_SUMMARY + echo "```" >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/database-migrations.yml b/.github/workflows/database-migrations.yml new file mode 100644 index 000000000..d20da9fa4 --- /dev/null +++ b/.github/workflows/database-migrations.yml @@ -0,0 +1,43 @@ +name: Database migrations + +on: + workflow_call: + inputs: + app_name: + description: "name of application folder under infra directory" + default: app + type: string + environment: + description: "the name of the application environment (e.g. 
dev, staging, prod)" + required: true + type: string + +concurrency: database-migrations-${{ inputs.environment }} + +jobs: + build-and-publish: + name: Build + uses: ./.github/workflows/build-and-publish.yml + with: + ref: ${{ github.ref }} + run-migrations: + name: Run migrations + runs-on: ubuntu-latest + needs: [build-and-publish] + + permissions: + contents: read + id-token: write + + steps: + - uses: actions/checkout@v3 + + - name: Configure AWS credentials + uses: ./.github/actions/configure-aws-credentials + with: + app_name: ${{ inputs.app_name }} + environment: ${{ inputs.environment }} + + - name: Run migrations + run: | + make release-run-database-migrations APP_NAME=${{ inputs.app_name }} ENVIRONMENT=${{ inputs.environment }} diff --git a/.hadolint.yaml b/.hadolint.yaml new file mode 100644 index 000000000..d552e3548 --- /dev/null +++ b/.hadolint.yaml @@ -0,0 +1,6 @@ +# List of settings and ignore or safelist findings for the hadolint scanner + +# For more information on any settings you can specify, see the actions' documentation here +# https://github.com/hadolint/hadolint#configure +failure-threshold: warning +ignored: [] diff --git a/.trivyignore b/.trivyignore new file mode 100644 index 000000000..eecff5489 --- /dev/null +++ b/.trivyignore @@ -0,0 +1,9 @@ +# List of vulnerabilities to ignore for the trivy scan +# Please add safelists in the following format to make it easier when checking +# Package/module name: URL to vulnerability for checking updates +# Versions: URL to the version history +# Dependencies: Name of any other packages or modules that are dependent on this version +# Link to the dependencies for ease of checking for updates +# Issue: Why there is a finding and why this is here or not been removed +# Last checked: Date last checked in scans +#The-CVE-or-vuln-id # Remove comment at start of line diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..52a1db4c7 --- /dev/null +++ b/Makefile @@ -0,0 +1,182 @@ 
+PROJECT_ROOT ?= $(notdir $(PWD)) + +# For now only support a single app in the folder `app/` within the repo +# In the future, support multiple apps, and which app is being operated +# on will be determined by the APP_NAME Makefile argument +APP_NAME ?= app + +# Use `=` instead of `:=` so that we only execute `./bin/current-account-alias.sh` when needed +# See https://www.gnu.org/software/make/manual/html_node/Flavors.html#Flavors +CURRENT_ACCOUNT_ALIAS = `./bin/current-account-alias.sh` + +CURRENT_ACCOUNT_ID = $(./bin/current-account-id.sh) + +# Get the list of reusable terraform modules by getting out all the modules +# in infra/modules and then stripping out the "infra/modules/" prefix +MODULES := $(notdir $(wildcard infra/modules/*)) + +# Check that given variables are set and all have non-empty values, +# die with an error otherwise. +# +# Params: +# 1. Variable name(s) to test. +# 2. (optional) Error message to print. +# Based off of https://stackoverflow.com/questions/10858261/how-to-abort-makefile-if-variable-not-set +check_defined = \ + $(strip $(foreach 1,$1, \ + $(call __check_defined,$1,$(strip $(value 2))))) +__check_defined = \ + $(if $(value $1),, \ + $(error Undefined $1$(if $2, ($2))$(if $(value @), \ + required by target `$@'))) + + +.PHONY : \ + infra-validate-modules \ + infra-validate-env-template \ + infra-check-compliance \ + infra-check-compliance-checkov \ + infra-check-compliance-tfsec \ + infra-lint \ + infra-format \ + release-build \ + release-publish \ + release-deploy \ + image-registry-login \ + db-migrate \ + db-migrate-down \ + db-migrate-create + +infra-set-up-account: ## Configure and create resources for current AWS profile and save tfbackend file to infra/accounts/$ACCOUNT_NAME.ACCOUNT_ID.s3.tfbackend + @:$(call check_defined, ACCOUNT_NAME, human readable name for account e.g. 
"prod" or the AWS account alias) + ./bin/set-up-current-account.sh $(ACCOUNT_NAME) + +infra-configure-app-build-repository: ## Configure infra/$APP_NAME/build-repository tfbackend and tfvars files + ./bin/configure-app-build-repository.sh $(APP_NAME) + +infra-configure-app-database: ## Configure infra/$APP_NAME/database module's tfbackend and tfvars files for $ENVIRONMENT + @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) + @:$(call check_defined, ENVIRONMENT, the name of the application environment e.g. "prod" or "staging") + ./bin/configure-app-database.sh $(APP_NAME) $(ENVIRONMENT) + +infra-configure-monitoring-secrets: ## Set $APP_NAME's incident management service integration URL for $ENVIRONMENT + @:$(call check_defined, ENVIRONMENT, the name of the application environment e.g. "prod" or "staging") + @:$(call check_defined, URL, incident management service (PagerDuty or VictorOps) integration URL) + ./bin/configure-monitoring-secret.sh $(APP_NAME) $(ENVIRONMENT) $(URL) + +infra-configure-app-service: ## Configure infra/$APP_NAME/service module's tfbackend and tfvars files for $ENVIRONMENT + @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) + @:$(call check_defined, ENVIRONMENT, the name of the application environment e.g. 
"prod" or "staging") + ./bin/configure-app-service.sh $(APP_NAME) $(ENVIRONMENT) + +infra-update-current-account: ## Update infra resources for current AWS profile + ./bin/terraform-init-and-apply.sh infra/accounts `./bin/current-account-config-name.sh` + +infra-update-app-build-repository: ## Create or update $APP_NAME's build repository + @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) + ./bin/terraform-init-and-apply.sh infra/$(APP_NAME)/build-repository shared + +infra-update-app-database: ## Create or update $APP_NAME's database module for $ENVIRONMENT + # APP_NAME has a default value defined above, but check anyways in case the default is ever removed + @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) + @:$(call check_defined, ENVIRONMENT, the name of the application environment e.g. "prod" or "staging") + ./bin/terraform-init-and-apply.sh infra/$(APP_NAME)/database $(ENVIRONMENT) + +infra-update-app-database-roles: ## Create or update database roles and schemas for $APP_NAME's database in $ENVIRONMENT + @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) + @:$(call check_defined, ENVIRONMENT, the name of the application environment e.g. "prod" or "staging") + ./bin/create-or-update-database-roles.sh $(APP_NAME) $(ENVIRONMENT) + +infra-update-app-service: ## Create or update $APP_NAME's web service module + # APP_NAME has a default value defined above, but check anyways in case the default is ever removed + @:$(call check_defined, APP_NAME, the name of subdirectory of /infra that holds the application's infrastructure code) + @:$(call check_defined, ENVIRONMENT, the name of the application environment e.g. "prod" or "staging") + ./bin/terraform-init-and-apply.sh infra/$(APP_NAME)/service $(ENVIRONMENT) + + +# Validate all infra root and child modules. 
+infra-validate: \ + infra-validate-modules \ + # !! Uncomment the following line once you've set up the infra/project-config module + # infra-validate-env-template + +# Validate all infra root and child modules. +# Validate all infra reusable child modules. The prerequisite for this rule is obtained by +# prefixing each module with the string "infra-validate-module-" +infra-validate-modules: $(patsubst %, infra-validate-module-%, $(MODULES)) + +infra-validate-module-%: + @echo "Validate library module: $*" + terraform -chdir=infra/modules/$* init -backend=false + terraform -chdir=infra/modules/$* validate + +infra-validate-env-template: + @echo "Validate module: env-template" + terraform -chdir=infra/app/env-template init -backend=false + terraform -chdir=infra/app/env-template validate + +infra-check-compliance: infra-check-compliance-checkov infra-check-compliance-tfsec + +infra-check-compliance-checkov: + checkov --directory infra + +infra-check-compliance-tfsec: + tfsec infra + +infra-lint: ## Lint infra code + terraform fmt -recursive -check infra + +infra-format: ## Format infra code + terraform fmt -recursive infra + +infra-test: ## Run end-to-end infra Terratest test suite + cd infra/test && go test -v -timeout 30m + +######################## +## Release Management ## +######################## + +# Include project name in image name so that image name +# does not conflict with other images during local development +IMAGE_NAME := $(PROJECT_ROOT)-$(APP_NAME) + +GIT_REPO_AVAILABLE := $(shell git rev-parse --is-inside-work-tree 2>/dev/null) + +# Generate a unique tag based solely on the git hash. +# This will be the identifier used for deployment via terraform. +ifdef GIT_REPO_AVAILABLE +IMAGE_TAG := $(shell git rev-parse HEAD) +else +IMAGE_TAG := "unknown-dev.$(DATE)" +endif + +# Generate an informational tag so we can see where every image comes from. 
+DATE := $(shell date -u '+%Y%m%d.%H%M%S') +INFO_TAG := $(DATE).$(USER) + +release-build: ## Build release for $APP_NAME and tag it with current git hash + cd $(APP_NAME) && $(MAKE) release-build \ + OPTS="--tag $(IMAGE_NAME):latest --tag $(IMAGE_NAME):$(IMAGE_TAG)" + +release-publish: ## Publish release to $APP_NAME's build repository + ./bin/publish-release.sh $(APP_NAME) $(IMAGE_NAME) $(IMAGE_TAG) + +release-run-database-migrations: ## Run $APP_NAME's database migrations in $ENVIRONMENT + ./bin/run-database-migrations.sh $(APP_NAME) $(IMAGE_TAG) $(ENVIRONMENT) + +release-deploy: ## Deploy release to $APP_NAME's web service in $ENVIRONMENT + @:$(call check_defined, ENVIRONMENT, the name of the application environment e.g. "prod" or "dev") + ./bin/deploy-release.sh $(APP_NAME) $(IMAGE_TAG) $(ENVIRONMENT) + +release-image-name: ## Prints the image name of the release image + @echo $(IMAGE_NAME) + +release-image-tag: ## Prints the image tag of the release image + @echo $(IMAGE_TAG) + +######################## +## Scripts and Helper ## +######################## + +help: ## Prints the help documentation and info about each command + @grep -E '^[/a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' diff --git a/bin/check-github-actions-auth.sh b/bin/check-github-actions-auth.sh new file mode 100755 index 000000000..0e1746ca6 --- /dev/null +++ b/bin/check-github-actions-auth.sh @@ -0,0 +1,48 @@ +#!/bin/bash +set -euo pipefail + +GITHUB_ACTIONS_ROLE=$1 + +# This is used later to determine the run id of the workflow run +# See comment below about "Getting workflow run id" +PREV_RUN_CREATE_TIME=$(gh run list --workflow check-infra-auth.yml --limit 1 --json createdAt --jq ".[].createdAt") + +echo "Run check-infra-auth workflow with role_to_assume=$GITHUB_ACTIONS_ROLE" +gh workflow run check-infra-auth.yml --field role_to_assume=$GITHUB_ACTIONS_ROLE + +######################### +## Get workflow run 
id ## +######################### + +echo "Get workflow run id" +# The following commands aims to get the workflow run id of the run that was +# just triggered by the previous workflow dispatch event. There's currently no +# simple and reliable way to do this, so for now we are going to accept that +# there is a race condition. +# +# The current implementation involves getting the create time of the previous +# run. Then continuously checking the list of workflow runs until we see a +# newly created run. Then we get the id of this new run. +# +# References: +# * This stack overflow article suggests a complicated overengineered approach: +# https://stackoverflow.com/questions/69479400/get-run-id-after-triggering-a-github-workflow-dispatch-event +# * This GitHub community discussion also requests this feature: +# https://github.com/orgs/community/discussions/17389 + +echo "Previous workflow run created at $PREV_RUN_CREATE_TIME" +echo "Check workflow run create time until we find a newer workflow run" +while : ; do + echo -n "." 
+ RUN_CREATE_TIME=$(gh run list --workflow check-infra-auth.yml --limit 1 --json createdAt --jq ".[].createdAt") + [[ $RUN_CREATE_TIME > $PREV_RUN_CREATE_TIME ]] && break +done +echo "Found newer workflow run created at $RUN_CREATE_TIME" + +echo "Get id of workflow run" +WORKFLOW_RUN_ID=$(gh run list --workflow check-infra-auth.yml --limit 1 --json databaseId --jq ".[].databaseId") +echo "Workflow run id: $WORKFLOW_RUN_ID" + +echo "Watch workflow run until it exits" +# --exit-status causes command to exit with non-zero status if run fails +gh run watch $WORKFLOW_RUN_ID --exit-status diff --git a/bin/configure-app-build-repository.sh b/bin/configure-app-build-repository.sh new file mode 100755 index 000000000..a56eb9651 --- /dev/null +++ b/bin/configure-app-build-repository.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# ----------------------------------------------------------------------------- +# This script configures the build-repository module for the specified application. +# It creates a shared.tfvars file and shared.s3.tfbackend in the module directory. +# The configuration will be shared across all of the application's environments. +# +# Positional parameters: +# APP_NAME (required) – the name of subdirectory of /infra that holds the +# application's infrastructure code. 
+# ----------------------------------------------------------------------------- +set -euo pipefail + + +APP_NAME=$1 + +#-------------------------------------- +# Create terraform backend config file +#-------------------------------------- + +MODULE_DIR="infra/$APP_NAME/build-repository" +BACKEND_CONFIG_NAME="shared" + +./bin/create-tfbackend.sh $MODULE_DIR $BACKEND_CONFIG_NAME + +#-------------------- +# Create tfvars file +#-------------------- + +TF_VARS_FILE="$MODULE_DIR/terraform.tfvars" +REGION=$(terraform -chdir=infra/accounts output -raw region) + +echo "===========================================" +echo "Setting up tfvars file for build-repository" +echo "===========================================" +echo "Input parameters" +echo " APP_NAME=$APP_NAME" +echo + +# Create output file from example file +cp $MODULE_DIR/example.tfvars $TF_VARS_FILE + +# Replace the placeholder values +sed -i.bak "s/<REGION>/$REGION/g" $TF_VARS_FILE + +# Remove the backup file created by sed +rm $TF_VARS_FILE.bak + +echo "Created file: $TF_VARS_FILE" +echo "------------------ file contents ------------------" +cat $TF_VARS_FILE +echo "----------------------- end -----------------------" diff --git a/bin/configure-app-database.sh b/bin/configure-app-database.sh new file mode 100755 index 000000000..d4356a932 --- /dev/null +++ b/bin/configure-app-database.sh @@ -0,0 +1,57 @@ +#!/bin/bash +# ----------------------------------------------------------------------------- +# This script configures the database module for the specified application +# and environment by creating the .tfvars file and .tfbackend file for the module. +# +# Positional parameters: +# APP_NAME (required) – the name of subdirectory of /infra that holds the +# application's infrastructure code. +# ENVIRONMENT is the name of the application environment (e.g.
dev, staging, prod) +# ----------------------------------------------------------------------------- +set -euo pipefail + +APP_NAME=$1 +ENVIRONMENT=$2 + +#-------------------------------------- +# Create terraform backend config file +#-------------------------------------- + +MODULE_DIR="infra/$APP_NAME/database" +BACKEND_CONFIG_NAME="$ENVIRONMENT" + +./bin/create-tfbackend.sh $MODULE_DIR $BACKEND_CONFIG_NAME + +#-------------------- +# Create tfvars file +#-------------------- + +TF_VARS_FILE="$MODULE_DIR/$ENVIRONMENT.tfvars" + +# Get the name of the S3 bucket that was created to store the tf state
# and the name of the DynamoDB table that was created for tf state locks.
# This will be used to configure the S3 backends in all the application
# modules +TF_STATE_BUCKET_NAME=$(terraform -chdir=infra/accounts output -raw tf_state_bucket_name) +TF_LOCKS_TABLE_NAME=$(terraform -chdir=infra/accounts output -raw tf_locks_table_name) +TF_STATE_KEY="$MODULE_DIR/$BACKEND_CONFIG_NAME.tfstate" +REGION=$(terraform -chdir=infra/accounts output -raw region) + + +echo "=======================================" +echo "Setting up tfvars file for app database" +echo "=======================================" +echo "Input parameters" +echo " APP_NAME=$APP_NAME" +echo " ENVIRONMENT=$ENVIRONMENT" +echo + +cp $MODULE_DIR/example.tfvars $TF_VARS_FILE +sed -i.bak "s/<ENVIRONMENT>/$ENVIRONMENT/g" $TF_VARS_FILE +sed -i.bak "s/<REGION>/$REGION/g" $TF_VARS_FILE +rm $TF_VARS_FILE.bak + +echo "Created file: $TF_VARS_FILE" +echo "------------------ file contents ------------------" +cat $TF_VARS_FILE +echo "----------------------- end -----------------------" diff --git a/bin/configure-app-service.sh b/bin/configure-app-service.sh new file mode 100755 index 000000000..6c1347bf1 --- /dev/null +++ b/bin/configure-app-service.sh @@ -0,0 +1,54 @@ +#!/bin/bash +# ----------------------------------------------------------------------------- +# This script configures the service module for the specified application +#
and environment by creating the .tfvars file and .tfbackend file for the module. +# +# Positional parameters: +# APP_NAME (required) – the name of subdirectory of /infra that holds the +# application's infrastructure code. +# ENVIRONMENT is the name of the application environment (e.g. dev, staging, prod) +# ----------------------------------------------------------------------------- +set -euo pipefail + +APP_NAME=$1 +ENVIRONMENT=$2 + +#-------------------------------------- +# Create terraform backend config file +#-------------------------------------- + +MODULE_DIR="infra/$APP_NAME/service" +BACKEND_CONFIG_NAME="$ENVIRONMENT" + +./bin/create-tfbackend.sh $MODULE_DIR $BACKEND_CONFIG_NAME + +#-------------------- +# Create tfvars file +#-------------------- + +TF_VARS_FILE="$MODULE_DIR/$ENVIRONMENT.tfvars" + +# Get values needed to populate the tfvars file (see infra/app/service/example.tfvars) +TF_STATE_BUCKET_NAME=$(terraform -chdir=infra/accounts output -raw tf_state_bucket_name) +TF_STATE_KEY="$MODULE_DIR/$BACKEND_CONFIG_NAME.tfstate" +REGION=$(terraform -chdir=infra/accounts output -raw region) + +echo "======================================" +echo "Setting up tfvars file for app service" +echo "======================================" +echo "Input parameters" +echo " APP_NAME=$APP_NAME" +echo " ENVIRONMENT=$ENVIRONMENT" +echo + +cp $MODULE_DIR/example.tfvars $TF_VARS_FILE +sed -i.bak "s//$ENVIRONMENT/g" $TF_VARS_FILE +sed -i.bak "s//$TF_STATE_BUCKET_NAME/g" $TF_VARS_FILE +sed -i.bak "s||$TF_STATE_KEY|g" $TF_VARS_FILE +sed -i.bak "s//$REGION/g" $TF_VARS_FILE +rm $TF_VARS_FILE.bak + +echo "Created file: $TF_VARS_FILE" +echo "------------------ file contents ------------------" +cat $TF_VARS_FILE +echo "----------------------- end -----------------------" diff --git a/bin/configure-monitoring-secret.sh b/bin/configure-monitoring-secret.sh new file mode 100755 index 000000000..b6d51aecb --- /dev/null +++ b/bin/configure-monitoring-secret.sh @@ -0,0 +1,43 @@ 
+#!/bin/bash +# ----------------------------------------------------------------------------- +# This script creates SSM parameter for storing integration URL for incident management +# services. Script creates new SSM attribute or updates existing. +# +# Positional parameters: +# APP_NAME (required) – the name of subdirectory of /infra that holds the +# application's infrastructure code. +# ENVIRONMENT is the name of the application environment (e.g. dev, staging, prod) +# INTEGRATION_ENDPOINT_URL is the url for the integration endpoint for external +# incident management services (e.g. Pagerduty, Splunk-On-Call) +# ----------------------------------------------------------------------------- +set -euo pipefail + +APP_NAME=$1 +ENVIRONMENT=$2 +INTEGRATION_ENDPOINT_URL=$3 + +terraform -chdir=infra/$APP_NAME/app-config init > /dev/null +terraform -chdir=infra/$APP_NAME/app-config refresh > /dev/null + +HAS_INCIDENT_MANAGEMENT_SERVICE=$(terraform -chdir=infra/$APP_NAME/app-config output -raw has_incident_management_service) +if [ $HAS_INCIDENT_MANAGEMENT_SERVICE = "false" ]; then + echo "Application does not have incident management service, no secret to create" + exit 0 +fi + +SECRET_NAME=$(terraform -chdir=infra/$APP_NAME/app-config output -json environment_configs | jq -r ".$ENVIRONMENT.incident_management_service_integration.integration_url_param_name") + +echo "=====================" +echo "Setting up SSM secret" +echo "=====================" +echo "APPLICATION_NAME=$APP_NAME" +echo "ENVIRONMENT=$ENVIRONMENT" +echo "INTEGRATION_URL=$INTEGRATION_ENDPOINT_URL" +echo +echo "Creating SSM secret: $SECRET_NAME" + +aws ssm put-parameter \ + --name "$SECRET_NAME" \ + --value "$INTEGRATION_ENDPOINT_URL" \ + --type SecureString \ + --overwrite diff --git a/bin/create-or-update-database-roles.sh b/bin/create-or-update-database-roles.sh new file mode 100755 index 000000000..1e4df77b9 --- /dev/null +++ b/bin/create-or-update-database-roles.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# 
----------------------------------------------------------------------------- +# Script that invokes the database role-manager AWS Lambda function to create +# or update the Postgres user roles for a particular environment. +# The Lambda function is created by the infra/app/database root module and is +# defined in the infra/app/database child module. +# +# Positional parameters: +# APP_NAME (required) – the name of subdirectory of /infra that holds the +# application's infrastructure code. +# ENVIRONMENT (required) - the name of the application environment (e.g. dev +# staging, prod) +# ----------------------------------------------------------------------------- +set -euo pipefail + +APP_NAME=$1 +ENVIRONMENT=$2 + +./bin/terraform-init.sh infra/$APP_NAME/database $ENVIRONMENT +DB_ROLE_MANAGER_FUNCTION_NAME=$(terraform -chdir=infra/$APP_NAME/database output -raw role_manager_function_name) + +echo "================================" +echo "Creating/updating database users" +echo "================================" +echo "Input parameters" +echo " APP_NAME=$APP_NAME" +echo " ENVIRONMENT=$ENVIRONMENT" +echo +echo "Invoking Lambda function: $DB_ROLE_MANAGER_FUNCTION_NAME" +aws lambda invoke --function-name $DB_ROLE_MANAGER_FUNCTION_NAME --no-cli-pager response.json +echo "Lambda function response:" +cat response.json +rm response.json diff --git a/bin/create-tfbackend.sh b/bin/create-tfbackend.sh new file mode 100755 index 000000000..5381134c8 --- /dev/null +++ b/bin/create-tfbackend.sh @@ -0,0 +1,57 @@ +#!/bin/bash +# ----------------------------------------------------------------------------- +# This script creates a terraform backend config file for a terraform module. +# It is not meant to be used directly. 
Instead, it is called by other scripts +# that set up and configure the infra/accounts module and the infra/app/ modules +# such as infra/app/build-repository and infra/app/service +# +# Positional parameters: +# MODULE_DIR (required) - the directory of the root module that will be configured +# BACKEND_CONFIG_NAME (required) - the name of the backend that will be created. +# For environment specific configs, the BACKEND_CONFIG_NAME will be the same +# as ENVIRONMENT. For shared configs, the BACKEND_CONFIG_NAME will be "shared". +# TF_STATE_KEY (optional) - the S3 object key of the tfstate file in the S3 bucket +# Defaults to [MODULE_DIR]/[BACKEND_CONFIG_NAME].tfstate +# ----------------------------------------------------------------------------- +set -euo pipefail + +MODULE_DIR=$1 +BACKEND_CONFIG_NAME=$2 +TF_STATE_KEY="${3:-$MODULE_DIR/$BACKEND_CONFIG_NAME.tfstate}" + +# The local tfbackend config file that will store the terraform backend config +BACKEND_CONFIG_FILE="$MODULE_DIR/$BACKEND_CONFIG_NAME.s3.tfbackend" + +# Get the name of the S3 bucket that was created to store the tf state +# and the name of the DynamoDB table that was created for tf state locks. 
+# This will be used to configure the S3 backends in all the application +# modules +TF_STATE_BUCKET_NAME=$(terraform -chdir=infra/accounts output -raw tf_state_bucket_name) +TF_LOCKS_TABLE_NAME=$(terraform -chdir=infra/accounts output -raw tf_locks_table_name) +REGION=$(terraform -chdir=infra/accounts output -raw region) + +echo "====================================" +echo "Create terraform backend config file" +echo "====================================" +echo "Input parameters" +echo " MODULE_DIR=$MODULE_DIR" +echo " BACKEND_CONFIG_NAME=$BACKEND_CONFIG_NAME" +echo + +# Create output file from example file +cp infra/example.s3.tfbackend $BACKEND_CONFIG_FILE + +# Replace the placeholder values +sed -i.bak "s//$TF_STATE_BUCKET_NAME/g" $BACKEND_CONFIG_FILE +sed -i.bak "s||$TF_STATE_KEY|g" $BACKEND_CONFIG_FILE +sed -i.bak "s//$TF_LOCKS_TABLE_NAME/g" $BACKEND_CONFIG_FILE +sed -i.bak "s//$REGION/g" $BACKEND_CONFIG_FILE + +# Remove the backup file created by sed +rm $BACKEND_CONFIG_FILE.bak + + +echo "Created file: $BACKEND_CONFIG_FILE" +echo "------------------ file contents ------------------" +cat $BACKEND_CONFIG_FILE +echo "----------------------- end -----------------------" diff --git a/bin/current-account-alias.sh b/bin/current-account-alias.sh new file mode 100755 index 000000000..f7003fc39 --- /dev/null +++ b/bin/current-account-alias.sh @@ -0,0 +1,4 @@ +#!/bin/bash +# Print the current AWS account alias +set -euo pipefail +echo -n "$(aws iam list-account-aliases --query "AccountAliases" --max-items 1 --output text)" diff --git a/bin/current-account-config-name.sh b/bin/current-account-config-name.sh new file mode 100755 index 000000000..7e9a2eaf3 --- /dev/null +++ b/bin/current-account-config-name.sh @@ -0,0 +1,7 @@ +#!/bin/bash +# Print the config name for the current AWS account +# Do this by getting the current account and searching for a file in +# infra/accounts that matches "..s3.tfbackend". 
+# The config name is "."" +set -euo pipefail +ls -1 infra/accounts | grep "$(./bin/current-account-id.sh)" | grep s3.tfbackend | sed 's/.s3.tfbackend//' diff --git a/bin/current-account-id.sh b/bin/current-account-id.sh new file mode 100755 index 000000000..92f368bf7 --- /dev/null +++ b/bin/current-account-id.sh @@ -0,0 +1,4 @@ +#!/bin/bash +# Print the current AWS account id +set -euo pipefail +echo -n "$(aws sts get-caller-identity --query "Account" --output text)" diff --git a/bin/current-region.sh b/bin/current-region.sh new file mode 100755 index 000000000..c25b2c11d --- /dev/null +++ b/bin/current-region.sh @@ -0,0 +1,4 @@ +#!/bin/bash +# Print the current AWS region +set -euo pipefail +echo -n "$(aws configure list | grep region | awk '{print $2}')" diff --git a/bin/deploy-release.sh b/bin/deploy-release.sh new file mode 100755 index 000000000..00ad10262 --- /dev/null +++ b/bin/deploy-release.sh @@ -0,0 +1,22 @@ +#!/bin/bash +set -euo pipefail + +APP_NAME=$1 +IMAGE_TAG=$2 +ENVIRONMENT=$3 + +echo "--------------" +echo "Deploy release" +echo "--------------" +echo "Input parameters:" +echo " APP_NAME=$APP_NAME" +echo " IMAGE_TAG=$IMAGE_TAG" +echo " ENVIRONMENT=$ENVIRONMENT" +echo +echo "Starting $APP_NAME deploy of $IMAGE_TAG to $ENVIRONMENT" + +MODULE_DIR="infra/$APP_NAME/service" +CONFIG_NAME="$ENVIRONMENT" +TF_CLI_ARGS_apply="-input=false -auto-approve -var=image_tag=$IMAGE_TAG" ./bin/terraform-init-and-apply.sh $MODULE_DIR $CONFIG_NAME + +echo "Completed $APP_NAME deploy of $IMAGE_TAG to $ENVIRONMENT" diff --git a/bin/publish-release.sh b/bin/publish-release.sh new file mode 100755 index 000000000..f5a814202 --- /dev/null +++ b/bin/publish-release.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +set -euo pipefail + +APP_NAME=$1 +IMAGE_NAME=$2 +IMAGE_TAG=$3 + +echo "---------------" +echo "Publish release" +echo "---------------" +echo "Input parameters:" +echo " APP_NAME=$APP_NAME" +echo " IMAGE_NAME=$IMAGE_NAME" +echo " IMAGE_TAG=$IMAGE_TAG" + +# Need to init module 
when running in CD since GitHub actions does a fresh checkout of repo +terraform -chdir=infra/$APP_NAME/app-config init > /dev/null +terraform -chdir=infra/$APP_NAME/app-config refresh > /dev/null +IMAGE_REPOSITORY_NAME=$(terraform -chdir=infra/$APP_NAME/app-config output -raw image_repository_name) + +REGION=$(./bin/current-region.sh) +read -r IMAGE_REGISTRY_ID IMAGE_REPOSITORY_URL <<< $(aws ecr describe-repositories --repository-names $IMAGE_REPOSITORY_NAME --query "repositories[0].[registryId,repositoryUri]" --output text) +IMAGE_REGISTRY=$IMAGE_REGISTRY_ID.dkr.ecr.$REGION.amazonaws.com + +echo "Build repository info:" +echo " REGION=$REGION" +echo " IMAGE_REGISTRY=$IMAGE_REGISTRY" +echo " IMAGE_REPOSITORY_NAME=$IMAGE_REPOSITORY_NAME" +echo " IMAGE_REPOSITORY_URL=$IMAGE_REPOSITORY_URL" +echo +echo "Authenticating Docker with ECR" +aws ecr get-login-password --region $REGION \ + | docker login --username AWS --password-stdin $IMAGE_REGISTRY +echo +echo "Check if tag has already been published..." +RESULT="" +RESULT=$(aws ecr describe-images --repository-name $IMAGE_REPOSITORY_NAME --image-ids imageTag=$IMAGE_TAG --region $REGION 2> /dev/null ) || true +if [ ! -z "$RESULT" ];then + echo "Image with tag $IMAGE_TAG already published" + exit 0 +fi + +echo "New tag. Publishing image" +docker tag $IMAGE_NAME:$IMAGE_TAG $IMAGE_REPOSITORY_URL:$IMAGE_TAG +docker push $IMAGE_REPOSITORY_URL:$IMAGE_TAG diff --git a/bin/run-command.sh b/bin/run-command.sh new file mode 100755 index 000000000..a7e336846 --- /dev/null +++ b/bin/run-command.sh @@ -0,0 +1,98 @@ +#!/bin/bash +# ----------------------------------------------------------------------------- +# Run an application command using the application image +# +# Positional parameters: +# APP_NAME (required) – the name of subdirectory of /infra that holds the +# application's infrastructure code. +# ENVIRONMENT (required) – the name of the application environment (e.g. 
dev, +# staging, prod) +# COMMAND (required) – a JSON list representing the command to run +# e.g. To run the command `db-migrate-up` with no arguments, set +# COMMAND='["db-migrate-up"]' +# e.g. To run the command `echo "Hello, world"` set +# COMMAND='["echo", "Hello, world"]') +# ----------------------------------------------------------------------------- +set -euo pipefail + +APP_NAME="$1" +ENVIRONMENT="$2" +COMMAND="$3" +ENVIRONMENT_VARIABLES=${4:-""} + +echo "===============" +echo "Running command" +echo "===============" +echo "Input parameters" +echo " APP_NAME=$APP_NAME" +echo " ENVIRONMENT=$ENVIRONMENT" +echo " COMMAND=$COMMAND" +echo " ENVIRONMENT_VARIABLES=$ENVIRONMENT_VARIABLES" +echo + +# Use the same cluster, task definition, and network configuration that the application service uses +CLUSTER_NAME=$(terraform -chdir=infra/$APP_NAME/service output -raw service_cluster_name) +SERVICE_NAME=$(terraform -chdir=infra/$APP_NAME/service output -raw service_name) + +SERVICE_TASK_DEFINITION_ARN=$(aws ecs describe-services --no-cli-pager --cluster $CLUSTER_NAME --services $SERVICE_NAME --query "services[0].taskDefinition" --output text) +# For subsequent commands, use the task definition family rather than the service's task definition ARN +# because in the case of migrations, we'll deploy a new task definition revision before updating the +# service, so the service will be using an old revision, but we want to use the latest revision. +TASK_DEFINITION_FAMILY=$(aws ecs describe-task-definition --no-cli-pager --task-definition $SERVICE_TASK_DEFINITION_ARN --query "taskDefinition.family" --output text) + +NETWORK_CONFIG=$(aws ecs describe-services --no-cli-pager --cluster $CLUSTER_NAME --services $SERVICE_NAME --query "services[0].networkConfiguration") +CURRENT_REGION=$(./bin/current-region.sh) +AWS_USER_ID=$(aws sts get-caller-identity --no-cli-pager --query UserId --output text) + +ENVIRONMENT_OVERRIDES="" +if [ ! 
-z "$ENVIRONMENT_VARIABLES" ]; then + ENVIRONMENT_OVERRIDES="\"environment\": $ENVIRONMENT_VARIABLES," +fi +CONTAINER_NAME=$(aws ecs describe-task-definition --task-definition $TASK_DEFINITION_FAMILY --query "taskDefinition.containerDefinitions[0].name" --output text) +OVERRIDES=$(cat << EOF +{ + "containerOverrides": [ + { + $ENVIRONMENT_OVERRIDES + "name": "$CONTAINER_NAME", + "command": $COMMAND + } + ] +} +EOF +) + +AWS_ARGS=( + ecs run-task + --region=$CURRENT_REGION + --cluster=$CLUSTER_NAME + --task-definition=$TASK_DEFINITION_FAMILY + --started-by=$AWS_USER_ID + --launch-type=FARGATE + --platform-version=1.4.0 + --network-configuration "$NETWORK_CONFIG" + --overrides "$OVERRIDES" +) +echo "Running AWS CLI command" +printf " ... %s\n" "${AWS_ARGS[@]}" +echo +TASK_ARN=$(aws --no-cli-pager "${AWS_ARGS[@]}" --query "tasks[0].taskArn" --output text) + +echo "Waiting for task to stop" +echo " TASK_ARN=$TASK_ARN" +echo +aws ecs wait tasks-stopped --region $CURRENT_REGION --cluster $CLUSTER_NAME --tasks $TASK_ARN + +CONTAINER_EXIT_CODE=$(aws ecs describe-tasks --cluster $CLUSTER_NAME --tasks $TASK_ARN --query "tasks[0].containers[?name=='$CONTAINER_NAME'].exitCode" --output text) + +if [[ "$CONTAINER_EXIT_CODE" == "null" || "$CONTAINER_EXIT_CODE" != "0" ]]; then + echo "Task failed" >&2 + # Although we could avoid extra calls to AWS CLI if we just got the full JSON response from + # `aws ecs describe-tasks` and parsed it with jq, we are trying to avoid unnecessary dependencies. 
+ CONTAINER_STATUS=$(aws ecs describe-tasks --cluster $CLUSTER_NAME --tasks $TASK_ARN --query "tasks[0].containers[?name=='$CONTAINER_NAME'].[lastStatus,exitCode,reason]" --output text) + TASK_STATUS=$(aws ecs describe-tasks --cluster $CLUSTER_NAME --tasks $TASK_ARN --query "tasks[0].[lastStatus,stopCode,stoppedAt,stoppedReason]" --output text) + + echo "Container status (lastStatus, exitCode, reason): $CONTAINER_STATUS" >&2 + echo "Task status (lastStatus, stopCode, stoppedAt, stoppedReason): $TASK_STATUS" >&2 + exit 1 +fi diff --git a/bin/run-database-migrations.sh b/bin/run-database-migrations.sh new file mode 100755 index 000000000..c48d79348 --- /dev/null +++ b/bin/run-database-migrations.sh @@ -0,0 +1,61 @@ +#!/bin/bash +# ----------------------------------------------------------------------------- +# Run database migrations +# 1. Update the application's task definition with the latest build, but +# do not update the service +# 2. Run the "db-migrate" command in the container as a new task +# +# Positional parameters: +# APP_NAME (required) – the name of subdirectory of /infra that holds the +# application's infrastructure code. +# IMAGE_TAG (required) – the tag of the latest build +# ENVIRONMENT (required) – the name of the application environment (e.g. dev, +# staging, prod) +# ----------------------------------------------------------------------------- +set -euo pipefail + +APP_NAME="$1" +IMAGE_TAG="$2" +ENVIRONMENT="$3" + +echo "==================" +echo "Running migrations" +echo "==================" +echo "Input parameters" +echo " APP_NAME=$APP_NAME" +echo " IMAGE_TAG=$IMAGE_TAG" +echo " ENVIRONMENT=$ENVIRONMENT" +echo +echo "Step 0. 
Check if app has a database" + +terraform -chdir=infra/$APP_NAME/app-config init > /dev/null +terraform -chdir=infra/$APP_NAME/app-config refresh > /dev/null +HAS_DATABASE=$(terraform -chdir=infra/$APP_NAME/app-config output -raw has_database) +if [ $HAS_DATABASE = "false" ]; then + echo "Application does not have a database, no migrations to run" + exit 0 +fi + +DB_MIGRATOR_USER=$(terraform -chdir=infra/$APP_NAME/app-config output -json environment_configs | jq -r ".$ENVIRONMENT.database_config.migrator_username") + +echo +echo "::group::Step 1. Update task definition without updating service" + +MODULE_DIR="infra/$APP_NAME/service" +CONFIG_NAME="$ENVIRONMENT" +TF_CLI_ARGS_apply="-input=false -auto-approve -target=module.service.aws_ecs_task_definition.app -var=image_tag=$IMAGE_TAG" ./bin/terraform-init-and-apply.sh $MODULE_DIR $CONFIG_NAME + +echo "::endgroup::" +echo +echo '::group::Step 2. Run "db-migrate" command' + +COMMAND='["db-migrate"]' + +# Indent the later lines more to make the output of run-command prettier +ENVIRONMENT_VARIABLES=$(cat << EOF +[{ "name" : "DB_USER", "value" : "$DB_MIGRATOR_USER" }] +EOF +) + +./bin/run-command.sh $APP_NAME $ENVIRONMENT "$COMMAND" "$ENVIRONMENT_VARIABLES" +echo "::endgroup::" diff --git a/bin/set-up-current-account.sh b/bin/set-up-current-account.sh new file mode 100755 index 000000000..eed1532b8 --- /dev/null +++ b/bin/set-up-current-account.sh @@ -0,0 +1,94 @@ +#!/bin/bash +# ----------------------------------------------------------------------------- +# This script sets up the terraform backend for the AWS account that you are +# currently authenticated into and creates the terraform backend config file. +# +# The script takes a human readable account name that is used to prefix the tfbackend +# file that is created. This is to make it easier to visually identify while +# tfbackend file corresponds to which AWS account. 
The account ID is still +# needed since all AWS accounts are guaranteed to have an account ID, and the +# account ID cannot change, whereas other things like the AWS account alias +# can change and is not guaranteed to exist. +# +# Positional parameters: +# ACCOUNT_NAME (required) - human readable name for the AWS account that you're +# authenticated into. The account name will be used to prefix the created +# tfbackend file so that it's easier to visually identify as opposed to +# identifying the file using the account id. +# For example, you have an account per environment, the account name can be +# the name of the environment (e.g. "prod" or "staging"). Or if you are +# setting up an account for all lower environments, account name can be "lowers". +# If your AWS account has an account alias, you can also use that. +# ----------------------------------------------------------------------------- +set -euo pipefail + +ACCOUNT_NAME=$1 + +ACCOUNT_ID="$(./bin/current-account-id.sh)" +REGION="$(./bin/current-region.sh)" + +# Get project name +terraform -chdir=infra/project-config refresh > /dev/null +PROJECT_NAME=$(terraform -chdir=infra/project-config output -raw project_name) + +TF_STATE_BUCKET_NAME="$PROJECT_NAME-$ACCOUNT_ID-$REGION-tf" +TF_STATE_KEY="infra/account.tfstate" + +echo "==================" +echo "Setting up account" +echo "==================" +echo "ACCOUNT_NAME=$ACCOUNT_NAME" +echo "ACCOUNT_ID=$ACCOUNT_ID" +echo "PROJECT_NAME=$PROJECT_NAME" +echo "TF_STATE_BUCKET_NAME=$TF_STATE_BUCKET_NAME" +echo "TF_STATE_KEY=$TF_STATE_KEY" +echo "REGION=$REGION" +echo +echo "------------------------------------------------------------------------------" +echo "Bootstrapping the account by creating an S3 backend with minimal configuration" +echo "------------------------------------------------------------------------------" +echo +echo "Creating bucket: $TF_STATE_BUCKET_NAME" +# For creating buckets outside of us-east-1, a LocationConstraint needs to be set +# For 
creating buckets in us-east-1, LocationConstraint cannot be set +# See https://docs.aws.amazon.com/cli/latest/reference/s3api/create-bucket.html +CREATE_BUCKET_CONFIGURATION="" +if [ $REGION != "us-east-1" ]; then + CREATE_BUCKET_CONFIGURATION="--create-bucket-configuration LocationConstraint=$REGION" +fi +aws s3api create-bucket --bucket $TF_STATE_BUCKET_NAME --region $REGION $CREATE_BUCKET_CONFIGURATION > /dev/null +echo +echo "----------------------------------" +echo "Creating rest of account resources" +echo "----------------------------------" +echo + +cd infra/accounts + +# Create the infrastructure for the terraform backend such as the S3 bucket +# for storing tfstate files and the DynamoDB table for tfstate locks. +# -reconfigure is used in case this isn't the first account being set up +# and there is already a .terraform directory +terraform init \ + -reconfigure \ + -input=false \ + -backend-config="bucket=$TF_STATE_BUCKET_NAME" \ + -backend-config="key=$TF_STATE_KEY" \ + -backend-config="region=$REGION" + +# Import the bucket that we created in the previous step so we don't recreate it +# But first check if the bucket already exists in the state file. If we are +# re-running account setup and the bucket already exists then skip the import step +if ! 
terraform state list module.backend.aws_s3_bucket.tf_state; then + terraform import module.backend.aws_s3_bucket.tf_state $TF_STATE_BUCKET_NAME +fi + +terraform apply \ + -input=false \ + -auto-approve + +cd - + +MODULE_DIR=infra/accounts +BACKEND_CONFIG_NAME="$ACCOUNT_NAME.$ACCOUNT_ID" +./bin/create-tfbackend.sh $MODULE_DIR $BACKEND_CONFIG_NAME $TF_STATE_KEY diff --git a/bin/terraform-apply.sh b/bin/terraform-apply.sh new file mode 100755 index 000000000..013636a67 --- /dev/null +++ b/bin/terraform-apply.sh @@ -0,0 +1,41 @@ +#!/bin/bash +# ----------------------------------------------------------------------------- +# Convenience script for running terraform apply for the specified module and configuration name. +# The configuration name is used to determine which .tfvars file to use for the -var-file +# option of terraform apply. +# +# Additional arguments to terraform apply can also be passed in using terraform's built in environment variables +# TF_CLI_ARGS and TF_CLI_ARGS_name. For example, in CI/CD pipelines, you may want to set +# TF_CLI_ARGS="-input=false -auto-approve" to skip the confirmation prompt. +# See https://developer.hashicorp.com/terraform/cli/config/environment-variables#tf_cli_args-and-tf_cli_args_name +# +# Positional parameters: +# MODULE_DIR (required) – The location of the root module to initialize and apply +# CONFIG_NAME (required) – The name of the tfvars config. For accounts, the config name is the AWS account alias. +# For application modules the config name is the name of the environment (e.g. "dev", "staging", "prod"). +# For application modules that are shared across environments, the config name is "shared". 
+# For example if a backend config file is named "myaccount.s3.tfbackend", then the CONFIG_NAME would be "myaccount" +# ----------------------------------------------------------------------------- +set -euo pipefail + +MODULE_DIR="$1" +CONFIG_NAME="$2" + +# Convenience script for running terraform apply +# CONFIG_NAME – the name of the backend config. +# For example if a backend config file is named "myaccount.s3.tfbackend", then the CONFIG_NAME would be "myaccount" +# MODULE_DIR – the location of the root module to initialize and apply + +# 1. Set working directory to the terraform root module directory + +cd $MODULE_DIR + +# 2. Run terraform apply with the tfvars file (if it exists) that has the same name as the backend config file + +TF_VARS_FILE="$CONFIG_NAME.tfvars" +TF_VARS_OPTION="" +if [ -f $TF_VARS_FILE ]; then + TF_VARS_OPTION="-var-file=$TF_VARS_FILE" +fi + +terraform apply $TF_VARS_OPTION diff --git a/bin/terraform-init-and-apply.sh b/bin/terraform-init-and-apply.sh new file mode 100755 index 000000000..8a01de894 --- /dev/null +++ b/bin/terraform-init-and-apply.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# ----------------------------------------------------------------------------- +# Convenience script for running terraform init followed by terraform apply +# See ./bin/terraform-init.sh and ./bin/terraform-apply.sh for more details. +# +# Positional parameters: +# MODULE_DIR (required) – The location of the root module to initialize and apply +# CONFIG_NAME (required) – The name of the tfbackend and tfvars config. The name +# is expected to be consistent for both the tfvars file and the tfbackend file. +# ----------------------------------------------------------------------------- +set -euo pipefail + +MODULE_DIR="$1" +CONFIG_NAME="$2" + +# Convenience script for running terraform init and terraform apply +# CONFIG_NAME – the name of the backend config. 
+# For example if a backend config file is named "myaccount.s3.tfbackend", then the CONFIG_NAME would be "myaccount" +# MODULE_DIR – the location of the root module to initialize and apply + +./bin/terraform-init.sh $MODULE_DIR $CONFIG_NAME + +./bin/terraform-apply.sh $MODULE_DIR $CONFIG_NAME diff --git a/bin/terraform-init.sh b/bin/terraform-init.sh new file mode 100755 index 000000000..845105f86 --- /dev/null +++ b/bin/terraform-init.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# ----------------------------------------------------------------------------- +# Convenience script for running terraform init for the specified module and configuration name. +# The configuration name is used to determine which .tfbackend file to use for the -backend-config +# option of terraform init. +# +# Positional parameters: +# MODULE_DIR (required) – The location of the root module to initialize and apply +# CONFIG_NAME (required) – The name of the backend config. For accounts, the config name is the AWS account alias. +# For application modules the config name is the name of the environment (e.g. "dev", "staging", "prod"). +# For application modules that are shared across environments, the config name is "shared". 
+# For example if a backend config file is named "myaccount.s3.tfbackend", then the CONFIG_NAME would be "myaccount" +# ----------------------------------------------------------------------------- +set -euo pipefail + +MODULE_DIR="$1" +CONFIG_NAME="$2" + +# Run terraform init with the named backend config file + +BACKEND_CONFIG_FILE="$CONFIG_NAME.s3.tfbackend" + +# Note that the BACKEND_CONFIG_FILE path is relative to MODULE_DIR, not the current working directory +terraform -chdir=$MODULE_DIR init \ + -input=false \ + -reconfigure \ + -backend-config=$BACKEND_CONFIG_FILE diff --git a/docs/code-reviews.md b/docs/code-reviews.md new file mode 100644 index 000000000..fd298b8b7 --- /dev/null +++ b/docs/code-reviews.md @@ -0,0 +1,55 @@ +# Code Reviews + +Code reviews are intended to help all of us grow as engineers and improve the quality of what we ship. +These guidelines are meant to reinforce those two goals. + +## For reviewers + +Aim to respond to code reviews within 1 business day. + +Remember to highlight things that you like and appreciate while reading through the changes, +and to make any other feedback clearly actionable by indicating if it is optional preference, an important consideration, or an error. + +Don't be afraid to comment with a question, or to ask for clarification, or provide a suggestion, +whenever you don’t understand what is going on at first glance — or if you think an approach or decision can be improved. +Suggestions on how to split a large PR into smaller chunks can also help move things along. +Code reviews give us a chance to learn from one another, and to reflect, iterate on, and document why certain decisions are made. + +Once you're ready to approve or request changes, err on the side of trust. +Send a vote of approval if the PR looks ready except for small minor changes, +and trust that the recipient will address your comments before merging by replying via comment or code to any asks. 
+Use "request changes" sparingly, unless there's a blocking issue or major refactors that should be done. + +## For authors or requesters + +Your PR should be small enough that a reviewer can reasonably respond within 1-2 business days. +For larger changes, break them down into a series of PRs. +If refactors are included in your changes, try to split them out into separate PRs. + +As a PR writer, you should consider your description and comments as documentation; +current and future team members will refer to it to understand your design decisions. +Include relevant context and business requirements, and add preemptive comments (in code or PR) +for sections of code that may be confusing or worth debate. + +### Draft PRs + +If your PR is a work-in-progress, or if you are looking for specific feedback on things, +create a [Draft Pull Request](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-requests#draft-pull-requests) +and state what you are looking for in the description. + +### Re-requesting reviews after completing changes + +After you make requested changes in response to code review feedback, please re-request reviews from the reviewers to notify them that the work is ready to be reviewed again. + +Advantages of code review + +catch and prevent bugs +consistent code +find shared code +share knowledge + +Challenges +it can take long +who to ask +how do you know when is "enough" review +what should i be reviewing diff --git a/docs/compliance.md b/docs/compliance.md new file mode 100644 index 000000000..db9a92b87 --- /dev/null +++ b/docs/compliance.md @@ -0,0 +1,29 @@ +# Compliance + +We use [Checkov](https://www.checkov.io/) and [tfsec](https://aquasecurity.github.io/tfsec/) static analysis tools to check for compliance with infrastructure policies. + +## Setup + +To run these tool locally, first install them by running the following commands. 
+ +* Install checkov + + ```bash + brew install checkov + ``` + +* Install tfsec + + ```bash + brew install tfsec + ``` + +## Check compliance + +```bash +make infra-check-compliance +``` + +## Pre-Commit + +If you use [pre-commit](https://www.checkov.io/4.Integrations/pre-commit.html), you can optionally add checkov to your own pre-commit hook by following the instructions [here](https://www.checkov.io/4.Integrations/pre-commit.html). diff --git a/docs/decisions/index.md b/docs/decisions/index.md new file mode 100644 index 000000000..44dbe2a94 --- /dev/null +++ b/docs/decisions/index.md @@ -0,0 +1,20 @@ +# Architectural Decision Log + +This log lists the architectural decisions for [project name]. + + + +* [ADR-0000](infra/0000-use-markdown-architectural-decision-records.md) - Use Markdown Architectural Decision Records +* [ADR-0001](infra/0001-ci-cd-interface.md) - CI/CD Interface +* [ADR-0002](infra/0002-use-custom-implementation-of-github-oidc.md) - Use custom implementation of GitHub OIDC to authenticate GitHub actions with AWS rather than using module in Terraform Registry +* [ADR-0003](infra/0003-manage-ecr-in-prod-account-module.md) - Manage ECR in prod account module +* [ADR-0004](infra/0004-separate-terraform-backend-configs-into-separate-config-files.md) - Separate tfbackend configs into separate files +* [ADR-0005](infra/0005-database-module-design.md) - Database module design +* [ADR-0006](infra/0006-provision-database-users-with-serverless-function.md) - Provision database users with serverless function +* [ADR-0007](infra/0007-database-migration-architecture.md) - Database Migration Infrastructure and Deployment + + + +For new ADRs, please use [template.md](template.md) as basis. +More information on MADR is available at . +General information about architectural decision records is available at . 
diff --git a/docs/decisions/infra/0000-use-markdown-architectural-decision-records.md b/docs/decisions/infra/0000-use-markdown-architectural-decision-records.md new file mode 100644 index 000000000..1aab9e567 --- /dev/null +++ b/docs/decisions/infra/0000-use-markdown-architectural-decision-records.md @@ -0,0 +1,26 @@ +# Use Markdown Architectural Decision Records + +## Context and Problem Statement + +We want to record architectural decisions made in this project. +Which format and structure should these records follow? + +## Considered Options + +* [MADR](https://adr.github.io/madr/) 2.1.2 – The Markdown Architectural Decision Records +* [Michael Nygard's template](http://thinkrelevance.com/blog/2011/11/15/documenting-architecture-decisions) – The first incarnation of the term "ADR" +* [Sustainable Architectural Decisions](https://www.infoq.com/articles/sustainable-architectural-design-decisions) – The Y-Statements +* Other templates listed at +* Formless – No conventions for file format and structure + +## Decision Outcome + +Chosen option: "MADR 2.1.2", because + +* Implicit assumptions should be made explicit. + Design documentation is important to enable people understanding the decisions later on. + See also [A rational design process: How and why to fake it](https://doi.org/10.1109/TSE.1986.6312940). +* The MADR format is lean and fits our development style. +* The MADR structure is comprehensible and facilitates usage & maintenance. +* The MADR project is vivid. +* Version 2.1.2 is the latest one available when starting to document ADRs. 
diff --git a/docs/decisions/infra/0001-ci-cd-interface.md b/docs/decisions/infra/0001-ci-cd-interface.md new file mode 100644 index 000000000..1a4bc213d --- /dev/null +++ b/docs/decisions/infra/0001-ci-cd-interface.md @@ -0,0 +1,113 @@ +# CI/CD Interface + +* Status: accepted +* Deciders: @lorenyu @kyeah +* Date: 2022-10-04 + +Technical Story: Define Makefile interface between infra and application [#105](https://github.com/navapbc/template-infra/issues/105) + +## Context and Problem Statement + +In order to reuse CI and CD logic for different tech stacks, we need to establish a consistent interface by which different applications can hook into the common CI/CD infrastructure. + +## Decision Drivers + +* We want to define most of the release management logic in `template-infra` but allow application specific methods for building the release. +* The build needs to be able to be run from the CD workflow defined in `template-infra`, but it also needs to be able to be run from the application as part of the CI workflow as one of the CI checks. + +## Proposal + +### CD interface + +Create a `Makefile` in `template-infra` repo that defines the following make targets: + +```makefile +################### +# Building and deploying +################## + +# Generate an informational tag so we can see where every image comes from. +release-build: # assumes there is a Dockerfile in `app` folder + ... code that builds image from app/Dockerfile + +release-publish: + ... code that publishes to ecr + +release-deploy: + ... code that restarts ecs service with new image +``` + +Each of the template applications (template-application-nextjs, template-application-flask) needs to have a `Makefile` in `app/` e.g. `template-application-flask/app/Makefile` with a `release-build` target that builds the release image. 
The `release-build` target should take an `OPTS` argument to pass into the build command to allow the parent Makefile to pass in arguments like `--tag IMAGE_NAME:IMAGE_TAG` which can facilitate release management. + +```makefile +# template-application-flask/app/Makefile + +release-build: + docker build $(OPTS) --target release . +``` + +By convention, the application's Dockerfile should have a named stage called `release` e.g. + +```Dockerfile +# template-application-flask/app/Dockerfile +... +FROM scratch AS release +... +``` + +### CI interface + +Each application will have their own CI workflow that gets copied into the project's workflows folder as part of installation. `template-application-nextjs` and `template-application-flask` will have `.github/workflows/ci-app.yml`, and `template-infra` will have `.github/workflows/ci-infra.yml`. + +Installation would look something like: + +```bash +cp template-infra/.github/workflows/* .github/workflows/ +cp template-application-nextjs/.github/workflows/* .github/workflows/ +``` + +CI in `template-application-next` might be something like: + +```yml +# template-application-nextjs/.github/workflows/ci-app.yml + +jobs: + lint: + steps: + - run: npm run lint + type-check: + steps: + - run: npm run type-check + test: + steps: + - run: npm test +``` + +CI in `template-application-flask` might be something like: + +```yml +# template-application-nextjs/.github/workflows/ci-app.yml + +jobs: + lint: + steps: + - run: poetry run black + type-check: + steps: + - run: poetry run mypy + test: + steps: + - run: poetry run pytest +``` + +For now we are assuming there's only one deployable application service per repo, but we could evolve this architecture to have the project rename `app` as part of the installation process to something specific like `api` or `web`, and rename `ci-app.yml` appropriately to `ci-api.yml` or `ci-web.yml`, which would allow for multiple application folders to co-exist. 
+ +## Alternative options considered for CD interface + +1. Application template repos also have their own release-build command (could use Make, but doesn't have to) that is called as part of the application's ci-app.yml. The application's version of release-build doesn't have to tag the release, since the template-infra version will do that: + + * Cons: build command in two places, and while 99% of the build logic is within Dockerfile and code, there's still a small chance that difference in build command line arguments could produce a different build in CI than what is used for release + +2. We can run release-build as part of template-infra's ci-infra.yml, so we still get CI test coverage of build process + + * Cons: things like tests and linting in ci-app.yml can't use the docker image to run the tests, which potentially means CI and production are using slightly different environments diff --git a/docs/decisions/infra/0002-use-custom-implementation-of-github-oidc.md b/docs/decisions/infra/0002-use-custom-implementation-of-github-oidc.md new file mode 100644 index 000000000..b9c5a5bfe --- /dev/null +++ b/docs/decisions/infra/0002-use-custom-implementation-of-github-oidc.md @@ -0,0 +1,38 @@ +# Use custom implementation of GitHub OIDC to authenticate GitHub actions with AWS rather than using module in Terraform Registry + +* Status: accepted +* Deciders: @shawnvanderjagt @lorenyu @NavaTim +* Date: 2022-10-05 (Updated 2023-07-12) + +## Context and Problem Statement + +[GitHub recommends using OpenID Connect to authenticate GitHub actions with AWS](https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect). There are [existing modules in the Terraform Registry](https://registry.terraform.io/search/modules?q=github%20actions%20oidc) that implements these resources. Should we use an existing module or implement our own? 
+ +## Decision Drivers + +* Secure +* Maintainable +* Simple and easily understood + +## Considered Options + +* Use [unfunco/oidc-github](https://registry.terraform.io/modules/unfunco/oidc-github/aws/latest) module from Terraform registry +* Use a fork of [unfunco/oidc-github](https://registry.terraform.io/modules/unfunco/oidc-github/aws/latest) in [NavaPBC GitHub org](https://github.com/navapbc) +* Use a custom implementation + +## Decision Outcome + +We chose to use a custom implementation because it allowed for the simplest implementation that was easiest to understand while still being in our full control and therefore avoids security issues with external dependencies. It is also easy to upgrade to use the external module if circumstances change. + +## Pros and Cons of the Options + +The [unfunco/oidc-github](https://registry.terraform.io/modules/unfunco/oidc-github/aws/latest) module from Terraform registry is effectively what we need, but there are a few disadvantages to using it: + +Cons of unfunco/oidc-github: + +* Dependency on an external module in the Terraform registry has negative security implications. Furthermore, the module isn't published by an "official" organization. It is maintained by a single developer, further increasing the security risk. +* The module includes extra unnecessary options that make the code more difficult to read and understand +* In particular, the module includes the option to attach the `AdministratorAccess` policy to the GitHub actions IAM role, which isn't necessary and could raise concerns in an audit. 
+* ~~The module hardcodes the GitHub OIDC Provider thumbprint, which isn't as elegant as the method in the [Initial setup for CD draft PR #43](https://github.com/navapbc/template-infra/pull/43) from @shawnvanderjagt which simply pulls the thumbprint via:~~ (Update: July 12, 2023) Starting July 6, 2023, AWS began securing communication with GitHub’s OIDC identity provider (IdP) using GitHub's library of trusted root Certificate Authorities instead of using a certificate thumbprint to verify the IdP’s server certificate. This approach ensures that the GitHub OIDC configuration behaves correctly without disruption during future certificate rotations and changes. With this new validation approach in place, your legacy thumbprint(s) are no longer needed for validation purposes. + +Forking the module to the navapbc organization gets rid of the security issue, but the other issues remain. diff --git a/docs/decisions/infra/0003-manage-ecr-in-prod-account-module.md b/docs/decisions/infra/0003-manage-ecr-in-prod-account-module.md new file mode 100644 index 000000000..4af6dd105 --- /dev/null +++ b/docs/decisions/infra/0003-manage-ecr-in-prod-account-module.md @@ -0,0 +1,33 @@ +# Manage ECR in prod account module + +* Status: accepted +* Deciders: @lorenyu @shawnvanderjagt @farrcraft @kyeah +* Date: 2022-10-07 + +## Context and Problem Statement + +In a multi-account setup where there is one account per environment, where should the ECR repository live? + +## Decision Drivers + +* Minimize risk that the approach isn't acceptable with the agency given uncertainty around ability to provision accounts with the agency +* Desire an approach that can adapt equally well to a multi-account setup (with an account per environment) as well as to a single-account setup (with one account across all environments) or a two-account setup (with one account for prod and an account for non-prod) +* Desire an approach that can adapt to situations where there is more than one ECR repository i.e. 
a project with multiple deployable applications +* Simplicity + +## Considered Options + +* Separate `dist`/`build` account to contain the ECR repository and build artifacts +* Manage the ECR repository as part of the `prod` account +* Manage the ECR repository as part of the `dev` or `stage` account + +## Decision Outcome + +Manage the ECR repository(ies) as part of the prod account module, or for single-account setups, the single account module. Since there will always be a prod account, this approach should have minimal risk for not working for the agency, and will also work for projects that only have or need a single account. + +## Discussion of alternative approach + +However, if account management and creation was not an issue, it could be more elegant to have the production candidate build artifacts be managed in a separate `build` account that all environment accounts reference. This approach is described in the following references: + +* [Medium article: Cross-Account Amazon Elastic Container Registry (ECR) Access for ECS](https://garystafford.medium.com/amazon-elastic-container-registry-ecr-cross-account-access-for-ecs-2f90fcb02c80) +* [AWS whitepaper - Recommended Accounts - Deployments](https://docs.aws.amazon.com/whitepapers/latest/organizing-your-aws-environment/deployments-ou.html) diff --git a/docs/decisions/infra/0004-separate-terraform-backend-configs-into-separate-config-files.md b/docs/decisions/infra/0004-separate-terraform-backend-configs-into-separate-config-files.md new file mode 100644 index 000000000..4fceb9d97 --- /dev/null +++ b/docs/decisions/infra/0004-separate-terraform-backend-configs-into-separate-config-files.md @@ -0,0 +1,32 @@ +# Separate tfbackend configs into separate files + +* Status: accepted +* Deciders: @lorenyu @shawnvanderjagt @kyeah @bneutra +* Date: 2023-05-09 + +## Context + +Up until now, most projects adopted an infrastructure module architecture that is structured as follows: Each application environment (prod, 
staging, etc) is a separate root module that calls a template module. The template module defines all the application infra resources needed for an environment. Things that could be different per environment (e.g. desired ECS task count) are template variables, and each environment can have local vars (or somewhat equivalently, a tfvars file) that customizes those variables. Importantly, each environment has its own backend tfstate file, and the backend config is stored in the environment module’s `main.tf`. + +An alternative approach exists to managing the backend configs. Rather than saving the backend config directly in main.tf, `main.tf` could contain a [partial configuration](https://developer.hashicorp.com/terraform/language/settings/backends/configuration#partial-configuration), and the rest of the backend config would be passed in during terraform init with a command like `terraform init --backend-config=prod.s3.tfbackend`. There would no longer be a need for separate root modules for each environment. What was previously the template module would instead act as the root module, and engineers would work with different environments solely through separate tfbackend files and tfvar files. Doing this would greatly simplify the module architecture at the cost of some complexity when executing terraform commands due to the extra command line parameters. To manage the extra complexity of running terraform commands, a wrapper script (such as with Makefile commands) can be introduced. + +The approach can be further extended to per-environment variable configurations via an analogous approach with [variable definitions files](https://developer.hashicorp.com/terraform/language/values/variables#variable-definitions-tfvars-files) which can be passed in with the `-var-file` command line option to terraform commands. 
+ +## Notes + +For creating accounts, can't use the .tfbackend backend config file approach because the main.tf file can only have one backend configuration, so if we have the backend configuration as a partial configuration of `backend "s3" {}`, then we can't use that same module to configure a new account, since the process for configuring a new account +requires setting the backend configuration to `backend "local" {}`. We could have a separate duplicate module that has its backend set to local. Or we could also temporarily update the backend from `"s3"` to `"local"`, but both of those approaches seem confusing. + +Another alternative is to go back to the old way of bootstrapping an account i.e. to do it via a script that creates an S3 bucket via AWS CLI. The bootstrap script would only do the minimal configuration for the S3 bucket, and let terraform handle the remainder of the configuration, such as creating the dynamodb tables. At this point, there is no risk of not having state locking in place since the account infrastructure has not yet been checked into the repository. This might be the cleanest way to have accounts follow the same pattern of using tfbackend config files. + +## Benefits of separate tfvars and tfbackend files + +* **Reduce risk of differences between environments** – When different environments have their own root modules, development teams have historically sometimes added one-off resources to specific environments without adding those resources to the template module and without realizing that they're violating an important goal of having multiple environments – that environments are isolated from each other but function identically. This creates differences between environments that are more than just configuration differences. By forcing the differences to be limited to the `.tfvars` (-var) file, it limits how badly someone can get an environment out of skew. 
+* **DRY backend configuration** – With only a single module, there is less duplication of infrastructure code in the `main.tf` file. In particular, provider configurations, shared partial backend configuration, and certain other top level local variables and data resources no longer need to be duplicated across environments, and provider versions can also be forced to be consistent. +* **Make receiving updates from template-infra more robust** – Previously, in order for a project to receive updates from the template-infra repo, the project would copy over template files but then revert files that the project has changed. Currently, the many `main.tf` root module files in the template are expected to be changed by the project since they define project specific backend configurations. With the separation of config files, projects are no longer expected to change the `main.tf` files, so the `main.tf` files in `infra/app/build-repository/`, `infra/project-config/`, `infra/app/app-config/`, etc. can be safely copied over from template-infra without needing to be reverted. +* **Reduce the cost of introducing additional infrastructure layers** – In the future we may want to add new infrastructure layers that are created and updated independently of the application layer. Examples include a network layer or a database layer. We may want to keep them separate so that changes to the application infrastructure are isolated from changes to the database infrastructure, which should occur much less frequently. Previously, to add a new layer such as the database layer, we would need two additional folders: a `db-env-template` module and a `db-envs` folder with separate root modules for each environment. This mirrors the same structure that we have for the application. With separate backend config and tfvar files we would only need a single `db` module with separate `.tfbackend` and `.tfvars` files for each environment. 
+ +## Cons of separate tfvars and tfbackend files + +* **Extra layer of abstraction** – The modules themselves aren't as simple to understand since the configuration is spread out across multiple files, the `main.tf` file and the corresponding `.tfvars` and `.tfbackend` file, rather than all in one `main.tf` file. +* **Requires additional parameters when running terraform** – Due to configuration being separated into `.tfvars` and `.tfbackend` files, terraform commands now require `-var-file` and `-backend-config` command line options. The added complexity may require a wrapper script, introducing yet another layer of abstraction. diff --git a/docs/decisions/infra/0005-database-module-design.md b/docs/decisions/infra/0005-database-module-design.md new file mode 100644 index 000000000..6eaa06f89 --- /dev/null +++ b/docs/decisions/infra/0005-database-module-design.md @@ -0,0 +1,62 @@ +# Database module design + +* Status: proposed +* Deciders: @lorenyu @kyeah @shawnvanderjagt @rocketnova +* Date: 2023-05-25 + +## Context and Problem Statement + +On many projects, setting up the application and database is a multiple-step iterative process. The infrastructure team will first set up an application service without a database, with a simple application health check. The infrastructure team will then work on setting up the database, configuring the application service to have network access to the database cluster, configuring the database user that the application will authenticate as and a database user that will run migrations, and providing a way for the application to authenticate. Then the application team will update the healthcheck to call the database. + +We want to design the template infrastructure so that each infrastructure layer can be configured and created once rather than needing to revisit prior layers. 
In other words, we'd like to be able to create the database layer, configure the database users, then create the application layer, without having to go back to make changes to database layer again. + +There are some dependencies to keep in mind: + +1. The creation of the application service layer depends on the creation of database layer, since a proper application healthcheck will need to hit the database. +2. The database layer includes the creation and configuring of the database users (i.e. PostgreSQL users) that will be used by the application and migration processes in addition to the database cluster infrastructure resources. +3. The network rule that allows inbound traffic to the database from the application depends on both the database and application service. + +## Decision Drivers + +* Avoid circular dependencies +* Avoid the need to revisit a layer (e.g. database layer, application layer) more than one time during setup of the application environment +* Keep things simple to understand and customize +* Minimize number of steps to set up an environment + +## Module Architecture Options + +* Option A: Put the database infrastructure in the same root module as the application service +* Option B: Separate the database infrastructure into a separate layer + +### Decision Outcome: Separate the database infrastructure into a separate layer + +Changes to database infrastructure are infrequent and therefore do not need to be incorporated as part of the continuous delivery process of deploying the application as it would needlessly slow down application deploys and also increase the risk of accidental changes to the database layer. When database changes are needed, they are sometimes complex due to the stateful nature of databases and can require multiple steps to make those changes gracefully. For these changes, it is beneficial to separate them from application resources so that application deploys can remain unaffected. 
Finally, breaking down the environment setup process into smaller, more linear steps – creating the database first before creating the application service – makes the environment setup process easier to understand and troubleshoot than trying to create everything at once. + +The biggest disadvantage to this approach is the fact that dependencies between root modules cannot be directly expressed in terraform. To mitigate this problem, we should carefully design the interface between root modules to minimize breaking changes in that interface. + +## Pros and Cons of the Options + +### Option A: Put the database infrastructure in the same root module as the application service + +Pros: + +* This is what we've typically done in the past. All the infrastructure necessary for the application environment would live in a single root module, with the exception of shared resources like the ECR image repository. + +Cons: + +* The application service's healthcheck depends on the database cluster to be created and the database user to be provisioned. This cannot easily be done in a single terraform apply. +* Changes to the database infrastructure are often more complex than changes to application infrastructure. Unlike application infrastructure, database changes cannot take the approach of spinning up new infrastructure in desired configuration, redirecting traffic to new infrastructure, then destroying old infrastructure. This is because application infrastructure can be designed to be stateless while databases are inherently stateful. In such cases, making database changes may require careful coordination and block changes to the application infrastructure, potentially including blocking deploys, while the database changes are made. 
+ +### Option B: Separate the database infrastructure into a separate layer + +Pros: + +* Separating the database layer makes explicit the dependency between the database and the application service, and enables an environment setup process that involves only creating resources when all dependencies have been created first. +* Application deploys do not require making requests to the database infrastructure. +* Complex database changes that require multiple steps can be made without negatively impacting application deploys. +* Not all applications require a database. Having the database layer separate reduces the amount of customization needed at the application layer for different systems. + +Cons: + +* Application resources for a single environment are split across multiple root modules +* Dependencies between root modules cannot be expressed directly in terraform to use terraform's built-in dependency graph. Instead, dependencies between root modules need to be configured from one module's outputs to another module's variable definitions file. diff --git a/docs/decisions/infra/0006-provision-database-users-with-serverless-function.md b/docs/decisions/infra/0006-provision-database-users-with-serverless-function.md new file mode 100644 index 000000000..c885f6889 --- /dev/null +++ b/docs/decisions/infra/0006-provision-database-users-with-serverless-function.md @@ -0,0 +1,87 @@ +# Provision database users with serverless function + +* Status: proposed +* Deciders: @lorenyu @kyeah @shawnvanderjagt @rocketnova +* Date: 2023-05-25 + +## Context and Problem Statement + +What is the best method for setting up database users and permissions for the application service and the migrations task? 
+ +## Decision Drivers + +* Minimize number of steps +* Security and compliance + +## Considered Options + +* **Terraform** – Define users and permissions declaratively in Terraform using the [PostgreSQL provider](https://registry.terraform.io/providers/cyrilgdn/postgresql/latest/docs). Apply changes from infrastructure engineer's local machine or from the CI/CD workflow. When initially creating database cluster, make database cluster publicly accessible and define security group rules to allow traffic from local machine or GitHub actions. After creating database users, reconfigure database cluster to make database private. +* **Shell scripts** – Define users and permissions through a shell script. This could use tools like psql or not. It could also define the permissions in a `.sql` file that gets executed. Similar to the terraform option, the database would need to be made accessible to the machine running the script. One way to do this is for the script itself to temporarily enable access to the database using AWS CLI. +* **Jump host using EC2** – Run terraform or a shell script but from an EC2 instance within the VPC. Create the EC2 instance and set up network connectivity between the EC2 instance and the database cluster as part of creating the database infrastructure resources. +* **Container task using ECS** – Build a Docker image that has the code and logic to provision users and permissions and run the code as an ECS task. +* **Serverless function using Lambda** – Write code to provision database users and permissions and run it as a Lambda function. + +### Decision Outcome: AWS Lambda function + +A Lambda function is the simplest tool that can operate within the VPC and therefore get around the obstacle of needing network access to the database cluster. 
EC2 instances are too expensive to maintain for rarely used operations like database user provisioning, and ECS tasks add complexity to the infrastructure by requiring an additional ECR image repository and image build step. + +## Pros and Cons of the Options + +### Terraform + +Pros + +* Declarative +* Could create database cluster and database users in a single terraform apply + +Cons + +* The database needs to be publicly accessible to the machine that is running the script – either the infrastructure engineer's local machine or the continuous integration service (e.g. GitHub Actions). First, this causes the database setup process to take a minimum of three steps: (1) create the database cluster with publicly accessible configuration, (2) provision the database users, (3) make the database cluster private. Second, even if it is an acceptable risk to make the database publicly accessible when it is first created and before it has any data, it may not be an acceptable risk to do so once the system is in production. Therefore, after the system is in production, there would no longer be a way to reconfigure the database users or otherwise maintain the system using this approach. +* Need to modify the database cluster configuration after creating it in order to make it private. Modifications require an additional step, and may also require manual changes to the terraform configuration. 
+ +### Shell scripts + +Pros + +* Simple +* Can represent user configuration as a `.sql` script which could simplify database management by keeping it all within SQL + +Cons + +* Same as the cons for Terraform – the database needs to be accessible to the machine running the script + +### Jump host using EC2 + +Pros + +* Can leverage the Terraform and Shell scripts approaches +* Can access the database securely from within the VPC without making the database cluster publicly accessible + +Cons + +* Added infrastructure complexity due to the need to maintain an EC2 instance + +### Container task using ECS + +Pros + +* Flexible: can build everything needed in a Docker container, including installing necessary binaries and bundling required libraries and code +* Can access the database securely from within the VPC without making the database cluster publicly accessible + +Cons + +* Increases complexity of terraform module architecture. There needs to be an ECR repository to store the Docker images. The ECR repository could be in a separate root module, which adds another layer to the module architecture. The ECR repository could be put in the `build-repository` root module, which would clutter the `build-repository` since it's not related to application builds. Or it could be put in the `database` root module and be manually created using terraform's `-target` flag, but that adds complexity to the setup process. +* Increases number of steps needed to set up the database by at least two, one to create the ECR repository and one to build and publish the Docker image to the ECR repository, before creating the database cluster resources. + +### Serverless function using Lambda + +Pros + +* Flexible: can build many things in a Lambda function +* Can access the database securely from within the VPC without making the database cluster publicly accessible +* Relatively simple + +Cons + +* Adds a new dependency to the application setup process. 
The setup process will now rely on the programming language used by the Lambda function (Python in this case). +* Can't easily use custom external binaries in AWS Lambda. So will rely mostly on code rather than lower level scripts like psql. diff --git a/docs/decisions/infra/0007-database-migration-architecture.md b/docs/decisions/infra/0007-database-migration-architecture.md new file mode 100644 index 000000000..c9088f50c --- /dev/null +++ b/docs/decisions/infra/0007-database-migration-architecture.md @@ -0,0 +1,92 @@ +# Database Migration Infrastructure and Deployment + +* Status: proposed +* Deciders: @lorenyu, @daphnegold, @chouinar, @Nava-JoshLong, @addywolf-nava, @sawyerh, @acouch, @SammySteiner +* Date: 2023-06-05 + +## Context and Problem Statement + +What is the most optimal setup for database migrations infrastructure and deployment? +This will break down the different options for how the migration is run, but not the +tools or languages the migration will be run with, that will be dependent on the framework the application is using. + +Questions that need to be addressed: + + 1. How will the method get the latest migration code to run? + 2. What infrastructure is required to use this method? + 3. How is the migration deployment re-run in case of errors? + +## Decision Drivers + +* Security +* Simplicity +* Flexibility + +## Considered Options + +* Run migrations from GitHub Actions +* Run migrations from a Lambda function +* Run migrations from an ECS task +* Run migrations from self-hosted GitHub Actions runners + +## Decision Outcome + +Run migrations from an ECS task using the same container image that is used for running the web service. Require a `db-migrate` script in the application container image that performs the migration. When running the migration task using [AWS CLI's run-task command](https://docs.aws.amazon.com/cli/latest/reference/ecs/run-task.html), use the `--overrides` option to override the command to the `db-migrate` command. 
+ +Default to rolling forward instead of rolling back when issues arise (See [Pitfalls with SQL rollbacks and automated database deployments](https://octopus.com/blog/database-rollbacks-pitfalls)). Do not support rolling back out of the box, but still allow project teams to easily implement database rollbacks through the mechanism of running an application-specific database rollback script through a general purpose `run-command.sh` script. + +Pros + +* No changes to the database network configuration are needed. The database can remain inaccessible from the public internet. +* Database migrations are agnostic to the migration framework that the application uses as long as the application is able to provide a `db-migrate` script that is accessible from the container's PATH and is able to use IAM authentication for connecting to the database. Applications can use [alembic](https://alembic.sqlalchemy.org/), [flyway](https://flywaydb.org/), [prisma](https://www.prisma.io/), another migration framework, or custom built migrations. +* Database migrations use the same application image and task definition as the base application. + +Cons + +* Running migrations requires doing a [targeted terraform apply](https://developer.hashicorp.com/terraform/tutorials/state/resource-targeting) to update the task definition without updating the service. Terraform recommends against targeting individual resources as part of a normal workflow. However, this is done to ensure migrations are run before the service is updated. + +## Other options considered + +### Run migrations from GitHub Actions using a direct database connection + +Temporarily update the database to be accessible from the internet and allow incoming network traffic from the GitHub Action runner's IP address. Then run the migrations directly from the GitHub Action runner. At the end, revert the database configuration changes. + +Pros: + +* Simple.
Requires no additional infrastructure + +Cons: + +* This method requires temporarily exposing the database to incoming connections from the internet, which may not comply with agency security policies. + +### Run migrations from a Lambda function + +Run migrations from an AWS Lambda function that uses the application's container image. The application container image needs to [implement the lambda runtime api](https://aws.amazon.com/blogs/aws/new-for-aws-lambda-container-image-support/) either by using an AWS base image for Lambda or by implementing the Lambda runtime (see [Working with Lambda container images](https://docs.aws.amazon.com/lambda/latest/dg/images-create.html)). + +Pros: + +* Relatively simple. Lambdas are already used for managing database roles. +* The Lambda function can run from within the VPC, avoiding the need to expose the database to the public internet. +* The Lambda function is separate from the application service, so we avoid the need to modify the service's task definition. + +Cons: + +* Lambda function container images need to [implement the lambda runtime api](https://aws.amazon.com/blogs/aws/new-for-aws-lambda-container-image-support/). This is a complex application requirement that would significantly limit the ease of use of the infrastructure. +* Lambda functions have a maximum runtime of 15 minutes, which can limit certain kinds of migrations. + +### Run migrations from self-hosted GitHub Actions runners + +Run the migrations directly from a [self-hosted GitHub Action runner](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/about-self-hosted-runners). Configure the runner to have network access to the database. + +Pros + +* If a project already uses self-hosted runners, this can be the simplest option as it provides all the benefits of running migrations directly from GitHub Actions without the security impact.
+ +Cons + +* The main downside is that this requires maintaining self-hosted GitHub Action runners, which is too costly to implement and maintain for projects that don't already have it set up. + +## Related ADRS: + +* [Database module design](./0005-database-module-design.md) +* [Provision database users with serverless function](./0006-provision-database-users-with-serverless-function.md) diff --git a/docs/decisions/template.md b/docs/decisions/template.md new file mode 100644 index 000000000..25696bbe7 --- /dev/null +++ b/docs/decisions/template.md @@ -0,0 +1,72 @@ +# [short title of solved problem and solution] + +* Status: [proposed | rejected | accepted | deprecated | … | superseded by [ADR-0005](0005-example.md)] +* Deciders: [list everyone involved in the decision] +* Date: [YYYY-MM-DD when the decision was last updated] + +Technical Story: [description | ticket/issue URL] + +## Context and Problem Statement + +[Describe the context and problem statement, e.g., in free form using two to three sentences. You may want to articulate the problem in form of a question.] + +## Decision Drivers + +* [driver 1, e.g., a force, facing concern, …] +* [driver 2, e.g., a force, facing concern, …] +* … + +## Considered Options + +* [option 1] +* [option 2] +* [option 3] +* … + +## Decision Outcome + +Chosen option: "[option 1]", because [justification. e.g., only option, which meets k.o. criterion decision driver | which resolves force force | … | comes out best (see below)]. 
+ +### Positive Consequences + +* [e.g., improvement of quality attribute satisfaction, follow-up decisions required, …] +* … + +### Negative Consequences + +* [e.g., compromising quality attribute, follow-up decisions required, …] +* … + +## Pros and Cons of the Options + +### [option 1] + +[example | description | pointer to more information | …] + +* Good, because [argument a] +* Good, because [argument b] +* Bad, because [argument c] +* … + +### [option 2] + +[example | description | pointer to more information | …] + +* Good, because [argument a] +* Good, because [argument b] +* Bad, because [argument c] +* … + +### [option 3] + +[example | description | pointer to more information | …] + +* Good, because [argument a] +* Good, because [argument b] +* Bad, because [argument c] +* … + +## Links + +* [Link type] [Link to ADR] +* … diff --git a/docs/imgs/initial_setup.svg b/docs/imgs/initial_setup.svg new file mode 100644 index 000000000..d1d1c6b87 --- /dev/null +++ b/docs/imgs/initial_setup.svg @@ -0,0 +1 @@ + diff --git a/docs/imgs/multi_cloud.svg b/docs/imgs/multi_cloud.svg new file mode 100644 index 000000000..35fba1ff6 --- /dev/null +++ b/docs/imgs/multi_cloud.svg @@ -0,0 +1 @@ + diff --git a/docs/imgs/single_cloud.svg b/docs/imgs/single_cloud.svg new file mode 100644 index 000000000..dc07c4af4 --- /dev/null +++ b/docs/imgs/single_cloud.svg @@ -0,0 +1 @@ + diff --git a/docs/infra/README.md b/docs/infra/README.md new file mode 100644 index 000000000..942f747ce --- /dev/null +++ b/docs/infra/README.md @@ -0,0 +1,18 @@ +# Infrastructure Documentation + +## Setup + +* [Configure the project](../../infra/project-config/main.tf) (These values will be used in subsequent infra setup steps to namespace resources and add infrastructure tags.) 
+* [Set up infrastructure developer tools](./set-up-infrastructure-tools.md) +* [Set up AWS account](./set-up-aws-account.md) +* [Set up application build repository](./set-up-app-build-repository.md) +* [Set up application environment](./set-up-app-env.md) + +## Technical Design + +* [Module architecture](./module-architecture.md) + +## Learning + +* [Introduction to Terraform](./intro-to-terraform.md) +* [Introduction to Terraform workspaces](./intro-to-terraform-workspaces.md) diff --git a/docs/infra/destroy-infrastructure.md b/docs/infra/destroy-infrastructure.md new file mode 100644 index 000000000..eea655d30 --- /dev/null +++ b/docs/infra/destroy-infrastructure.md @@ -0,0 +1,59 @@ +# Destroy infrastructure + +To destroy everything you'll need to undeploy all the infrastructure in reverse order that they were created. In particular, the account root module(s) need to be destroyed last. + +## Instructions + +1. First destroy all your environments. Within `/infra/app/service` run the following, replacing `dev` with the environment you're destroying. + + ```bash + $ terraform init --backend-config=dev.s3.tfbackend + $ terraform destroy -var-file=dev.tfvars + ``` + +2. Then to destroy the backends, first you'll need to add `force_destroy = true` to the S3 buckets, and update the lifecycle block to set `prevent_destroy = false`. Then run `terraform apply` from within the `infra/accounts` directory. The reason we need to do this is because S3 buckets by default are protected from destruction to avoid loss of data. See [Terraform: Destroy/Replace Buckets](https://medium.com/interleap/terraform-destroy-replace-buckets-cf9d63d0029d) for a more in depth explanation. + + ```terraform + # infra/modules/modules/terraform-backend-s3/main.tf + + resource "aws_s3_bucket" "tf_state" { + bucket = var.state_bucket_name + + force_destroy = true + + # Prevent accidental destruction by a developer executing terraform destroy in the wrong directory. Contains terraform state files.
+ lifecycle { + prevent_destroy = false + } + } + + ... + + resource "aws_s3_bucket" "tf_log" { + bucket = var.tf_logging_bucket_name + force_destroy = true + } + ``` + +3. Then since we're going to be destroying the tfstate buckets, you'll want to move the tfstate file out of S3 and back to your local system. Comment out or delete the s3 backend configuration: + + ```terraform + # infra/accounts/main.tf + + # Comment out or delete the backend block + backend "s3" { + ... + } + ``` + +4. Then run the following from within the `infra/accounts` directory to copy the tfstate back to a local tfstate file: + + ```bash + terraform init -force-copy + ``` + +5. Finally, you can run `terraform destroy` within the `infra/accounts` directory. + + ```bash + terraform destroy + ``` diff --git a/docs/infra/intro-to-terraform-workspaces.md b/docs/infra/intro-to-terraform-workspaces.md new file mode 100644 index 000000000..f692e1e82 --- /dev/null +++ b/docs/infra/intro-to-terraform-workspaces.md @@ -0,0 +1,59 @@ +# Workspaces + +Terraform workspaces are created by default; the default workspace is named "default." Workspaces are used to allow multiple engineers to deploy their own stacks for development and testing. This allows multiple engineers to develop new features in parallel using a single environment without destroying each other's infrastructure. Separate resources will be created for each engineer when using the prefix variable. + +## Terraform workspace commands + +`terraform workspace show [Name]` - This command will show the workspace you are working in. + +`terraform workspace list [Name]` - This command will list all workspaces. + +`terraform workspace new [Name]` - This command will create a new workspace. + +`terraform workspace select [Name]` - This command will switch your workspace to the workspace you select. + +`terraform workspace delete [Name]` - This command will delete the specified workspace.
(does not delete infrastructure, that step will be done first) + +## Workspaces and prefix - A How To + + Workspaces are used to allow multiple developers to deploy their own stacks for development and testing. By default `prefix` is set to `terraform.workspace` in the envs/dev environment, it is `staging` and `prod` in those respective environments. + +### envs/dev/main.tf + +``` tf +locals { + prefix = terraform.workspace +} + +module "example" { + source = "../../modules/example" + prefix = local.prefix +} + +``` + +### modules/example/variables.tf - When creating a new module create the variable "prefix" in your variables.tf + +``` tf + +variable "prefix" { + type = string + description = "prefix used to uniquely identify resources, allows parallel development" + +} + +``` + +### modules/example/main.tf - Use var.prefix to uniquely name resources for parallel development + +``` tf + +# Create the S3 bucket with a unique prefix from terraform.workspace. +resource "aws_s3_bucket" "example" { + bucket = "${var.prefix}-bucket" + +} + +``` + +When in the workspace "shawn", the resulting bucket name created in the aws account will be `shawn-bucket`. This prevents the following undesirable situation: If resources are not actively prefixed and two developers deploy the same resource, the developer who runs their deployment second will overwrite the deployment of the first. diff --git a/docs/infra/intro-to-terraform.md b/docs/infra/intro-to-terraform.md new file mode 100644 index 000000000..c8105303d --- /dev/null +++ b/docs/infra/intro-to-terraform.md @@ -0,0 +1,33 @@ +# Introduction to Terraform + +## Basic Terraform Commands + +The `terraform init` command is used to initialize a working directory containing Terraform configuration files. This is the first command that should be run after writing a new Terraform configuration or cloning an existing one from version control.
+ +The `terraform plan` command creates an execution plan, which lets you preview the changes that Terraform plans to make to your infrastructure. By default, when Terraform creates a plan it: + +- Reads the current state of any already-existing remote objects to make sure that the Terraform state is up-to-date. +- Compares the current configuration to the prior state and notes any differences. +- Proposes a set of change actions that should, if applied, make the remote objects match the configuration. + +The `terraform apply` command executes the actions proposed in a Terraform plan, deploying the infrastructure specified in the configuration. Use with caution. The configuration becomes idempotent once a subsequent apply returns 0 changes. + +The `terraform destroy` command is a convenient way to destroy all remote objects managed by a particular Terraform configuration. Use `terraform plan -destroy` to preview what remote objects will be destroyed if you run `terraform destroy`. + +⚠️ WARNING! ⚠️ This is a destructive command! As a best practice, it's recommended that you comment out resources in non-development environments rather than running this command. `terraform destroy` should only be used as a way to clean up a development environment, e.g. a developer's workspace after they are done with it. + +For more information about terraform commands follow the link below: + +- [Basic CLI Features](https://www.terraform.io/cli/commands) + +## Terraform Dependency Lock File + +The [dependency lock file](https://www.terraform.io/language/files/dependency-lock) tracks provider dependencies. It belongs to the configuration as a whole and is created when running `terraform init`. The lock file is always named `.terraform.lock.hcl`, and this name is intended to signify that it is a lock file for various items that Terraform caches in the `.terraform` subdirectory of your working directory.
You should include this file in your version control repository so that you can discuss potential changes to your external dependencies via code review, just as you would discuss potential changes to your configuration itself. + +## Modules + +A module is a container for multiple resources that are used together. Modules can be used to create lightweight abstractions, so that you can describe your infrastructure in terms of its architecture, rather than directly in terms of physical objects. The .tf files in your working directory when you run `terraform plan` or `terraform apply` together form the root module. In this root module you will call modules that you create from the module directory to build the infrastructure required to provide any functionality needed for the application. + +## Terraform Workspaces + +Workspaces are used to allow multiple engineers to deploy their own stacks for development and testing. Read more about it in [Terraform Workspaces](./intro-to-terraform-workspaces.md) diff --git a/docs/infra/making-infra-changes.md b/docs/infra/making-infra-changes.md new file mode 100644 index 000000000..9998569e3 --- /dev/null +++ b/docs/infra/making-infra-changes.md @@ -0,0 +1,56 @@ +# Making and applying infrastructure changes + +## Requirements + +First read [Module Architecture](./module-architecture.md) to understand how the terraform modules are structured. + +## Using make targets (recommended) + +For most changes you can use the Make targets provided in the root level Makefile, all of which can be run from the project root.
+ +Make changes to the account: + +```bash +make infra-update-current-account +``` + +Make changes to the application service in the dev environment: + +```bash +make infra-update-app-service APP_NAME=app ENVIRONMENT=dev +``` + +Make changes to the application build repository (Note that the build repository is shared across environments, so there is no ENVIRONMENT parameter): + +```bash +make infra-update-app-build-repository APP_NAME=app +``` + +You can also pass in extra arguments to `terraform apply` by using the `TF_CLI_ARGS` or `TF_CLI_ARGS_apply` parameter (see [Terraform's docs on TF_CLI_ARGS and TF_CLI_ARGS_name](https://developer.hashicorp.com/terraform/cli/config/environment-variables#tf_cli_args-and-tf_cli_args_name)): + +```bash +# Example +TF_CLI_ARGS_apply='-input=false -auto-approve' make infra-update-app-service APP_NAME=app ENVIRONMENT=dev +TF_CLI_ARGS_apply='-var=image_tag=abcdef1' make infra-update-app-service APP_NAME=app ENVIRONMENT=dev +``` + +## Using terraform CLI wrapper scripts + +An alternative to using the Makefile is to directly use the terraform wrapper scripts that the Makefile uses: + +```bash +project-root$ ./bin/terraform-init.sh app/service dev +project-root$ ./bin/terraform-apply.sh app/service dev +project-root$ ./bin/terraform-init-and-apply.sh app/service dev # calls init and apply in the same script +``` + +Look in the script files for more details on usage. + +## Using terraform CLI directly + +Finally, if the wrapper scripts don't meet your needs, you can always run terraform directly from the root module directory. You may need to do this if you are running terraform commands other than `terraform plan` and `terraform apply`, such as `terraform import`, `terraform taint`, etc. To do this, you'll need to pass in the appropriate `tfvars` and `tfbackend` files to `terraform init` and `terraform apply`. 
For example, to make changes to the application's service resources in the dev environment, cd to the `infra/app/service` directory and run: + +```bash +infra/app/service$ terraform init -backend-config=dev.s3.tfbackend +infra/app/service$ terraform apply -var-file=dev.tfvars +``` diff --git a/docs/infra/module-architecture.md b/docs/infra/module-architecture.md new file mode 100644 index 000000000..39b52fbb9 --- /dev/null +++ b/docs/infra/module-architecture.md @@ -0,0 +1,86 @@ +# Terraform module architecture + +This doc describes how Terraform modules are structured. + +## Code structure + +The infrastructure code is organized as follows. [Root modules](https://www.terraform.io/language/modules#the-root-module) are modules that are deployed separately from each other, whereas child modules are reusable modules that are called from root modules. + +```text +infra/ Infrastructure code + accounts/ Root module for IaC and IAM resources + app/ Application-specific infrastructure + build-repository/ Root module for resources storing built release candidates used for deploys + network/ (In development) Root module for virtual network resources + database/ (In development) Root module for database resources + service/ Root module for application service resources (load balancer, application service) + modules/ Reusable child modules +``` + +## Module calling structure + +The following diagram describes the relationship between modules and their child modules. Arrows go from the caller module to the child module. 
+ +```mermaid +flowchart TB + + classDef default fill:#FFF,stroke:#000 + classDef root-module fill:#F37100,stroke-width:3,font-family:Arial + classDef child-module fill:#F8E21A,font-family:Arial + + subgraph infra + account:::root-module + + subgraph app + app/build-repository[build-repository]:::root-module + app/network[network]:::root-module + app/database[database]:::root-module + app/service[service]:::root-module + end + + subgraph modules + terraform-backend-s3:::child-module + auth-github-actions:::child-module + container-image-repository:::child-module + network:::child-module + database:::child-module + web-app:::child-module + end + + account --> terraform-backend-s3 + account --> auth-github-actions + app/build-repository --> container-image-repository + app/network --> network + app/database --> database + app/service --> web-app + + end +``` + +## Application environments + +An application may have multiple environments (e.g. dev, staging, prod). The environments share the same root modules but will have different configurations. The configurations are saved as separate `.tfvars` and `.tfbackend` files named after the environment. For example, the `app/service` infrastructure resources for the `dev` environment will be configured via `dev.tfvars` and `dev.s3.tfbackend` files in the `infra/app/service` module directory. + +## Module dependencies + +The following diagram illustrates the dependency structure of the root modules. + +1. Account root modules need to be deployed first to create the S3 bucket and DynamoDB tables needed to configure the Terraform backends in the rest of the root modules. +2. The application's build repository needs to be deployed next to create the resources needed to store the built release candidates that are deployed to the application environments. +3. The individual application environment root modules are deployed last once everything else is set up. 
These root modules are the ones that are deployed regularly as part of application deployments. + +```mermaid +flowchart RL + +classDef default fill:#F8E21A,stroke:#000,font-family:Arial + +app/service --> app/build-repository --> accounts +app/service --> accounts +app/service --> app/network +app/service --> app/database --> app/network --> accounts +app/database --> accounts +``` + +## Making changes to infrastructure + +Now that you understand how the modules are structured, see [making changes to infrastructure](./making-infra-changes.md). diff --git a/docs/infra/module-dependencies.md b/docs/infra/module-dependencies.md new file mode 100644 index 000000000..6a2ee503a --- /dev/null +++ b/docs/infra/module-dependencies.md @@ -0,0 +1,100 @@ +# Managing module dependencies + +These are the principles that guide the design of the infrastructure template. + +## Use explicit outputs and variables to connect resources across child modules in the same root module + +If a resource in module B depends on a resource in module A, and both modules are called from the same root module, then create an output in module A with the information that is needed by module B, and pass that into module B as an input variable. + +```terraform +# root-module/main.tf + +module "a" { + ... +} + +module "b" { + input = module.a.output +} +``` + +This makes the dependencies between the resources explicit: + +```mermaid +flowchart LR + +subgraph A[module A] + output +end + +subgraph B[module B] + input +end + +input -- depends on --> output +``` + +**Do not** use [data sources](https://developer.hashicorp.com/terraform/language/data-sources) to reference resource dependencies in the same root module. 
A data source does not represent a dependency in [terraform's dependency graph](https://developer.hashicorp.com/terraform/internals/graph), and therefore there will potentially be a race condition, as Terraform will not know that it needs to create/update the resource in module A before it creates/updates the resource in module B that depends on it. + +## Use config modules and data resources to manage dependencies between root modules + +If a resource in root module S depends on a resource in root module R, it is not possible to specify the dependency directly since the resources are managed in separate state files. In this situation, use a [data source](https://developer.hashicorp.com/terraform/language/data-sources) in module S to reference the resource in module R, and use a shared configuration module that specifies identifying information that is used both to create the resource in R and to query for the resource in S. + +```terraform +# root module R + +module "config" { + ... +} + +resource "parent" "p" { + identifier = module.config.parent_identifier +} +``` + +```terraform +# root module S + +module "config" { + ... +} + +data "parent" "p" { + identifier = module.config.parent_identifier +} + +resource "child" "c" { + input = data.parent.p.some_attribute +} +``` + +This makes the dependency explicit, but indirect. Instead of one resource directly depending on the other, both resources depend on a shared config value(s) that uniquely identifies the parent resource. If the parent resource changes, the data source will also change, triggering the appropriate change in the child resource. If identifying information about the parent resource changes, it must be done through the shared configuration module so that the data source's query remains in sync. 
+ +```mermaid +flowchart RL + +subgraph config[config module] + config_value[config value] +end + +subgraph R[root module R] + parent[parent resource] +end + +subgraph S[root module S] + data.parent[parent data source] + child[child resource] +end + +parent -- depends on --> config_value +data.parent -- depends on --> config_value +child -- depends on --> data.parent +``` + +## When it is not feasible to create resources using static configuration values, use root module outputs and configuration scripts to manage dependencies between root modules + +In rare cases, it is not feasible to use configuration values to create a resource. In this situation, if a resource in root module S depends on a resource in root module R, create an output in R with the information that is needed by module S. Then create a configuration script for S that reads from R's output and saves the relevant information in a `.tfvars` file that S can use to specify input variables. + +One example of this is the terraform bucket name that is used by the `data.terraform_remote_state.current_image_tag` data source in the `service` module. The bucket name is generated dynamically using the current AWS user's account ID, and is therefore not specified statically via configuration. + +This method should be used minimally as it is the least explicit and most brittle of all the methods. The dependency between modules remains implicit, and there is additional logic in shell scripts to maintain. diff --git a/docs/infra/set-up-app-build-repository.md b/docs/infra/set-up-app-build-repository.md new file mode 100644 index 000000000..fe54f4c64 --- /dev/null +++ b/docs/infra/set-up-app-build-repository.md @@ -0,0 +1,31 @@ +# Set up application build repository + +The application build repository setup process will create infrastructure resources needed to store built release candidate artifacts used to deploy the application to an environment.
+ +## Requirements + +Before setting up the application's build repository you'll need to have: + +1. [Set up the AWS account](./set-up-aws-account.md) + +## 1. Configure backend + +To create the tfbackend file for the build repository using the backend configuration values from your current AWS account, run + +```bash +make infra-configure-app-build-repository APP_NAME=app +``` + +Pass in the name of the app folder within `infra`. By default this is `app`. + +## 2. Create build repository resources + +Now run the following commands to create the resources, making sure to verify the plan before confirming the apply. + +```bash +make infra-update-app-build-repository APP_NAME=app +``` + +## Set up application environments + +Once you set up the deployment process, you can proceed to [set up application environments](./set-up-app-env.md) diff --git a/docs/infra/set-up-app-env.md b/docs/infra/set-up-app-env.md new file mode 100644 index 000000000..ca55adddc --- /dev/null +++ b/docs/infra/set-up-app-env.md @@ -0,0 +1,51 @@ +# Set up application environment + +The application environment setup process will: + +1. Configure a new application environment and create the infrastructure resources for the application in that environment + +## Requirements + +Before setting up the application's environments you'll need to have: + +1. [A compatible application in the app folder](./application-requirements.md) +2. [Configure the app](/infra/app/app-config/main.tf). Make sure you update `has_database` to `true` or `false` depending on whether or not your application has a database to integrate with. +3. (If the application has a database) [Set up the database for the application](./set-up-database.md) +4. [Set up the application build repository](./set-up-app-build-repository.md) + +## 1. 
Configure backend + +To create the tfbackend and tfvars files for the new application environment, run + +```bash +make infra-configure-app-service APP_NAME=app ENVIRONMENT= +``` + +`APP_NAME` needs to be the name of the application folder within the `infra` folder. It defaults to `app`. +`ENVIRONMENT` needs to be the name of the environment you are creating. This will create a file called `.s3.tfbackend` in the `infra/app/service` module directory. + +Depending on the value of `has_database` in the [app-config module](/infra/app/app-config/main.tf), the application will be configured with or without database access. + +## 2. Build and publish the application to the application build repository + +Before creating the application resources, you'll need to first build and publish at least one image to the application build repository. + +There are two ways to do this: + +1. Trigger the "Build and Publish" workflow from your repo's GitHub Actions tab. This option requires that the `role-to-assume` GitHub workflow variable has already been setup as part of the overall infra account setup process. +1. Alternatively, run the following from the root directory. This option can take much longer than the GitHub workflow, depending on your machine's architecture. + + ```bash + make release-build + make release-publish + ``` + +Copy the image tag name that was published. You'll need this in the next step. + +## 3. Create application resources with the image tag that was published + +Now run the following commands to create the resources, using the image tag that was published from the previous step. Review the terraform before confirming "yes" to apply the changes. 
+ +```bash +TF_CLI_ARGS_apply="-var=image_tag=" make infra-update-app-service APP_NAME=app ENVIRONMENT= +``` diff --git a/docs/infra/set-up-aws-account.md b/docs/infra/set-up-aws-account.md new file mode 100644 index 000000000..a65abcf40 --- /dev/null +++ b/docs/infra/set-up-aws-account.md @@ -0,0 +1,57 @@ +# Set up AWS account + +The AWS account setup process will: + +1. Create the [Terraform backend](https://www.terraform.io/language/settings/backends/configuration) resources needed to store Terraform's infrastructure state files. The project uses an [S3 backend](https://www.terraform.io/language/settings/backends/s3). +2. Create the [OpenID connect provider in AWS](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc.html) to allow GitHub actions to access AWS account resources. + +## Prerequisites + +* You'll need to have [set up infrastructure tools](./set-up-infrastructure-tools.md), like Terraform, AWS CLI, and AWS authentication. +* You'll also need to make sure the [project is configured](/infra/project-config/main.tf). + +## Overview of Terraform backend management + +The approach to backend management allows Terraform to both create the resources needed for a remote backend as well as allow terraform to store that configuration state in that newly created backend. This also allows us to separate infrastructure required to support terraform from infrastructure required to support the application. Because each backend, bootstrap or environment, stores its own terraform.tfstate in these buckets, ensure that any backends that are shared use a unique key. When using a non-default workspace, the state path will be `/workspace_key_prefix/workspace_name/key`, `workspace_key_prefix` default is `env:` + +## Instructions + +### 1. Make sure you're authenticated into the AWS account you want to configure + +The account set up sets up whatever account you're authenticated into.
To see which account that is, run + +```bash +aws sts get-caller-identity +``` + +To see a more human readable account alias instead of the account, run + +```bash +aws iam list-account-aliases +``` + +### 2. Create backend resources and tfbackend config file + +Run the following command, replacing `` with a human readable name for the AWS account that you're authenticated into. The account name will be used to prefix the created tfbackend file so that it's easier to visually identify as opposed to identifying the file using the account id. For example, if you have an account per environment, the account name can be the name of the environment (e.g. "prod" or "staging"). Or if you are setting up an account for all lower environments, account name can be "lowers". If your AWS account has an account alias, you can also use that. + +```bash +make infra-set-up-account ACCOUNT_NAME= +``` + +This command will create the S3 tfstate bucket and the GitHub OIDC provider. It will also create a `[account name].[account id].s3.tfbackend` file in the `infra/accounts` directory. + +### 3. Update the account names map in app-config + +In [app-config/main.tf](/infra/app/app-config/main.tf), update the `account_names_by_environment` config to reflect the account name you chose. + +## Making changes to the account + +If you make changes to the account terraform and want to apply those changes, run + +```bash +make infra-update-current-account +``` + +## Destroying infrastructure + +To undeploy and destroy infrastructure, see [instructions on destroying infrastructure](./destroy-infrastructure.md). diff --git a/docs/infra/set-up-database.md b/docs/infra/set-up-database.md new file mode 100644 index 000000000..00ce59587 --- /dev/null +++ b/docs/infra/set-up-database.md @@ -0,0 +1,82 @@ +# Set up database + +The database setup process will: + +1. Configure and deploy an application database cluster using [Amazon Aurora Serverless V2](https://aws.amazon.com/rds/aurora/serverless/) +2.
Create a [PostgreSQL schema](https://www.postgresql.org/docs/current/ddl-schemas.html) `app` to contain tables used by the application. +3. Create an IAM policy that allows IAM roles with that policy attached to [connect to the database using IAM authentication](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.Connecting.html) +4. Create an [AWS Lambda function](https://docs.aws.amazon.com/lambda/latest/dg/welcome.html), the "role manager", for provisioning the [PostgreSQL database users](https://www.postgresql.org/docs/8.0/user-manag.html) that will be used by the application service and by the migrations task. +5. Invoke the role manager function to create the `app` and `migrator` Postgres users. + +## Requirements + +Before setting up the database you'll need to have: + +1. [Set up the AWS account](./set-up-aws-account.md) +2. pip installed (pip is needed to download dependencies for the role manager Lambda function) + +## 1. Configure backend + +To create the tfbackend file for the new application environment, run + +```bash +make infra-configure-app-database APP_NAME= ENVIRONMENT= +``` + +`APP_NAME` needs to be the name of the application folder within the `infra` folder. By default, this is `app`. +`ENVIRONMENT` needs to be the name of the environment you are creating. This will create a file called `.s3.tfbackend` in the `infra/app/service` module directory. + +## 2. Create database resources + +Now run the following commands to create the resources. Review the terraform before confirming "yes" to apply the changes. This can take over 5 minutes. + +```bash +make infra-update-app-database APP_NAME=app ENVIRONMENT= +``` + +## 3. Create Postgres users + +Trigger the role manager Lambda function that was created in the previous step in order to create the application and migrator Postgres users. 
+ +```bash +make infra-update-app-database-roles APP_NAME=app ENVIRONMENT= +``` + +The Lambda function's response should describe the resulting PostgreSQL roles and groups that are configured in the database. It should look like a minified version of the following: + +```json +{ + "roles": [ + "postgres", + "migrator", + "app" + ], + "roles_with_groups": { + "rds_superuser": "rds_password", + "pg_monitor": "pg_read_all_settings,pg_read_all_stats,pg_stat_scan_tables", + "postgres": "rds_superuser", + "app": "rds_iam", + "migrator": "rds_iam" + }, + "schema_privileges": { + "public": "{postgres=UC/postgres,=UC/postgres}", + "app": "{migrator=UC/migrator,app=U/migrator}" + } +} +``` + +### Important note on Postgres table permissions + +Before creating migrations that create tables, first create a migration that includes the following SQL command (or equivalent if your migrations are written in a general purpose programming language): + +```sql +ALTER DEFAULT PRIVILEGES GRANT ALL ON TABLES TO app +``` + +This will cause all future tables created by the `migrator` user to automatically be accessible by the `app` user. See the [Postgres docs on ALTER DEFAULT PRIVILEGES](https://www.postgresql.org/docs/current/sql-alterdefaultprivileges.html) for more info. As an example see the example app's migrations file [migrations.sql](/app/migrations.sql). + +Why is this needed? The reason is because the `migrator` role will be used by the migration task to run database migrations (creating tables, altering tables, etc.), while the `app` role will be used by the web service to access the database. Moreover, in Postgres, new tables won't automatically be accessible by roles other than the creator unless specifically granted, even if those other roles have usage access to the schema that the tables are created in. In other words if the `migrator` user created a new table `foo` in the `app` schema, the `app` user will not have automatically be able to access it by default. 
+ +## Set up application environments + +Once you set up the deployment process, you can proceed to [set up the application service](./set-up-app-env.md) diff --git a/docs/infra/set-up-infrastructure-tools.md b/docs/infra/set-up-infrastructure-tools.md new file mode 100644 index 000000000..ecc82a6b1 --- /dev/null +++ b/docs/infra/set-up-infrastructure-tools.md @@ -0,0 +1,88 @@ +# Set up infrastructure developer tools + +If you are contributing to infrastructure, you will need to complete these setup steps. + +## Prerequisites + +### Install Terraform + +[Terraform](https://www.terraform.io/) is an infrastructure as code (IaC) tool that allows you to build, change, and version infrastructure safely and efficiently. This includes both low-level components like compute instances, storage, and networking, as well as high-level components like DNS entries and SaaS features. + +You may need different versions of Terraform since different projects may require different versions of Terraform. The best way to manage Terraform versions is with [Terraform Version Manager](https://github.com/tfutils/tfenv). + +To install via [Homebrew](https://brew.sh/) + +```bash +brew install tfenv +``` + +Then install the version of Terraform you need. + +```bash +tfenv install 1.4.6 +``` + +If you are unfamiliar with Terraform, check out this [basic introduction to Terraform](./introduction-to-terraform.md). + +### Install AWS CLI + +The [AWS Command Line Interface (AWS CLI)](https://aws.amazon.com/cli/) is a unified tool to manage your AWS services. With just one tool to download and configure, you can control multiple AWS services from the command line and automate them through scripts. 
Install the AWS command line tool by following the instructions found here: + +- [Install AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) + +### Install Go + +The [Go programming language](https://go.dev/dl/) is required to run [Terratest](https://terratest.gruntwork.io/), the unit test framework for Terraform. + +### Install GitHub CLI + +The [GitHub CLI](https://cli.github.com/) is useful for automating certain operations for GitHub such as with GitHub actions. + +```bash +brew install gh +``` + +## AWS Authentication + +In order for Terraform to authenticate with your accounts you will need to configure your AWS credentials using the AWS CLI or manually create your config and credentials file. If you need to manage multiple credentials or create named profiles for use with different environments you can add the `--profile` option. + +There are multiple ways to authenticate, but we recommend creating a separate profile for your project in your AWS credentials file, and setting your local environment variable `AWS_PROFILE` to the profile name. We recommend using [direnv](https://direnv.net/) to manage local environment variables.
+**Credentials should be located in ~/.aws/credentials** (Linux & Mac) or **%USERPROFILE%\.aws\credentials** (Windows) + +### Examples + +```bash +$ aws configure +AWS Access Key ID [None]: AKIAIOSFODNN7EXAMPLE +AWS Secret Access Key [None]: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY +Default region name [None]: us-east-2 +Default output format [None]: json +``` + +**Using the above command will create a [default] profile.** + +```bash +$ aws configure --profile dev +AWS Access Key ID [None]: AKIAIOSFODNN7EXAMPLE +AWS Secret Access Key [None]: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY +Default region name [None]: us-east-2 +Default output format [None]: json +``` + +**Using the above command will create a [dev] profile.** + +Once you're done, verify access by running the following command to print out information about the AWS IAM user you authenticated as. + +```bash +aws sts get-caller-identity +``` + +### References + +- [Configuration basics][1] +- [Named profiles for the AWS CLI][2] +- [Configuration and credential file settings][3] + +[1]: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html +[2]: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html +[3]: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html diff --git a/docs/infra/vulnerability-management.md b/docs/infra/vulnerability-management.md new file mode 100644 index 000000000..1165db966 --- /dev/null +++ b/docs/infra/vulnerability-management.md @@ -0,0 +1,35 @@ +# Vulnerability Management for Docker Images +This repository contains a GitHub workflow that allows you to scan Docker images for vulnerabilities. The workflow, named `ci-vulnerability-scans` is located in the directory `.github/workflows`. 
The goal in scanning the image before pushing it to the repository is so that you can catch any vulnerabilities before deploying the image; ECR scanning takes time and the image can still be used even with vulnerabilities found by Inspector. Also, if you use `scratch` as a base image, ECR is unable to scan the image when it is pushed, which is a known issue. + +To ensure a smaller surface area for vulnerabilities, follow this method of building images: +- Build base image with required packages, name it something like `build` +- Configure app build from the image in the previous step, name it something like `app-build` +- Create a final image from `scratch` named `release` (ie `from scratch as release`), and copy any needed directories from the `app-build` image + +``` +FROM ... AS build +# Do base installs for dev and app-build here +FROM build AS dev +# Local dev installs only +FROM build AS app-build +# All installs for the release image +# Any tweaks needed for the release image +FROM scratch AS release +# Copy over the files from app-build +# COPY --from=app-build /app-build/paths/to/files /release/paths/to/files +``` + +By following this method, your deployment image will have the minimum required directories and files, it will shrink the overall image size, and reduce findings + +## How to use Workflow +The workflow will run whenever there is a push to a PR or when merged to main if there are changes in the `app` directory. It is scanning in both cases to ensure there are no issues if a PR is approved on a Friday, but isn't merged till Monday - a CVE could have been found in the time between the last run and the merge. + +## Notes about Scanners +### Hadolint +The hadolint scanner allows you to ignore or safelist certain findings, which can be specified in the [.hadolint.yaml](../../.hadolint.yaml) file. There is a template file here that you can use in your repo.
+### Trivy +The trivy scanner allows you to ignore or safelist certain findings, which can be specified in the [.trivyignore](../../.trivyignore) file. There is a template file here that you can use in your repo. +### Anchore +The anchore scanner allows you to ignore or safelist certain findings, which can be specified in the [.grype.yml](../../.grype.yml) file. There is a template file here that you can use in your repo. There are flags set to ignore findings that are in the state `not-fixed`, `wont-fix`, and `unknown`. +### Dockle +The dockle scanner action does not have the ability to use an ignore or safelist findings file, but is able to by specifying an allow file, or `DOCKLE_ACCEPT_FILES`, environmental variable. To get around this, there is a step before the dockle scan is ran to check for a file named [.dockleconfig](../../.dockleconfig), and pipe it to the environmental variable if it exists. Note that this will not ignore finding types like the other scanner's ignore file, but ignore the file specified in the list diff --git a/docs/releases.md b/docs/releases.md new file mode 100644 index 000000000..68a05e62b --- /dev/null +++ b/docs/releases.md @@ -0,0 +1,21 @@ +# Release Management + +## Building a release + +To build a release, run + +```bash +make release-build +``` + +This builds the release from [app/Dockerfile](../app/Dockerfile). The Dockerfile +needs to have a build stage called `release` to act as the build target. 
+(See [Name your build stages](https://docs.docker.com/build/building/multi-stage/#name-your-build-stages)) + +## Publishing a release + +TODO + +## Deploying a release + +TODO diff --git a/infra/.gitignore b/infra/.gitignore new file mode 100644 index 000000000..2cafcb4d8 --- /dev/null +++ b/infra/.gitignore @@ -0,0 +1,28 @@ +# Local .terraform metadata +**/.terraform/* + +# .tfstate files +*.tfstate +*.tfstate.* + +# Crash log files +crash.log + +# Ignore override files as they are usually used to override resources locally and so +# are not checked in +override.tf +override.tf.json +*_override.tf +*_override.tf.json + +# Include override files you do wish to add to version control using negated pattern +# +# !example_override.tf + +# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan +# example: *tfplan* +*.plan +*.tfstate + +# Ignore archives used for deploying lambdas +*.zip diff --git a/infra/accounts/main.tf b/infra/accounts/main.tf new file mode 100644 index 000000000..1ed35c5df --- /dev/null +++ b/infra/accounts/main.tf @@ -0,0 +1,53 @@ +data "aws_caller_identity" "current" {} +data "aws_region" "current" {} + +locals { + # This must match the name of the bucket created while bootstrapping the account in set-up-current-account.sh + tf_state_bucket_name = "${module.project_config.project_name}-${data.aws_caller_identity.current.account_id}-${data.aws_region.current.name}-tf" + + # Choose the region where this infrastructure should be deployed. + region = module.project_config.default_region + + # Set project tags that will be used to tag all resources. + tags = merge(module.project_config.default_tags, { + description = "Backend resources required for terraform state management and GitHub authentication with AWS." 
+ }) +} + +terraform { + + required_version = ">= 1.2.0, < 2.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = "~>5.6.0" + } + } + + backend "s3" { + encrypt = "true" + } +} + +provider "aws" { + region = local.region + default_tags { + tags = local.tags + } +} + +module "project_config" { + source = "../project-config" +} + +module "backend" { + source = "../modules/terraform-backend-s3" + name = local.tf_state_bucket_name +} + +module "auth_github_actions" { + source = "../modules/auth-github-actions" + github_actions_role_name = module.project_config.github_actions_role_name + github_repository = module.project_config.code_repository +} diff --git a/infra/accounts/outputs.tf b/infra/accounts/outputs.tf new file mode 100644 index 000000000..2ec8af03a --- /dev/null +++ b/infra/accounts/outputs.tf @@ -0,0 +1,23 @@ +output "project_name" { + value = module.project_config.project_name +} + +output "account_id" { + value = data.aws_caller_identity.current.account_id +} + +output "region" { + value = data.aws_region.current.name +} + +output "tf_state_bucket_name" { + value = module.backend.tf_state_bucket_name +} + +output "tf_log_bucket_name" { + value = module.backend.tf_log_bucket_name +} + +output "tf_locks_table_name" { + value = module.backend.tf_locks_table_name +} diff --git a/infra/example.s3.tfbackend b/infra/example.s3.tfbackend new file mode 100644 index 000000000..c71af2534 --- /dev/null +++ b/infra/example.s3.tfbackend @@ -0,0 +1,4 @@ +bucket = "" +key = "" +dynamodb_table = "" +region = "" diff --git a/infra/frontend/app-config/env-config/outputs.tf b/infra/frontend/app-config/env-config/outputs.tf new file mode 100644 index 000000000..113da908b --- /dev/null +++ b/infra/frontend/app-config/env-config/outputs.tf @@ -0,0 +1,15 @@ +output "database_config" { + value = var.has_database ? 
{ + cluster_name = "${var.app_name}-${var.environment}" + access_policy_name = "${var.app_name}-${var.environment}-db-access" + app_username = "app" + migrator_username = "migrator" + schema_name = var.app_name + } : null +} + +output "incident_management_service_integration" { + value = var.has_incident_management_service ? { + integration_url_param_name = "/monitoring/${var.app_name}/${var.environment}/incident-management-integration-url" + } : null +} diff --git a/infra/frontend/app-config/env-config/variables.tf b/infra/frontend/app-config/env-config/variables.tf new file mode 100644 index 000000000..2b7fb1857 --- /dev/null +++ b/infra/frontend/app-config/env-config/variables.tf @@ -0,0 +1,16 @@ +variable "app_name" { + type = string +} + +variable "environment" { + description = "name of the application environment (e.g. dev, staging, prod)" + type = string +} + +variable "has_database" { + type = bool +} + +variable "has_incident_management_service" { + type = bool +} diff --git a/infra/frontend/app-config/main.tf b/infra/frontend/app-config/main.tf new file mode 100644 index 000000000..a68076375 --- /dev/null +++ b/infra/frontend/app-config/main.tf @@ -0,0 +1,59 @@ +locals { + app_name = "app" + environments = ["dev", "staging", "prod"] + project_name = module.project_config.project_name + image_repository_name = "${local.project_name}-${local.app_name}" + has_database = false + has_incident_management_service = false + environment_configs = { for environment in local.environments : environment => module.env_config[environment] } + + # Map from environment name to the account name for the AWS account that + # contains the resources for that environment. Resources that are shared + # across environments use the key "shared". 
+ # The list of configured AWS accounts can be found in /infra/account + # by looking for the backend config files of the form: + # ..s3.tfbackend + # + # Projects/applications that use the same AWS account for all environments + # will refer to the same account for all environments. For example, if the + # project has a single account named "myaccount", then infra/accounts will + # have one tfbackend file myaccount.XXXXX.s3.tfbackend, and the + # account_names_by_environment map will look like: + # + # account_names_by_environment = { + # shared = "myaccount" + # dev = "myaccount" + # staging = "myaccount" + # prod = "myaccount" + # } + # + # Projects/applications that have separate AWS accounts for each environment + # might have a map that looks more like this: + # + # account_names_by_environment = { + # shared = "dev" + # dev = "dev" + # staging = "staging" + # prod = "prod" + # } + account_names_by_environment = { + shared = "dev" + dev = "dev" + staging = "staging" + prod = "prod" + } +} + +module "project_config" { + source = "../../project-config" +} + +module "env_config" { + for_each = toset(local.environments) + + source = "./env-config" + app_name = local.app_name + environment = each.key + has_database = local.has_database + has_incident_management_service = local.has_incident_management_service +} diff --git a/infra/frontend/app-config/outputs.tf b/infra/frontend/app-config/outputs.tf new file mode 100644 index 000000000..36741d145 --- /dev/null +++ b/infra/frontend/app-config/outputs.tf @@ -0,0 +1,27 @@ +output "app_name" { + value = local.app_name +} + +output "account_names_by_environment" { + value = local.account_names_by_environment +} + +output "environments" { + value = local.environments +} + +output "has_database" { + value = local.has_database +} + +output "has_incident_management_service" { + value = local.has_incident_management_service +} + +output "image_repository_name" { + value = local.image_repository_name +} + +output 
"environment_configs" { + value = local.environment_configs +} diff --git a/infra/frontend/build-repository/.terraform.lock.hcl b/infra/frontend/build-repository/.terraform.lock.hcl new file mode 100644 index 000000000..43cfe95a2 --- /dev/null +++ b/infra/frontend/build-repository/.terraform.lock.hcl @@ -0,0 +1,22 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/hashicorp/aws" { + version = "4.20.1" + constraints = "~> 4.20.1" + hashes = [ + "h1:1JbjdrwUCLTNVVhlE+acEPnJFJ/FqBTHy5Ooll6nwjI=", + "zh:21d064d8fac08376c633e002e2f36e83eb7958535e251831feaf38f51c49dafd", + "zh:3a37912ff43d89ce8d559ec86265d7506801bccb380c7cfb896e8ff24e3fe79d", + "zh:795eb175c85279ec51dbe12e4d1afa0860c2c0b22e5d36a8e8869f60a93b7931", + "zh:8afb61a18b17f8ff249cb23e9d3b5d2530944001ef1d56c1d53f41b0890c7ab8", + "zh:911701040395e0e4da4b7252279e7cf1593cdd26f22835e1a9eddbdb9691a1a7", + "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", + "zh:a46d54a6a5407f569f8178e916af888b2b268f86448c64cad165dc89759c8399", + "zh:c5f71fd5e3519a24fd6af455ef1c26a559cfdde7f626b0afbd2a73bb79f036b1", + "zh:df3b69d6c9b0cdc7e3f90ee08412b22332c32e97ad8ce6ccad528f89f235a7d3", + "zh:e99d6a64c03549d60c2accf792fa04466cfb317f72e895c8f67eff8a02920887", + "zh:eea7a0df8bcb69925c9ce8e15ef403c8bbf16d46c43e8f5607b116531d1bce4a", + "zh:f6a26ce77f7db1d50ce311e32902fd001fb365e5e45e47a9a5cd59d734c89cb6", + ] +} diff --git a/infra/frontend/build-repository/example.tfvars b/infra/frontend/build-repository/example.tfvars new file mode 100644 index 000000000..2063158f7 --- /dev/null +++ b/infra/frontend/build-repository/example.tfvars @@ -0,0 +1,2 @@ +app_environment_account_ids = [] +region = "" diff --git a/infra/frontend/build-repository/main.tf b/infra/frontend/build-repository/main.tf new file mode 100644 index 000000000..b615f00ac --- /dev/null +++ b/infra/frontend/build-repository/main.tf @@ -0,0 +1,49 @@ +data 
"aws_iam_role" "github_actions" { + name = module.project_config.github_actions_role_name +} + +locals { + # Set project tags that will be used to tag all resources. + tags = merge(module.project_config.default_tags, { + application = module.app_config.app_name + application_role = "build-repository" + description = "Backend resources required for storing built release candidate artifacts to be used for deploying to environments." + }) +} + +terraform { + required_version = ">= 1.2.0, < 2.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = "~>4.20.1" + } + } + + backend "s3" { + encrypt = "true" + } +} + +provider "aws" { + region = var.region + default_tags { + tags = local.tags + } +} + +module "project_config" { + source = "../../project-config" +} + +module "app_config" { + source = "../app-config" +} + +module "container_image_repository" { + source = "../../modules/container-image-repository" + name = module.app_config.image_repository_name + push_access_role_arn = data.aws_iam_role.github_actions.arn + app_account_ids = var.app_environment_account_ids +} diff --git a/infra/frontend/build-repository/terraform.tfvars b/infra/frontend/build-repository/terraform.tfvars new file mode 100644 index 000000000..feb40425b --- /dev/null +++ b/infra/frontend/build-repository/terraform.tfvars @@ -0,0 +1 @@ +app_environment_account_ids = [] diff --git a/infra/frontend/build-repository/variables.tf b/infra/frontend/build-repository/variables.tf new file mode 100644 index 000000000..45ef60319 --- /dev/null +++ b/infra/frontend/build-repository/variables.tf @@ -0,0 +1,8 @@ +variable "app_environment_account_ids" { + type = list(string) + description = "List of AWS account ids for the application's environments. This is used to allow environments pull images from the container image repository." 
+} + +variable "region" { + type = string +} diff --git a/infra/frontend/database/example.tfvars b/infra/frontend/database/example.tfvars new file mode 100644 index 000000000..757d768ac --- /dev/null +++ b/infra/frontend/database/example.tfvars @@ -0,0 +1,2 @@ +environment_name = "" +region = "" diff --git a/infra/frontend/database/main.tf b/infra/frontend/database/main.tf new file mode 100644 index 000000000..fd43c3f3b --- /dev/null +++ b/infra/frontend/database/main.tf @@ -0,0 +1,72 @@ +# TODO(https://github.com/navapbc/template-infra/issues/152) use non-default VPC +data "aws_vpc" "default" { + default = true +} + +# TODO(https://github.com/navapbc/template-infra/issues/152) use private subnets +data "aws_subnets" "default" { + filter { + name = "default-for-az" + values = [true] + } +} + + +locals { + # The prefix key/value pair is used for Terraform Workspaces, which is useful for projects with multiple infrastructure developers. + # By default, Terraform creates a workspace named “default.” If a non-default workspace is not created this prefix will equal “default”, + # if you choose not to use workspaces set this value to "dev" + prefix = terraform.workspace == "default" ? 
"" : "${terraform.workspace}-" + + # Add environment specific tags + tags = merge(module.project_config.default_tags, { + environment = var.environment_name + description = "Database resources for the ${var.environment_name} environment" + }) + + environment_config = module.app_config.environment_configs[var.environment_name] + database_config = local.environment_config.database_config +} + +terraform { + required_version = ">=1.4.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = "~>4.67.0" + } + } + + backend "s3" { + encrypt = "true" + } +} + +provider "aws" { + region = var.region + default_tags { + tags = local.tags + } +} + +module "project_config" { + source = "../../project-config" +} + +module "app_config" { + source = "../app-config" +} + +module "database" { + source = "../../modules/database" + + name = "${local.prefix}${local.database_config.cluster_name}" + access_policy_name = "${local.prefix}${local.database_config.access_policy_name}" + app_username = "${local.prefix}${local.database_config.app_username}" + migrator_username = "${local.prefix}${local.database_config.migrator_username}" + schema_name = "${local.prefix}${local.database_config.schema_name}" + + vpc_id = data.aws_vpc.default.id + private_subnet_ids = data.aws_subnets.default.ids +} diff --git a/infra/frontend/database/outputs.tf b/infra/frontend/database/outputs.tf new file mode 100644 index 000000000..927b820a9 --- /dev/null +++ b/infra/frontend/database/outputs.tf @@ -0,0 +1,3 @@ +output "role_manager_function_name" { + value = module.database.role_manager_function_name +} diff --git a/infra/frontend/database/variables.tf b/infra/frontend/database/variables.tf new file mode 100644 index 000000000..160a2bce8 --- /dev/null +++ b/infra/frontend/database/variables.tf @@ -0,0 +1,8 @@ +variable "environment_name" { + type = string + description = "name of the application environment" +} + +variable "region" { + type = string +} diff --git 
a/infra/frontend/service/example.tfvars b/infra/frontend/service/example.tfvars new file mode 100644 index 000000000..d2d79d550 --- /dev/null +++ b/infra/frontend/service/example.tfvars @@ -0,0 +1,4 @@ +environment_name = "" +tfstate_bucket = "" +tfstate_key = "" +region = "" diff --git a/infra/frontend/service/image_tag.tf b/infra/frontend/service/image_tag.tf new file mode 100644 index 000000000..923487e75 --- /dev/null +++ b/infra/frontend/service/image_tag.tf @@ -0,0 +1,37 @@ +# Make the "image_tag" variable optional so that "terraform plan" +# and "terraform apply" work without any required variables. +# +# This works as follows: + +# 1. Accept an optional variable during a terraform plan/apply. (see "image_tag" variable in variables.tf) + +# 2. Read the output used from the last terraform state using "terraform_remote_state". +data "terraform_remote_state" "current_image_tag" { + # Don't do a lookup if image_tag is provided explicitly. + # This saves some time and also allows us to do a first deploy, + # where the tfstate file does not yet exist. + count = var.image_tag == null ? 1 : 0 + backend = "s3" + + config = { + bucket = var.tfstate_bucket + key = var.tfstate_key + region = var.region + } + + defaults = { + image_tag = null + } +} + +# 3. Prefer the given variable if provided, otherwise default to the value from last time. +locals { + image_tag = (var.image_tag == null + ? data.terraform_remote_state.current_image_tag[0].outputs.image_tag + : var.image_tag) +} + +# 4. Store the final value used as a terraform output for next time. 
+output "image_tag" { + value = local.image_tag +} diff --git a/infra/frontend/service/main.tf b/infra/frontend/service/main.tf new file mode 100644 index 000000000..4c69841bc --- /dev/null +++ b/infra/frontend/service/main.tf @@ -0,0 +1,111 @@ +# TODO(https://github.com/navapbc/template-infra/issues/152) use non-default VPC +data "aws_vpc" "default" { + default = true +} + +# TODO(https://github.com/navapbc/template-infra/issues/152) use private subnets +data "aws_subnets" "default" { + filter { + name = "default-for-az" + values = [true] + } +} + + +locals { + # The prefix key/value pair is used for Terraform Workspaces, which is useful for projects with multiple infrastructure developers. + # By default, Terraform creates a workspace named “default.” If a non-default workspace is not created this prefix will equal “default”, + # if you choose not to use workspaces set this value to "dev" + prefix = terraform.workspace == "default" ? "" : "${terraform.workspace}-" + + # Add environment specific tags + tags = merge(module.project_config.default_tags, { + environment = var.environment_name + description = "Application resources created in ${var.environment_name} environment" + }) + + service_name = "${local.prefix}${module.app_config.app_name}-${var.environment_name}" + + environment_config = module.app_config.environment_configs[var.environment_name] + database_config = local.environment_config.database_config + incident_management_service_integration_config = local.environment_config.incident_management_service_integration +} + +terraform { + required_version = ">= 1.2.0, < 2.0.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.56.0, < 5.0.0" + } + } + + backend "s3" { + encrypt = "true" + } +} + +provider "aws" { + region = var.region + default_tags { + tags = local.tags + } +} + +module "project_config" { + source = "../../project-config" +} + +module "app_config" { + source = "../app-config" +} + +data "aws_rds_cluster" 
"db_cluster" { + count = module.app_config.has_database ? 1 : 0 + cluster_identifier = local.database_config.cluster_name +} + +data "aws_iam_policy" "db_access_policy" { + count = module.app_config.has_database ? 1 : 0 + name = local.database_config.access_policy_name +} + +# Retrieve url for external incident management tool (e.g. Pagerduty, Splunk-On-Call) + +data "aws_ssm_parameter" "incident_management_service_integration_url" { + count = module.app_config.has_incident_management_service ? 1 : 0 + name = local.incident_management_service_integration_config.integration_url_param_name +} + +module "service" { + source = "../../modules/service" + service_name = local.service_name + image_repository_name = module.app_config.image_repository_name + image_tag = local.image_tag + vpc_id = data.aws_vpc.default.id + subnet_ids = data.aws_subnets.default.ids + + db_vars = module.app_config.has_database ? { + security_group_ids = data.aws_rds_cluster.db_cluster[0].vpc_security_group_ids + access_policy_arn = data.aws_iam_policy.db_access_policy[0].arn + connection_info = { + host = data.aws_rds_cluster.db_cluster[0].endpoint + port = data.aws_rds_cluster.db_cluster[0].port + user = local.database_config.app_username + db_name = data.aws_rds_cluster.db_cluster[0].database_name + schema_name = local.database_config.schema_name + } + } : null +} + +module "monitoring" { + source = "../../modules/monitoring" + #Email subscription list: + #email_alerts_subscription_list = ["email1@email.com", "email2@email.com"] + + # Module takes service and ALB names to link all alerts with corresponding targets + service_name = local.service_name + load_balancer_arn_suffix = module.service.load_balancer_arn_suffix + incident_management_service_integration_url = module.app_config.has_incident_management_service ? 
data.aws_ssm_parameter.incident_management_service_integration_url[0].value : null +} diff --git a/infra/frontend/service/outputs.tf b/infra/frontend/service/outputs.tf new file mode 100644 index 000000000..dfabf49e6 --- /dev/null +++ b/infra/frontend/service/outputs.tf @@ -0,0 +1,12 @@ +output "service_endpoint" { + description = "The public endpoint for the service." + value = module.service.public_endpoint +} + +output "service_cluster_name" { + value = module.service.cluster_name +} + +output "service_name" { + value = local.service_name +} diff --git a/infra/frontend/service/variables.tf b/infra/frontend/service/variables.tf new file mode 100644 index 000000000..0c9be917e --- /dev/null +++ b/infra/frontend/service/variables.tf @@ -0,0 +1,22 @@ +variable "environment_name" { + type = string + description = "name of the application environment" +} + +variable "image_tag" { + type = string + description = "image tag to deploy to the environment" + default = null +} + +variable "tfstate_bucket" { + type = string +} + +variable "tfstate_key" { + type = string +} + +variable "region" { + type = string +} diff --git a/infra/modules/auth-github-actions/README.md b/infra/modules/auth-github-actions/README.md new file mode 100644 index 000000000..b516b0532 --- /dev/null +++ b/infra/modules/auth-github-actions/README.md @@ -0,0 +1,16 @@ +# AWS Federation for GitHub Actions + +This module sets up a way for GitHub Actions to access AWS resources using short-lived credentials without requiring long-lived access keys and without requiring separate AWS identities that need to be managed. It does that by doing the following: + +1. Set up GitHub as an OpenID Connect Provider in the AWS account +2. Create an IAM role that GitHub actions will assume +3. Attach an IAM policy to the GitHub actions role that provides the necessary access to AWS account resources. 
By default this module will provide the [AWS managed `AdministratorAccess` policy](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_job-functions.html) + +## Related Implementations + +Similar functionality is also implemented in the [oidc-github module in the Terraform Registry](https://registry.terraform.io/modules/unfunco/oidc-github/aws/latest) (see also [Nava's fork of that repo](https://github.com/navapbc/terraform-aws-oidc-github)), but since IAM is sensitive we chose to implement it ourselves to keep the module simple, easy to understand, and in a place that's within our scope of control. + +## Reference + +* [AWS - Creating OpenID Connect (OIDC) Providers](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc.html) +* [GitHub - Security Hardening with OpenID Connect](https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect) diff --git a/infra/modules/auth-github-actions/main.tf b/infra/modules/auth-github-actions/main.tf new file mode 100644 index 000000000..0ef271ea5 --- /dev/null +++ b/infra/modules/auth-github-actions/main.tf @@ -0,0 +1,58 @@ +# Set up GitHub's OpenID Connect provider in AWS account +resource "aws_iam_openid_connect_provider" "github" { + url = "https://token.actions.githubusercontent.com" + client_id_list = ["sts.amazonaws.com"] + + # AWS already trusts the GitHub OIDC identity provider's library of root certificate authorities + # so no thumbprints from intermediate certificates are needed + # At the time of writing (July 12, 2023), the thumbprint_list parameter + # is required to be a non-empty array, so we are passing an array with a dummy string that passes validation + # TODO(https://github.com/navapbc/template-infra/issues/350) Remove this parameter once thumbprint_list is no + # longer required (see https://github.com/hashicorp/terraform-provider-aws/issues/32480) + thumbprint_list = 
["0000000000000000000000000000000000000000"] +} + +# Create IAM role for GitHub Actions +resource "aws_iam_role" "github_actions" { + name = var.github_actions_role_name + description = "Service role required for Github Action to deploy application resources into the account." + assume_role_policy = data.aws_iam_policy_document.github_assume_role.json +} + +# Attach access policies to GitHub Actions role +resource "aws_iam_role_policy_attachment" "custom" { + count = length(var.iam_role_policy_arns) + + # TODO(https://github.com/navapbc/template-infra/issues/194) Set permissions for GitHub Actions role + # checkov:skip=CKV_AWS_274:Replace default policy of AdministratorAccess with finer grained permissions + + role = aws_iam_role.github_actions.name + policy_arn = var.iam_role_policy_arns[count.index] +} + +# Set up assume role policy for GitHub Actions to allow GitHub actions +# running from the specified repository and branches/git refs to assume +# the role +data "aws_iam_policy_document" "github_assume_role" { + statement { + effect = "Allow" + actions = ["sts:AssumeRoleWithWebIdentity"] + + principals { + type = "Federated" + identifiers = [aws_iam_openid_connect_provider.github.arn] + } + + condition { + test = "StringEquals" + variable = "token.actions.githubusercontent.com:aud" + values = ["sts.amazonaws.com"] + } + + condition { + test = "StringLike" + variable = "token.actions.githubusercontent.com:sub" + values = ["repo:${var.github_repository}:*"] + } + } +} diff --git a/infra/modules/auth-github-actions/variables.tf b/infra/modules/auth-github-actions/variables.tf new file mode 100644 index 000000000..ce6546e9d --- /dev/null +++ b/infra/modules/auth-github-actions/variables.tf @@ -0,0 +1,15 @@ +variable "github_actions_role_name" { + type = string + description = "The name to use for the IAM role GitHub actions will assume." 
+} + +variable "github_repository" { + type = string + description = "The GitHub repository in 'org/repo' format to provide access to AWS account resources. Example: navapbc/template-infra" +} + +variable "iam_role_policy_arns" { + type = list(string) + description = "List of IAM policy ARNs to attach to the GitHub Actions IAM role. Defaults to Developer power user access role." + default = ["arn:aws:iam::aws:policy/AdministratorAccess"] +} diff --git a/infra/modules/container-image-repository/main.tf b/infra/modules/container-image-repository/main.tf new file mode 100644 index 000000000..cf501c82e --- /dev/null +++ b/infra/modules/container-image-repository/main.tf @@ -0,0 +1,87 @@ +data "aws_region" "current" {} + +locals { + image_registry = "${aws_ecr_repository.app.registry_id}.dkr.ecr.${data.aws_region.current.name}.amazonaws.com" +} + +resource "aws_ecr_repository" "app" { + name = var.name + image_tag_mutability = "IMMUTABLE" + + image_scanning_configuration { + scan_on_push = true + } + + encryption_configuration { + encryption_type = "KMS" + kms_key = aws_kms_key.ecr_kms.arn + } +} + +resource "aws_ecr_repository_policy" "image_access" { + repository = aws_ecr_repository.app.name + policy = data.aws_iam_policy_document.image_access.json +} + +resource "aws_ecr_lifecycle_policy" "image_retention" { + repository = aws_ecr_repository.app.name + + policy = < 0 ? 
[true] : [] + content { + sid = "PullAccess" + effect = "Allow" + principals { + type = "AWS" + identifiers = [for account_id in var.app_account_ids : "arn:aws:iam::${account_id}:root"] + } + actions = [ + "ecr:BatchGetImage", + "ecr:GetDownloadUrlForLayer", + ] + } + } +} + +resource "aws_kms_key" "ecr_kms" { + enable_key_rotation = true + description = "KMS key for ECR repository ${var.name}" +} diff --git a/infra/modules/container-image-repository/outputs.tf b/infra/modules/container-image-repository/outputs.tf new file mode 100644 index 000000000..23b18ea59 --- /dev/null +++ b/infra/modules/container-image-repository/outputs.tf @@ -0,0 +1,11 @@ +output "image_registry" { + value = local.image_registry +} + +output "image_repository_name" { + value = aws_ecr_repository.app.name +} + +output "image_repository_url" { + value = aws_ecr_repository.app.repository_url +} diff --git a/infra/modules/container-image-repository/variables.tf b/infra/modules/container-image-repository/variables.tf new file mode 100644 index 000000000..1882ca2ae --- /dev/null +++ b/infra/modules/container-image-repository/variables.tf @@ -0,0 +1,15 @@ +variable "name" { + type = string + description = "The name of image repository." +} + +variable "push_access_role_arn" { + type = string + description = "The ARN of the role to grant push access to the repository. Use this to grant access to the role that builds and publishes release artifacts." +} + +variable "app_account_ids" { + type = list(string) + description = "A list of account ids to grant pull access to the repository. Use this to grant access to the application environment accounts in a multi-account setup." 
+ default = [] +} diff --git a/infra/modules/database/.gitignore b/infra/modules/database/.gitignore new file mode 100644 index 000000000..ac3e6dbae --- /dev/null +++ b/infra/modules/database/.gitignore @@ -0,0 +1 @@ +/role_manager/vendor diff --git a/infra/modules/database/main.tf b/infra/modules/database/main.tf new file mode 100644 index 000000000..28de6a8e2 --- /dev/null +++ b/infra/modules/database/main.tf @@ -0,0 +1,434 @@ +data "aws_caller_identity" "current" {} +data "aws_region" "current" {} + +locals { + master_username = "postgres" + primary_instance_name = "${var.name}-primary" + role_manager_name = "${var.name}-role-manager" + role_manager_package = "${path.root}/role_manager.zip" + + # The ARN that represents the users accessing the database are of the format: "arn:aws:rds-db:::dbuser:/"" + # See https://aws.amazon.com/blogs/database/using-iam-authentication-to-connect-with-pgadmin-amazon-aurora-postgresql-or-amazon-rds-for-postgresql/ + db_user_arn_prefix = "arn:aws:rds-db:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:dbuser:${aws_rds_cluster.db.cluster_resource_id}" +} + +# Database Configuration +# ---------------------- + +resource "aws_rds_cluster" "db" { + # checkov:skip=CKV2_AWS_27:have concerns about sensitive data in logs; want better way to get this information + # checkov:skip=CKV2_AWS_8:TODO add backup selection plan using tags + + # cluster identifier is a unique identifier within the AWS account + # https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.CreateInstance.html + cluster_identifier = var.name + + engine = "aurora-postgresql" + engine_mode = "provisioned" + database_name = var.database_name + port = var.port + master_username = local.master_username + master_password = aws_ssm_parameter.random_db_password.value + storage_encrypted = true + kms_key_id = aws_kms_key.db.arn + + # checkov:skip=CKV_AWS_128:Auth decision needs to be ironed out + # checkov:skip=CKV_AWS_162:Auth decision 
needs to be ironed out + iam_database_authentication_enabled = true + deletion_protection = true + copy_tags_to_snapshot = true + # final_snapshot_identifier = "${var.name}-final" + skip_final_snapshot = true + + serverlessv2_scaling_configuration { + max_capacity = 1.0 + min_capacity = 0.5 + } + + vpc_security_group_ids = [aws_security_group.db.id] + + enabled_cloudwatch_logs_exports = ["postgresql"] +} + +resource "aws_rds_cluster_instance" "primary" { + identifier = local.primary_instance_name + cluster_identifier = aws_rds_cluster.db.id + instance_class = "db.serverless" + engine = aws_rds_cluster.db.engine + engine_version = aws_rds_cluster.db.engine_version + auto_minor_version_upgrade = true + monitoring_role_arn = aws_iam_role.rds_enhanced_monitoring.arn + monitoring_interval = 30 +} + +resource "random_password" "random_db_password" { + length = 48 + # Remove '@' sign from allowed characters since only printable ASCII characters besides '/', '@', '"', ' ' may be used. + override_special = "!#$%&*()-_=+[]{}<>:?" 
+} + +resource "aws_ssm_parameter" "random_db_password" { + name = "/db/${var.name}/master-password" + type = "SecureString" + value = random_password.random_db_password.result +} + +resource "aws_kms_key" "db" { + description = "Key for RDS cluster ${var.name}" + enable_key_rotation = true +} + +# Network Configuration +# --------------------- + +resource "aws_security_group" "db" { + name_prefix = "${var.name}-db" + description = "Database layer security group" + vpc_id = var.vpc_id +} + +resource "aws_security_group" "role_manager" { + name_prefix = "${var.name}-role-manager" + description = "Database role manager security group" + vpc_id = var.vpc_id +} + +resource "aws_vpc_security_group_egress_rule" "role_manager_egress_to_db" { + security_group_id = aws_security_group.role_manager.id + description = "Allow role manager to access database" + + from_port = 5432 + to_port = 5432 + ip_protocol = "tcp" + referenced_security_group_id = aws_security_group.db.id +} + +resource "aws_vpc_security_group_ingress_rule" "db_ingress_from_role_manager" { + security_group_id = aws_security_group.db.id + description = "Allow inbound requests to database from role manager" + + from_port = 5432 + to_port = 5432 + ip_protocol = "tcp" + referenced_security_group_id = aws_security_group.role_manager.id +} + +# Authentication +# -------------- + +resource "aws_iam_policy" "db_access" { + name = var.access_policy_name + policy = data.aws_iam_policy_document.db_access.json +} + +data "aws_iam_policy_document" "db_access" { + # Policy to allow connection to RDS via IAM database authentication + # which is more secure than traditional username/password authentication + # https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.IAMPolicy.html + statement { + actions = [ + "rds-db:connect" + ] + + resources = [ + "${local.db_user_arn_prefix}/${var.app_username}", + "${local.db_user_arn_prefix}/${var.migrator_username}", + ] + } +} + +# Database Backups +# 
---------------- + +# Backup plan that defines when and how to backup and which backup vault to store backups in +# See https://docs.aws.amazon.com/aws-backup/latest/devguide/about-backup-plans.html +resource "aws_backup_plan" "backup_plan" { + name = "${var.name}-db-backup-plan" + + rule { + rule_name = "${var.name}-db-backup-rule" + target_vault_name = aws_backup_vault.backup_vault.name + schedule = "cron(0 7 ? * SUN *)" # Run Sundays at 12pm (EST) + } +} + +# Backup vault that stores and organizes backups +# See https://docs.aws.amazon.com/aws-backup/latest/devguide/vaults.html +resource "aws_backup_vault" "backup_vault" { + name = "${var.name}-db-backup-vault" + kms_key_arn = data.aws_kms_key.backup_vault_key.arn +} + +# KMS Key for the vault +# This key was created by AWS by default alongside the vault +data "aws_kms_key" "backup_vault_key" { + key_id = "alias/aws/backup" +} + +# Backup selection defines which resources to backup +# See https://docs.aws.amazon.com/aws-backup/latest/devguide/assigning-resources.html +# and https://docs.aws.amazon.com/aws-backup/latest/devguide/API_BackupSelection.html +resource "aws_backup_selection" "db_backup" { + name = "${var.name}-db-backup" + plan_id = aws_backup_plan.backup_plan.id + iam_role_arn = aws_iam_role.db_backup_role.arn + + resources = [ + aws_rds_cluster.db.arn + ] +} + +# Role that AWS Backup uses to authenticate when backing up the target resource +resource "aws_iam_role" "db_backup_role" { + name_prefix = "${var.name}-db-backup-role-" + assume_role_policy = data.aws_iam_policy_document.db_backup_policy.json +} + +data "aws_iam_policy_document" "db_backup_policy" { + statement { + actions = [ + "sts:AssumeRole", + ] + + effect = "Allow" + + principals { + type = "Service" + identifiers = ["backup.amazonaws.com"] + } + } +} + +resource "aws_iam_role_policy_attachment" "db_backup_role_policy_attachment" { + role = aws_iam_role.db_backup_role.name + policy_arn = 
"arn:aws:iam::aws:policy/service-role/AWSBackupServiceRolePolicyForBackup" +} + +#----------------------------------# +# IAM role for enhanced monitoring # +#----------------------------------# + +resource "aws_iam_role" "rds_enhanced_monitoring" { + name_prefix = "${var.name}-enhanced-monitoring-" + assume_role_policy = data.aws_iam_policy_document.rds_enhanced_monitoring.json +} + +resource "aws_iam_role_policy_attachment" "rds_enhanced_monitoring" { + role = aws_iam_role.rds_enhanced_monitoring.name + policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonRDSEnhancedMonitoringRole" +} + +data "aws_iam_policy_document" "rds_enhanced_monitoring" { + statement { + actions = [ + "sts:AssumeRole", + ] + + effect = "Allow" + + principals { + type = "Service" + identifiers = ["monitoring.rds.amazonaws.com"] + } + } +} + +# Query Logging +# ------------- + +resource "aws_rds_cluster_parameter_group" "rds_query_logging" { + name = var.name + family = "aurora-postgresql13" + description = "Default cluster parameter group" + + parameter { + name = "log_statement" + # Logs data definition statements (e.g. 
DROP, ALTER, CREATE) + value = "ddl" + } + + parameter { + name = "log_min_duration_statement" + # Logs all statements that run 100ms or longer + value = "100" + } +} + +# Role Manager Lambda Function +# ---------------------------- +# +# Resources for the lambda function that is used for managing database roles +# This includes creating and granting permissions to roles +# as well as viewing existing roles + +resource "aws_lambda_function" "role_manager" { + function_name = local.role_manager_name + + filename = local.role_manager_package + source_code_hash = data.archive_file.role_manager.output_base64sha256 + runtime = "python3.9" + handler = "role_manager.lambda_handler" + role = aws_iam_role.role_manager.arn + kms_key_arn = aws_kms_key.role_manager.arn + + # Only allow 1 concurrent execution at a time + reserved_concurrent_executions = 1 + + vpc_config { + subnet_ids = var.private_subnet_ids + security_group_ids = [aws_security_group.role_manager.id] + } + + environment { + variables = { + DB_HOST = aws_rds_cluster.db.endpoint + DB_PORT = aws_rds_cluster.db.port + DB_USER = local.master_username + DB_NAME = aws_rds_cluster.db.database_name + DB_PASSWORD_PARAM_NAME = aws_ssm_parameter.random_db_password.name + DB_SCHEMA = var.schema_name + APP_USER = var.app_username + MIGRATOR_USER = var.migrator_username + PYTHONPATH = "vendor" + } + } + + # Ensure AWS Lambda functions with tracing are enabled + # https://docs.bridgecrew.io/docs/bc_aws_serverless_4 + tracing_config { + mode = "Active" + } + + # checkov:skip=CKV_AWS_272:TODO(https://github.com/navapbc/template-infra/issues/283) + + # checkov:skip=CKV_AWS_116:Dead letter queue (DLQ) configuration is only relevant for asynchronous invocations +} + +# Installs python packages needed by the role manager lambda function before +# creating the zip archive. 
Reinstalls whenever requirements.txt changes +resource "terraform_data" "role_manager_python_vendor_packages" { + triggers_replace = file("${path.module}/role_manager/requirements.txt") + + provisioner "local-exec" { + command = "pip3 install -r ${path.module}/role_manager/requirements.txt -t ${path.module}/role_manager/vendor" + } +} + +data "archive_file" "role_manager" { + type = "zip" + source_dir = "${path.module}/role_manager" + output_path = local.role_manager_package + depends_on = [terraform_data.role_manager_python_vendor_packages] +} + +resource "aws_iam_role" "role_manager" { + name = "${var.name}-manager" + assume_role_policy = data.aws_iam_policy_document.role_manager_assume_role.json + managed_policy_arns = [data.aws_iam_policy.lambda_vpc_access.arn] +} + +resource "aws_iam_role_policy" "ssm_access" { + name = "${var.name}-role-manager-ssm-access" + role = aws_iam_role.role_manager.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = ["ssm:GetParameter*"] + Resource = "${aws_ssm_parameter.random_db_password.arn}" + }, + { + Effect = "Allow" + Action = ["kms:Decrypt"] + Resource = [data.aws_kms_key.default_ssm_key.arn] + } + ] + }) +} + +data "aws_kms_key" "default_ssm_key" { + key_id = "alias/aws/ssm" +} + +data "aws_iam_policy_document" "role_manager_assume_role" { + statement { + effect = "Allow" + + principals { + type = "Service" + identifiers = ["lambda.amazonaws.com"] + } + + actions = ["sts:AssumeRole"] + } +} + +# AWS managed policy required by Lambda functions in order to access VPC resources +# see https://docs.aws.amazon.com/lambda/latest/dg/configuration-vpc.html +data "aws_iam_policy" "lambda_vpc_access" { + name = "AWSLambdaVPCAccessExecutionRole" +} + +# KMS key used to encrypt role manager's environment variables +resource "aws_kms_key" "role_manager" { + description = "Key for Lambda function ${local.role_manager_name}" + enable_key_rotation = true +} + +# VPC Endpoints for accessing 
AWS Services +# ---------------------------------------- +# +# Since the role manager Lambda function is in the VPC (which is needed to be +# able to access the database) we need to allow the Lambda function to access +# AWS Systems Manager Parameter Store (to fetch the database password) and +# KMS (to decrypt SecureString parameters from Parameter Store). We can do +# this by either allowing internet access to the Lambda, or by using a VPC +# endpoint. The latter is more secure. +# See https://repost.aws/knowledge-center/lambda-vpc-parameter-store +# See https://docs.aws.amazon.com/vpc/latest/privatelink/create-interface-endpoint.html#create-interface-endpoint + +resource "aws_vpc_endpoint" "ssm" { + vpc_id = var.vpc_id + service_name = "com.amazonaws.${data.aws_region.current.name}.ssm" + vpc_endpoint_type = "Interface" + security_group_ids = [aws_security_group.vpc_endpoints.id] + subnet_ids = var.private_subnet_ids + private_dns_enabled = true +} + +resource "aws_vpc_endpoint" "kms" { + vpc_id = var.vpc_id + service_name = "com.amazonaws.${data.aws_region.current.name}.kms" + vpc_endpoint_type = "Interface" + security_group_ids = [aws_security_group.vpc_endpoints.id] + subnet_ids = var.private_subnet_ids + private_dns_enabled = true +} + +resource "aws_security_group" "vpc_endpoints" { + name_prefix = "${var.name}-vpc-endpoints" + description = "VPC endpoints to access SSM and KMS" + vpc_id = var.vpc_id +} + +resource "aws_vpc_security_group_egress_rule" "role_manager_egress_to_vpc_endpoints" { + security_group_id = aws_security_group.role_manager.id + description = "Allow outbound requests from role manager to VPC endpoints" + + from_port = 443 + to_port = 443 + ip_protocol = "tcp" + referenced_security_group_id = aws_security_group.vpc_endpoints.id +} + +resource "aws_vpc_security_group_ingress_rule" "vpc_endpoints_ingress_from_role_manager" { + security_group_id = aws_security_group.vpc_endpoints.id + description = "Allow inbound requests to VPC endpoints 
from role manager" + + from_port = 443 + to_port = 443 + ip_protocol = "tcp" + referenced_security_group_id = aws_security_group.role_manager.id +} diff --git a/infra/modules/database/outputs.tf b/infra/modules/database/outputs.tf new file mode 100644 index 000000000..1e20d746c --- /dev/null +++ b/infra/modules/database/outputs.tf @@ -0,0 +1,3 @@ +output "role_manager_function_name" { + value = aws_lambda_function.role_manager.function_name +} diff --git a/infra/modules/database/role_manager/requirements.txt b/infra/modules/database/role_manager/requirements.txt new file mode 100644 index 000000000..94345bbec --- /dev/null +++ b/infra/modules/database/role_manager/requirements.txt @@ -0,0 +1 @@ +pg8000 diff --git a/infra/modules/database/role_manager/role_manager.py b/infra/modules/database/role_manager/role_manager.py new file mode 100644 index 000000000..9914238e8 --- /dev/null +++ b/infra/modules/database/role_manager/role_manager.py @@ -0,0 +1,147 @@ +import boto3 +import itertools +from operator import itemgetter +import os +import logging +from pg8000.native import Connection, identifier + +logger = logging.getLogger() +logger.setLevel(logging.INFO) + +def lambda_handler(event, context): + conn = connect() + + logger.info("Current database configuration") + + prev_roles = get_roles(conn) + print_roles(prev_roles) + + prev_schema_privileges = get_schema_privileges(conn) + print_schema_privileges(prev_schema_privileges) + + logger.info("Configuring database") + configure_database(conn) + + logger.info("New database configuration") + + new_roles = get_roles(conn) + print_roles(new_roles) + + new_schema_privileges = get_schema_privileges(conn) + print_schema_privileges(new_schema_privileges) + + return { + "roles": new_roles, + "roles_with_groups": get_roles_with_groups(conn), + "schema_privileges": { + schema_name: schema_acl + for schema_name, schema_acl + in new_schema_privileges + }, + } + +def connect() -> Connection: + user = os.environ["DB_USER"] + host = 
os.environ["DB_HOST"] + port = os.environ["DB_PORT"] + database = os.environ["DB_NAME"] + password = get_password() + + logger.info("Connecting to database: user=%s host=%s port=%s database=%s", user, host, port, database) + return Connection(user=user, host=host, port=port, database=database, password=password) + + +def get_password() -> str: + ssm = boto3.client("ssm") + param_name = os.environ["DB_PASSWORD_PARAM_NAME"] + logger.info("Fetching password from parameter store") + result = ssm.get_parameter( + Name=param_name, + WithDecryption=True, + ) + return result["Parameter"]["Value"] + + +def get_roles(conn: Connection) -> list[str]: + return [row[0] for row in conn.run("SELECT rolname " + "FROM pg_roles " + "WHERE rolname NOT LIKE 'pg_%' " + "AND rolname NOT LIKE 'rds%'")] + + +def get_roles_with_groups(conn: Connection) -> dict[str, str]: + roles_groups = conn.run("SELECT u.rolname AS user, g.rolname AS group \ + FROM pg_roles u \ + INNER JOIN pg_auth_members a ON u.oid = a.member \ + INNER JOIN pg_roles g ON g.oid = a.roleid \ + ORDER BY user ASC") + + result = {} + for user, groups in itertools.groupby(roles_groups, itemgetter(0)): + result[user] = ",".join(map(itemgetter(1), groups)) + return result + + +# Get schema access control lists. The format of the ACLs is abbreviated. 
To interpret +# what the ACLs mean, see the Postgres documentation on Privileges: +# https://www.postgresql.org/docs/current/ddl-priv.html +def get_schema_privileges(conn: Connection) -> list[tuple[str, str]]: + return [(row[0], row[1]) for row in conn.run("SELECT nspname, nspacl \ + FROM pg_namespace \ + WHERE nspname NOT LIKE 'pg_%' \ + AND nspname <> 'information_schema'")] + + +def configure_database(conn: Connection) -> None: + logger.info("Configuring database") + app_username = os.environ.get("APP_USER") + migrator_username = os.environ.get("MIGRATOR_USER") + schema_name = os.environ.get("DB_SCHEMA") + + configure_roles(conn, [migrator_username, app_username]) + configure_schema(conn, schema_name, migrator_username, app_username) + + +def configure_roles(conn: Connection, roles: list[str]) -> None: + logger.info("Configuring roles") + for role in roles: + configure_role(conn, role) + + +def configure_role(conn: Connection, username: str) -> None: + logger.info("Configuring role: username=%s", username) + role = "rds_iam" + conn.run( + f""" + DO $$ + BEGIN + CREATE USER {identifier(username)}; + EXCEPTION WHEN DUPLICATE_OBJECT THEN + RAISE NOTICE 'user already exists'; + END + $$; + """ + ) + conn.run(f"GRANT {identifier(role)} TO {identifier(username)}") + + +def configure_schema(conn: Connection, schema_name: str, migrator_username: str, app_username: str) -> None: + logger.info("Configuring schema") + logger.info("Creating schema: schema_name=%s", schema_name) + conn.run(f"CREATE SCHEMA IF NOT EXISTS {identifier(schema_name)}") + logger.info("Changing schema owner: schema_name=%s owner=%s", schema_name, migrator_username) + conn.run(f"ALTER SCHEMA {identifier(schema_name)} OWNER TO {identifier(migrator_username)}") + logger.info("Granting schema usage privileges: schema_name=%s role=%s", schema_name, app_username) + conn.run(f"GRANT USAGE ON SCHEMA {identifier(schema_name)} TO {identifier(app_username)}") + + +def print_roles(roles: list[str]) -> None: + 
logger.info("Roles") + for role in roles: + logger.info(f"Role info: name={role}") + + +def print_schema_privileges(schema_privileges: list[tuple[str, str]]) -> None: + logger.info("Schema privileges") + for schema_name, schema_acl in schema_privileges: + logger.info(f"Schema info: name={schema_name} acl={schema_acl}") diff --git a/infra/modules/database/variables.tf b/infra/modules/database/variables.tf new file mode 100644 index 000000000..513fc3668 --- /dev/null +++ b/infra/modules/database/variables.tf @@ -0,0 +1,52 @@ +variable "name" { + description = "name of the database cluster. Note that this is not the name of the Postgres database itself, but the name of the cluster in RDS. The name of the Postgres database is set in module and defaults to 'app'." + type = string + validation { + condition = can(regex("^[-_\\da-z]+$", var.name)) + error_message = "use only lower case letters, numbers, dashes, and underscores" + } +} + +variable "access_policy_name" { + description = "name of the IAM policy to create that will be provide the ability to connect to the database as a user that will have read/write access." + type = string +} + +variable "app_username" { + description = "name of the database user to create that will be for the application." + type = string +} + +variable "migrator_username" { + description = "name of the database user to create that will be for the role that will run database migrations." + type = string +} + +variable "schema_name" { + description = "name of the Postgres schema to create that will be the schema the application will use (rather than using the public schema)" + type = string +} + +variable "port" { + description = "value of the port on which the database accepts connections. Defaults to 5432." + default = 5432 +} + +variable "database_name" { + description = "the name of the Postgres database. Defaults to 'app'." 
+ default = "app" + validation { + condition = can(regex("^[_\\da-z]+$", var.database_name)) + error_message = "use only lower case letters, numbers, and underscores (no dashes)" + } +} + +variable "vpc_id" { + type = string + description = "Uniquely identifies the VPC." +} + +variable "private_subnet_ids" { + type = list(any) + description = "list of private subnet IDs to put the role provisioner and role checker lambda functions in" +} diff --git a/infra/modules/monitoring/main.tf b/infra/modules/monitoring/main.tf new file mode 100644 index 000000000..65fa3e99a --- /dev/null +++ b/infra/modules/monitoring/main.tf @@ -0,0 +1,85 @@ + +# Create SNS topic for all email and external incident management tools notifications + +resource "aws_sns_topic" "this" { + name = "${var.service_name}-monitoring" + + # checkov:skip=CKV_AWS_26:SNS encryption for alerts is unnecessary +} + +# Create CloudWatch alarms for the service + +resource "aws_cloudwatch_metric_alarm" "high_app_http_5xx_count" { + alarm_name = "${var.service_name}-high-app-5xx-count" + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 5 + metric_name = "HTTPCode_Target_5XX_Count" + namespace = "AWS/ApplicationELB" + period = 60 + statistic = "Sum" + threshold = 1 + alarm_description = "High HTTP service 5XX error count" + treat_missing_data = "notBreaching" + alarm_actions = [aws_sns_topic.this.arn] + ok_actions = [aws_sns_topic.this.arn] + + dimensions = { + LoadBalancer = var.load_balancer_arn_suffix + } +} + +resource "aws_cloudwatch_metric_alarm" "high_load_balancer_http_5xx_count" { + alarm_name = "${var.service_name}-high-load-balancer-5xx-count" + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 5 + metric_name = "HTTPCode_ELB_5XX_Count" + namespace = "AWS/ApplicationELB" + period = 60 + statistic = "Sum" + threshold = 1 + alarm_description = "High HTTP ELB 5XX error count" + treat_missing_data = "notBreaching" + alarm_actions = 
[aws_sns_topic.this.arn] + ok_actions = [aws_sns_topic.this.arn] + + dimensions = { + LoadBalancer = var.load_balancer_arn_suffix + } +} + +resource "aws_cloudwatch_metric_alarm" "high_app_response_time" { + alarm_name = "${var.service_name}-high-app-response-time" + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 5 + metric_name = "TargetResponseTime" + namespace = "AWS/ApplicationELB" + period = 60 + statistic = "Average" + threshold = 0.2 + alarm_description = "High target latency alert" + alarm_actions = [aws_sns_topic.this.arn] + ok_actions = [aws_sns_topic.this.arn] + + dimensions = { + LoadBalancer = var.load_balancer_arn_suffix + } +} + +#email integration + +resource "aws_sns_topic_subscription" "email_integration" { + for_each = var.email_alerts_subscription_list + topic_arn = aws_sns_topic.this.arn + protocol = "email" + endpoint = each.value +} + +#External incident management service integration + +resource "aws_sns_topic_subscription" "incident_management_service_integration" { + count = var.incident_management_service_integration_url != null ? 1 : 0 + endpoint = var.incident_management_service_integration_url + endpoint_auto_confirms = true + protocol = "https" + topic_arn = aws_sns_topic.this.arn +} diff --git a/infra/modules/monitoring/outputs.tf b/infra/modules/monitoring/outputs.tf new file mode 100644 index 000000000..07d3823b8 --- /dev/null +++ b/infra/modules/monitoring/outputs.tf @@ -0,0 +1,3 @@ +output "sns_notification_channel" { + value = aws_sns_topic.this.arn +} diff --git a/infra/modules/monitoring/variables.tf b/infra/modules/monitoring/variables.tf new file mode 100644 index 000000000..9b3d42285 --- /dev/null +++ b/infra/modules/monitoring/variables.tf @@ -0,0 +1,22 @@ +variable "service_name" { + type = string + description = "Name of the service running within ECS cluster" +} + +variable "load_balancer_arn_suffix" { + type = string + description = "The ARN suffix for use with CloudWatch Metrics." 
+}
+
+variable "email_alerts_subscription_list" {
+  type        = set(string)
+  default     = []
+  description = "List of emails to subscribe to alerts"
+
+}
+
+variable "incident_management_service_integration_url" {
+  type        = string
+  default     = null
+  description = "URL for integrating with external incident management services"
+}
diff --git a/infra/modules/service/access_logs.tf b/infra/modules/service/access_logs.tf
new file mode 100644
index 000000000..5b330e92e
--- /dev/null
+++ b/infra/modules/service/access_logs.tf
@@ -0,0 +1,110 @@
+# This file defines resources for load balancer access logs
+# including the S3 bucket where access logs are stored and
+# the IAM policy granting the AWS Elastic Load Balancer service
+# to write to the bucket
+locals {
+  # This is needed to grant permissions to the ELB service for sending access logs to S3.
+  # The list was obtained from https://docs.aws.amazon.com/elasticloadbalancing/latest/application/enable-access-logging.html
+  elb_account_map = {
+    "us-east-1" : "127311923021",
+    "us-east-2" : "033677994240",
+    "us-west-1" : "027434742980",
+    "us-west-2" : "797873946194"
+  }
+
+  # set log_file_transition = {} to disable lifecycle transitions.
Additional lifecycle transitions can be added via a key value pair of `$STORAGE_CLASS=$DAYS`
+  log_file_transition = {
+    STANDARD_IA = 30
+    GLACIER     = 60
+  }
+}
+
+resource "aws_s3_bucket" "access_logs" {
+  bucket_prefix = "${var.service_name}-access-logs"
+  force_destroy = false
+  # checkov:skip=CKV2_AWS_62:Event notification not necessary for this bucket especially due to likely use of lifecycle rules
+  # checkov:skip=CKV_AWS_18:Access logging was not considered necessary for this bucket
+  # checkov:skip=CKV_AWS_144:Not considered critical to the point of cross region replication
+  # checkov:skip=CKV_AWS_300:Known issue where Checkov gets confused by multiple rules
+}
+
+resource "aws_s3_bucket_public_access_block" "access_logs" {
+  bucket = aws_s3_bucket.access_logs.id
+
+  block_public_acls       = true
+  block_public_policy     = true
+  ignore_public_acls      = true
+  restrict_public_buckets = true
+}
+
+data "aws_iam_policy_document" "access_logs_put_access" {
+  statement {
+    effect = "Allow"
+    resources = [
+      aws_s3_bucket.access_logs.arn,
+      "${aws_s3_bucket.access_logs.arn}/*"
+    ]
+    actions = ["s3:PutObject"]
+
+    principals {
+      type        = "AWS"
+      identifiers = ["arn:aws:iam::${local.elb_account_map[data.aws_region.current.name]}:root"]
+    }
+  }
+}
+
+resource "aws_s3_bucket_lifecycle_configuration" "access_logs" {
+  bucket = aws_s3_bucket.access_logs.id
+
+  rule {
+    id     = "AbortIncompleteUpload"
+    status = "Enabled"
+    abort_incomplete_multipart_upload {
+      days_after_initiation = 7
+    }
+  }
+
+  rule {
+    id     = "StorageClass"
+    status = "Enabled"
+    dynamic "transition" {
+      for_each = local.log_file_transition
+      content {
+        days          = transition.value
+        storage_class = transition.key
+      }
+    }
+  }
+
+  rule {
+    id     = "Expiration"
+    status = "Enabled"
+    expiration {
+      days = 2555
+    }
+  }
+  # checkov:skip=CKV_AWS_300:There is a known issue where this check brings up false positives
+}
+
+resource "aws_s3_bucket_versioning" "versioning" {
+  bucket = aws_s3_bucket.access_logs.id
+
versioning_configuration { + status = "Enabled" + } +} + + +resource "aws_s3_bucket_server_side_encryption_configuration" "encryption" { + bucket = aws_s3_bucket.access_logs.id + rule { + apply_server_side_encryption_by_default { + sse_algorithm = "aws:kms" + } + bucket_key_enabled = true + } +} + +resource "aws_s3_bucket_policy" "access_logs" { + bucket = aws_s3_bucket.access_logs.id + policy = data.aws_iam_policy_document.access_logs_put_access.json +} diff --git a/infra/modules/service/main.tf b/infra/modules/service/main.tf new file mode 100644 index 000000000..b36d0a5ac --- /dev/null +++ b/infra/modules/service/main.tf @@ -0,0 +1,399 @@ +data "aws_caller_identity" "current" {} +data "aws_region" "current" {} +data "aws_ecr_repository" "app" { + name = var.image_repository_name +} + +locals { + alb_name = var.service_name + cluster_name = var.service_name + log_group_name = "service/${var.service_name}" + task_executor_role_name = "${var.service_name}-task-executor" + image_url = "${data.aws_ecr_repository.app.repository_url}:${var.image_tag}" + + base_environment_variables = [ + { name : "PORT", value : tostring(var.container_port) }, + { name : "AWS_REGION", value : data.aws_region.current.name }, + ] + db_environment_variables = var.db_vars == null ? 
[] : [ + { name : "DB_HOST", value : var.db_vars.connection_info.host }, + { name : "DB_PORT", value : var.db_vars.connection_info.port }, + { name : "DB_USER", value : var.db_vars.connection_info.user }, + { name : "DB_NAME", value : var.db_vars.connection_info.db_name }, + { name : "DB_SCHEMA", value : var.db_vars.connection_info.schema_name }, + ] + environment_variables = concat(local.base_environment_variables, local.db_environment_variables) +} + + +#--------------- +# Load balancer +#--------------- + +# ALB for an app running in ECS +resource "aws_lb" "alb" { + depends_on = [aws_s3_bucket_policy.access_logs] + name = var.service_name + idle_timeout = "120" + internal = false + security_groups = [aws_security_group.alb.id] + subnets = var.subnet_ids + + # TODO(https://github.com/navapbc/template-infra/issues/163) Implement HTTPS + # checkov:skip=CKV2_AWS_20:Redirect HTTP to HTTPS as part of implementing HTTPS support + + # TODO(https://github.com/navapbc/template-infra/issues/161) Prevent deletion protection + # checkov:skip=CKV_AWS_150:Allow deletion until we can automate deletion for automated tests + # enable_deletion_protection = true + + # TODO(https://github.com/navapbc/template-infra/issues/165) Protect ALB with WAF + # checkov:skip=CKV2_AWS_28:Implement WAF in issue #165 + + # Drop invalid HTTP headers for improved security + # Note that header names cannot contain underscores + # https://docs.bridgecrew.io/docs/ensure-that-alb-drops-http-headers + drop_invalid_header_fields = true + + access_logs { + bucket = aws_s3_bucket.access_logs.id + prefix = "${var.service_name}-lb" + enabled = true + } +} + +# NOTE: for the demo we expose private http endpoint +# due to the complexity of acquiring a valid TLS/SSL cert. 
+# In a production system we would provision an https listener +resource "aws_lb_listener" "alb_listener_http" { + # TODO(https://github.com/navapbc/template-infra/issues/163) Use HTTPS protocol + # checkov:skip=CKV_AWS_2:Implement HTTPS in issue #163 + # checkov:skip=CKV_AWS_103:Require TLS 1.2 as part of implementing HTTPS support + + load_balancer_arn = aws_lb.alb.arn + port = "80" + protocol = "HTTP" + + default_action { + type = "fixed-response" + + fixed_response { + content_type = "text/plain" + message_body = "Not Found" + status_code = "404" + } + } +} + +resource "aws_lb_listener_rule" "app_http_forward" { + listener_arn = aws_lb_listener.alb_listener_http.arn + priority = 100 + + action { + type = "forward" + target_group_arn = aws_lb_target_group.app_tg.arn + } + condition { + path_pattern { + values = ["/*"] + } + } +} + +resource "aws_lb_target_group" "app_tg" { + # you must use a prefix, to facilitate successful tg changes + name_prefix = "app-" + port = var.container_port + protocol = "HTTP" + vpc_id = var.vpc_id + target_type = "ip" + deregistration_delay = "30" + + health_check { + path = "/health" + port = var.container_port + healthy_threshold = 2 + unhealthy_threshold = 10 + interval = 30 + timeout = 29 + matcher = "200-299" + } + + lifecycle { + create_before_destroy = true + } +} + +#------------------- +# Service Execution +#------------------- + +resource "aws_ecs_service" "app" { + name = var.service_name + cluster = aws_ecs_cluster.cluster.arn + launch_type = "FARGATE" + task_definition = aws_ecs_task_definition.app.arn + desired_count = var.desired_instance_count + + # Allow changes to the desired_count without differences in terraform plan. + # This allows autoscaling to manage the desired count for us. 
+ lifecycle { + ignore_changes = [desired_count] + } + + network_configuration { + # TODO(https://github.com/navapbc/template-infra/issues/152) set assign_public_ip = false after using private subnets + # checkov:skip=CKV_AWS_333:Switch to using private subnets + assign_public_ip = true + subnets = var.subnet_ids + security_groups = [aws_security_group.app.id] + } + + load_balancer { + target_group_arn = aws_lb_target_group.app_tg.arn + container_name = var.service_name + container_port = var.container_port + } +} + +resource "aws_ecs_task_definition" "app" { + family = var.service_name + execution_role_arn = aws_iam_role.task_executor.arn + task_role_arn = aws_iam_role.service.arn + + # when is this needed? + # task_role_arn = aws_iam_role.app_service.arn + + container_definitions = jsonencode([ + { + name = var.service_name, + image = local.image_url, + memory = var.memory, + cpu = var.cpu, + networkMode = "awsvpc", + essential = true, + readonlyRootFilesystem = true, + + # Need to define all parameters in the healthCheck block even if we want + # to use AWS's defaults, otherwise the terraform plan will show a diff + # that will force a replacement of the task definition + healthCheck = { + interval = 30, + retries = 3, + timeout = 5, + command = ["CMD-SHELL", + "wget --no-verbose --tries=1 --spider http://localhost:${var.container_port}/health || exit 1" + ] + }, + environment = local.environment_variables, + portMappings = [ + { + containerPort = var.container_port, + } + ], + linuxParameters = { + capabilities = { + drop = ["ALL"] + }, + initProcessEnabled = true + }, + logConfiguration = { + logDriver = "awslogs", + options = { + "awslogs-group" = aws_cloudwatch_log_group.service_logs.name, + "awslogs-region" = data.aws_region.current.name, + "awslogs-stream-prefix" = var.service_name + } + } + } + ]) + + cpu = var.cpu + memory = var.memory + + requires_compatibilities = ["FARGATE"] + + # Reference 
https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html + network_mode = "awsvpc" +} + +resource "aws_ecs_cluster" "cluster" { + name = local.cluster_name + + setting { + name = "containerInsights" + value = "enabled" + } +} + +#------ +# Logs +#------ + +# Cloudwatch log group to for streaming ECS application logs. +resource "aws_cloudwatch_log_group" "service_logs" { + name = local.log_group_name + + # Conservatively retain logs for 5 years. + # Looser requirements may allow shorter retention periods + retention_in_days = 1827 + + # TODO(https://github.com/navapbc/template-infra/issues/164) Encrypt with customer managed KMS key + # checkov:skip=CKV_AWS_158:Encrypt service logs with customer key in future work +} + +#---------------- +# Access Control +#---------------- + +resource "aws_iam_role" "task_executor" { + name = local.task_executor_role_name + assume_role_policy = data.aws_iam_policy_document.ecs_tasks_assume_role_policy.json +} + +resource "aws_iam_role" "service" { + name = var.service_name + assume_role_policy = data.aws_iam_policy_document.ecs_tasks_assume_role_policy.json +} + +data "aws_iam_policy_document" "ecs_tasks_assume_role_policy" { + statement { + sid = "ECSTasksAssumeRole" + actions = [ + "sts:AssumeRole" + ] + principals { + type = "Service" + identifiers = ["ecs-tasks.amazonaws.com"] + } + } +} + +data "aws_iam_policy_document" "task_executor" { + # Allow ECS to log to Cloudwatch. + statement { + actions = [ + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:DescribeLogStreams" + ] + resources = ["${aws_cloudwatch_log_group.service_logs.arn}:*"] + } + + # Allow ECS to authenticate with ECR + statement { + sid = "ECRAuth" + actions = [ + "ecr:GetAuthorizationToken", + ] + resources = ["*"] + } + + # Allow ECS to download images. 
+ statement { + sid = "ECRPullAccess" + actions = [ + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:GetDownloadUrlForLayer", + ] + resources = [data.aws_ecr_repository.app.arn] + } +} + +resource "aws_iam_role_policy" "task_executor" { + name = "${var.service_name}-task-executor-role-policy" + role = aws_iam_role.task_executor.id + policy = data.aws_iam_policy_document.task_executor.json +} + +#----------------------- +# Network Configuration +#----------------------- + +resource "aws_security_group" "alb" { + # Specify name_prefix instead of name because when a change requires creating a new + # security group, sometimes the change requires the new security group to be created + # before the old one is destroyed. In this situation, the new one needs a unique name + name_prefix = "${var.service_name}-alb" + description = "Allow TCP traffic to application load balancer" + + lifecycle { + create_before_destroy = true + + # changing the description is a destructive change + # just ignore it + ignore_changes = [description] + } + + vpc_id = var.vpc_id + + # TODO(https://github.com/navapbc/template-infra/issues/163) Disallow incoming traffic to port 80 + # checkov:skip=CKV_AWS_260:Disallow ingress from 0.0.0.0:0 to port 80 when implementing HTTPS support in issue #163 + ingress { + description = "Allow HTTP traffic from public internet" + from_port = 80 + to_port = 80 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + description = "Allow all outgoing traffic" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +# Security group to allow access to Fargate tasks +resource "aws_security_group" "app" { + # Specify name_prefix instead of name because when a change requires creating a new + # security group, sometimes the change requires the new security group to be created + # before the old one is destroyed. 
In this situation, the new one needs a unique name + name_prefix = "${var.service_name}-app" + description = "Allow inbound TCP access to application container port" + vpc_id = var.vpc_id + lifecycle { + create_before_destroy = true + } + + ingress { + description = "Allow HTTP traffic to application container port" + protocol = "tcp" + from_port = var.container_port + to_port = var.container_port + security_groups = [aws_security_group.alb.id] + } + + egress { + description = "Allow all outgoing traffic from application" + protocol = "-1" + from_port = 0 + to_port = 0 + cidr_blocks = ["0.0.0.0/0"] + } +} + +#----------------- +# Database Access +#----------------- + +resource "aws_vpc_security_group_ingress_rule" "db_ingress_from_service" { + count = var.db_vars != null ? length(var.db_vars.security_group_ids) : 0 + + security_group_id = var.db_vars.security_group_ids[count.index] + description = "Allow inbound requests to database from ${var.service_name} service" + + from_port = tonumber(var.db_vars.connection_info.port) + to_port = tonumber(var.db_vars.connection_info.port) + ip_protocol = "tcp" + referenced_security_group_id = aws_security_group.app.id +} + +resource "aws_iam_role_policy_attachment" "app_db_access" { + count = var.db_vars != null ? 1 : 0 + + role = aws_iam_role.service.name + policy_arn = var.db_vars.access_policy_arn +} diff --git a/infra/modules/service/outputs.tf b/infra/modules/service/outputs.tf new file mode 100644 index 000000000..adde59cba --- /dev/null +++ b/infra/modules/service/outputs.tf @@ -0,0 +1,13 @@ +output "public_endpoint" { + description = "The public endpoint for the service." + value = "http://${aws_lb.alb.dns_name}" +} + +output "cluster_name" { + value = aws_ecs_cluster.cluster.name +} + +output "load_balancer_arn_suffix" { + description = "The ARN suffix for use with CloudWatch Metrics." 
+  value       = aws_lb.alb.arn_suffix
+}
diff --git a/infra/modules/service/variables.tf b/infra/modules/service/variables.tf
new file mode 100644
index 000000000..7b89db095
--- /dev/null
+++ b/infra/modules/service/variables.tf
@@ -0,0 +1,68 @@
+variable "service_name" {
+  description = "name of the service, to be used for infrastructure resource naming"
+  validation {
+    condition     = can(regex("^[-_\\da-z]+$", var.service_name))
+    error_message = "use only lower case letters, numbers, dashes, and underscores"
+  }
+}
+
+variable "image_tag" {
+  type        = string
+  description = "The tag of the image to deploy"
+}
+
+variable "image_repository_name" {
+  type        = string
+  description = "The name of the container image repository"
+}
+
+variable "desired_instance_count" {
+  type        = number
+  description = "Number of instances of the task definition to place and keep running."
+  default     = 1
+}
+
+variable "cpu" {
+  type        = number
+  default     = 256
+  description = "Number of cpu units used by the task, expressed as an integer value, e.g. 512"
+}
+
+variable "memory" {
+  type        = number
+  default     = 512
+  description = "Amount (in MiB) of memory used by the task. e.g. 2048"
+}
+
+
+variable "container_port" {
+  type        = number
+  description = "The port number on the container that's bound to the user-specified"
+  default     = 8000
+}
+
+variable "vpc_id" {
+  type        = string
+  description = "Uniquely identifies the VPC."
+} + +variable "subnet_ids" { + type = list(any) + description = "Private subnet id from vpc module" +} + +variable "db_vars" { + description = "Variables for integrating the app service with a database" + type = object({ + security_group_ids = list(string) + access_policy_arn = string + connection_info = object({ + host = string + port = string + user = string + db_name = string + schema_name = string + }) + }) + default = null +} diff --git a/infra/modules/terraform-backend-s3/README.md b/infra/modules/terraform-backend-s3/README.md new file mode 100644 index 000000000..29220db59 --- /dev/null +++ b/infra/modules/terraform-backend-s3/README.md @@ -0,0 +1,7 @@ +# Terraform S3 backend module + +This module creates resources for an [S3 backend for Terraform](https://www.terraform.io/language/settings/backends/s3). It creates the following resources: + +* S3 bucket to store [Terraform state files](https://www.terraform.io/language/state) +* S3 bucket to store [S3 access logs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html) +* DynamoDB table to manage [terraform state locks](https://www.terraform.io/language/state/locking) diff --git a/infra/modules/terraform-backend-s3/main.tf b/infra/modules/terraform-backend-s3/main.tf new file mode 100644 index 000000000..09bf780c8 --- /dev/null +++ b/infra/modules/terraform-backend-s3/main.tf @@ -0,0 +1,276 @@ +data "aws_caller_identity" "current" {} +data "aws_region" "current" {} +data "aws_partition" "current" {} + +locals { + tf_state_bucket_name = var.name + tf_logs_bucket_name = "${var.name}-logs" + tf_locks_table_name = "${var.name}-state-locks" +} + +# Create the dynamodb table required for state locking. + +# Options for encryption are an AWS owned key, which is not unique to your account; AWS managed; or customer managed. The latter two options are more secure, and customer managed gives +# control over the key. 
This allows for the ability to restrict access by key as well as policies attached to roles or users.
+# https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html
+resource "aws_kms_key" "tf_backend" {
+  description = "KMS key for DynamoDB table ${local.tf_locks_table_name}"
+  # The waiting period, specified in number of days. After the waiting period ends, AWS KMS deletes the KMS key.
+  deletion_window_in_days = "10"
+  # Generates new cryptographic material every 365 days, this is used to encrypt your data. The KMS key retains the old material for decryption purposes.
+  enable_key_rotation = "true"
+}
+
+resource "aws_dynamodb_table" "terraform_lock" {
+  name         = local.tf_locks_table_name
+  hash_key     = "LockID"
+  billing_mode = "PAY_PER_REQUEST"
+
+  attribute {
+    name = "LockID"
+    type = "S"
+  }
+
+  server_side_encryption {
+    enabled     = true
+    kms_key_arn = aws_kms_key.tf_backend.arn
+  }
+
+  point_in_time_recovery {
+    enabled = true
+  }
+
+}
+
+# Create the S3 bucket used to store terraform state remotely.
+resource "aws_s3_bucket" "tf_state" {
+  bucket = local.tf_state_bucket_name
+
+  # checkov:skip=CKV_AWS_144:Cross region replication not required by default
+  # checkov:skip=CKV2_AWS_62:S3 bucket does not need notifications enabled
+  # checkov:skip=CKV2_AWS_61:No need to define S3 bucket lifecycle configuration to expire or transition tfstate files since they will always be needed and the file sizes are small anyways
+
+  # Prevent accidental destruction by a developer executing terraform destroy in the wrong directory. Contains terraform state files.
+ lifecycle { + prevent_destroy = true + } +} + +resource "aws_s3_bucket_versioning" "tf_state" { + bucket = aws_s3_bucket.tf_state.id + versioning_configuration { + status = "Enabled" + } +} + +resource "aws_s3_bucket_server_side_encryption_configuration" "tf_state" { + bucket = aws_s3_bucket.tf_state.id + rule { + apply_server_side_encryption_by_default { + kms_master_key_id = aws_kms_key.tf_backend.arn + sse_algorithm = "aws:kms" + } + bucket_key_enabled = true + } +} + +resource "aws_s3_bucket_public_access_block" "tf_state" { + bucket = aws_s3_bucket.tf_state.id + + block_public_acls = true + block_public_policy = true + ignore_public_acls = true + restrict_public_buckets = true +} + +resource "aws_s3_bucket_ownership_controls" "tf_state" { + bucket = aws_s3_bucket.tf_state.id + + rule { + object_ownership = "BucketOwnerEnforced" + } +} + +data "aws_iam_policy_document" "tf_state" { + statement { + sid = "RequireTLS" + principals { + type = "AWS" + identifiers = ["*"] + } + actions = [ + "s3:*", + ] + + resources = [ + aws_s3_bucket.tf_state.arn, + "${aws_s3_bucket.tf_state.arn}/*" + ] + + effect = "Deny" + + condition { + test = "Bool" + variable = "aws:SecureTransport" + + values = [ + false + ] + } + } +} + +resource "aws_s3_bucket_policy" "tf_state" { + bucket = aws_s3_bucket.tf_state.id + policy = data.aws_iam_policy_document.tf_state.json +} + +# Create the S3 bucket to provide server access logging. 
+
+#
+# Ignore bucket logging compliance check for this bucket since
+# the bucket is used for logging only and doesn't need server access logging itself
+# (see https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html)
+# tfsec:ignore:aws-s3-enable-bucket-logging
+resource "aws_s3_bucket" "tf_log" {
+  bucket = local.tf_logs_bucket_name
+
+  # checkov:skip=CKV_AWS_144:Cross region replication not required by default
+  # checkov:skip=CKV2_AWS_62:S3 bucket does not need notifications enabled
+}
+
+resource "aws_s3_bucket_versioning" "tf_log" {
+  bucket = aws_s3_bucket.tf_log.id
+  versioning_configuration {
+    status = "Enabled"
+  }
+}
+
+resource "aws_s3_bucket_server_side_encryption_configuration" "tf_log" {
+  bucket = aws_s3_bucket.tf_log.id
+  rule {
+    apply_server_side_encryption_by_default {
+      kms_master_key_id = aws_kms_key.tf_backend.arn
+      sse_algorithm     = "aws:kms"
+    }
+    bucket_key_enabled = true
+  }
+}
+
+resource "aws_s3_bucket_public_access_block" "tf_log" {
+  bucket = aws_s3_bucket.tf_log.id
+
+  block_public_acls       = true
+  block_public_policy     = true
+  ignore_public_acls      = true
+  restrict_public_buckets = true
+}
+
+resource "aws_s3_bucket_ownership_controls" "tf_log" {
+  bucket = aws_s3_bucket.tf_log.id
+
+  rule {
+    object_ownership = "BucketOwnerEnforced"
+  }
+}
+
+# Move all log data to lower cost infrequent-access storage after 30 days.
+resource "aws_s3_bucket_lifecycle_configuration" "tf_log" { + bucket = aws_s3_bucket.tf_log.id + expected_bucket_owner = data.aws_caller_identity.current.account_id + + rule { + id = "move-s3-to-ia" + status = "Enabled" + + abort_incomplete_multipart_upload { + days_after_initiation = 15 + } + + transition { + days = 30 + storage_class = "STANDARD_IA" + } + + noncurrent_version_transition { + noncurrent_days = 30 + storage_class = "STANDARD_IA" + } + } +} + +data "aws_iam_policy_document" "tf_log" { + statement { + sid = "RequireTLS" + principals { + type = "AWS" + identifiers = ["*"] + } + actions = [ + "s3:*", + ] + + resources = [ + aws_s3_bucket.tf_log.arn, + "${aws_s3_bucket.tf_log.arn}/*" + ] + + effect = "Deny" + + condition { + test = "Bool" + variable = "aws:SecureTransport" + + values = [ + false + ] + } + } + statement { + sid = "S3ServerAccessLogsPolicy" + principals { + type = "Service" + identifiers = [ + "logging.s3.amazonaws.com" + ] + } + actions = [ + "s3:PutObject", + ] + + resources = [ + "${aws_s3_bucket.tf_log.arn}/*" + ] + + effect = "Allow" + + condition { + test = "ArnLike" + variable = "aws:SourceArn" + + values = [ + "arn:${data.aws_partition.current.id}:s3:::${aws_s3_bucket.tf_log.id}" + ] + } + + condition { + test = "StringLike" + variable = "aws:SourceAccount" + + values = [ + data.aws_caller_identity.current.account_id + ] + } + } +} + +resource "aws_s3_bucket_policy" "tf_log" { + bucket = aws_s3_bucket.tf_log.id + policy = data.aws_iam_policy_document.tf_log.json +} + +resource "aws_s3_bucket_logging" "tf_state" { + bucket = aws_s3_bucket.tf_state.id + + target_bucket = aws_s3_bucket.tf_log.id + target_prefix = "logs/${aws_s3_bucket.tf_state.bucket}/" +} diff --git a/infra/modules/terraform-backend-s3/outputs.tf b/infra/modules/terraform-backend-s3/outputs.tf new file mode 100644 index 000000000..8ce14d4a9 --- /dev/null +++ b/infra/modules/terraform-backend-s3/outputs.tf @@ -0,0 +1,11 @@ +output "tf_state_bucket_name" { + value = 
aws_s3_bucket.tf_state.bucket +} + +output "tf_log_bucket_name" { + value = aws_s3_bucket.tf_log.bucket +} + +output "tf_locks_table_name" { + value = aws_dynamodb_table.terraform_lock.name +} diff --git a/infra/modules/terraform-backend-s3/variables.tf b/infra/modules/terraform-backend-s3/variables.tf new file mode 100644 index 000000000..5918083e1 --- /dev/null +++ b/infra/modules/terraform-backend-s3/variables.tf @@ -0,0 +1,4 @@ +variable "name" { + type = string + description = "The name of the backend resource. This will be used to prefix the names of the other backend resources." +} diff --git a/infra/project-config/README.md b/infra/project-config/README.md new file mode 100644 index 000000000..3c885c322 --- /dev/null +++ b/infra/project-config/README.md @@ -0,0 +1,19 @@ +# Common module + +The purpose of this module is to contain environment agnostic items. e.g. tags that are common to all environments are stored here. + +## Usage + +```terraform +# Import the common module + +module "project_config" { + source = "../../project-config" +} + +# Combine common tags with environment specific tags. +tags = merge(module.project_config.default_tags, { + environment = "dev" + description = "Backend resources required for terraform state management." +}) +``` diff --git a/infra/project-config/main.tf b/infra/project-config/main.tf new file mode 100644 index 000000000..1b69a39a5 --- /dev/null +++ b/infra/project-config/main.tf @@ -0,0 +1,18 @@ +locals { + # Machine readable project name (lower case letters, dashes, and underscores) + # This will be used in names of AWS resources + project_name = "" + + # Project owner (e.g. navapbc). Used for tagging infra resources. + owner = "" + + # URL of project source code repository + code_repository_url = "" + + # Default AWS region for project (e.g. us-east-1, us-east-2, us-west-1). 
+  # This is dependent on where your project is located (if regional)
+  # otherwise us-east-1 is a good default
+  default_region = ""
+
+  github_actions_role_name = "${local.project_name}-github-actions"
+}
diff --git a/infra/project-config/outputs.tf b/infra/project-config/outputs.tf
new file mode 100644
index 000000000..e00c0be2d
--- /dev/null
+++ b/infra/project-config/outputs.tf
@@ -0,0 +1,36 @@
+output "project_name" {
+  value = local.project_name
+}
+
+output "owner" {
+  value = local.owner
+}
+
+output "code_repository_url" {
+  value = local.code_repository_url
+}
+
+output "code_repository" {
+  value       = regex("([-_\\w]+/[-_\\w]+)(\\.git)?$", local.code_repository_url)[0]
+  description = "The 'org/repo' string of the repo (e.g. 'navapbc/template-infra'). This is extracted from the repo URL (e.g. 'git@github.com:navapbc/template-infra.git' or 'https://github.com/navapbc/template-infra.git')"
+}
+
+output "default_region" {
+  value = local.default_region
+}
+
+# Common tags for all accounts and environments
+output "default_tags" {
+  value = {
+    project             = local.project_name
+    owner               = local.owner
+    repository          = local.code_repository_url
+    terraform           = true
+    terraform_workspace = terraform.workspace
+    # description is set in each environment's locals; use key project_description if required.
+ } +} + +output "github_actions_role_name" { + value = local.github_actions_role_name +} diff --git a/infra/test/go.mod b/infra/test/go.mod new file mode 100644 index 000000000..47d1ca7f7 --- /dev/null +++ b/infra/test/go.mod @@ -0,0 +1,55 @@ +module navapbc/template-infra + +go 1.19 + +require ( + cloud.google.com/go v0.83.0 // indirect + cloud.google.com/go/storage v1.10.0 // indirect + github.com/agext/levenshtein v1.2.3 // indirect + github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect + github.com/aws/aws-sdk-go v1.40.56 // indirect + github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/snappy v0.0.3 // indirect + github.com/googleapis/gax-go/v2 v2.0.5 // indirect + github.com/gruntwork-io/terratest v0.41.0 // indirect + github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-getter v1.6.1 // indirect + github.com/hashicorp/go-multierror v1.1.0 // indirect + github.com/hashicorp/go-safetemp v1.0.0 // indirect + github.com/hashicorp/go-version v1.3.0 // indirect + github.com/hashicorp/hcl/v2 v2.9.1 // indirect + github.com/hashicorp/terraform-json v0.13.0 // indirect + github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/jstemmer/go-junit-report v0.9.1 // indirect + github.com/klauspost/compress v1.13.0 // indirect + github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/go-testing-interface v1.0.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.7.0 // indirect + github.com/tmccombs/hcl2json 
v0.3.3 // indirect + github.com/ulikunitz/xz v0.5.8 // indirect + github.com/zclconf/go-cty v1.9.1 // indirect + go.opencensus.io v0.23.0 // indirect + golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a // indirect + golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect + golang.org/x/mod v0.4.2 // indirect + golang.org/x/net v0.0.0-20210614182718-04defd469f4e // indirect + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c // indirect + golang.org/x/sys v0.0.0-20220517195934-5e4e11fc645e // indirect + golang.org/x/text v0.3.6 // indirect + golang.org/x/tools v0.1.2 // indirect + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + google.golang.org/api v0.47.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c // indirect + google.golang.org/grpc v1.38.0 // indirect + google.golang.org/protobuf v1.26.0 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect +) diff --git a/infra/test/go.sum b/infra/test/go.sum new file mode 100644 index 000000000..80865778a --- /dev/null +++ b/infra/test/go.sum @@ -0,0 +1,594 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod 
h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0 h1:bAMqZidYkmIsUqe6PtkEPT7Q+vfizScn+jfNA6jwK9c= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub 
v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= +github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= +github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= +github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= +github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= +github.com/aws/aws-sdk-go v1.40.56 h1:FM2yjR0UUYFzDTMx+mH9Vyw1k1EUUxsAFzk+BjkzANA= +github.com/aws/aws-sdk-go v1.40.56/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= 
+github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= 
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.7/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod 
h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= 
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod 
h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gruntwork-io/terratest v0.41.0 h1:QKFK6m0EMVnrV7lw2L06TlG+Ha3t0CcOXuBVywpeNRU= +github.com/gruntwork-io/terratest v0.41.0/go.mod h1:qH1xkPTTGx30XkMHw8jAVIbzqheSjIa5IyiTwSV2vKI= +github.com/hashicorp/errwrap v1.0.0 
h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-getter v1.6.1 h1:NASsgP4q6tL94WH6nJxKWj8As2H/2kop/bB1d8JMyRY= +github.com/hashicorp/go-getter v1.6.1/go.mod h1:IZCrswsZPeWv9IkVnLElzRU/gz/QPi6pZHn4tv6vbwA= +github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= +github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.3.0 h1:McDWVJIU/y+u1BRV06dPaLfLCaT7fUTJLp5r04x7iNw= +github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl/v2 v2.9.1 h1:eOy4gREY0/ZQHNItlfuEZqtcQbXIxzojlP301hDpnac= +github.com/hashicorp/hcl/v2 v2.9.1/go.mod h1:FwWsfWEjyV/CMj8s/gqAuiviY72rJ1/oayI9WftqcKg= +github.com/hashicorp/terraform-json v0.13.0 h1:Li9L+lKD1FO5RVFRM1mMMIBDoUHslOniyEi5CM+FWGY= +github.com/hashicorp/terraform-json v0.13.0/go.mod h1:y5OdLBCT+rxbwnpxZs9kGL7R9ExU76+cpdY8zHwoazk= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jinzhu/copier 
v0.0.0-20190924061706-b57f9002281a h1:zPPuIq2jAWWPTrGt70eK/BSch+gFAGrNzecsoENgu2o= +github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a/go.mod h1:yL958EeXv8Ylng6IfnvG4oflryUi3vgA3xPs9hmII1s= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.11.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.13.0 h1:2T7tUoQrQT+fQWdaY5rjWztFGAFwbGD04iPJg90ZiOs= +github.com/klauspost/compress v1.13.0/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/mattn/go-colorable v0.0.9/go.mod 
h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 h1:ofNAzWCcyTALn2Zv40+8XitdzCgXY6e9qvXwN9W0YXg= +github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/sebdah/goldie v1.0.0/go.mod 
h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tmccombs/hcl2json v0.3.3 h1:+DLNYqpWE0CsOQiEZu+OZm5ZBImake3wtITYxQ8uLFQ= +github.com/tmccombs/hcl2json v0.3.3/go.mod h1:Y2chtz2x9bAeRTvSibVRVgbLJhLJXKlUeIvjeVdnm4w= +github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= +github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= +github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod 
h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= +github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= +github.com/zclconf/go-cty v1.8.1/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= +github.com/zclconf/go-cty v1.9.1 h1:viqrgQwFl5UpSxc046qblj78wZXVDFnSOufaOTER+cc= +github.com/zclconf/go-cty v1.9.1/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= +github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a 
h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e 
h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220517195934-5e4e11fc645e h1:w36l2Uw3dRan1K3TyXriXvY+6T56GNmlKGcqiQUJDfM= +golang.org/x/sys v0.0.0-20220517195934-5e4e11fc645e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod 
h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod 
h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0 h1:sQLWZQvP6jPGIP4JGPkJu4zHswrv81iobiyszr3b/0I= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod 
h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= 
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc 
v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/infra/test/infra_test.go b/infra/test/infra_test.go new file mode 100644 index 000000000..442a13562 --- /dev/null +++ b/infra/test/infra_test.go @@ -0,0 +1,132 @@ +package test + +import ( + "fmt" + "strings" + "testing" + "time" + + http_helper "github.com/gruntwork-io/terratest/modules/http-helper" + "github.com/gruntwork-io/terratest/modules/random" + "github.com/gruntwork-io/terratest/modules/shell" + "github.com/gruntwork-io/terratest/modules/terraform" +) + +func TestDev(t *testing.T) { + BuildAndPublish(t) + + uniqueId := strings.ToLower(random.UniqueId()) + workspaceName := 
fmt.Sprintf("t-%s", uniqueId) + imageTag := shell.RunCommandAndGetOutput(t, shell.Command{ + Command: "git", + Args: []string{"rev-parse", "HEAD"}, + WorkingDir: "./", + }) + terraformOptions := terraform.WithDefaultRetryableErrors(t, &terraform.Options{ + Reconfigure: true, + TerraformDir: "../app/service/", + VarFiles: []string{"dev.tfvars"}, + Vars: map[string]interface{}{ + "image_tag": imageTag, + }, + }) + + defer DestroyDevEnvironmentAndWorkspace(t, terraformOptions, workspaceName) + CreateDevEnvironmentInWorkspace(t, terraformOptions, workspaceName) + WaitForServiceToBeStable(t, workspaceName) + RunEndToEndTests(t, terraformOptions) +} + +func BuildAndPublish(t *testing.T) { + // terratest currently does not support passing a file as the -backend-config option + // so we need to manually call terraform rather than using terraform.Init + // see https://github.com/gruntwork-io/terratest/issues/517 + // it looks like this PR would add functionality for this: https://github.com/gruntwork-io/terratest/pull/558 + // after which we add BackendConfig: []string{"dev.s3.tfbackend": terraform.KeyOnly} to terraformOptions + // and replace the call to terraform.RunTerraformCommand with terraform.Init + terraform.RunTerraformCommand(t, &terraform.Options{ + TerraformDir: "../app/build-repository/", + }, "init", "-backend-config=shared.s3.tfbackend") + + shell.RunCommand(t, shell.Command{ + Command: "make", + Args: []string{"release-build"}, + WorkingDir: "../../", + }) + + shell.RunCommand(t, shell.Command{ + Command: "make", + Args: []string{"release-publish"}, + WorkingDir: "../../", + }) +} + +func CreateDevEnvironmentInWorkspace(t *testing.T, terraformOptions *terraform.Options, workspaceName string) { + fmt.Printf("::group::Create dev environment in new workspace '%s\n'", workspaceName) + + // terratest currently does not support passing a file as the -backend-config option + // so we need to manually call terraform rather than using terraform.Init + // see 
https://github.com/gruntwork-io/terratest/issues/517 + // it looks like this PR would add functionality for this: https://github.com/gruntwork-io/terratest/pull/558 + // after which we add BackendConfig: []string{"dev.s3.tfbackend": terraform.KeyOnly} to terraformOptions + // and replace the call to terraform.RunTerraformCommand with terraform.Init + terraform.RunTerraformCommand(t, terraformOptions, "init", "-backend-config=dev.s3.tfbackend") + terraform.WorkspaceSelectOrNew(t, terraformOptions, workspaceName) + terraform.Apply(t, terraformOptions) + fmt.Println("::endgroup::") +} + +func WaitForServiceToBeStable(t *testing.T, workspaceName string) { + fmt.Println("::group::Wait for service to be stable") + appName := "app" + environmentName := "dev" + serviceName := fmt.Sprintf("%s-%s-%s", workspaceName, appName, environmentName) + shell.RunCommand(t, shell.Command{ + Command: "aws", + Args: []string{"ecs", "wait", "services-stable", "--cluster", serviceName, "--services", serviceName}, + WorkingDir: "../../", + }) + fmt.Println("::endgroup::") +} + +func RunEndToEndTests(t *testing.T, terraformOptions *terraform.Options) { + fmt.Println("::group::Check service for healthy status 200") + serviceEndpoint := terraform.Output(t, terraformOptions, "service_endpoint") + http_helper.HttpGetWithRetryWithCustomValidation(t, serviceEndpoint, nil, 5, 1*time.Second, func(responseStatus int, responseBody string) bool { + return responseStatus == 200 + }) + fmt.Println("::endgroup::") +} + +func EnableDestroy(t *testing.T, terraformOptions *terraform.Options, workspaceName string) { + fmt.Println("::group::Setting force_destroy = true and prevent_destroy = false for s3 buckets") + shell.RunCommand(t, shell.Command{ + Command: "sed", + Args: []string{ + "-i.bak", + "s/force_destroy = false/force_destroy = true/g", + "infra/modules/service/access_logs.tf", + }, + WorkingDir: "../../", + }) + shell.RunCommand(t, shell.Command{ + Command: "sed", + Args: []string{ + "-i.bak", + 
"s/prevent_destroy = true/prevent_destroy = false/g", + "infra/modules/service/access_logs.tf", + }, + WorkingDir: "../../", + }) + terraform.RunTerraformCommand(t, terraformOptions, "init", "-backend-config=dev.s3.tfbackend") + terraform.Apply(t, terraformOptions) +} + +func DestroyDevEnvironmentAndWorkspace(t *testing.T, terraformOptions *terraform.Options, workspaceName string) { + EnableDestroy(t, terraformOptions, workspaceName) + fmt.Println("::group::Destroy environment and workspace") + terraform.RunTerraformCommand(t, terraformOptions, "init", "-backend-config=dev.s3.tfbackend") + terraform.Destroy(t, terraformOptions) + terraform.WorkspaceDelete(t, terraformOptions, workspaceName) + fmt.Println("::endgroup::") +}