diff --git a/.circleci/aws/config b/.circleci/aws/config new file mode 100644 index 000000000..9c723f99f --- /dev/null +++ b/.circleci/aws/config @@ -0,0 +1,23 @@ +[default] +region=us-east-1 +output=json + +[profile dogfood-dev] +region=us-east-1 +credential_source=Environment +role_arn=arn:aws:iam::090304172891:role/allow-gruntwork-website-ci-cd-access-from-other-accounts + +[profile dogfood-stage] +region=us-east-1 +credential_source=Environment +role_arn=arn:aws:iam::151025255439:role/allow-gruntwork-website-ci-cd-access-from-other-accounts + +[profile dogfood-prod] +region=us-east-1 +credential_source=Environment +role_arn=arn:aws:iam::996502968539:role/allow-gruntwork-website-ci-cd-access-from-other-accounts + +[profile dogfood-shared] +region=us-east-1 +credential_source=Environment +role_arn=arn:aws:iam::706132791050:role/allow-gruntwork-website-ci-cd-access-from-other-accounts \ No newline at end of file diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100644 index 000000000..5ea868f7c --- /dev/null +++ b/.circleci/config.yml @@ -0,0 +1,102 @@ +version: 2.1 + +###################################################################################################################### +# Define the ORBS we'll use in the rest of the workflow +###################################################################################################################### +orbs: + docker: circleci/docker@0.5.13 + awscli: circleci/aws-cli@0.1.13 + slack: circleci/slack@3.4.1 + +###################################################################################################################### +# We define all the actual build steps here in named, reusable references. 
This allows us to define jobs below as a +# readable, composable list of references +###################################################################################################################### +references: + base_container: &base_container + docker: + - image: circleci/node:14.15 + + ###################################################################################################################### + # Build steps + ###################################################################################################################### + + deploy_to_staging: &deploy-to-staging + run: + name: deploy to staging + command: | + set +o pipefail + export CREDENTIALS=`aws sts assume-role --role-arn arn:aws:iam::151025255439:role/allow-gruntwork-website-ci-cd-access-from-other-accounts --role-session-name CircleCI --duration-seconds 900 --output=json` + export AWS_ACCESS_KEY_ID=`echo ${CREDENTIALS} | jq -r '.Credentials.AccessKeyId'` + export AWS_SECRET_ACCESS_KEY=`echo ${CREDENTIALS} | jq -r '.Credentials.SecretAccessKey'` + export AWS_SESSION_TOKEN=`echo ${CREDENTIALS} | jq -r '.Credentials.SessionToken'` + export AWS_EXPIRATION=`echo ${CREDENTIALS} | jq -r '.Credentials.Expiration'` + ./scripts/push-to-s3-stage.sh + + deploy_to_prod: &deploy-to-prod + run: + name: deploy to prod + command: | + set +o pipefail + export CREDENTIALS=`aws sts assume-role --role-arn arn:aws:iam::996502968539:role/allow-gruntwork-website-ci-cd-access-from-other-accounts --role-session-name CircleCI --duration-seconds 900 --output=json` + export AWS_ACCESS_KEY_ID=`echo ${CREDENTIALS} | jq -r '.Credentials.AccessKeyId'` + export AWS_SECRET_ACCESS_KEY=`echo ${CREDENTIALS} | jq -r '.Credentials.SecretAccessKey'` + export AWS_SESSION_TOKEN=`echo ${CREDENTIALS} | jq -r '.Credentials.SessionToken'` + export AWS_EXPIRATION=`echo ${CREDENTIALS} | jq -r '.Credentials.Expiration'` + ./scripts/push-to-s3-prod.sh + + notify_slack_staging: ¬ify_slack_staging + slack/status: + failure_message: 
":red_circle: $CIRCLE_JOB has failed on master! Build triggered by: $CIRCLE_USERNAME. You have one hour to fix or revert!!" + success_message: ":tada: [Stage] Docs site has been successfully deployed at version $CIRCLE_SHA1 to https://docs.dogfood-stage.com" + only_for_branches: "master" + channel: "C01J73HUKEF" # #team-platform-notifications + + notify_slack_prod: ¬ify_slack_prod + slack/status: + failure_message: ":red_circle: $CIRCLE_JOB has failed on master! Build triggered by: $CIRCLE_USERNAME. You have one hour to fix or revert!!" + success_message: ":tada: [Prod] Docs site has been successfully deployed at version $CIRCLE_TAG to https://docs.gruntwork.io" + channel: "C01J73HUKEF" # #team-platform-notifications + +####################################################################################################################### +# The build jobs available, all consisting of lists of references to the references section above +####################################################################################################################### +jobs: + deploy-to-stage: + <<: *base_container + description: Deploy to Staging + steps: + - checkout + - awscli/install + - *deploy-to-staging + - *notify_slack_staging + + deploy-to-prod: + <<: *base_container + description: Deploy to Prod + steps: + - checkout + - awscli/install + - *deploy-to-prod + - *notify_slack_prod + +# --------------------------------------------------------------------------------------------------------------------- +# Here we combine the jobs defined above into various workflows that can run in parallel or sequentially, define +# dependencies on each other, and only run on certain branches/tags. 
+# --------------------------------------------------------------------------------------------------------------------- + +workflows: + version: 2 + each_commit: + jobs: + - deploy-to-stage: + filters: + branches: + # TODO: UPDATE TO `main/master` + only: docusaurus + - deploy-to-prod: + filters: + tags: + only: /^v.*/ + branches: + ignore: /.*/ diff --git a/.gitignore b/.gitignore index 342ea11cd..18848a9cc 100644 --- a/.gitignore +++ b/.gitignore @@ -74,3 +74,6 @@ yarn-error.log .pnp.js # Yarn Integrity file .yarn-integrity + +build/ +.docusaurus/ diff --git a/README.gatsby.md b/README.gatsby.md deleted file mode 100644 index 11e2af0ea..000000000 --- a/README.gatsby.md +++ /dev/null @@ -1,97 +0,0 @@ - -

- - Gatsby - -

-

- Gatsby's default starter -

- -Kick off your project with this default boilerplate. This starter ships with the main Gatsby configuration files you might need to get up and running blazing fast with the blazing fast app generator for React. - -_Have another more specific idea? You may want to check out our vibrant collection of [official and community-created starters](https://www.gatsbyjs.org/docs/gatsby-starters/)._ - -## πŸš€ Quick start - -1. **Create a Gatsby site.** - - Use the Gatsby CLI to create a new site, specifying the default starter. - - ```sh - # create a new Gatsby site using the default starter - gatsby new my-default-starter https://github.com/gatsbyjs/gatsby-starter-default - ``` - -1. **Start developing.** - - Navigate into your new site’s directory and start it up. - - ```sh - cd my-default-starter/ - gatsby develop - ``` - -1. **Open the source code and start editing!** - - Your site is now running at `http://localhost:8000`! - - _Note: You'll also see a second link: _`http://localhost:8000/___graphql`_. This is a tool you can use to experiment with querying your data. Learn more about using this tool in the [Gatsby tutorial](https://www.gatsbyjs.org/tutorial/part-five/#introducing-graphiql)._ - - Open the `my-default-starter` directory in your code editor of choice and edit `src/pages/index.js`. Save your changes and the browser will update in real time! - -## 🧐 What's inside? - -A quick look at the top-level files and directories you'll see in a Gatsby project. - - . - β”œβ”€β”€ node_modules - β”œβ”€β”€ src - β”œβ”€β”€ .gitignore - β”œβ”€β”€ .prettierrc - β”œβ”€β”€ gatsby-browser.js - β”œβ”€β”€ gatsby-config.js - β”œβ”€β”€ gatsby-node.js - β”œβ”€β”€ gatsby-ssr.js - β”œβ”€β”€ LICENSE - β”œβ”€β”€ package-lock.json - β”œβ”€β”€ package.json - └── README.md - -1. **`/node_modules`**: This directory contains all of the modules of code that your project depends on (npm packages) are automatically installed. - -2. 
**`/src`**: This directory will contain all of the code related to what you will see on the front-end of your site (what you see in the browser) such as your site header or a page template. `src` is a convention for β€œsource code”. - -3. **`.gitignore`**: This file tells git which files it should not track / not maintain a version history for. - -4. **`.prettierrc`**: This is a configuration file for [Prettier](https://prettier.io/). Prettier is a tool to help keep the formatting of your code consistent. - -5. **`gatsby-browser.js`**: This file is where Gatsby expects to find any usage of the [Gatsby browser APIs](https://www.gatsbyjs.org/docs/browser-apis/) (if any). These allow customization/extension of default Gatsby settings affecting the browser. - -6. **`gatsby-config.js`**: This is the main configuration file for a Gatsby site. This is where you can specify information about your site (metadata) like the site title and description, which Gatsby plugins you’d like to include, etc. (Check out the [config docs](https://www.gatsbyjs.org/docs/gatsby-config/) for more detail). - -7. **`gatsby-node.js`**: This file is where Gatsby expects to find any usage of the [Gatsby Node APIs](https://www.gatsbyjs.org/docs/node-apis/) (if any). These allow customization/extension of default Gatsby settings affecting pieces of the site build process. - -8. **`gatsby-ssr.js`**: This file is where Gatsby expects to find any usage of the [Gatsby server-side rendering APIs](https://www.gatsbyjs.org/docs/ssr-apis/) (if any). These allow customization of default Gatsby settings affecting server-side rendering. - -9. **`LICENSE`**: Gatsby is licensed under the MIT license. - -10. **`package-lock.json`** (See `package.json` below, first). This is an automatically generated file based on the exact versions of your npm dependencies that were installed for your project. **(You won’t change this file directly).** - -11. 
**`package.json`**: A manifest file for Node.js projects, which includes things like metadata (the project’s name, author, etc). This manifest is how npm knows which packages to install for your project. - -12. **`README.md`**: A text file containing useful reference information about your project. - -## πŸŽ“ Learning Gatsby - -Looking for more guidance? Full documentation for Gatsby lives [on the website](https://www.gatsbyjs.org/). Here are some places to start: - -- **For most developers, we recommend starting with our [in-depth tutorial for creating a site with Gatsby](https://www.gatsbyjs.org/tutorial/).** It starts with zero assumptions about your level of ability and walks through every step of the process. - -- **To dive straight into code samples, head [to our documentation](https://www.gatsbyjs.org/docs/).** In particular, check out the _Guides_, _API Reference_, and _Advanced Tutorials_ sections in the sidebar. - -## πŸ’« Deploy - -[![Deploy to Netlify](https://www.netlify.com/img/deploy/button.svg)](https://app.netlify.com/start/deploy?repository=https://github.com/gatsbyjs/gatsby-starter-default) - - diff --git a/README.md b/README.md index ed8083164..55d0c3ef4 100644 --- a/README.md +++ b/README.md @@ -1,51 +1,33 @@ -# Generating Gruntwork Package and Module Documentation +# Website -This repo contains a set of tools for generating Gruntwork Package and Gruntwork Module documentation on a public website -in a customizable format. +This website is built using [Docusaurus 2](https://docusaurus.io/), a modern static website generator. -## Features +### Installation -This docs site is built using [Gatsby](https://www.gatsbyjs.org/), a static site generator that is based on React. 
-We have extended it using plugins and custom code to support all of the relevant features required, including: - -- Syntax highlighting (via prismjs) -- Copy code to clipboard -- Image Captions -- Responsive design with fixed header -- Edit on GitHub buttons -- Google Analytics -- A dynamic sidebar with the ToC of the current page - -## doc-sourcer - -Some of the content is pulled from our other repos. You can look at `gruntyrepos.yml` for a list of repos where docs -are sourced. +``` +$ yarn +``` -This is managed through the `doc-sourcer` tool. Make sure you have a copy of it available by running: +### Local Development ``` -(cd ./doc_sourcer && go build -o doc-sourcer .) +$ yarn start ``` -**NOTE: The doc-sourcer project uses [go modules](https://github.com/golang/go/wiki/Modules). You may experience -dependency issues if you clone the repo in your GOPATH. To address, clone outside of the GOPATH.** +This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server. -## Deploy +### Build -To deploy a new version of the site, run: - -1. `./doc_sourcer/doc-sourcer` -1. `yarn run build` -1. `houston-cli exec websites -- yarn run deploy` +``` +$ yarn build +``` -### The Generation Workflow +This command generates static content into the `build` directory and can be served using any static contents hosting service. -Generating documentation is a multi-step process made up of the following stages: +### Deployment -1. `docs-fetcher`: Fetch all Gruntwork Package and Gruntwork Module source code into one repo. -2. `docs-preprocessor`: Transform the default folder structure of Gruntwork docs into a folder structure that mirrors - the desired public website structure. -3. `docs-generator`: Convert markdown files to HTML files, and generate an HTML-based navigation for all pages. 
+``` +$ GIT_USER= USE_SSH=true yarn deploy +``` -By using separate tools, we can compose this process to generate different kinds of documentation, pull from different -sources, or output in a different format. +If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch. diff --git a/babel.config.js b/babel.config.js new file mode 100644 index 000000000..e00595dae --- /dev/null +++ b/babel.config.js @@ -0,0 +1,3 @@ +module.exports = { + presets: [require.resolve('@docusaurus/core/lib/babel/preset')], +}; diff --git a/content/guides/deploying-a-dockerized-app-on-gke/index.md b/content/guides/deploying-a-dockerized-app-on-gke/index.md deleted file mode 100644 index 5c7ff0a03..000000000 --- a/content/guides/deploying-a-dockerized-app-on-gke/index.md +++ /dev/null @@ -1,526 +0,0 @@ ---- -title: "Deploying a Dockerized app on GCP/GKE" -date: 2019-10-22 -tags: ["gcp", "gke", "docker"] ---- - -This guide walks you through deploying a dockerized app to a GKE cluster running on Google Cloud Platform. - -In order to follow this guide you will need: - -1. A GCP account with billing enabled. There is a [free tier](https://cloud.google.com/free/) that includes \$300 of free credit over a 12 month period. -2. [Terraform](https://learn.hashicorp.com/terraform/getting-started/install.html) v0.12.7 or later installed locally. -3. [Docker](https://www.docker.com/) v18.09.2 or later installed locally. -4. A recent version of the [gcloud](https://cloud.google.com/sdk/gcloud/) command-line tool. -5. A basic understanding of Node.js is also recommended. - -## Creating a Basic App - -Before we can deploy a dockerized app, we first need to create one. For the purposes of this guide we will create -a basic Node.js app that responds to requests on port `8080`. 
- -Start by creating a file called `server.js` and paste in the following source code: - -```javascript -const express = require("express") - -// Constants -const PORT = 8080 -const HOST = "0.0.0.0" - -// App -const app = express() -app.get("/", (req, res) => res.send("Hello World!")) - -app.listen(PORT, HOST) -console.log(`Running on http://${HOST}:${PORT}`) -``` - -Next, we need a simple `package.json` file in order to make this work properly: - -```json -{ - "name": "docker_web_app", - "version": "1.0.0", - "main": "server.js", - "scripts": { - "start": "node server.js" - }, - "dependencies": { - "express": "^4.16.4" - } -} -``` - -Now we can begin to Dockerize the App! - -## Dockerizing the App - -Before we can deploy the app to GKE, we need to first dockerize it. If you are not familiar with the basics of Docker, we recommend -you check out our "[Crash Course on Docker and Packer](https://training.gruntwork.io/p/a-crash-course-on-docker-packer)" from the Gruntwork Training Library. - -For the purposes of this guide, we will use the following `Dockerfile` to package our app into a Docker image. - -```docker -FROM node:12 - -# Create app directory -WORKDIR /usr/app - -COPY package*.json ./ - -RUN npm install -COPY . . - -EXPOSE 8080 -CMD [ "npm", "start" ] -``` - -The folder structure of our sample app should now look like this: - -```bash -β”œβ”€β”€ server.js -β”œβ”€β”€ Dockerfile -└── package.json -``` - -**Note:** Your actual app will definitely be a lot more complicated than this, but the main point to take from here, is that -we need to ensure our Docker image is configured to EXPOSE the port that our app is going to need for external -communication. See the [Docker examples](https://docs.docker.com/samples/) for more information on dockerizing popular -app formats. - -To build this Docker image from the `Dockerfile`, run: - -```bash -$ docker build -t simple-web-app:latest . 
-``` - -Now we can test our container to see if it is working: - -```bash -$ docker run --rm -p 8080:8080 simple-web-app:latest -``` - -This starts the newly built container and links port 8080 on your machine to the container's port 8080. You should see -the following output below when you run the above command: - -```bash -> docker_web_app@1.0.0 start /usr/app -> node server.js - -Running on http://0.0.0.0:8080 -``` - -Next, let's go and open the app in your browser: - -```bash -$ open http://localhost:8080 -``` - -You should be able to see the "Hello World!" message from the server. - -### Dockerfile Tips - -Some things to note when writing up your Dockerfile and building your app: - -- Ensure your Dockerfile starts your app in the foreground so the container doesn't shutdown after app startup. -- Your app should log to stdout/stderr to aid in debugging it after deployment to GKE - -## Pushing the Docker image - -So far we've successfully built a Docker image on our local computer. Now it's time to push the image to your private -[Google Container Registry](https://cloud.google.com/container-registry/), so it can be deployed in the future. - -First, we must configure our local Docker client to be able to authenticate to Container Registry. Simply, run the -following commands (Note: you'll only need to do this step once): - -```bash -$ export PROJECT_ID="$(gcloud config get-value project -q)" -$ gcloud auth configure-docker -``` - -Next, tag the local Docker image for uploading: - -```bash -$ docker tag simple-web-app:latest gcr.io/${PROJECT_ID}/simple-web-app:v1 -``` - -Finally, push the Docker image to your private Container Registry: - -```bash -$ docker push gcr.io/${PROJECT_ID}/simple-web-app:v1 -``` - -## Launching a GKE Cluster - -Now we have successfully pushed the Docker image to the private Container Registry, we need to launch a -[Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/) cluster. 
By using our -[GKE module](https://github.com/gruntwork-io/terraform-google-gke) we can easily deploy a production-grade GKE cluster. - -First, let's create a `terraform` directory to store the HCL code: - -```bash -$ mkdir -p terraform -$ cd terraform -``` - -Then create a `main.tf` file and copy the following code: - -```hcl -terraform { - # The modules used in this example require Terraform 0.12, additionally we depend on a bug fixed in version 0.12.7. - required_version = ">= 0.12.7" -} - -provider "google" { - version = "~> 2.9.0" - project = var.project - region = var.region -} - -provider "google-beta" { - version = "~> 2.9.0" - project = var.project - region = var.region -} - -# --------------------------------------------------------------------------------------------------------------------- -# DEPLOY A PRIVATE CLUSTER IN GOOGLE CLOUD PLATFORM -# --------------------------------------------------------------------------------------------------------------------- - -module "gke_cluster" { - # Use a version of the gke-cluster module that supports Terraform 0.12 - source = "git::git@github.com:gruntwork-io/terraform-google-gke.git//modules/gke-cluster?ref=v0.3.8" - - name = var.cluster_name - - project = var.project - location = var.location - network = module.vpc_network.network - - # We're deploying the cluster in the 'public' subnetwork to allow outbound internet access - # See the network access tier table for full details: - # https://github.com/gruntwork-io/terraform-google-network/tree/master/modules/vpc-network#access-tier - subnetwork = module.vpc_network.public_subnetwork - - # When creating a private cluster, the 'master_ipv4_cidr_block' has to be defined and the size must be /28 - master_ipv4_cidr_block = var.master_ipv4_cidr_block - - # This setting will make the cluster private - enable_private_nodes = "true" - - # To make testing easier, we keep the public endpoint available. 
In production, we highly recommend restricting access to only within the network boundary, requiring your users to use a bastion host or VPN. - disable_public_endpoint = "false" - - # With a private cluster, it is highly recommended to restrict access to the cluster master - # However, for testing purposes we will allow all inbound traffic. - master_authorized_networks_config = [ - { - cidr_blocks = [ - { - cidr_block = "0.0.0.0/0" - display_name = "all-for-testing" - }, - ] - }, - ] - - cluster_secondary_range_name = module.vpc_network.public_subnetwork_secondary_range_name -} - -# --------------------------------------------------------------------------------------------------------------------- -# CREATE A NODE POOL -# --------------------------------------------------------------------------------------------------------------------- - -resource "google_container_node_pool" "node_pool" { - provider = google-beta - - name = "private-pool" - project = var.project - location = var.location - cluster = module.gke_cluster.name - - initial_node_count = "1" - - autoscaling { - min_node_count = "1" - max_node_count = "5" - } - - management { - auto_repair = "true" - auto_upgrade = "true" - } - - node_config { - image_type = "COS" - machine_type = "n1-standard-1" - - labels = { - private-pools-example = "true" - } - - # Add a private tag to the instances. 
See the network access tier table for full details: - # https://github.com/gruntwork-io/terraform-google-network/tree/master/modules/vpc-network#access-tier - tags = [ - module.vpc_network.private, - "private-pool-example", - ] - - disk_size_gb = "30" - disk_type = "pd-standard" - preemptible = false - - service_account = module.gke_service_account.email - - oauth_scopes = [ - "https://www.googleapis.com/auth/cloud-platform", - ] - } - - lifecycle { - ignore_changes = [initial_node_count] - } - - timeouts { - create = "30m" - update = "30m" - delete = "30m" - } -} - -# --------------------------------------------------------------------------------------------------------------------- -# CREATE A CUSTOM SERVICE ACCOUNT TO USE WITH THE GKE CLUSTER -# --------------------------------------------------------------------------------------------------------------------- - -module "gke_service_account" { - source = "git::git@github.com:gruntwork-io/terraform-google-gke.git//modules/gke-service-account?ref=v0.3.8" - - name = var.cluster_service_account_name - project = var.project - description = var.cluster_service_account_description -} - -# --------------------------------------------------------------------------------------------------------------------- -# ALLOW THE CUSTOM SERVICE ACCOUNT TO PULL IMAGES FROM THE GCR REPO -# --------------------------------------------------------------------------------------------------------------------- - -resource "google_storage_bucket_iam_member" "member" { - bucket = "artifacts.${var.project}.appspot.com" - role = "roles/storage.objectViewer" - member = "serviceAccount:${module.gke_service_account.email}" -} - -# --------------------------------------------------------------------------------------------------------------------- -# CREATE A NETWORK TO DEPLOY THE CLUSTER TO -# --------------------------------------------------------------------------------------------------------------------- - -module "vpc_network" { - source 
= "github.com/gruntwork-io/terraform-google-network.git//modules/vpc-network?ref=v0.2.1" - - name_prefix = "${var.cluster_name}-network-${random_string.suffix.result}" - project = var.project - region = var.region - - cidr_block = var.vpc_cidr_block - secondary_cidr_block = var.vpc_secondary_cidr_block -} - -# Use a random suffix to prevent overlap in network names -resource "random_string" "suffix" { - length = 4 - special = false - upper = false -} -``` - -The `main.tf` file is responsible for creating all of the GCP resources. After that let's create both the `outputs.tf` -and `variables.tf` files: - -**outputs.tf** - -```hcl -output "cluster_endpoint" { - description = "The IP address of the cluster master." - sensitive = true - value = module.gke_cluster.endpoint -} - -output "client_certificate" { - description = "Public certificate used by clients to authenticate to the cluster endpoint." - value = module.gke_cluster.client_certificate -} - -output "client_key" { - description = "Private key used by clients to authenticate to the cluster endpoint." - sensitive = true - value = module.gke_cluster.client_key -} - -output "cluster_ca_certificate" { - description = "The public certificate that is the root of trust for the cluster." - sensitive = true - value = module.gke_cluster.cluster_ca_certificate -} -``` - -**variables.tf** - -```hcl -# --------------------------------------------------------------------------------------------------------------------- -# REQUIRED PARAMETERS -# These variables are expected to be passed in by the operator. -# --------------------------------------------------------------------------------------------------------------------- - -variable "project" { - description = "The project ID where all resources will be launched." - type = string -} - -variable "location" { - description = "The location (region or zone) of the GKE cluster." - type = string -} - -variable "region" { - description = "The region for the network. 
If the cluster is regional, this must be the same region. Otherwise, it should be the region of the zone." - type = string -} - -# --------------------------------------------------------------------------------------------------------------------- -# OPTIONAL PARAMETERS -# These parameters have reasonable defaults. -# --------------------------------------------------------------------------------------------------------------------- - -variable "cluster_name" { - description = "The name of the Kubernetes cluster." - type = string - default = "example-private-cluster" -} - -variable "cluster_service_account_name" { - description = "The name of the custom service account used for the GKE cluster. This parameter is limited to a maximum of 28 characters." - type = string - default = "example-private-cluster-sa" -} - -variable "cluster_service_account_description" { - description = "A description of the custom service account used for the GKE cluster." - type = string - default = "Example GKE Cluster Service Account managed by Terraform" -} - -variable "master_ipv4_cidr_block" { - description = "The IP range in CIDR notation (size must be /28) to use for the hosted master network. This range will be used for assigning internal IP addresses to the master or set of masters, as well as the ILB VIP. This range must not overlap with any other ranges in use within the cluster's network." - type = string - default = "10.5.0.0/28" -} - -# For the example, we recommend a /16 network for the VPC. Note that when changing the size of the network, -# you will have to adjust the 'cidr_subnetwork_width_delta' in the 'vpc_network' -module accordingly. -variable "vpc_cidr_block" { - description = "The IP address range of the VPC in CIDR notation. A prefix of /16 is recommended. Do not use a prefix higher than /27." - type = string - default = "10.3.0.0/16" -} - -# For the example, we recommend a /16 network for the secondary range. 
Note that when changing the size of the network, -# you will have to adjust the 'cidr_subnetwork_width_delta' in the 'vpc_network' -module accordingly. -variable "vpc_secondary_cidr_block" { - description = "The IP address range of the VPC's secondary address range in CIDR notation. A prefix of /16 is recommended. Do not use a prefix higher than /27." - type = string - default = "10.4.0.0/16" -} -``` - -**Note:** Be sure to fill in any required variables that don't have a default value. - -Now we can use Terraform to create the resources: - -1. Run `terraform init`. -2. Run `terraform plan`. -3. If the plan looks good, run `terraform apply`. - -Terraform will begin to create the GCP resources. This process can take between 10-15 minutes. - -## Deploying the Dockerized App - -To deploy our Dockerized App on the GKE cluster, we can use the `kubectl` CLI tool to create a -[Kubernetes Pod](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/). A Pod is the smallest deployable -object in the Kubernetes object model and will contain only our `simple-web-app` Docker image. - -First, we must configure `kubectl` to use the newly created cluster: - -``` -$ gcloud container clusters get-credentials example-private-cluster --region europe-west3 -``` - -**Note**: Be sure to substitute `example-private-cluster` with the name of your GKE cluster and use either `--region` or `--zone` to specify the location. 
- -Use the `kubectl create` command to create a [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) -named `simple-web-app-deploy` on your cluster: - -```bash -$ kubectl create deployment simple-web-app-deploy --image=gcr.io/${PROJECT_ID}/simple-web-app:v1 -``` - -To see the Pod created by the last command, you can run: - -```bash -$ kubectl get pods -w -``` - -The output should look similar to the following: - -```bash -NAME READY STATUS RESTARTS AGE -simple-web-app-deploy-7fb787c449-vgtf6 0/1 ContainerCreating 0 7s -``` - -Now we need to expose the app to the public internet. - -## Attaching a Load Balancer - -So far we have deployed the dockerized app, but it is not currently accessible from the public internet. This is because -we have not assigned an external IP address or load balancer to the Pod. We can easily achieve this, by running the -following command: - -```bash -$ kubectl expose deployment simple-web-app-deploy --type=LoadBalancer --port 80 --target-port 8080 -``` - -**Note:** GKE assigns the external IP address to the Service resource, not the Deployment. - -This will take approximately 1 minute to assign an external IP address to the service. You can follow the progress by running: - -```bash -$ kubectl get services -w -``` - -Once this is done, you can easily open the external IP address in your web browser: - -```bash -$ open http://34.89.172.43 -``` - -If the service has been exposed correctly and the DNS has propagated you should see 'Hello World!'. Congratulations! - -## Cleaning Up - -In order to save costs, we recommend you destroy any infrastructure you've created by following this guide. - -First, delete the Kubernetes Service: - -```bash -$ kubectl delete service simple-web-app-deploy -``` - -This will destroy the Load Balancer created during the previous step. 
- -Next, to destroy the GKE cluster, you can simply invoke the `terraform destroy` command: - -```bash -$ terraform destroy -``` - -**Note**: This is a destructive command that will forcibly terminate and destroy your GKE cluster! diff --git a/content/guides/deploying-a-production-grade-eks-cluster/index.md b/content/guides/deploying-a-production-grade-eks-cluster/index.md deleted file mode 100644 index ea03c244e..000000000 --- a/content/guides/deploying-a-production-grade-eks-cluster/index.md +++ /dev/null @@ -1,437 +0,0 @@ ---- -title: "Deploying a production-grade EKS cluster" -date: 2019-05-07 -tags: ["aws", "eks", "docker"] ---- - -This guide walks you through how to use Gruntwork's private [terraform-aws-eks -Terraform Module](https://github.com/gruntwork-io/terraform-aws-eks) available to subscribers to provision a -production grade EKS cluster. - -**NOTE: All the code in this guide use modules from Gruntwork's IaC Library. You must be a paying subscriber to have -access. See gruntwork.io for more info and feel free to reach out to us at info@gruntwork.io if you have questions.** - - -## Prerequisites - -This guide depends on `Terraform` and `kubergrunt`. You can also optionally install `kubectl` if -you would like explore the newly provisioned cluster. You can find instructions on how to install each tool below: - -- [Terraform](https://learn.hashicorp.com/terraform/getting-started/install.html) -- [kubergrunt](https://github.com/gruntwork-io/kubergrunt#installation), minimum version: `0.3.9` -- (Optional) [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) - -Before you begin, be sure to set up your AWS credentials as environment variables so that all the commands -below can authenticate to the AWS account where you wish to deploy this example. 
You can refer to our blog post series -on AWS authentication ([A Comprehensive Guide to Authenticating to AWS on the Command -Line](https://blog.gruntwork.io/a-comprehensive-guide-to-authenticating-to-aws-on-the-command-line-63656a686799)) for -more information. - -Finally, before you begin, we recommend you familiarize yourself with EKS and Kubernetes. You can [refer to the module -documentation](https://github.com/gruntwork-io/terraform-aws-eks/#what-is-kubernetes) for an introduction. You can -also go to [the official AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/what-is-eks.html) for more -details on EKS, or [the official Kubernetes documentation](https://kubernetes.io/docs/home/) for more details on -Kubernetes. - - -## Workspace - -To follow along with the code samples, create a new directory that you will use as your workspace. This guide will -assume all your code is in the folder `production-grade-eks`: - -```bash -mkdir production-grade-eks -cd production-grade-eks -``` - - -## Overview - -Once all the tools are installed, we are ready to start deploying some infrastructure! - -This guide will include relevant code snippets where necessary. The complete code examples for this guide are available -in the [EKS cluster basic -example](https://github.com/gruntwork-io/terraform-aws-eks/tree/master/examples/eks-cluster-basic) in -the `terraform-aws-eks` repository. - -In order to setup our production grade EKS cluster, we need to: - -1. [Create a VPC for the EKS cluster](#create-a-vpc-for-the-eks-cluster) -1. [Deploy the EKS control plane](#deploy-the-eks-control-plane) -1. [Deploy an ASG for our worker nodes](#deploy-an-asg-for-our-worker-nodes) -1. [Create ConfigMap to authorize workers](#create-configmap-to-authorize-workers) -1. 
[(Optional) Explore the cluster using kubectl](#optional-explore-the-cluster-using-kubectl) - -Once the cluster is deployed, take a look at [Where to go from here](#where-to-go-from-here) for ideas on what to do -next. - - -## Create a VPC for the EKS cluster - -EKS relies on [Amazon Virtual Private Cloud](https://aws.amazon.com/vpc/) to provide a network topology to manage -communication across the nodes. For this guide, we will use the [vpc-app module in the module-vpc -repo](https://github.com/gruntwork-io/module-vpc/tree/master/modules/vpc-app) to provision a best practices VPC to house -the EKS cluster. - -This VPC will provision three subnet tiers: - -- **Public Subnets**: Resources in these subnets are directly addressable from the Internet. We will use this to - provision public-facing resources (typically just load balancers). -- **Private/App Subnets**: Resources in these subnets are NOT directly addressable from the Internet but they can make - outbound connections to the Internet through a NAT Gateway. We will use this to provision the Control Plane and Worker - Nodes. -- **Private/Persistence Subnets**: Resources in these subnets are neither directly addressable from the Internet nor - able to make outbound Internet connections. While we will not use this for our guide, typically this tier holds - databases, cache servers, and other stateful resources. - -EKS also relies on special tags on the VPC to know which VPC resources to use for deploying infrastructure. For example, -EKS needs to know to use the public subnet for the load balancers associated with a `Service` resource. We can use the -[eks-vpc-tags module in -terraform-aws-eks](https://github.com/gruntwork-io/terraform-aws-eks/tree/master/modules/eks-vpc-tags) for this purpose. - -The following Terraform code creates the VPC and tags them for use with a EKS cluster with the given name. 
We will -assume the cluster name is provided via an input variable, `var.eks_cluster_name`: - -```hcl -module "vpc" { - source = "git::git@github.com:gruntwork-io/module-vpc.git//modules/vpc-app?ref=v0.5.6" - - vpc_name = "${var.eks_cluster_name}-vpc" - aws_region = "us-east-1" - - # These tags are used by EKS to determine which AWS resources are associated - # with the cluster. This information will ultimately be used by the - # [amazon-vpc-cni-k8s plugin](https://github.com/aws/amazon-vpc-cni-k8s) to - # allocate ip addresses from the VPC to the Kubernetes pods. - - custom_tags = "${module.vpc_tags.vpc_eks_tags}" - public_subnet_custom_tags = "${module.vpc_tags.vpc_public_subnet_eks_tags}" - private_app_subnet_custom_tags = "${module.vpc_tags.vpc_private_app_subnet_eks_tags}" - private_persistence_subnet_custom_tags = "${module.vpc_tags.vpc_private_persistence_subnet_eks_tags}" - - # The IP address range of the VPC in CIDR notation. A prefix of /18 is - # recommended. Do not use a prefix higher than /27. - cidr_block = "10.0.0.0/18" - - # The number of NAT Gateways to launch for this VPC. For production VPCs, a - # NAT Gateway should be placed in each Availability Zone (so likely 3 total), - # whereas for non-prod VPCs, just one Availability Zone (and hence 1 NAT - # Gateway) will suffice. Warning: You must have at least this number of Elastic - # IP's to spare. The default AWS limit is 5 per region, but you can request - # more. - num_nat_gateways = 1 -} - -module "vpc_tags" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-vpc-tags?ref=v0.5.3" - - eks_cluster_name = "${var.eks_cluster_name}" -} -``` - -To apply this code, create a new file in the workspace named `main.tf` and copy paste the above code. Make sure to -configure include configurations for the AWS provider and select the `us-east-1` region. 
- -Be sure to define the input variable as well, in a different file `variables.tf`: - -```hcl -# Insert into variables.tf -variable "eks_cluster_name" {} -``` - -Once all your code is available, run `terraform apply` in the directory to provision your VPC. - - -## Deploy the EKS control plane - -Once we have a VPC where we can launch our EKS cluster into, we are ready to provision the Control Plane. The Control -Plane contains the resources and endpoint to run and access the Kubernetes master components within your VPC. The -underlying resources are entirely managed by AWS. The Control Plane acts as the brain of your cluster, managing the -scheduling and lifecycle of your deployed units (called [Pods](https://kubernetes.io/docs/concepts/workloads/pods/pod/) -in Kubernetes). - -In this guide, we will use [eks-cluster-control-plane module in the terraform-aws-eks -repo](https://github.com/gruntwork-io/terraform-aws-eks/tree/master/modules/eks-cluster-control-plane) to provision our -Control Plane. This module provisions all the necessary resources and dependencies to get your Control Plane up and -running, including IAM roles, security groups, and additional configurations. - -The following Terraform code provisions our EKS cluster into the VPC we just created, with [control plane logging -enabled](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html): - -```hcl -module "eks_cluster" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-cluster-control-plane?ref=v0.5.3" - - cluster_name = "${var.eks_cluster_name}" - kubernetes_version = "1.12" - - vpc_id = "${module.vpc.vpc_id}" - vpc_master_subnet_ids = ["${module.vpc.public_subnet_ids}"] - - # We only enable the security audit logs here. You can also enable the - # scheduler and controller logs by passing in "scheduler" and - # "controllerManager" respectively. 
- enabled_cluster_log_types = ["api", "audit", "authenticator"] -} -``` - -To apply this code, copy the snippet into the `main.tf` file and run `terraform apply`. - - -## Deploy an ASG for our worker nodes - -While the Control Plane is critical for your EKS cluster, it is not sufficient to run any workloads on your cluster. -You also need to provision worker nodes that will run your actual container workloads. The worker nodes connect to the -Control Plane to receive instructions on what `Pods` to schedule on the node. - -Here, we will setup an ASG with a configurable number of nodes to manage our worker nodes for the EKS cluster. We will -use the [eks-cluster-workers module in the terraform-aws-eks -repo](https://github.com/gruntwork-io/terraform-aws-eks/tree/master/modules/eks-cluster-workers) to do this. Note that -you can spawn multiple groups by making additional calls to the module. Note that you will need to set a `name_prefix` -with a unique string on each additional group to avoid name collision. - -The following Terraform code will provision an ASG using `t3.small` instances that run the [EKS optimized -AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html): - -```hcl -data "aws_ami" "eks_ami" { - filter { - name = "name" - values = ["amazon-eks-node-1.12-v*"] - } - - most_recent = true - owners = ["602401143452"] # Amazon EKS AMI Account ID -} - -module "eks_workers" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-cluster-workers?ref=v0.5.3" - - cluster_name = "${module.eks_cluster.eks_cluster_name}" - eks_master_security_group_id = "${module.eks_cluster.eks_master_security_group_id}" - - vpc_id = "${module.vpc.vpc_id}" - vpc_worker_subnet_ids = ["${module.vpc.public_subnet_ids}"] - - # Make the max size twice the min size to allow for rolling out updates to the - # cluster without downtime. 
See - # https://github.com/gruntwork-io/terraform-aws-eks/tree/master/modules/eks-cluster-workers#how-do-i-roll-out-an-update-to-the-instances - cluster_min_size = "${var.worker_group_size}" - cluster_max_size = "${var.worker_group_size * 2}" - - # We use a t3.small so that we have enough container slots to run the supporting services - cluster_instance_type = "t3.small" - cluster_instance_ami = "${data.aws_ami.eks_ami.id}" - - # EKS currently documents this required userdata for EKS worker nodes to - # properly configure Kubernetes applications on the EC2 instance. See - # https://docs.aws.amazon.com/eks/latest/userguide/launch-workers.html for more - # info. - cluster_instance_user_data = <<-USERDATA - #!/bin/bash - /etc/eks/bootstrap.sh \ - --apiserver-endpoint '${module.eks_cluster.eks_cluster_endpoint}' \ - --b64-cluster-ca '${module.eks_cluster.eks_cluster_certificate_authority}' \ - '${module.eks_cluster.eks_cluster_name}' - USERDATA -} -``` - -To apply this code, copy the snippet into the `main.tf` file and run `terraform apply`. Don't forget to define the new -input variable as well, in `variables.tf`: - -```hcl -variable "worker_group_size" {} -``` - - -## Create ConfigMap to authorize workers - -Note that it is not sufficient to deploy your worker nodes to successfully register them to the Control Plane. This is -because we have not authorized the worker nodes to access the Kubernetes Control Plane yet. Kubernetes employs a [Role -Based Access Control (RBAC)](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) system to manage -authorizations on the API. This system grants permissions to do various activities on the cluster once a user or entity -has authenticated to the cluster. - -In EKS, authentication to the Kubernetes API is handled using AWS IAM credentials. To access the Kubernetes API, you -attach your IAM credentials as the authorization bearer token. 
This is all managed using [AWS IAM authenticator -plugin](https://github.com/kubernetes-sigs/aws-iam-authenticator). You can read more about how all of this works in the -[plugin documentation](https://github.com/kubernetes-sigs/aws-iam-authenticator#how-does-it-work). For the purposes of -this guide, all you need to know is that you need to have IAM credentials to access our deployed cluster. - -Given that, we need some way to map IAM entities to Kubernetes RBAC entities so that we can grant various permissions to -the authenticated users. For example, worker nodes need the `system:node` `ClusterRole` to function, so we need to bind -this role to the IAM Role of the nodes in our ASG. We can use the [eks-k8s-role-mapping module in -terraform-aws-eks](https://github.com/gruntwork-io/terraform-aws-eks/tree/master/modules/eks-k8s-role-mapping) to manage -this mapping. This module creates a Kubernetes [ConfigMap -resource](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/) to configure the AWS IAM -Authenticator plugin to map the provided IAM entities to the specified RBAC groups so that permissions are bound to -those IAM entities. - -However, before we apply this, we need to setup our Kubernetes connection so that Terraform can create the `ConfigMap` -on our cluster. - -### Configuring the Kubernetes provider - -The `eks-k8s-role-mapping` module uses the [kubernetes -provider](https://www.terraform.io/docs/providers/kubernetes/index.html) to manage the `ConfigMap` resource. In order to -run it, we need to make sure the provider connects to the EKS cluster we just deployed. However, Terraform [does not -allow us to configure providers using resources interpolations on the provider -block](https://github.com/hashicorp/terraform/issues/2430). This makes it difficult to depend provider configuration on -the clusters being provisioned. We can work around this limitation using data sources that interpolate the resources. 
-This is because provider blocks support data source interpolations, and data sources do not have the limitation that -they can not interpolate resources. - -The following code interpolates the cluster resources as [`template_file` data -sources](https://www.terraform.io/docs/providers/template/d/file.html), and passes them to the `kubernetes` provider -block. Additionally, we use the [`aws_eks_cluster_auth` data -source](https://www.terraform.io/docs/providers/aws/d/eks_cluster_auth.html) to retrieve an authentication token -compatible with our cluster: - -```hcl -provider "kubernetes" { - load_config_file = false - host = "${data.template_file.kubernetes_cluster_endpoint.rendered}" - cluster_ca_certificate = "${base64decode(data.template_file.kubernetes_cluster_ca.rendered)}" - token = "${data.aws_eks_cluster_auth.kubernetes_token.token}" -} - -data "template_file" "kubernetes_cluster_endpoint" { - template = "${module.eks_cluster.eks_cluster_endpoint}" -} - -data "template_file" "kubernetes_cluster_ca" { - template = "${module.eks_cluster.eks_cluster_certificate_authority}" -} - -data "aws_eks_cluster_auth" "kubernetes_token" { - name = "${module.eks_cluster.eks_cluster_name}" -} -``` - -Include this in our `main.tf` file to setup the provider. - -### Map worker to system:node RBAC role - -Once our `kubernetes` provider configuration is setup, we are ready to provision our `ConfigMap` to bind the -`system:node` RBAC role to our worker node IAM roles: - -```hcl -module "eks_k8s_role_mapping" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-role-mapping?ref=v0.5.3" - - eks_worker_iam_role_arns = ["${module.eks_workers.eks_worker_iam_role_arn}"] -} -``` - -Copy this to our `main.tf` file and run `terraform apply` to provision the `ConfigMap`. - -### What about my IAM role/user? 
- -You might be wondering if you need to map your own IAM role (or user, depending on how you authenticated to AWS) to a -RBAC role to access the cluster. EKS defaults to mapping the IAM entity that provisioned the cluster to the -`system:masters` group, granting you superuser permissions on the cluster. In fact, if you did not have this, you would -be unable to create the `ConfigMap` in the first place as there is a chicken and egg situation: you need to bind -permissions to yourself by creating the `ConfigMap`, but you have no permissions to create it in the first place! - -This is why it is not necessary to explicitly grant our IAM entity permissions to access the EKS cluster. That said, it -is good practice to be explicit about the permissions you have granted. We recommend updating the previous block with -the following changes to explicitly state that you have admin access: - -```hcl -module "eks_k8s_role_mapping" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-role-mapping?ref=v0.5.3" - - eks_worker_iam_role_arns = ["${module.eks_workers.eks_worker_iam_role_arn}"] - - iam_role_to_rbac_group_mappings = "${ - map( - var.admin_iam_role_arn, list("system:masters"), - ) - }" -} - -variable "admin_iam_role_arn" {} -``` - -Note that we deliberately avoid using the [`aws_caller_identity` data -source](https://www.terraform.io/docs/providers/aws/d/caller_identity.html) to determine the authenticated user, because -this would cause Terraform to update the `ConfigMap` every time a different IAM entity is used to apply the code. - - -## (Optional) Explore the cluster using kubectl - -At this point, you should have a working EKS cluster that you can use to deploy your apps. You can use `kubectl` to -explore and create resources on the cluster. `kubectl` is the official command line interface that you can use to -interact with the cluster. 
You can learn more about the various features of `kubectl` from [the official -documentation](https://kubernetes.io/docs/reference/kubectl/overview/). - -In order to use `kubectl`, we need to first set it up so that it can authenticate with our new EKS cluster. You can -learn more about how authentication works with EKS in our guide [How do I authenticate kubectl to the EKS -cluster?](https://github.com/gruntwork-io/terraform-aws-eks/blob/master/README.md#how-to-authenticate-kubectl). For now, -you can run the `kubergrunt eks configure` command to get up and running. - -We need the ARN of the provisioned EKS cluster to use the command, so we will modify our terraform code to output this. -Create a new file `outputs.tf` and insert the following snippet: - -```hcl -# In outputs.tf -output "eks_cluster_arn" { - value = "${module.eks_cluster.eks_cluster_arn}" -} -``` - -Make sure to run `terraform apply` so that the output is included in the Terraform state. - -Once the output is available, we can extract the cluster ARN and use `kubergrunt eks configure`: - -```bash -EKS_CLUSTER_ARN=$(terraform output eks_cluster_arn) -kubergrunt eks configure --eks-cluster-arn $EKS_CLUSTER_ARN -``` - -At the end of this command, your default kubeconfig file (located at `~/.kube/config`) will have a new context that -authenticates with EKS. This context will be set as the default so that subsequent `kubectl` calls will target your -deployed eks cluster. - -You can now use `kubectl`. To verify your setup, run `kubectl get nodes` to see the list of worker nodes that are -registered to the cluster. - - -## Summary - -Congratulations! You have successfully deployed a production grade EKS cluster using Gruntwork modules! In this guide -you learned: - -- Deploy a production VPC configuration using the `vpc-app` module in `module-vpc`. -- Tag the VPC for use with EKS using the `eks-vpc-tags` module in `terraform-aws-eks`. 
-- Deploy a EKS control plane into the VPC using the `eks-cluster-control-plane` module in `terraform-aws-eks`. -- Deploy and register worker nodes to the EKS control plane using the `eks-cluster-workers` module in - `terraform-aws-eks`. -- Bind permissions to the worker node IAM roles using the `eks-k8s-role-mapping` module in `terraform-aws-eks`. - - -## Where to go from here - -Now that you have a production grade EKS cluster, here are some ideas for next steps: - -- [Deploy an app on the cluster using - `kubectl`.](https://kubernetes.io/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro/) -- [Deploy Tiller (Helm Server) and supporting services to enhance the cluster - features.](https://github.com/gruntwork-io/terraform-aws-eks/tree/master/examples/eks-cluster-with-supporting-services#deploy-core-services) -- Try provisioning a Namespace with Tiller (Helm Server) to deploy your - apps using the [modules in terraform-kubernetes-helm](https://github.com/gruntwork-io/terraform-kubernetes-helm). - - -## Troubleshooting - -**When destroying `eks-cluster`, I get an error with destroying VPC related resources.** - -EKS relies on the [`amazon-vpc-cni-k8s`](https://github.com/aws/amazon-vpc-cni-k8s) plugin to allocate IP addresses to -the pods in the Kubernetes cluster. This plugin works by allocating secondary ENI devices to the underlying worker -instances. Depending on timing, this plugin could interfere with destroying the cluster in this example. Specifically, -terraform could shutdown the instances before the VPC CNI pod had a chance to cull the ENI devices. These devices are -managed outside of terraform, so if they linger, it could interfere with destroying the VPC. - -To workaround this limitation, you have to go into the console and delete the ENI associated with the VPC. Then, -retry the destroy call. 
diff --git a/content/guides/upgrading-to-tf12-tg19/index.md b/content/guides/upgrading-to-tf12-tg19/index.md deleted file mode 100644 index c9e8f881e..000000000 --- a/content/guides/upgrading-to-tf12-tg19/index.md +++ /dev/null @@ -1,468 +0,0 @@ ---- -title: "Upgrading your Reference Architecture Deployment to Terraform 0.12.x and Terragrunt 0.19.x" -date: 2019-09-25 -tags: ["terraform", "terragrunt"] ---- - -This guide walks you through the steps you should take when upgrading your Reference Architecture deployment to -Terraform 0.12.x and Terragrunt 0.19.x. - -## Background - -Terraform 0.12 (referred to as TF12) was released in May, 2019, and it included a few major changes: - -1. A shift from HCL to HCL2 as the main syntax. This included support for first-class expressions (i.e., using variables - and functions without having to wrap everything in `${...}`). -1. Some official providers have changed their syntax. For example, the `terraform_remote_state` data source now requires - an `outputs` attribute to index into the outputs exported by the state. -1. More strict rules around what can go in a `.tfvars` file. In particular, any variable defined in a `.tfvars` file - that does not match a corresponding `variable` definition in your `.tf` files produces an error. -1. Math behaves differently. In particular, many functions that previously returned ints now return floats. -1. Bools behave differently, where they are no longer automatically type cast to ints. - -To be compatible with Terraform 0.12 changes, Terragrunt 0.19 (referred to as TG19) was released. Before version 0.19.0, -Terragrunt had you define its configuration in a `terragrunt = { ... }` variable in a `terraform.tfvars` file, but due -to item (1) this no longer works with Terraform 0.12 and newer. - -To support this: - -1. Terragrunt now defines and uses its own configuration file format: `terragrunt.hcl`. 
As a result of this, all the - attributes and blocks that used to be defined under the `terragrunt` input in the `terraform.tfvars` file are now - defined at the top level. -1. The `terragrunt.hcl` file: - - Uses HCL2 syntax. - - Is now more strict about the difference between blocks and attributes. - - Supports all Terraform built-in functions in its config file. - - Inputs to modules are now defined using a new attribute (`inputs`). - -This means that both the modules and the live config need to be updated in order to support TF12. - -## Migration Guide - -The following sections outline the steps you need to take in order to migrate from Terraform <= 0.11.X and Terragrunt <= -v0.18.x to Terraform 0.12.x and newer and Terragrunt 0.19.x and newer: - -1. [Upgrade modules for compatibility with Terraform 0.12.x](#upgrade-modules-for-compatibility-with-terraform-012x) -1. [Upgrade live config for compatibility with Terragrunt - 0.19.x](#upgrade-live-config-for-compatibility-with-terragrunt-019x) - -### Upgrade modules for compatibility with Terraform 0.12.x - -Terraform 0.12.x introduces many syntactic updates to make working with the language better, such as first-class -expressions, `for` and `for_each`, a more powerful type system, better error messages, [and -more](https://www.hashicorp.com/blog/announcing-terraform-0-12). However, there are a few backwards incompatible changes -that prevent using your TF11 and older modules directly with TF12. For the safest and most robust upgrade path, we recommend -going through the process of updating all your modules to use the new HCL2 syntax using the `terraform 0.12upgrade` tool -that ships with TF12. - -Before starting, read through the [Terraform 0.12 upgrade guide](https://www.terraform.io/upgrade-guides/0-12.html) so -you can learn about the types of changes you’ll have to make. Familiarity with the changes helps with detecting errors and -gotchas that the upgrade tool occasionally misses. - -Here is the rough process: - -1. 
[Install and setup Terraform 0.12](#install-and-setup-terraform-0-12) -1. [Upgrade each module for 0.12 compatibility](#upgrade-each-module-for-0-12-compatibility) - -#### Install and setup Terraform 0.12 - -To use Terraform 0.12, you will need to have the binary available on your machine. However, you don't want to blindly -replace your existing `terraform` binary to the newer version, because that would mean you wouldn't be able to use the -0.11 binary for your old code when you need to make a change to the existing infrastructure. - -Here are two ways for you to support having 0.11 and 0.12 simultaneously on your machine: - -- [Using homebrew to manage multiple terraform versions](#using-homebrew-to-manage-multiple-terraform-versions) -- [Using tfenv to manage multiple terraform versions](#using-tfenv-to-manage-multiple-terraform-versions) -- [Manually managing multiple terraform versions](#manually-managing-multiple-terraform-versions) - -##### Using homebrew to manage multiple terraform versions - -If you are on a Mac, you have the option of managing multiple terraform versions using homebrew. You can follow the -instructions published in our blog post [Installing Multiple Versions of Terraform with -Homebrew](https://blog.gruntwork.io/installing-multiple-versions-of-terraform-with-homebrew-899f6d124ff9) to setup -Homebrew to manage multiple versions of terraform, which you can then use `brew switch` to switch between them. - -##### Using tfenv to manage multiple terraform versions - -If you are on any Unix based system, you have the option of relying on [tfenv](https://github.com/tfutils/tfenv). -`tfenv` is a tool inspired by `rbenv` to provide a CLI for managing multiple versions of terraform. Once you have it -installed, `tfenv` will manage the binaries in your home directory and create symlinks that allow it to invoke the -currently selected Terraform version. 
- -For example, here is an example of using `tfenv` to install and manage both TF11 and TF12: - -First, you need to install the versions that you wish to use: - -```bash -$ tfenv install 0.11.14 -$ tfenv install 0.12.6 -``` - -Once the versions are installed, you can switch between the two using the `use` command: - -```bash -$ tfenv use 0.11.14 -$ terraform version -# Terraform v0.11.14 - -$ tfenv use 0.12.6 -$ terraform version -# Terraform v0.12.6 -``` - -Note that it can get confusing which version of terraform is currently in use, especially if you frequently switch -between the two. `tfenv` tracks the current version in a file available in `$TFENV_ROOT/version`. You can take advantage -of this fact to setup your shell prompt to display the currently selected Terraform version. - -##### Manually managing multiple terraform versions - -If you are on a non-Unix based machine (such that you can't use `tfenv`), or if you have restrictions on software that -can be installed on your machine, then your only option is to manage the Terraform versions manually. In this method, -you download each version of Terraform under a different alias (e.g `terraform0.12` for TF12 and `terraform0.11` for -TF11), and then have it available in a common folder that you can track and find. Then, whenever you need to use a -particular version, you copy the binary of the version you want to use into a location available in your `PATH` under -the name `terraform`. 
- -For example, on a Unix based machine, you can store all the Terraform binaries in the folder `$HOME/.terraform`: - -```bash -.terraform -β”œβ”€β”€ 0.11.14 -β”‚Β Β  └── terraform0.11 -└── 0.12.6 - Β Β  └── terraform0.12 -``` - -Then, every time you want to switch versions, you can copy the binary to `/usr/local/bin`: - -```bash -cp ~/.terraform/0.11.14/terraform0.11 /usr/local/bin/terraform -terraform version -# Terraform v0.11.14 -cp ~/.terraform/0.12.6/terraform0.12 /usr/local/bin/terraform -terraform version -# Terraform v0.12.6 -``` - -You can find the available binaries for each terraform version [here](https://releases.hashicorp.com/terraform/). - -#### Upgrade each module for 0.12 compatibility - -To upgrade each of your modules to HCL2 syntax, you can run the `terraform 0.12upgrade` command using version 0.12.x of -the `terraform` binary. You will need to do the following for each of your Terraform modules (e.g in your -`infrastructure-modules` repo). To make the commands concrete, the rest of this step by step guide will assume you are -updating a module in `infrastructure-modules/networking/vpc-app` (such as [this -one](https://github.com/gruntwork-io/infrastructure-modules-multi-account-acme/tree/3158e4ebe83156f3917f8d168a6985678e30acfa/networking/vpc-app)): - -**NOTE**: We recommend updating and testing one module at a time, starting with the modules that are at the root of your -dependency graph. Terraform 0.12 is not able to read the state of Terraform 0.11, so you will need to start with the -root of your dependencies before you can move on to the modules that depend on it. **Always test in an environment where -it is very easy to "start over", such as a sandbox or dev environment!** - -1. Go into the folder with the Terraform module: `cd infrastructure-modules/networking/vpc-app` -1. Search your code for any references to Gruntwork modules. 
Update each module to the version number specified in the - right-most column of the [Gruntwork module compatibility - table](https://docs.gruntwork.io/reference/version-compatibility/) (or newer). For example, - our example `infrastructure-modules/networking/vpc-app` [makes use of - module-vpc](https://github.com/gruntwork-io/infrastructure-modules-multi-account-acme/blob/3158e4ebe83156f3917f8d168a6985678e30acfa/networking/vpc-app/main.tf#L38), - which you’ll want to upgrade to `v0.6.0` (or newer). -1. Make sure to check the release notes for the module to see any other changes you need to make. E.g., Here are [the - release notes for module-vpc, v0.6.0](https://github.com/gruntwork-io/module-vpc/releases/tag/v0.6.0). -1. If you have a `required_version` constraint in your code (e.g like [this one in the - example](https://github.com/gruntwork-io/infrastructure-modules-multi-account-acme/blob/3158e4ebe83156f3917f8d168a6985678e30acfa/networking/vpc-app/main.tf#L28)), - for the time being, remove it so the Terraform auto-upgrade step doesn’t complain. We’ll bring it back later. -1. Run `terraform init -backend=false`. -1. Run `terraform 0.12upgrade`. -1. Delete or edit the generated `versions.tf` file. We recommend specifying a strict `required_version` constraint in - your Terraform code: e.g., `required_version = "= 0.12.4"`. -1. There are a few gotchas you have to handle manually. Here is the list of changes and mistakes the upgrade tool has - made which we detected in the process of upgrading our modules: - - - Duplicated comment blocks. Sometimes, the upgrade tool will duplicate comment blocks in the middle of the code. - This was most common when there is a comment on a resource attribute. - - Reorganized comment blocks. The tool sometimes moves the comment block somewhere you don't expect. This is - especially problematic for comments within blocks (e.g., in the middle of a vars = { … }) block. - - `bool` no longer converts to `1` and `0` automatically. 
If you had the pattern of doing `count = bool`, this no - longer works. You have to explicitly add a conditional: `count = bool ? 1 : 0`. - - In tf11 and under, sometimes you had to do `attr = ["${list}"]` when passing in a `list` to an attribute. In tf12, - this will no longer automatically flatten the list for you. Instead, you need to change to `attr = list`. - - `map` requires all the values to have the same type, and `list` requires all the elements to have the same type. - So `map(any)` does not mean you can mix types (e.g `{ x = "foo", bar = 1 }`), but rather β€œany type as - long as that type is consistent for all values”. If you have a need for map with any value, use `any` by itself. - Same with `list`. - - Object type requires all keys to be present. You can not have optional attributes when using the `object` type. - - Math works differently. In particular, before division returned ints but now they return floats. You have to use - `floor` to get the same behavior. E.g if you had `(5 / 2)` before, you now need to do `floor(5/2)`. - -1. (Optional) Go through any variable declarations and add appropriate [type - constraints](https://www.terraform.io/docs/configuration/types.html). We recommend adding explicit type constraints - on your variables. They not only make your code more robust to typos, type inference bugs, and type ambiguities, but - also act as good documentation as well. -1. If you have terratest workflows for your modules, upgrade terratest to v0.16.x (or newer). You can see all available - terratest versions, including detailed migration guides for each version in [the releases - page](https://github.com/gruntwork-io/terratest/releases). -1. Test the changes locally using `--terragrunt-source` (see the docs on [working - locally](https://github.com/gruntwork-io/terragrunt#working-locally)). 
Note that you will need to upgrade the - corresponding live config to [Terragrunt 0.19.x first](#upgrade-live-config-for-compatibility-with-terragrunt-019x). -1. Once you are satisfied with the changes, create a branch, push to the repository, open a PR, merge, and release. - -### Upgrade live config for compatibility with Terragrunt 0.19.x - -Terragrunt 0.19.x introduces a new file format for the live config. This format has many advantages, including -supporting all built in functions available to Terraform. However, this means that you will need to convert all your -terragrunt `terraform.tfvars` files to the new syntax. - -Here is the rough process: - -1. [Install and setup Terragrunt 0.19](#install-and-setup-terragrunt-0-19) -1. [Migrate terraform.tfvars to terragrunt.hcl](#migrate-terraform-tfvars-to-terragrunt-hcl) -1. [Switch common tfvars files to use yaml](#switch-common-tfvars-files-to-use-yaml) - -#### Install and setup Terragrunt 0.19 - -You will also need to setup the Terragrunt 0.19 binary in a [similar fashion to setting up Terraform -0.12](#install-and-setup-terraform-0-12). You can use the same instructions available for managing Terraform 0.12 as for -managing Terragrunt 0.19, except for `tfenv` which only supports Terraform - -- [Using homebrew to manage multiple terragrunt versions](#using-homebrew-to-manage-multiple-terraform-versions) -- [Manually managing multiple terragrunt versions](#manually-managing-multiple-terragrunt-versions) - -##### Using homebrew to manage multiple terragrunt versions - -Like `terraform`, you can use Homebrew to manage multiple `terraform` versions, using the same method described above in -[Using homebrew to manage multiple terraform versions](#using-homebrew-to-manage-multiple-terraform-versions). To use -the method for terragrunt, replace the references for `terraform` with `terragrunt`. 
For example, in the first step when -searching for the homebrew commit that introduces the Terragrunt version for `0.18.7`, you would run: - -``` -$ git log master -- Formula/terragrunt.rb -``` - -instead of the equivalent one for Terraform. - -##### Manually managing multiple terragrunt versions - -Like `terraform`, you can use the method described above in [Manually managing multiple terraform -versions](#manually-managing-multiple-terraform-versions) to manage multiple versions of Terragrunt. - -You can find the available binaries for each terragrunt version -[here](https://github.com/gruntwork-io/terragrunt/releases). - -#### Migrate terraform.tfvars to terragrunt.hcl - -Once you have terragrunt 0.19 available on your machine, you will need to migrate your `terraform.tfvars` files to -`terragrunt.hcl` files before you can start to use the new version. You will need to follow the steps outlined in the -[Terragrunt 0.19.x migration -guide](https://github.com/gruntwork-io/terragrunt/blob/master/_docs/migration_guides/upgrading_to_terragrunt_0.19.x.md) -for each of your live config (e.g in your `infrastructure-live` repo). - -You should do this in parallel with each module upgrade. For example, if you were upgrading the module -`infrastructure-modules/networking/vpc-app`, you should upgrade the live config that deploys that module to a pre-prod -environment to test the changes you are making to that module (e.g -[`infrastructure-live/dev/us-east-1/dev/vpc`](https://github.com/gruntwork-io/infrastructure-live-multi-account-acme/tree/dd6dce7f737f8c1bd32466b69e905b2bdd25db80/dev/us-east-1/dev/vpc)). -The rough process should be: - -1. Upgrade the module to TF12 syntax following the steps [listed above](#upgrade-each-module-for-0-12-compatibility). -1. Update the live config for a pre-prod environment that deploys the module. -1. Run `terragrunt plan` to verify the changes. 
Use `--terragrunt-source` so you can point to the updated module (see - the docs on [working locally](https://github.com/gruntwork-io/terragrunt#working-locally)). Carefully review to make - sure there are no disruptive changes. If you had been keeping the modules up to date with Gruntwork releases, there - should be minimal to 0 changes to the underlying resources. -1. Once you are satisfied with the changes, run `terragrunt apply` to deploy the changes. -1. Run smoke tests on your infrastructure to verify that the changes applied cleanly. -1. Once you are satisfied with the deployment, open a PR, review, merge the changes, and issue a new release for the - changes in `infrastructure-modules`. -1. Update the `ref` tag in your live config to point to the released version of `infrastructure-modules` in the pre-prod - environment. Then, open a PR, review, and merge the changes. - -After this, you have the option of either propagating the changes across all your environments, or moving on to the next -module before promoting the changes. Depending on your infrastructure setup, you may prefer one approach over the other: - -- Immediately promoting the changes ensure there is minimal drift between your environments. This means that you can - quickly test changes in your pre-prod environments to deal with issues in your prod environment. -- Immediately promoting the changes can drive towards immediately upgrading the rest of the modules, as opposed to - delaying the problem. -- Delaying promotion ensures that you can deal with problems in your existing infrastructure on a stable foundation. -- Delaying promotion allows you to minimize disruption from environments being in a mixed version state, that might - prevent you from addressing issues in downstream modules. 
- -#### Switch common tfvars files to use yaml - -Terragrunt supports ingesting common `tfvars` files to pass global variables to your terraform modules through the use -of [`required_var_files` and `optional_var_files` in the -config](https://github.com/gruntwork-io/terragrunt#required-and-optional-var-files). However, using `tfvars` files to -set variables that are not available in your modules is now deprecated, and will become [an error starting with Terraform -0.13.x](https://github.com/hashicorp/terraform/issues/19424#issuecomment-472186386). As such, some configurations may -start to get a loud warning that they are setting variables that are not configured in the underlying modules. - -Terragrunt works around this limitation through the use of environment variables to pass in the inputs. This means that -input variables defined in the `inputs` attribute of a `terragrunt.hcl` config are not restricted by this limitation. -Additionally, Terragrunt 0.19 and newer support all built in functions of Terraform in the config. You can combine these -two features to still make use of common variables. - -If you have `tfvars` files that set variables that are not defined in all modules that use them, you can follow the -following steps to workaround the new warnings in TF12: - -- Replace all common `tfvars` files to `yaml` syntax. For example, if you had a file `account.tfvars` with the content: - - ```hcl - aws_account_id = 1111111111 - terraform_state_s3_bucket = "acme-multi-account-dev-terraform-state" - terraform_state_aws_region = "us-east-1" - ``` - - you should replace this with a file `account.yaml` with the content: - - ```yaml - aws_account_id: 1111111111 - terraform_state_s3_bucket: "acme-multi-account-dev-terraform-state" - terraform_state_aws_region: "us-east-1" - ``` - -- At the root of your repo, create yaml file named `empty.yaml`. This is used to inject an empty object when the common - yaml vars files can not be found in the directory tree. 
It should have the content: - - ```yaml - {} - ``` - -- Remove references to the `tfvars` files in the root `terragrunt.hcl` config. -- Source and merge in the common vars into the `inputs` attribute of the root `terragrunt.hcl` config. For example, if - you had an `account.yaml` file that needed to be passed in to your modules: - ```hcl - inputs = merge( - yamldecode( - file("${get_terragrunt_dir()}/${find_in_parent_folders("account.yaml", "${path_relative_from_include()}/empty.yaml")}"), - ), - { - # additional vars - }, - ) - ``` - -## Acme change sets - -We applied the above procedure to our Acme Reference Architecture repos. You can see the pull requests for each repo -here: - -- [infrastructure-live-multi-account-acme](https://github.com/gruntwork-io/infrastructure-live-multi-account-acme/pull/21) -- [infrastructure-modules-multi-account-acme](https://github.com/gruntwork-io/infrastructure-modules-multi-account-acme/pull/23) -- [infrastructure-live-acme](https://github.com/gruntwork-io/infrastructure-live-acme/pull/18) -- [infrastructure-modules-acme](https://github.com/gruntwork-io/infrastructure-modules-acme/pull/23) - -## Known issues - -- When upgrading `iam-groups`, you may run into an error where you will not be able to `plan`, `apply`, or `destroy` the - resources. You will get an error message similar to below: - - ```bash - Error: Invalid count argument - on .terraform/modules/iam_groups/modules/iam-groups/main.tf line 230, in resource "aws_iam_group" "cross_account_access_groups": - 230: count = length(data.template_file.cross_account_group_names.*.rendered) - ``` - - This is due to a bug in terraform where it does not realize that the data source can be calculated at `plan` time. You - can read more about the issue on [the bug ticket here](https://github.com/hashicorp/terraform/issues/21450). - This issue is fixed in `module-security` - [v0.18.1](https://github.com/gruntwork-io/module-security/releases/tag/v0.18.1). 
If you encounter this issue, upgrade - `module-security` to `v0.18.1`. Note that `v0.18.0` has a backwards incompatible change and will require code changes - if you are upgrading from a version less than `v0.18.0`. You can see the necessary changes you need to make in [the - release notes](https://github.com/gruntwork-io/module-security/releases/tag/v0.18.0). - -- When upgrading `iam-groups` to `v0.18.1` from a version older than `v0.17.0`, you may run into an issue different from - the previous one where you will not be able to run `plan`, `apply`, or `destroy`. The error message in this case will - look similar to below: - - ```bash - ... - - Error: Unsupported attribute - on .terraform/modules/iam_groups/modules/iam-groups/outputs.tf line 38, in output "ssh_grunt_sudo_users_group_arns": - 38: value = [for _, group in aws_iam_group.ssh_iam_sudo_users : group.arn] - - This value does not have any attributes. - - ... - ``` - - This is due to a bug in terraform where it can not properly handle the migration of state representation from a single - resource to a resource using `for_each`. You can read more about the issue on [the bug ticket - here](https://github.com/hashicorp/terraform/issues/22375). To resolve this, you need to do state surgery to update - the representation. This will be done by first removing the original resource from the state, and then importing it. - - To resolve this issue, follow the following steps: - -1. Make sure the module code is updated to `module-security` v0.18.1 and you are using terraform with version at - least 0.12.6. -1. Run `terragrunt state list` to show all the state resources. 
You should see something like below: - - ```bash - data.aws_caller_identity.current - module.iam_groups.aws_iam_group.billing - module.iam_groups.aws_iam_group.developers - module.iam_groups.aws_iam_group.full_access - module.iam_groups.aws_iam_group.iam_user_self_mgmt - module.iam_groups.aws_iam_group.read_only - module.iam_groups.aws_iam_group.ssh_iam_sudo_users - module.iam_groups.aws_iam_group.ssh_iam_users - module.iam_groups.aws_iam_group.use_existing_iam_roles - module.iam_groups.aws_iam_group_policy.billing - module.iam_groups.aws_iam_group_policy.developers - module.iam_groups.aws_iam_group_policy.developers_personal_s3_bucket - module.iam_groups.aws_iam_group_policy.full_access - module.iam_groups.aws_iam_group_policy.read_only - module.iam_groups.aws_iam_group_policy.use_existing_iam_roles - module.iam_groups.aws_iam_group_policy_attachment.billing_iam_user_self_mgmt - module.iam_groups.aws_iam_group_policy_attachment.developers_iam_user_self_mgmt - module.iam_groups.aws_iam_group_policy_attachment.full_access_iam_user_self_mgmt - module.iam_groups.aws_iam_group_policy_attachment.iam_user_self_mgmt - module.iam_groups.aws_iam_group_policy_attachment.read_only_iam_user_self_mgmt - module.iam_groups.aws_iam_policy.iam_user_self_mgmt - module.iam_groups.module.iam_policies.data.aws_iam_policy_document.allow_access_from_other_accounts - module.iam_groups.module.iam_policies.data.aws_iam_policy_document.billing - module.iam_groups.module.iam_policies.data.aws_iam_policy_document.developers - module.iam_groups.module.iam_policies.data.aws_iam_policy_document.developers_s3_bucket - module.iam_groups.module.iam_policies.data.aws_iam_policy_document.full_access - module.iam_groups.module.iam_policies.data.aws_iam_policy_document.houston_cli_permissions - module.iam_groups.module.iam_policies.data.aws_iam_policy_document.iam_user_self_mgmt - module.iam_groups.module.iam_policies.data.aws_iam_policy_document.read_only - 
module.iam_groups.module.iam_policies.data.aws_iam_policy_document.ssh_grunt_houston_permissions - module.iam_groups.module.iam_policies.data.aws_iam_policy_document.ssh_grunt_permissions - module.iam_groups.module.iam_policies.data.aws_iam_policy_document.use_existing_iam_roles - ``` - - The two objects to look for in particular are: - - ```bash - module.iam_groups.aws_iam_group.ssh_iam_sudo_users - module.iam_groups.aws_iam_group.ssh_iam_users - ``` - - If you have those entries, then this method should resolve the problem for you. - -1. Inspect the state for one of the problem resources. Run `terragrunt state show module.iam_groups.aws_iam_group.ssh_iam_sudo_users`. This should look like below: - - ```hcl - # module.iam_groups.aws_iam_group.ssh_iam_sudo_users: - resource "aws_iam_group" "ssh_iam_sudo_users" { - arn = "arn:aws:iam::0000000000:group/ssh-grunt-sudo-users" - id = "ssh-grunt-sudo-users" - name = "ssh-grunt-sudo-users" - path = "/" - unique_id = "00000000000000" - } - ``` - - Note the value of the `id` field, as we will need it later. - -1. Remove the state object so that it can be reconstructed in the new format. This operation doesn’t delete the - object, but does remove it from the state so that terraform loses track of it. Don’t worry: we will restore it in - step 4. The command to do this is: `terragrunt state rm module.iam_groups.aws_iam_group.ssh_iam_sudo_users`. - -1. Import the object into the state as the new format. Run the following command, replacing `ID_OF_IAM_GROUP` with - the `id` field you recorded in step 2: `terragrunt import "module.iam_groups.aws_iam_group.ssh_iam_sudo_users[\"ID_OF_IAM_GROUP\"]" ID_OF_IAM_GROUP` - -1. Repeat steps 2-4 for `module.iam_groups.aws_iam_group.ssh_iam_users`. - -At the end of this, you should be able to run `terragrunt plan` cleanly. 
diff --git a/content/introduction/philosophy/index.md b/content/introduction/philosophy/index.md deleted file mode 100644 index a05161e37..000000000 --- a/content/introduction/philosophy/index.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: "Gruntwork Philosophy" -date: 2019-06-06 ---- - -At Gruntwork, we strive to accelerate the deployment of production grade infrastructure by providing a library of -stable, reusable, and battle tested infrastructure as code organized into a series of [packages](#what-is-a-package) with -[modules](#what-is-a-module). Each package represents a particular set of infrastructure that is componentized into -smaller pieces represented by the modules within the package. By doing so, we have built a composable library that can -be combined into building out everything from simple single service deployments to complicated microservice setups so -that your infrastructure can grow with your business needs. Every package we provide is built with the [production grade -infrastruture checklist](#production-grade-infrastructure-checklist) in mind, ensuring that the services you deploy are -resilient, fault tolerant, and scalable. - - -## What is a Package? - -A Package is a reusable, tested, documented, configurable, best-practices definition of a single piece of Infrastructure -(e.g., Docker cluster, VPC, Jenkins, Consul), written using a combination of [Terraform](https://www.terraform.io/), Go, -and Bash. A package contains a set of automated tests, documentation, and examples that have been proven in production, -providing the underlying infrastructure for [Gruntwork's customers](https://www.gruntwork.io/customers). - -Instead of figuring out the details of how to run a piece of infrastructure from scratch, you can reuse existing code -that has been proven in production. 
And instead of maintaining all that infrastructure code yourself, you can leverage -the work of the community to pick up infrastructure improvements through a version number bump. - - -## What is a Module? - -Each Infrastructure Package consists of one or more orthogonal Modules that handle some specific aspect of that -Infrastructure Package's functionality. Breaking the code up into multiple modules makes it easier to reuse and -compose to handle many different use cases. Although Packages are designed to provide an end to end solution to manage -the relevant infrastructure by combining the Modules defined in the Package, Modules can be used independently for -specific functionality that you need in your infrastructure code. - - -## Production Grade Infrastructure Checklist - -At Gruntwork, we have learned over the years that it is not enough to just get the services up and running in a publicly -accessible space to call your application "production-ready" (see [our lessons -learned](https://blog.gruntwork.io/5-lessons-learned-from-writing-over-300-000-lines-of-infrastructure-code-36ba7fadeac1) -from writing over 300k lines of infrastructure code). There are many more things to consider, and oftentimes many of -these considerations are missing in the deployment plan of applications. These topics come up as afterthoughts, and are -learned the hard way after the fact. That is why we codified all of them into a checklist that can be used as a -reference to help ensure that they are considered before your application goes to production, and conscious decisions -are made to neglect particular components if needed, as opposed to accidentally omitting them from consideration. 
- - - -| Task | Description | Example tools | -|--------------------|-------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------| -| Install | Install the software binaries and all dependencies. | Bash, Chef, Ansible, Puppet | -| Configure | Configure the software at runtime. Includes port settings, TLS certs, service discovery, leaders, followers, replication, etc. | Bash, Chef, Ansible, Puppet | -| Provision | Provision the infrastructure. Includes EC2 instances, load balancers, network topology, security gr oups, IAM permissions, etc. | Terraform, CloudFormation | -| Deploy | Deploy the service on top of the infrastructure. Roll out updates with no downtime. Includes blue-green, rolling, and canary deployments. | Scripts, Orchestration tools (ECS, k8s, Nomad) | -| High availability | Withstand outages of individual processes, EC2 instances, services, Availability Zones, and regions. | Multi AZ, multi-region, replication, ASGs, ELBs | -| Scalability | Scale up and down in response to load. Scale horizontally (more servers) and/or vertically (bigger servers). | ASGs, replication, sharding, caching, divide and conquer | -| Performance | Optimize CPU, memory, disk, network, GPU, and usage. Includes query tuning, benchmarking, load testing, and profiling. | Dynatrace, valgrind, VisualVM, ab, Jmeter | -| Networking | Configure static and dynamic IPs, ports, service discovery, firewalls, DNS, SSH access, and VPN access. | EIPs, ENIs, VPCs, NACLs, SGs, Route 53, OpenVPN | -| Security | Encryption in transit (TLS) and on disk, authentication, authorization, secrets management, server hardening. | ACM, EBS Volumes, Cognito, Vault, CIS | -| Metrics | Availability metrics, business metrics, app metrics, server metrics, events, observability, tracing, and alerting. 
| CloudWatch, DataDog, New Relic, Honeycomb | -| Logs | Rotate logs on disk. Aggregate log data to a central location. | CloudWatch logs, ELK, Sumo Logic, Papertrail | -| Backup and Restore | Make backups of DBs, caches, and other data on a scheduled basis. Replicate to separate region/account. | RDS, ElastiCache, ec2-snapper, Lambda | -| Cost optimization | Pick proper instance types, use spot and reserved instances, use auto scaling, and nuke unused resources. | ASGs, spot instances, reserved instances | -| Documentation | Document your code, architecture, and practices. Create playbooks to respond to incidents. | READMEs, wikis, Slack | -| Tests | Write automated tests for your infrastructure code. Run tests after every commit and nightly. | Terratest | diff --git a/content/introduction/what-is-gruntwork/index.md b/content/introduction/what-is-gruntwork/index.md deleted file mode 100644 index 54647d22d..000000000 --- a/content/introduction/what-is-gruntwork/index.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: "What is Gruntwork?" -date: 2019-06-06 ---- - -Our goal is to make world-class infrastructure and DevOps practices accessible to everyone, and not just the small, -elite group of companies that can afford a huge team of infrastructure engineers and SREs. When writing became -accessible to all of humanity, rather than just a tiny β€œpriest” class, it had a profound effect on the world. We believe -something similar will happen when it’s an order of magnitude easier to build, run, and maintain software. 
- - -## How we make world-class infrastructure and DevOps accessible - -Currently, we offer the following products to our customers: - -[Infrastructure as Code Library](#infrastructure-as-code-library) -[Reference Architecture](#reference-architecture) -[Gruntwork Houston](#gruntwork-houston) -[Dedicated Support](#dedicated-support) - - -### Infrastructure as Code Library - -A collection of over 350k lines of reusable, battle-tested, production-ready infrastructure code for AWS and GCP. -Instead of reinventing the wheel and building all of your infrastructure from scratch, you can build on top of a mature -Infrastructure as Code (IaC) Library, which has been built by a team of DevOps experts, and proven in production with -hundreds of customers. - -A key feature of all our modules is our dedication to production-ready code. You can read more about our philosophy -behind our modules in our article on [our philosophy](/introduction/philosophy). - -See [our product page](https://gruntwork.io/infrastructure-as-code-library/) for more information on the features of our -IaC library. Also take a look at our [module -catalog](/introduction/library-catalog) for a list of supported components. - -### Reference Architecture - -An opinionated, battle tested, best-practices end-to-end tech stack built on top of the Infrastructure as Code Library. -Instead of spending months assembling everything from scratch, we can deploy the Reference Architecture into your AWS -account in about one day and give you 100% of the code, which includes just about everything a typical company needs: -server clusters, load balancers, databases, caches, network topology, monitoring, alerting, CI/CD, secrets management, -VPN, and more. - -See [our product page](https://gruntwork.io/reference-architecture/) for more information on what we deploy to your -account. - -### Gruntwork Houston - -Houston is Mission Control for your entire infrastructure. 
On the surface, it’s a simple web interface that your Dev -team can use to deploy and manage infrastructure. Under the hood, the web interface and how it manages infrastructure -are completely defined and controlled by your Ops team using infrastructure as code. It’s the best of both worlds: your -Dev team gets an easy-to-use, self-service experience, while your Ops team still has all the power and control they need -to ensure reliability, security, and compliance. - -See [our product page](https://gruntwork.io/houston/) for a demo of how Houston can optimize your workflows. - -### Dedicated Support - -With DevOps, it seems like there are a thousand little details you have to get just rightβ€”DNS, TLS, VPCs, secrets -management, monitoring, alerting, CI, CDβ€”and never enough time to learn them all. With Gruntwork Dedicated Support, you -get to work with a team of DevOps experts who can help you set up your infrastructure, design highly available and -scalable systems, automate builds and deployments, troubleshoot issues, and avoid gotchas and pitfalls. - -See [our product page](https://gruntwork.io/support/) for the different ways we can help. - - -## Learn more - -You can learn more about Gruntwork on [our company website](https://gruntwork.io). Or read more about our origin story -in our post, [How we built a distributed, self-funded, family-friendly, profitable -startup?](https://blog.gruntwork.io/how-we-built-a-distributed-self-funded-family-friendly-profitable-startup-93635feb5ace) diff --git a/content/reference/gcp-reference-architecture/index.md b/content/reference/gcp-reference-architecture/index.md deleted file mode 100644 index 0ae64f648..000000000 --- a/content/reference/gcp-reference-architecture/index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: "GCP Reference Architecture" -date: 2019-08-30 ---- - -We are building out an end-to-end, production-grade, secure, and developer-friendly Reference Architecture for GCP. 
Just as with our [AWS -Reference Architecture](https://www.gruntwork.io/reference-architecture/), the GCP Reference Architecture includes just about everything a -typical company needs: VPCs, Kubernetes (GKE), load balancers, databases, caches, static content, CI / CD, monitoring, alerting, user and -permissions management, VPN, SSH, and so on. We deploy the Reference Architecture into your GCP account and give you 100% of the code, -allowing your team to immediately start building on top of a battle-tested, best-practices, fully-automated infrastructure. - -## Features - -Our initial release targets Terraform 0.12.x, and includes support for: - -- **Multiple Environments:** We support multiple environments including prod and stage. -- **GKE Cluster:** With Helm (Tiller) securely installed to run your applications. -- **VPC Network:** To securely contain all of the resources. -- **HA Cloud SQL Database:** To store your relational data. We support either MySQL or Postgres. -- **Sample Applications:** Gruntwork provides sample frontend and backend applications that demonstrate how to run an app on the GKE cluster. -- **Cloud Load Balancer:** To proxy requests to the GKE cluster. This includes DNS and TLS configuration for the load balancer. -- **Stackdriver Integration:** To collect metrics from the GKE cluster including log aggregation from supported applications. -- **Google Cloud Build:** For the purposes of continuous integration. Gruntwork integrates a CI / CD pipeline for the sample apps on top of Google Cloud Build. -- **Google Container Registry:** For storing the artifacts produced by Google Cloud Build. -- **Secrets Management:** _[Coming Soon]_ Gruntwork will propose and integrate a suitable solution for secrets management based on Cloud KMS. 
diff --git a/content/reference/version-compatibility/index.md b/content/reference/version-compatibility/index.md deleted file mode 100644 index 87e2fc977..000000000 --- a/content/reference/version-compatibility/index.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: "Module Version Compatibility" -date: 2019-07-11 ---- - -## Terraform Version Compatibility Chart - -The following lists our Terraform packages and their compatibility with Terraform versions. - -**NOTE**: If a package is not listed here, then either it does not contain any Terraform code (e.g., `gruntkms`) or it has not been updated for Terraform 0.12 compatibility yet. - - - -### AWS - -| IaC Package | Terraform 0.11.X | Terraform 0.12.X | -| ------------------------------------------------------------------------------------------------ | ---------------- | ---------------- | -| [Terragrunt](https://github.com/gruntwork-io/terragrunt) | <=v0.18.7 | >=v0.19.0 | -| [Terratest](https://github.com/gruntwork-io/terratest) | ALL | >=v0.15.8 | -| [package-terraform-utilities](https://github.com/gruntwork-io/package-terraform-utilities) | <=v0.0.8 | >=v0.1.0 | -| [module-ci](https://github.com/gruntwork-io/module-ci) | <=v0.13.16 | >=v0.14.0 | -| [module-security](https://github.com/gruntwork-io/module-security) | <=v0.16.6 | >=v0.17.0 | -| [module-cache](https://github.com/gruntwork-io/module-cache) | <=v0.5.0 | >=v0.6.0 | -| [module-vpc](https://github.com/gruntwork-io/module-vpc) | <=v0.5.8 | >=v0.6.0 | -| [module-server](https://github.com/gruntwork-io/module-server) | <=v0.6.2 | >=v0.7.0 | -| [module-load-balancer](https://github.com/gruntwork-io/module-load-balancer) | <=v0.13.3 | >=v0.14.0 | -| [module-aws-monitoring](https://github.com/gruntwork-io/module-aws-monitoring) | <=v0.12.7 | >=v0.13.0 | -| [module-data-storage](https://github.com/gruntwork-io/module-data-storage) | <=v0.8.9 | >=v0.9.0 | -| [module-asg](https://github.com/gruntwork-io/module-asg) | <=v0.6.26 | >=v0.7.0 | -| 
[module-ecs](https://github.com/gruntwork-io/module-ecs) | <=v0.13.5 | >=v0.14.0 | -| [package-openvpn](https://github.com/gruntwork-io/package-openvpn) | <=v0.8.2 | >=v0.9.0 | -| [package-static-assets](https://github.com/gruntwork-io/package-static-assets) | <=v0.4.3 | >=v0.5.0 | -| [package-messaging](https://github.com/gruntwork-io/package-messaging) | <=v0.2.0 | >=v0.3.0 | -| [package-lambda](https://github.com/gruntwork-io/package-lambda) | <=v0.5.1 | >=v0.6.0 | -| [package-sam](https://github.com/gruntwork-io/package-sam) | <=v0.1.12 | >=v0.2.0 | -| [package-mongodb](https://github.com/gruntwork-io/package-mongodb) | <=v0.3.0 | >=v0.4.0 | -| [package-elk](https://github.com/gruntwork-io/package-elk) | <=v0.3.1 | >=v0.4.0 | -| [package-zookeeper](https://github.com/gruntwork-io/package-zookeeper) | <=v0.5.4 | >=v0.6.0 | -| [package-kafka](https://github.com/gruntwork-io/package-kafka) | <=v0.5.3 | >=v0.6.0 | -| [package-beanstalk](https://github.com/gruntwork-io/package-beanstalk) | <=v0.0.4 | >=v0.1.0 | -| [terraform-aws-eks](https://github.com/gruntwork-io/terraform-aws-eks) | <=v0.5.5 | >=v0.6.0 | -| [terraform-aws-couchbase](https://github.com/gruntwork-io/terraform-aws-couchbase) | <=v0.1.5 | >=v0.2.0 | -| [terraform-aws-influx](https://github.com/gruntwork-io/terraform-aws-influx) | <=v0.0.4 | >=v0.1.0 | -| [terraform-aws-consul](https://github.com/hashicorp/terraform-aws-consul) | <=v0.6.1 | >=v0.7.0 | -| [terraform-aws-nomad](https://github.com/hashicorp/terraform-aws-nomad) | <=v0.4.5 | >=v0.5.0 | -| [terraform-aws-vault](https://github.com/hashicorp/terraform-aws-vault) | <=v0.12.2 | >=v0.13.0 | -| [terraform-kubernetes-helm](https://github.com/gruntwork-io/terraform-kubernetes-helm) | <=v0.4.0 | >=v0.5.0 | - - -### GCP - -| IaC Package | Terraform 0.11.X | Terraform 0.12.X | -| ------------------------------------------------------------------------------------------------ | ---------------- | ---------------- | -| 
[terraform-google-network](https://github.com/gruntwork-io/terraform-google-network) | <=v0.1.2 | >=v0.2.0 | -| [terraform-google-load-balancer](https://github.com/gruntwork-io/terraform-google-load-balancer) | <=v0.1.2 | >=v0.2.0 | -| [terraform-google-sql](https://github.com/gruntwork-io/terraform-google-sql) | <=v0.1.1 | >=v0.2.0 | -| [terraform-google-static-assets](https://github.com/gruntwork-io/terraform-google-static-assets) | <=v0.1.1 | >=v0.2.0 | -| [terraform-google-gke](https://github.com/gruntwork-io/terraform-google-gke) | <=v0.2.0 | >=v0.3.0 | -| [terraform-google-consul](https://github.com/hashicorp/terraform-google-consul) | <=v0.3.2 | >=v0.4.0 | -| [terraform-google-vault](https://github.com/hashicorp/terraform-google-vault) | <=v0.1.3 | >=v0.2.0 | -| [terraform-google-nomad](https://github.com/hashicorp/terraform-google-nomad) | v0.0.1 | >=v0.1.0 | diff --git a/doc_sourcer/.gitignore b/doc_sourcer/.gitignore deleted file mode 100644 index fd4a9aa49..000000000 --- a/doc_sourcer/.gitignore +++ /dev/null @@ -1 +0,0 @@ -doc_sourcer diff --git a/doc_sourcer/README.md b/doc_sourcer/README.md deleted file mode 100644 index 89a3ebf59..000000000 --- a/doc_sourcer/README.md +++ /dev/null @@ -1,29 +0,0 @@ -# Doc Sourcer - -- `doc-sourcer` utility that will generate and copy in docs from child repos -- Idea: - - clone repo - - generate docs based on config - - copy artifact into `content` dir - - let gatsby render the artifacts - -- Config format: - - ```yaml - # gruntydocs.yml - # What command to run to generate the docs. - builder: ./build.sh - # Where the target files live. These will be pulled in verbatim to /content in the docs repo. 
- targets: - - ./generated/final - ``` - -- Repos config format: - - ```yaml - --- - repos: - - url: "git@github.com:gruntwork-io/prototypes.git" - branch: "yori-quickstarts" - gruntyDocsRoot: "quickstarts" - ``` diff --git a/doc_sourcer/config/config.go b/doc_sourcer/config/config.go deleted file mode 100644 index d912156be..000000000 --- a/doc_sourcer/config/config.go +++ /dev/null @@ -1 +0,0 @@ -package config diff --git a/doc_sourcer/config/grunty_docs.go b/doc_sourcer/config/grunty_docs.go deleted file mode 100644 index 2a5c36e1e..000000000 --- a/doc_sourcer/config/grunty_docs.go +++ /dev/null @@ -1,27 +0,0 @@ -package config - -import ( - "io/ioutil" - "path/filepath" - - "gopkg.in/yaml.v2" -) - -const ExpectedGruntyDocsFilename = "gruntydocs.yml" - -type GruntyDocs struct { - Builder string `yaml:"builder"` - Targets []string `yaml:"targets"` -} - -func LoadGruntyDocs(basePath string) (GruntyDocs, error) { - var gruntyDocsConfig GruntyDocs - - data, err := ioutil.ReadFile(filepath.Join(basePath, ExpectedGruntyDocsFilename)) - if err != nil { - return gruntyDocsConfig, err - } - - err = yaml.Unmarshal(data, &gruntyDocsConfig) - return gruntyDocsConfig, err -} diff --git a/doc_sourcer/config/grunty_repos.go b/doc_sourcer/config/grunty_repos.go deleted file mode 100644 index f50bd6df5..000000000 --- a/doc_sourcer/config/grunty_repos.go +++ /dev/null @@ -1,31 +0,0 @@ -package config - -import ( - "io/ioutil" - - "gopkg.in/yaml.v2" -) - -const ExpectedGruntyReposFilename = "gruntyrepos.yml" - -type Repo struct { - URL string `yaml:"url"` - Branch string `yaml:"branch"` - GruntyDocsRoot string `yaml:"gruntyDocsRoot"` -} - -type GruntyRepos struct { - Repos []Repo `yaml:"repos"` -} - -func LoadGruntyRepos() (GruntyRepos, error) { - var gruntyReposConfig GruntyRepos - - data, err := ioutil.ReadFile(ExpectedGruntyReposFilename) - if err != nil { - return gruntyReposConfig, err - } - - err = yaml.Unmarshal(data, &gruntyReposConfig) - return gruntyReposConfig, err -} 
diff --git a/doc_sourcer/git/git.go b/doc_sourcer/git/git.go deleted file mode 100644 index cd99cdbee..000000000 --- a/doc_sourcer/git/git.go +++ /dev/null @@ -1 +0,0 @@ -package git diff --git a/doc_sourcer/git/source.go b/doc_sourcer/git/source.go deleted file mode 100644 index ea1b23173..000000000 --- a/doc_sourcer/git/source.go +++ /dev/null @@ -1,75 +0,0 @@ -package git - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/gruntwork-io/gruntwork-cli/errors" - "github.com/gruntwork-io/gruntwork-cli/shell" - "gopkg.in/src-d/go-git.v4" - "gopkg.in/src-d/go-git.v4/plumbing" - - "github.com/gruntwork-io/docs/doc_sourcer/config" -) - -func SourceRepo(repoURL string, repoBranch string, repoGruntyDocsRootPath string) error { - workspacePath, err := ioutil.TempDir("", "") - if err != nil { - return errors.WithStackTrace(err) - } - defer os.RemoveAll(workspacePath) - - err = cloneRepo(workspacePath, repoURL, repoBranch) - if err != nil { - return errors.WithStackTrace(err) - } - - workspacePath = filepath.Join(workspacePath, repoGruntyDocsRootPath) - gruntyDocsConfig, err := config.LoadGruntyDocs(workspacePath) - if err != nil { - return errors.WithStackTrace(err) - } - - err = buildDocsForRepo(workspacePath, gruntyDocsConfig) - if err != nil { - return errors.WithStackTrace(err) - } - - return copyArtifact(workspacePath, gruntyDocsConfig) -} - -func cloneRepo(workspacePath string, repoURL string, repoBranch string) error { - _, err := git.PlainClone( - workspacePath, - false, - &git.CloneOptions{ - URL: repoURL, - ReferenceName: plumbing.NewBranchReferenceName(repoBranch), - SingleBranch: true, - Depth: 1, - Progress: os.Stdout, - }, - ) - return err -} - -func buildDocsForRepo(workspacePath string, gruntyDocsConfig config.GruntyDocs) error { - options := shell.NewShellOptions() - options.WorkingDir = workspacePath - return shell.RunShellCommand(options, "bash", "-c", gruntyDocsConfig.Builder) -} - -func copyArtifact(workspacePath string, 
gruntyDocsConfig config.GruntyDocs) error { - options := shell.NewShellOptions() - for _, target := range gruntyDocsConfig.Targets { - srcPath := filepath.Join(workspacePath, target) - srcPath = fmt.Sprintf("%s/.", srcPath) - err := shell.RunShellCommand(options, "rsync", "--progress", "-r", srcPath, "./content") - if err != nil { - return err - } - } - return nil -} diff --git a/doc_sourcer/go.mod b/doc_sourcer/go.mod deleted file mode 100644 index a7f28097d..000000000 --- a/doc_sourcer/go.mod +++ /dev/null @@ -1,15 +0,0 @@ -module github.com/gruntwork-io/docs/doc_sourcer - -require ( - github.com/bgentry/speakeasy v0.1.0 // indirect - github.com/fatih/color v1.7.0 // indirect - github.com/go-errors/errors v1.0.1 // indirect - github.com/gruntwork-io/gruntwork-cli v0.4.2 - github.com/mattn/go-colorable v0.1.1 // indirect - github.com/mattn/go-isatty v0.0.7 // indirect - github.com/mattn/go-zglob v0.0.1 // indirect - github.com/sirupsen/logrus v1.4.0 - github.com/urfave/cli v1.20.0 - gopkg.in/src-d/go-git.v4 v4.10.0 - gopkg.in/yaml.v2 v2.2.2 -) diff --git a/doc_sourcer/go.sum b/doc_sourcer/go.sum deleted file mode 100644 index f0fbeb732..000000000 --- a/doc_sourcer/go.sum +++ /dev/null @@ -1,71 +0,0 @@ -github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emirpasic/gods v1.9.0 h1:rUF4PuzEjMChMiNsVjdI+SyLu7rEqpQ5reNFnhC7oFo= -github.com/emirpasic/gods v1.9.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= -github.com/fatih/color 
v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= -github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/gruntwork-io/gruntwork-cli v0.4.2 h1:+K4OLfTbdwfCGa8wFDPFV6t6jG1YBQqp2a3FjhINGNc= -github.com/gruntwork-io/gruntwork-cli v0.4.2/go.mod h1:IBX21bESC1/LGoV7jhXKUnTQTZgQ6dYRsoj/VqxUSZQ= -github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= -github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e h1:RgQk53JHp/Cjunrr1WlsXSZpqXn+uREuHvUVcK82CV8= -github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.7 
h1:UvyT9uN+3r7yLEYSlJsbQGdsaB/a0DlgWP3pql6iwOc= -github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-zglob v0.0.1 h1:xsEx/XUoVlI6yXjqBK062zYhRTZltCNmYPx6v+8DNaY= -github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= -github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/pelletier/go-buffruneio v0.2.0 h1:U4t4R6YkofJ5xHm3dJzuRpPZ0mr5MMCoAWooScCR7aA= -github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sirupsen/logrus v1.4.0 h1:yKenngtzGh+cUSSh6GWbxW2abRqhYUSR/t/6+2QqNvE= -github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4= -github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/xanzy/ssh-agent v0.2.0 h1:Adglfbi5p9Z0BmK2oKU9nTG+zKfniSfnaMYB+ULd+Ro= -github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I= 
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/src-d/go-billy.v4 v4.2.1 h1:omN5CrMrMcQ+4I8bJ0wEhOBPanIRWzFC953IiXKdYzo= -gopkg.in/src-d/go-billy.v4 v4.2.1/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk= -gopkg.in/src-d/go-git-fixtures.v3 v3.1.1/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= -gopkg.in/src-d/go-git.v4 v4.10.0 h1:NWjTJTQnk8UpIGlssuefyDZ6JruEjo5s88vm88uASbw= -gopkg.in/src-d/go-git.v4 v4.10.0/go.mod h1:Vtut8izDyrM8BUVQnzJ+YvmNcem2J89EmfZYCkLokZk= -gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/doc_sourcer/main.go b/doc_sourcer/main.go deleted file mode 100644 index 
339651545..000000000 --- a/doc_sourcer/main.go +++ /dev/null @@ -1,63 +0,0 @@ -package main - -import ( - "github.com/gruntwork-io/gruntwork-cli/entrypoint" - "github.com/gruntwork-io/gruntwork-cli/errors" - "github.com/gruntwork-io/gruntwork-cli/logging" - "github.com/sirupsen/logrus" - "github.com/urfave/cli" - - "github.com/gruntwork-io/docs/doc_sourcer/config" - "github.com/gruntwork-io/docs/doc_sourcer/git" -) - -var ( - logLevelFlag = cli.StringFlag{ - Name: "loglevel", - Value: logrus.InfoLevel.String(), - } -) - -// initCli initializes the CLI app before any command is actually executed. This function will handle all the setup -// code, such as setting up the logger with the appropriate log level. -func initCli(cliContext *cli.Context) error { - // Set logging level - logLevel := cliContext.String(logLevelFlag.Name) - level, err := logrus.ParseLevel(logLevel) - if err != nil { - return errors.WithStackTrace(err) - } - logging.SetGlobalLogLevel(level) - return nil -} - -// main should only setup the CLI flags and help texts. -func main() { - app := entrypoint.NewApp() - - app.Name = "doc-sourcer" - app.Author = "Gruntwork " - - app.Before = initCli - - app.Flags = []cli.Flag{ - logLevelFlag, - } - - app.Action = func(cliContext *cli.Context) error { - gruntyRepos, err := config.LoadGruntyRepos() - if err != nil { - return err - } - - for _, repo := range gruntyRepos.Repos { - err := git.SourceRepo(repo.URL, repo.Branch, repo.GruntyDocsRoot) - if err != nil { - return err - } - } - return nil - } - - entrypoint.RunApp(app) -} diff --git a/docs/intro.md b/docs/intro.md new file mode 100644 index 000000000..e762190b4 --- /dev/null +++ b/docs/intro.md @@ -0,0 +1,35 @@ +--- +sidebar_position: 1 +--- + +# Tutorial Intro + +Let's discover **Docusaurus in less than 5 minutes**. + +## Getting Started + +Get started by **creating a new site**. + +Or **try Docusaurus immediately** with **[docusaurus.new](https://docusaurus.new)**. 
+ +## Generate a new site + +Generate a new Docusaurus site using the **classic template**: + +```shell +npm init docusaurus@latest my-website classic +``` + +## Start your site + +Run the development server: + +```shell +cd my-website + +npx docusaurus start +``` + +Your site starts at `http://localhost:3000`. + +Open `docs/intro.md` and edit some lines: the site **reloads automatically** and display your changes. diff --git a/docs/tutorial-basics/_category_.json b/docs/tutorial-basics/_category_.json new file mode 100644 index 000000000..135e4a685 --- /dev/null +++ b/docs/tutorial-basics/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Tutorial - Basics", + "position": 2 +} diff --git a/docs/tutorial-basics/congratulations.md b/docs/tutorial-basics/congratulations.md new file mode 100644 index 000000000..9ef99bbad --- /dev/null +++ b/docs/tutorial-basics/congratulations.md @@ -0,0 +1,21 @@ +--- +sidebar_position: 6 +--- + +# Congratulations! + +You have just learned the **basics of Docusaurus** and made some changes to the **initial template**. + +Docusaurus has **much more to offer**! + +Have **5 more minutes**? Take a look at **[versioning](../tutorial-extras/manage-docs-versions.md)** and **[i18n](../tutorial-extras/translate-your-site.md)**. + +Anything **unclear** or **buggy** in this tutorial? [Please report it!](https://github.com/facebook/docusaurus/discussions/4610) + +## What's next? + +- Read the [official documentation](https://docusaurus.io/). 
+- Add a custom [Design and Layout](https://docusaurus.io/docs/styling-layout) +- Add a [search bar](https://docusaurus.io/docs/search) +- Find inspirations in the [Docusaurus showcase](https://docusaurus.io/showcase) +- Get involved in the [Docusaurus Community](https://docusaurus.io/community/support) diff --git a/docs/tutorial-basics/create-a-blog-post.md b/docs/tutorial-basics/create-a-blog-post.md new file mode 100644 index 000000000..0d50aaf31 --- /dev/null +++ b/docs/tutorial-basics/create-a-blog-post.md @@ -0,0 +1,34 @@ +--- +sidebar_position: 3 +--- + +# Create a Blog Post + +Docusaurus creates a **page for each blog post**, but also a **blog index page**, a **tag system**, an **RSS** feed... + +## Create your first Post + +Create a file at `blog/2021-02-28-greetings.md`: + +```md title="blog/2021-02-28-greetings.md" +--- +slug: greetings +title: Greetings! +authors: + - name: Joel Marcey + title: Co-creator of Docusaurus 1 + url: https://github.com/JoelMarcey + image_url: https://github.com/JoelMarcey.png + - name: SΓ©bastien Lorber + title: Docusaurus maintainer + url: https://sebastienlorber.com + image_url: https://github.com/slorber.png +tags: [greetings] +--- + +Congratulations, you have made your first post! + +Feel free to play around and edit this post as much you like. +``` + +A new blog post is now available at `http://localhost:3000/blog/greetings`. diff --git a/docs/tutorial-basics/create-a-document.md b/docs/tutorial-basics/create-a-document.md new file mode 100644 index 000000000..b4a072ecd --- /dev/null +++ b/docs/tutorial-basics/create-a-document.md @@ -0,0 +1,55 @@ +--- +sidebar_position: 2 +--- + +# Create a Document + +Documents are **groups of pages** connected through: + +- a **sidebar** +- **previous/next navigation** +- **versioning** + +## Create your first Doc + +Create a markdown file at `docs/hello.md`: + +```md title="docs/hello.md" +# Hello + +This is my **first Docusaurus document**! 
+``` + +A new document is now available at `http://localhost:3000/docs/hello`. + +## Configure the Sidebar + +Docusaurus automatically **creates a sidebar** from the `docs` folder. + +Add metadatas to customize the sidebar label and position: + +```md title="docs/hello.md" {1-4} +--- +sidebar_label: 'Hi!' +sidebar_position: 3 +--- + +# Hello + +This is my **first Docusaurus document**! +``` + +It is also possible to create your sidebar explicitly in `sidebars.js`: + +```diff title="sidebars.js" +module.exports = { + tutorialSidebar: [ + { + type: 'category', + label: 'Tutorial', +- items: [...], ++ items: ['hello'], + }, + ], +}; +``` diff --git a/docs/tutorial-basics/create-a-page.md b/docs/tutorial-basics/create-a-page.md new file mode 100644 index 000000000..e112b0059 --- /dev/null +++ b/docs/tutorial-basics/create-a-page.md @@ -0,0 +1,43 @@ +--- +sidebar_position: 1 +--- + +# Create a Page + +Add **Markdown or React** files to `src/pages` to create a **standalone page**: + +- `src/pages/index.js` -> `localhost:3000/` +- `src/pages/foo.md` -> `localhost:3000/foo` +- `src/pages/foo/bar.js` -> `localhost:3000/foo/bar` + +## Create your first React Page + +Create a file at `src/pages/my-react-page.js`: + +```jsx title="src/pages/my-react-page.js" +import React from 'react'; +import Layout from '@theme/Layout'; + +export default function MyReactPage() { + return ( + +

My React page

+

This is a React page

+
+ ); +} +``` + +A new page is now available at `http://localhost:3000/my-react-page`. + +## Create your first Markdown Page + +Create a file at `src/pages/my-markdown-page.md`: + +```mdx title="src/pages/my-markdown-page.md" +# My Markdown page + +This is a Markdown page +``` + +A new page is now available at `http://localhost:3000/my-markdown-page`. diff --git a/docs/tutorial-basics/deploy-your-site.md b/docs/tutorial-basics/deploy-your-site.md new file mode 100644 index 000000000..492eae027 --- /dev/null +++ b/docs/tutorial-basics/deploy-your-site.md @@ -0,0 +1,31 @@ +--- +sidebar_position: 5 +--- + +# Deploy your site + +Docusaurus is a **static-site-generator** (also called **[Jamstack](https://jamstack.org/)**). + +It builds your site as simple **static HTML, JavaScript and CSS files**. + +## Build your site + +Build your site **for production**: + +```bash +npm run build +``` + +The static files are generated in the `build` folder. + +## Deploy your site + +Test your production build locally: + +```bash +npm run serve +``` + +The `build` folder is now served at `http://localhost:3000/`. + +You can now deploy the `build` folder **almost anywhere** easily, **for free** or very small cost (read the **[Deployment Guide](https://docusaurus.io/docs/deployment)**). diff --git a/docs/tutorial-basics/markdown-features.mdx b/docs/tutorial-basics/markdown-features.mdx new file mode 100644 index 000000000..885562605 --- /dev/null +++ b/docs/tutorial-basics/markdown-features.mdx @@ -0,0 +1,144 @@ +--- +sidebar_position: 4 +--- + +# Markdown Features + +Docusaurus supports **[Markdown](https://daringfireball.net/projects/markdown/syntax)** and a few **additional features**. 
+ +## Front Matter + +Markdown documents have metadata at the top called [Front Matter](https://jekyllrb.com/docs/front-matter/): + +```text title="my-doc.md" +// highlight-start +--- +id: my-doc-id +title: My document title +description: My document description +slug: /my-custom-url +--- +// highlight-end + +## Markdown heading + +Markdown text with [links](./hello.md) +``` + +## Links + +Regular Markdown links are supported, using url paths or relative file paths. + +```md +Let's see how to [Create a page](/create-a-page). +``` + +```md +Let's see how to [Create a page](./create-a-page.md). +``` + +**Result:** Let's see how to [Create a page](./create-a-page.md). + +## Images + +Regular Markdown images are supported. + +Add an image at `static/img/docusaurus.png` and display it in Markdown: + +```md +![Docusaurus logo](/img/docusaurus.png) +``` + +![Docusaurus logo](/img/docusaurus.png) + +## Code Blocks + +Markdown code blocks are supported with Syntax highlighting. + + ```jsx title="src/components/HelloDocusaurus.js" + function HelloDocusaurus() { + return ( +

Hello, Docusaurus!

+ ) + } + ``` + +```jsx title="src/components/HelloDocusaurus.js" +function HelloDocusaurus() { + return

Hello, Docusaurus!

; +} +``` + +## Admonitions + +Docusaurus has a special syntax to create admonitions and callouts: + + :::tip My tip + + Use this awesome feature option + + ::: + + :::danger Take care + + This action is dangerous + + ::: + +:::tip My tip + +Use this awesome feature option + +::: + +:::danger Take care + +This action is dangerous + +::: + +## MDX and React Components + +[MDX](https://mdxjs.com/) can make your documentation more **interactive** and allows using any **React components inside Markdown**: + +```jsx +export const Highlight = ({children, color}) => ( + { + alert(`You clicked the color ${color} with label ${children}`) + }}> + {children} + +); + +This is Docusaurus green ! + +This is Facebook blue ! +``` + +export const Highlight = ({children, color}) => ( + { + alert(`You clicked the color ${color} with label ${children}`); + }}> + {children} + +); + +This is Docusaurus green ! + +This is Facebook blue ! diff --git a/docs/tutorial-extras/_category_.json b/docs/tutorial-extras/_category_.json new file mode 100644 index 000000000..ca3f8e064 --- /dev/null +++ b/docs/tutorial-extras/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Tutorial - Extras", + "position": 3 +} diff --git a/docs/tutorial-extras/manage-docs-versions.md b/docs/tutorial-extras/manage-docs-versions.md new file mode 100644 index 000000000..6335b0ac9 --- /dev/null +++ b/docs/tutorial-extras/manage-docs-versions.md @@ -0,0 +1,55 @@ +--- +sidebar_position: 1 +--- + +# Manage Docs Versions + +Docusaurus can manage multiple versions of your docs. + +## Create a docs version + +Release a version 1.0 of your project: + +```bash +npm run docusaurus docs:version 1.0 +``` + +The `docs` folder is copied into `versioned_docs/version-1.0` and `versions.json` is created. 
+ +Your docs now have 2 versions: + +- `1.0` at `http://localhost:3000/docs/` for the version 1.0 docs +- `current` at `http://localhost:3000/docs/next/` for the **upcoming, unreleased docs** + +## Add a Version Dropdown + +To navigate seamlessly across versions, add a version dropdown. + +Modify the `docusaurus.config.js` file: + +```js title="docusaurus.config.js" +module.exports = { + themeConfig: { + navbar: { + items: [ + // highlight-start + { + type: 'docsVersionDropdown', + }, + // highlight-end + ], + }, + }, +}; +``` + +The docs version dropdown appears in your navbar: + +![Docs Version Dropdown](/img/tutorial/docsVersionDropdown.png) + +## Update an existing version + +It is possible to edit versioned docs in their respective folder: + +- `versioned_docs/version-1.0/hello.md` updates `http://localhost:3000/docs/hello` +- `docs/hello.md` updates `http://localhost:3000/docs/next/hello` diff --git a/docs/tutorial-extras/translate-your-site.md b/docs/tutorial-extras/translate-your-site.md new file mode 100644 index 000000000..a25c089ed --- /dev/null +++ b/docs/tutorial-extras/translate-your-site.md @@ -0,0 +1,88 @@ +--- +sidebar_position: 2 +--- + +# Translate your site + +Let's translate `docs/intro.md` to French. + +## Configure i18n + +Modify `docusaurus.config.js` to add support for the `fr` locale: + +```js title="docusaurus.config.js" +module.exports = { + i18n: { + defaultLocale: 'en', + locales: ['en', 'fr'], + }, +}; +``` + +## Translate a doc + +Copy the `docs/intro.md` file to the `i18n/fr` folder: + +```bash +mkdir -p i18n/fr/docusaurus-plugin-content-docs/current/ + +cp docs/intro.md i18n/fr/docusaurus-plugin-content-docs/current/intro.md +``` + +Translate `i18n/fr/docusaurus-plugin-content-docs/current/intro.md` in French. 
+ +## Start your localized site + +Start your site on the French locale: + +```bash +npm run start -- --locale fr +``` + +Your localized site is accessible at `http://localhost:3000/fr/` and the `Getting Started` page is translated. + +:::caution + +In development, you can only use one locale at a same time. + +::: + +## Add a Locale Dropdown + +To navigate seamlessly across languages, add a locale dropdown. + +Modify the `docusaurus.config.js` file: + +```js title="docusaurus.config.js" +module.exports = { + themeConfig: { + navbar: { + items: [ + // highlight-start + { + type: 'localeDropdown', + }, + // highlight-end + ], + }, + }, +}; +``` + +The locale dropdown now appears in your navbar: + +![Locale Dropdown](/img/tutorial/localeDropdown.png) + +## Build your localized site + +Build your site for a specific locale: + +```bash +npm run build -- --locale fr +``` + +Or build your site to include all the locales at once: + +```bash +npm run build +``` diff --git a/docusaurus.config.js b/docusaurus.config.js new file mode 100644 index 000000000..462bf89f7 --- /dev/null +++ b/docusaurus.config.js @@ -0,0 +1,148 @@ +// @ts-check +// Note: type annotations allow type checking and IDEs autocompletion + +const lightCodeTheme = require("prism-react-renderer/themes/github"); +const darkCodeTheme = require("prism-react-renderer/themes/dracula"); + +/** @type {import('@docusaurus/types').Config} */ +const config = { + title: "Gruntwork Docs", + tagline: "Dinosaurs are cool", + url: "https://your-docusaurus-test-site.com", + baseUrl: "/", + onBrokenLinks: "warn", + onBrokenMarkdownLinks: "warn", + favicon: "img/favicon.ico", + organizationName: "gruntwork-io", // Usually your GitHub org/user name. + projectName: "docs", // Usually your repo name., + + presets: [ + [ + "@docusaurus/preset-classic", + /** @type {import('@docusaurus/preset-classic').Options} */ + ({ + docs: { + sidebarPath: require.resolve("./sidebars.js"), + // Please change this to your repo. 
+ editUrl: "https://github.com/facebook/docusaurus/edit/main/website/", + }, + blog: { + showReadingTime: true, + // Please change this to your repo. + editUrl: + "https://github.com/facebook/docusaurus/edit/main/website/blog/", + }, + theme: { + customCss: require.resolve("./src/css/custom.css"), + }, + }), + ], + ], + plugins: [ + [ + "@docusaurus/plugin-content-docs", + { + id: "guides", + path: "guides", + routeBasePath: "guides", + sidebarPath: require.resolve("./sidebarsCommunity.js"), + }, + ], + // [ + // "@docusaurus/plugin-content-docs", + // { + // id: "reference", + // path: "reference", + // routeBasePath: "reference", + // sidebarPath: require.resolve("./sidebarsCommunity.js"), + // }, + // ], + "plugin-image-zoom", + ], + + themeConfig: + /** @type {import('@docusaurus/preset-classic').ThemeConfig} */ + ({ + navbar: { + title: "My Site", + logo: { + alt: "My Site Logo", + src: "img/logo.svg", + }, + items: [ + { + type: "doc", + docId: "intro", + position: "left", + label: "Tutorial", + }, + { + type: "dropdown", + position: "left", + label: "Guides", + to: "/guides/guides-intro", + items: [ + { + label: "Getting Started", + to: "/guides/getting-started/zero2hero/zero-to-hero", + }, + ], + }, + { to: "/courses", label: "Courses", position: "left" }, + { + href: "https://github.com/facebook/docusaurus", + label: "GitHub", + position: "right", + }, + ], + }, + footer: { + style: "dark", + links: [ + { + title: "Docs", + items: [ + { + label: "Tutorial", + to: "/docs/intro", + }, + ], + }, + { + title: "Community", + items: [ + { + label: "Stack Overflow", + href: "https://stackoverflow.com/questions/tagged/docusaurus", + }, + { + label: "Discord", + href: "https://discordapp.com/invite/docusaurus", + }, + { + label: "Twitter", + href: "https://twitter.com/docusaurus", + }, + ], + }, + { + title: "More", + items: [ + { + label: "GitHub", + href: "https://github.com/facebook/docusaurus", + }, + ], + }, + ], + copyright: `Copyright Β© ${new 
Date().getFullYear()} My Project, Inc. Built with Docusaurus.`, + }, + prism: { + theme: lightCodeTheme, + darkTheme: darkCodeTheme, + }, + zoomSelector: ".markdown img", + }), +}; + +module.exports = config; diff --git a/gatsby-browser.js b/gatsby-browser.js deleted file mode 100644 index d25c5532e..000000000 --- a/gatsby-browser.js +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Implement Gatsby's Browser APIs in this file. - * - * See: https://www.gatsbyjs.org/docs/browser-apis/ - */ - -// custom CSS styles -import "./src/scss/gatstrap.scss" - -// Highlighting for code blocks -import "prismjs/themes/prism.css" - -// Import all js dependencies. -import 'jquery/dist/jquery.min.js' -import 'popper.js/dist/popper.min' -import 'bootstrap/js/dist/util' -import 'bootstrap/js/dist/carousel' -import 'bootstrap/js/dist/dropdown' \ No newline at end of file diff --git a/gatsby-config.js b/gatsby-config.js deleted file mode 100644 index 6af744e5b..000000000 --- a/gatsby-config.js +++ /dev/null @@ -1,122 +0,0 @@ -module.exports = { - siteMetadata: { - title: `Gruntwork Docs Site`, - description: `Home to the Gruntwork Documentation.`, - author: `@robmorgan`, - siteUrl: `https://docs.gruntwork.io`, - }, - plugins: [ - `gatsby-plugin-react-helmet`, - { - resolve: `gatsby-source-filesystem`, - options: { - name: `images`, - path: `${__dirname}/src/images`, - }, - }, - { - resolve: `gatsby-plugin-react-svg`, - options: { - rule: { - include: `${__dirname}/src/assets/`, - }, - }, - }, - { - resolve: `gatsby-plugin-google-fonts`, - options: { - fonts: [ - `Poppins:400,500,700`, - `Nunito Sans:400,500`, - `Open Sans:400,300,700`, - `Roboto\:400,500,700`, // you can also specify font weights and styles - ], - }, - }, - { - resolve: `gatsby-source-filesystem`, - options: { - path: `${__dirname}/content`, - name: "markdown-pages", - }, - }, - { - resolve: `gatsby-transformer-remark`, - options: { - plugins: [ - "gatsby-remark-copy-linked-files", - { - resolve: `gatsby-remark-prismjs`, - 
options: { - // Class prefix for
 tags containing syntax highlighting;
-              // defaults to 'language-' (eg 
).
-              // If your site loads Prism into the browser at runtime,
-              // (eg for use with libraries like react-live),
-              // you may use this to prevent Prism from re-processing syntax.
-              // This is an uncommon use-case though;
-              // If you're unsure, it's best to use the default value.
-              classPrefix: "language-",
-              // This is used to allow setting a language for inline code
-              // (i.e. single backticks) by creating a separator.
-              // This separator is a string and will do no white-space
-              // stripping.
-              // A suggested value for English speakers is the non-ascii
-              // character 'β€Ί'.
-              inlineCodeMarker: null,
-              // This lets you set up language aliases.  For example,
-              // setting this to '{ sh: "bash" }' will let you use
-              // the language "sh" which will highlight using the
-              // bash highlighter.
-              aliases: {},
-              // This toggles the display of line numbers globally alongside the code.
-              // To use it, add the following line in src/layouts/index.js
-              // right after importing the prism color scheme:
-              //  `require("prismjs/plugins/line-numbers/prism-line-numbers.css");`
-              // Defaults to false.
-              // If you wish to only show line numbers on certain code blocks,
-              // leave false and use the {numberLines: true} syntax below
-              showLineNumbers: false,
-              // If setting this to true, the parser won't handle and highlight inline
-              // code used in markdown i.e. single backtick code like `this`.
-              noInlineHighlight: false,
-            },
-          },
-        ],
-      },
-    },
-    `gatsby-remark-smartypants`,
-    `gatsby-plugin-sass`,
-    `gatsby-plugin-sitemap`,
-    {
-      resolve: `gatsby-plugin-manifest`,
-      options: {
-        name: `gruntwork-docs`,
-        short_name: `gruntdocs`,
-        start_url: `/`,
-        background_color: `#663399`,
-        theme_color: `#663399`,
-        display: `minimal-ui`,
-        icon: `src/images/gruntwork-icon.png`, // This path is relative to the root of the site.
-      },
-    },
-    {
-      resolve: `gatsby-plugin-google-analytics`,
-      options: {
-        trackingId: "UA-76462621-5",
-        // Defines where to place the tracking script - `true` in the head and `false` in the body
-        head: false,
-        // Avoids sending pageview hits from custom paths
-        exclude: ["/preview/**"],
-      },
-    },
-    {
-      resolve: `gatsby-plugin-s3`,
-      options: {
-        bucketName: "docs.gruntwork.io",
-      },
-    },
-    // this (optional) plugin enables Progressive Web App + Offline functionality
-    // To learn more, visit: https://gatsby.dev/offline
-    // 'gatsby-plugin-offline',
-  ],
-}
diff --git a/gatsby-node.js b/gatsby-node.js
deleted file mode 100644
index dc742d463..000000000
--- a/gatsby-node.js
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * Implement Gatsby's Node APIs in this file.
- *
- * See: https://www.gatsbyjs.org/docs/node-apis/
- */
-
-const path = require("path")
-const { createFilePath } = require(`gatsby-source-filesystem`)
-
-exports.onCreateNode = ({ node, getNode, actions }) => {
-  const { createNodeField } = actions
-  // Ensures we are processing only markdown files
-  if (node.internal.type === "MarkdownRemark") {
-    // Use `createFilePath` to turn markdown files in our `data/faqs` directory into `/faqs/slug`
-    const relativeFilePath = createFilePath({
-      node,
-      getNode,
-      basePath: "content/",
-    })
-
-    // Creates new query'able field with name of 'slug'
-    createNodeField({
-      node,
-      name: "slug",
-      value: `${relativeFilePath}`,
-    })
-  }
-}
-
-exports.createPages = ({ actions, graphql }) => {
-  const { createPage } = actions
-
-  const pageTemplate = path.resolve(`src/templates/markdownTemplate.js`)
-
-  return graphql(`
-    {
-      allMarkdownRemark {
-        edges {
-          node {
-            fields {
-              slug
-            }
-          }
-        }
-      }
-    }
-  `).then(result => {
-    if (result.errors) {
-      return Promise.reject(result.errors)
-    }
-
-    result.data.allMarkdownRemark.edges.forEach(({ node }) => {
-      createPage({
-        path: node.fields.slug,
-        component: pageTemplate,
-        context: {}, // additional data can be passed via context
-      })
-    })
-  })
-}
-
-exports.onCreateWebpackConfig = ({ actions }) => {
-  actions.setWebpackConfig({
-    resolve: {
-      alias: {
-        components: path.resolve(__dirname, "src/components"),
-        templates: path.resolve(__dirname, "src/templates"),
-        scss: path.resolve(__dirname, "src/scss"),
-      },
-    },
-  })
-}
diff --git a/gruntyrepos.yml b/gruntyrepos.yml
deleted file mode 100644
index fe3e89c99..000000000
--- a/gruntyrepos.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-repos:
-  - url: "git@github.com:gruntwork-io/toc.git"
-    branch: "master"
diff --git a/guides/getting-started/common-tasks/foo-html.md b/guides/getting-started/common-tasks/foo-html.md
new file mode 100644
index 000000000..f8038ff79
--- /dev/null
+++ b/guides/getting-started/common-tasks/foo-html.md
@@ -0,0 +1,556 @@
+
+

Intro

+
+
+

This is Gruntwork's style guide for Terraform. It aims to help us ensure that the code we write is +clear, readable, idiomatic Terraform code. The conventions detailed in this guide are our preferences and should be +thought of as guidelines rather than hard rules.

+
+
+
+
+

Starting point

+
+
+

We follow the official HashiCorp style guide for Terraform.

+
+
+

All of these are enforced using terraform fmt. All Gruntwork Terraform repos should enforce this using pre-commit +hooks; please add if missing.

+
+
+

On top of the official guide, Gruntwork follows some additional conventions.

+
+
+
+
+

Additional conventions

+
+
+

General

+
+

2 space indentations

+
+

Block contents should be indented with 2 spaces.

+
+
+
+

120 column limit

+
+

We follow a 120 column line-length limit, except for description strings in variable and output blocks, where single +line strings are preferred.

+
+
+
+

Block Labels, Variables, and Outputs should be snake case

+
+

The label for blocks should be in snake case. E.g. example_instance , not ExampleInstance or example-instance.

+
+
+ + + + + +
+
Note
+
+Labels are the strings that follow block names. For example, in the following, aws_instance and example_instance +are labels for the resource block. +
+
+
+
+
resource "aws_instance" "example_instance" {
+  # Omitted for brevity
+}
+
+
+
+
+

This includes variables and outputs as well:

+
+
+
+
variable "vpc_id" {}
+output "instance_name" {}
+
+
+
+
+

Module folder conventions

+
+

Each module repo should have the following 3 folders:

+
+
+
    +
  • +

    modules: Terraform modules that are designed to be consumed by users. The intention is that users should pull the +modules in the modules folder in their terraform code using module blocks.

    +
  • +
  • +

    examples: Folder that contains top level Terraform modules that provide an example of how to use the modules in the +modules folder. The examples folder often has subfolders for-learning-and-testing and for-production that contain +corresponding example code. See Testing: Terratest for more info on how these examples should be organized.

    +
  • +
  • +

    test: Terratest Go files for testing the code in the repo. See Testing: Terratest for specific conventions around Terratest.

    +
  • +
+
+
+

Additionally, each module in modules should be organized with the following files:

+
+
+
    +
  • +

    variables.tf: All variable blocks should go in here and they specify the inputs.

    +
  • +
  • +

    outputs.tf: All output blocks should go in here and they specify the outputs.

    +
  • +
  • +

    main.tf: All other logic should be added here.

    +
  • +
  • +

    dependencies.tf (optional): Any external references that are pulled in by a data source block should go in here. +This allows consumers of the module to quickly scan for what resources need to already exist to deploy the module.

    +
  • +
+
+
+

Any nonstandard file structure should be called out in the README (e.g., if main.tf is split up into multiple smaller +terraform files).

+
+
+
+

variables.tf conventions

+
+

Each variable block should always define a description and type, even if it is of the string type (the default), in that order. E.g.:

+
+
+
+
variable "example" {
+  description = "This is an example"
+  type        = string
+  default     = "example"  # NOTE: this is optional
+}
+
+
+
+
Complex types
+
+

Prefer concrete objects (object type) over +free form maps. However, for particularly large objects it is useful to support optional attributes. This is currently +not supported in terraform, so work around this by using any type.

+
+
+

When using any type, always use comments to describe the supported attributes. +Example.

+
+
+
+
+

outputs.tf conventions

+
+

Each output block should always define a description, before the value:

+
+
+
+
output "greeting" {
+  description = "This is a greeting for everyone."
+  value       = "hello world!"
+}
+
+
+
+
+

main.tf conventions

+
+

main.tf should (loosely) be organized by sections that correspond to components. There is no standard on grouping, but +as a rule of thumb each section should be focused on a specific component of the module. For example, an ECS service +module may consist of the following sections:

+
+
+
    +
  • +

    The ECS service resource, and any locals logic for setting up the attributes of the resource.

    +
  • +
  • +

    The ECS task definition resource, and any locals and template logic for setting up the attributes of the resource +(e.g. the container definition).

    +
  • +
  • +

    Any resources related to configuring ELBs to forward traffic to the ECS service (e.g., listeners and target groups).

    +
  • +
  • +

    Any resources related to configuring IAM permissions for the ECS service.

    +
  • +
  • +

    Any resources related to configuring network access (e.g., security group rules) for the ECS service.

    +
  • +
+
+
+

There is no standard on ordering the sections, but as a rule of thumb the following sections should be placed first, in order:

+
+
+
    +
  • +

    Version constraints for the module

    +
  • +
  • +

    Provider blocks, if needed.

    +
  • +
  • +

    The main component of the module (e.g., the aws_ecs_service resource for the ECS service module).

    +
  • +
  • +

    All other sections.

    +
  • +
  • +

    Any data blocks (at the bottom).

    +
  • +
+
+
+
+

Conditionals

+
+

Use () to break up conditionals across multiple lines.

+
+
+

Examples:

+
+
+
+
locals {
+  elb_id = (
+    var.elb_already_exists
+    ? var.elb_id
+    : module.elb.elb_id
+  )
+
+excluded_child_account_ids = (
+var.config_create_account_rules
+? []
+: [
+for account_name, account in module.organization.child_accounts
+: account.id if lookup(lookup(var.child_accounts, account_name, {}), "enable_config_rules", false) == false
+]
+)
+}
+
+
+
+
+
+
+
+

Comments

+
+

This section lists the Gruntwork conventions around comments in Terraform code.

+
+
+

# over //

+
+

Use # for comment strings, not // or /**/.

+
+
+
+

# - over # ~

+
+

Delimit section header comment blocks with # ---- instead of # ~~~~.

+
+
+
+

variables.tf

+
+

variables.tf files should clearly indicate required environment variables, and separate out required variables from +optional variables (with defaults) using block comments.

+
+
+

Example:

+
+
+
+
# ---------------------------------------------------------------------------------------------------------------------
+# ENVIRONMENT VARIABLES
+# Define these secrets as environment variables
+# ---------------------------------------------------------------------------------------------------------------------
+
+# TF_VAR_master_password
+
+# ---------------------------------------------------------------------------------------------------------------------
+# MODULE PARAMETERS
+# These variables are expected to be passed in by the operator
+# ---------------------------------------------------------------------------------------------------------------------
+
+variable "required_var" {
+description = "This variable must be set in order to create the resource."
+type = string
+}
+
+# ---------------------------------------------------------------------------------------------------------------------
+# OPTIONAL PARAMETERS
+# These variables have defaults and may be overridden
+# ---------------------------------------------------------------------------------------------------------------------
+
+variable "optional_var" {
+description = "This variable has a sensible default so it is not necessary to set it explicitly for this module to work."
+type = string
+default = "Hello world"
+}
+ +
+
+
+
+

main.tf

+
+
Section comments
+
+

Each section (as described in main.tf conventions) of main.tf should have block comments describing the component +managed in the section.

+
+
+

Example:

+
+
+
+
# ---------------------------------------------------------------------------------------------------------------------
+# ONE LINE SUMMARY DESCRIBING WHAT IS BEING MANAGED IN THIS SECTION IN ALL CAPS
+# The rest of the comments should be in standard casing. This section should contain an overall description of the
+# component that is being managed, and highlight any unconventional workarounds or configurations that are in place.
+# ---------------------------------------------------------------------------------------------------------------------
+
+
+
+
+
+
+

Testing: Terratest

+
+

Gruntwork uses Terratest to write tests for Terraform modules. Terratest is a Go +library that provides patterns and helper functions for testing infrastructure code.

+
+
+

Terratest best practices

+
+

Follow all the best practices listed in Terratest best practices.

+
+
+

The rest of the items below are additional conventions on top of the documented best practices that Gruntwork follows +when writing tests using Terratest for terraform modules.

+
+
+
+

Code formatting

+
+

Terratest is a Go library, so each test will be written in Go. All Go source files should be formatted using goimports +and go fmt.

+
+
+
+

examples and tests

+
+

In many cases the individual modules in the modules folder are narrowly focused to a specific subset of the overall +infrastructure. This means that in many cases you will need to provide dependent resources externally to the module in +order to actually deploy them. The Terraform modules in the examples folder serves this purpose, specifying test +resources that are injected as dependencies to the modules.

+
+
+

As such, the tests should be written against the examples folder, as opposed to the modules folder directly. In +other words:

+
+
+
    +
  • +

    Every module in modules should have a corresponding example module in examples that calls it. (NOTE: you can have +a single example call multiple modules).

    +
  • +
  • +

    Every example should have at least one test that calls it.

    +
  • +
  • +

    Tests should not directly call modules in the modules folder. Always go through the examples.

    +
  • +
+
+
+
+

Parallel

+
+

Every test should have the t.Parallel call in the test function unless there is a specific need to run tests serially, +e.g. manipulating process global resources, like environment variables. This is so that tests run as quickly as possible.

+
+
+

To facilitate this, every reference to a terraform example should use +test_structure.CopyTerraformFolderToTemp +to create a copy of the example module in a temp directory. Then as the test runs, any stateful changes to the example +module directory are isolated across tests, so that there's no conflict on parallel runs.

+
+
+
+

Use TestStages for faster development

+
+

Use test stages +in the test code, unless you only have 1 or 2 steps in the test code (e.g. a plan verification test).

+
+
+

It's very tedious to build and deploy resources over and over when you only want to tweak a validation step. TestStages +make it flexible and convenient to skip stages, making development much faster.

+
+
+

For each test stage you introduce, add a commented out series of os.Setenv calls to make it convenient to skip stages +as you develop.

+
+
+
+
func TestJenkins(t *testing.T) {
+	t.Parallel()
+
+    // Uncomment the items below to skip certain parts of the test
+    //os.Setenv("SKIP_build_ami", "true")
+    //os.Setenv("SKIP_deploy_terraform", "true")
+    //os.Setenv("SKIP_validate", "true")
+    //os.Setenv("SKIP_cleanup", "true")
+    //os.Setenv("SKIP_cleanup_ami", "true")
+
+    defer test_structure.RunTestStage(t, "cleanup_ami", deleteAMI)
+    defer test_structure.RunTestStage(t, "cleanup", destroyInfra)
+    test_structure.RunTestStage(t, "build_ami", buildAMI)
+    test_structure.RunTestStage(t, "deploy_terraform", deployInfra)
+    test_structure.RunTestStage(t, "validate", validateInfra)
+
+}
+
+
+ +
+
+
+

To use the stages, here's an example workflow. The first time you run the test, you'll want to skip only the cleanup +stages:

+
+
+
+
// Uncomment the items below to skip certain parts of the test
+//os.Setenv("SKIP_build_ami", "true")
+//os.Setenv("SKIP_deploy_terraform", "true")
+//os.Setenv("SKIP_validate", "true")
+os.Setenv("SKIP_cleanup", "true")
+os.Setenv("SKIP_cleanup_ami", "true")
+
+
+
+

Let's say building and deploying were successful, but validation failed. Since resources were not cleaned up, we can run +only the validate stage. We skip the resource and time intensive build and deploy stages, and also continue to +skip the cleanup stages:

+
+
+
+
// Uncomment the items below to skip certain parts of the test
+os.Setenv("SKIP_build_ami", "true")
+os.Setenv("SKIP_deploy_terraform", "true")
+//os.Setenv("SKIP_validate", "true")
+os.Setenv("SKIP_cleanup", "true")
+os.Setenv("SKIP_cleanup_ami", "true")
+
+
+
+

Once you've established that validation works, you can then run only the cleanup stages as below. Your workflow may vary.

+
+
+
+
// Uncomment the items below to skip certain parts of the test
+os.Setenv("SKIP_build_ami", "true")
+os.Setenv("SKIP_deploy_terraform", "true")
+os.Setenv("SKIP_validate", "true")
+//os.Setenv("SKIP_cleanup", "true")
+//os.Setenv("SKIP_cleanup_ami", "true")
+
+
+
+

When committing the final version of the test, all should be commented out so all stages run.

+
+
+
+
// Uncomment the items below to skip certain parts of the test
+//os.Setenv("SKIP_build_ami", "true")
+//os.Setenv("SKIP_deploy_terraform", "true")
+//os.Setenv("SKIP_validate", "true")
+//os.Setenv("SKIP_cleanup", "true")
+//os.Setenv("SKIP_cleanup_ami", "true")
+
+
+
+
+

Setup and Teardown pattern

+
+

In some cases you will want to write a group of tests that use a common resource, such as a Docker image or VPC. In this +case, you will want to setup the common resource once, run a bunch of tests, and then teardown the resource. To achieve +this, you can follow the subtest pattern of Go.

+
+
+

Use table driven tests where possible to make +the subtest routines maintainable. Briefly, this means that you group your test cases using a test struct that reflects +the unique parameters of the test cases. Then you can conveniently loop over the test cases in parallel, taking +advantage of uniformity and speed.

+
+
+

Note that the subtest pattern has gotchas when running tests in parallel:

+
+
+
    +
  • +

    The main test function will not wait for the subtest to run if it uses t.Parallel. To avoid this, you need to wrap +the parallel subtests in a synchronous, blocking subtest. In the example below, the group subtest is synchronous (no +call to t.Parallel) and thus the main function will wait until that test finishes. The group test does not finish +until all the subtests it spawns are finished, even if they are non-blocking and parallel, and thus the tearDownVPC +call does not happen until all subtests are done.

    +
  • +
  • +

    If you are using table driven tests, the range variable will be updated to the next iteration before it is used within +the subtest. That is, in the example below, if we did not have the testCase := testCase line in the range block, the +testCase reference used in the subtest after the t.Parallel call will correspond to the last testCase in the +testCases list. To avoid this, we create a new variable in the scope of the range block so that it does not get +updated during the loop.

    +
  • +
+
+
+

Example:

+
+
+
+
func TestECS(t *testing.T) {
+    t.Parallel()
+
+    defer tearDownVPC()
+    deployVPC()
+
+    // Wrap the parallel tests in a synchronous test group to ensure that the main test function (the one calling
+    // `tearDownVPC` and `deployVPC`) waits until all the subtests are done before running the deferred function.
+    t.Run("group", func(t *testing.T) {
+        for _, testCase := range testCases {
+            // To avoid the range variable from getting updated in the parallel tests, we bind a new name that is within
+            // the scope of the for block.
+            testCase := testCase
+            t.Run(testCase.name, func(t *testing.T) {
+                t.Parallel()
+                testCase.testCode()
+            })
+        }
+    })
+
+}
+
+
+ +
+
+
+
+
+
diff --git a/guides/getting-started/common-tasks/foo.mdx b/guides/getting-started/common-tasks/foo.mdx new file mode 100644 index 000000000..66907298b --- /dev/null +++ b/guides/getting-started/common-tasks/foo.mdx @@ -0,0 +1,5 @@ +import AsciiDocContent from "/src/components/AsciiDocContent"; + +# Title goes here + + diff --git a/guides/getting-started/setup-workspace/setup-workspace-intro.md b/guides/getting-started/setup-workspace/setup-workspace-intro.md new file mode 100644 index 000000000..2c2df6f91 --- /dev/null +++ b/guides/getting-started/setup-workspace/setup-workspace-intro.md @@ -0,0 +1,3 @@ +# Intro for setting up workspace + +This is where content for setting up workspace goes diff --git a/guides/getting-started/zero2hero/a-web-app-using-docker.md b/guides/getting-started/zero2hero/a-web-app-using-docker.md new file mode 100644 index 000000000..0a19b3b5a --- /dev/null +++ b/guides/getting-started/zero2hero/a-web-app-using-docker.md @@ -0,0 +1,86 @@ +# A web app using Docker + +## Prerequisites + +In this section, we will be running commands locally on your development machine, _not_ inside the Docker container created in the previous chapter. + +## Build the container + +Let's start with the simple web front end. First, we'll build the container: + +```bash +cd src/web && docker build . -t web && cd - +``` + + + +## Run the container + +Now, we'll run the container, directing the container's traffic on port 8081 to our local port 8081: + +```bash +docker run -p 8081:8081 web +``` + +You'll see something that looks like this: + +```bash +$ docker run -p 8081:8081 web +time="2021-07-13T18:43:48Z" level=info msg="Starting web server on :8081 connecting to API at http://localhost:8080/" +``` + +Don't worry, there's no API for it to connect to. 
Try opening your browser to [http://localhost:8081] and you should see a message: + +```bash +Unable to get counter information +``` + +You can also use `curl` from another terminal window to get the same results: + +```bash +$ curl localhost:8081 +Unable to get counter information +``` + +You will see warning messages emanating from the web application. This is fine for now. + +```bash +time="2021-07-13T18:44:05Z" level=warning msg="Error getting response from http://localhost:8080/: Get \"http://localhost:8080/\": dial tcp 127.0.0.1:8080: connect: connection refused" +``` + +When done, use `Ctrl-C` to shut down your docker container and the web application inside it. + +## Build and run all three containers + +Use `docker-compose` to build and run the web app, the API, and the PostgreSQL backend. + +```bash +docker-compose build +docker-compose up +``` + +Give it a little time for all three containers to fully launch. Then, point your browser at [http://localhost:8081] again, or use `curl`: + +```bash +$ curl localhost:8081 +Counter: 1 +``` + +Great! We now have a minimalistic "counter" web application that uses an internal API that talks to a persistent database. + +When you're done, stop `docker-compose` with `Ctrl-C`, and then clean up: + +```bash +docker-compose down +``` + +We've provided some helper scripts in the [scripts](scripts/) directory to facilitate the bringing up and down of the stack. + +Run `scripts/up.sh` to bring everything up, and `scripts/down.sh` to destroy everything. Note that the counter value will persist after bringing the stack down and then back up. +If you want the counter to reset, you can run `scripts/up.sh clean`. 
+ +--- + +[Table of Contents](../README.md) + +Next Section: [Create an ECR Repository with Terraform](../03_ECR_repo) diff --git a/guides/getting-started/zero2hero/create-an-ecr-repository-with-terraform.md b/guides/getting-started/zero2hero/create-an-ecr-repository-with-terraform.md new file mode 100644 index 000000000..0161fa8c5 --- /dev/null +++ b/guides/getting-started/zero2hero/create-an-ecr-repository-with-terraform.md @@ -0,0 +1,286 @@ +# Create an ECR Repository with Terraform + +## Prerequisites + +- [Setup for the Course](../01_setup) +- [Create a Web App via Docker](../02_web_app_via_docker) + +This uses basic [Terraform](https://terraform.io) skills, so you'll either need Terraform installed locally or use the container provided in [Setup for the Course](../01_setup). Make sure you are properly set up for AWS authentication. + +## Create the terraform code + +tag::start-tag-name + +### [main.tf](main.tf) + +This file is basic setup. We require a minimum version of terraform, pull in the Terraform [AWS Provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs), and then set up a data block so that we can retrieve the AWS Account ID later on. + +```bash +terraform { + required_version = ">= 1.0.0" +} + +provider "aws" { + region = var.aws_region +} + +data "aws_caller_identity" "current" {} +``` + +tag::end-tag-name + +### [variables.tf](variables.tf) + +We'll define just a few variables in this file, so that our code can be changed to suit our needs. We might want to launch resources in a different region or rename our application. Both variables have initial default values. + +```bash +variable "aws_region" { + description = "The AWS region in which to deploy the resources." + type = string + default = "us-west-2" +} + +variable "app_name" { + description = "The name of the application" + type = string + default = "zero2hero" +} +``` + +### [ecr.tf](ecr.tf) + +Here is where the AWS ECR repositories are actually created. 
We'll create one for each of our three applications (`web`, `api`, and `db`) from the [previous section](../02_web_app_via_docker). + +```bash +# AWS ECR Repository Terraform documentation: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecr_repository + +resource "aws_ecr_repository" "web" { + name = "${var.app_name}_web" +} + +resource "aws_ecr_repository" "api" { + name = "${var.app_name}_api" +} + +resource "aws_ecr_repository" "db" { + name = "${var.app_name}_db" +} +``` + +### [outputs.tf](outputs.tf) + +Finally, we specify some outputs for convenience. We'll output the AWS Account ID, and then URLs for the three repositories and URLs for ease of accessing the AWS ECR console for each of the three repositories. + +```bash +output "aws_account_id" { + description = "AWS Account ID" + value = data.aws_caller_identity.current.account_id +} + +output "api_repo_url" { + description = "The repository URL for the api container" + value = aws_ecr_repository.api.repository_url +} + +output "api_aws_ecr_console_url" { + description = "A URL for the api AWS ECR Console" + value = format("https://console.aws.amazon.com/ecr/repositories/private/%s/%s_api?region=%s", data.aws_caller_identity.current.account_id, var.app_name, var.aws_region) +} + +output "db_repo_url" { + description = "The repository URL for the db container" + value = aws_ecr_repository.db.repository_url +} + +output "db_aws_ecr_console_url" { + description = "A URL for the db AWS ECR Console" + value = format("https://console.aws.amazon.com/ecr/repositories/private/%s/%s_db?region=%s", data.aws_caller_identity.current.account_id, var.app_name, var.aws_region) +} + + +output "web_repo_url" { + description = "The repository URL for the web container" + value = aws_ecr_repository.web.repository_url +} + +output "web_aws_ecr_console_url" { + description = "A URL for the web AWS ECR Console" + value = 
format("https://console.aws.amazon.com/ecr/repositories/private/%s/%s_web?region=%s", data.aws_caller_identity.current.account_id, var.app_name, var.aws_region) +} +``` + +## Initialize terraform + +```bash +terraform init +``` + +The output will look similar to this: + +``` +Initializing the backend... + +Initializing provider plugins... +- Finding latest version of hashicorp/aws... +- Installing hashicorp/aws v3.56.0... +- Installed hashicorp/aws v3.56.0 (signed by HashiCorp) + +Terraform has created a lock file .terraform.lock.hcl to record the provider +selections it made above. Include this file in your version control repository +so that Terraform can guarantee to make the same selections by default when +you run "terraform init" in the future. + +Terraform has been successfully initialized! + +You may now begin working with Terraform. Try running "terraform plan" to see +any changes that are required for your infrastructure. All Terraform commands +should now work. + +If you ever set or change modules or backend configuration for Terraform, +rerun this command to reinitialize your working directory. If you forget, other +commands will detect it and remind you to do so if necessary. +``` + +## Run the terraform plan + +```bash +terraform plan -out current.plan +``` + +The output will look similar to this: + +````bash +Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # aws_ecr_repository.api will be created + + resource "aws_ecr_repository" "api" { + + arn = (known after apply) + + id = (known after apply) + + image_tag_mutability = "MUTABLE" + + name = "zero2hero_api" + + registry_id = (known after apply) + + repository_url = (known after apply) + + tags_all = (known after apply) + } + + # aws_ecr_repository.db will be created + + resource "aws_ecr_repository" "db" { + + arn = (known after apply) + + id = (known after apply) + + image_tag_mutability = "MUTABLE" + + name = "zero2hero_db" + + registry_id = (known after apply) + + repository_url = (known after apply) + + tags_all = (known after apply) + } + + # aws_ecr_repository.web will be created + + resource "aws_ecr_repository" "web" { + + arn = (known after apply) + + id = (known after apply) + + image_tag_mutability = "MUTABLE" + + name = "zero2hero_web" + + registry_id = (known after apply) + + repository_url = (known after apply) + + tags_all = (known after apply) + } + +Plan: 3 to add, 0 to change, 0 to destroy. + +Changes to Outputs: + + api_aws_ecr_console_url = "https://console.aws.amazon.com/ecr/repositories/private/012345678910/zero2hero_api?region=us-west-2" + + api_repo_url = (known after apply) + + aws_account_id = "012345678910" + + db_aws_ecr_console_url = "https://console.aws.amazon.com/ecr/repositories/private/012345678910/zero2hero_db?region=us-west-2" + + db_repo_url = (known after apply) + + web_aws_ecr_console_url = "https://console.aws.amazon.com/ecr/repositories/private/012345678910/zero2hero_web?region=us-west-2" + + web_repo_url = (known after apply)``` +```` + +## Apply the plan + +```bash +terraform apply current.plan +``` + +Output will look similar to this: + +```bash +aws_ecr_repository.db: Creating... +aws_ecr_repository.web: Creating... +aws_ecr_repository.api: Creating... 
+aws_ecr_repository.web: Creation complete after 1s [id=zero2hero_web] +aws_ecr_repository.db: Creation complete after 1s [id=zero2hero_db] +aws_ecr_repository.api: Creation complete after 1s [id=zero2hero_api] + +Apply complete! Resources: 3 added, 0 changed, 0 destroyed. + +Outputs: + +api_aws_ecr_console_url = "https://console.aws.amazon.com/ecr/repositories/private/012345678910/zero2hero_api?region=us-west-2" +api_repo_url = "012345678910.dkr.ecr.us-west-2.amazonaws.com/zero2hero_api" +aws_account_id = "012345678910" +db_aws_ecr_console_url = "https://console.aws.amazon.com/ecr/repositories/private/012345678910/zero2hero_db?region=us-west-2" +db_repo_url = "012345678910.dkr.ecr.us-west-2.amazonaws.com/zero2hero_db" +web_aws_ecr_console_url = "https://console.aws.amazon.com/ecr/repositories/private/012345678910/zero2hero_web?region=us-west-2" +web_repo_url = "012345678910.dkr.ecr.us-west-2.amazonaws.com/zero2hero_web" +``` + +## Push your code to the repositories + +You can visit the AWS ECR Console by going to the `aws_ecr_console_url` URL as shown above (Your URL will be different). The instructions are duplicated below. 
+ +Let's first export your AWS Account ID listed above (`aws_account_id`) in the outputs: + +```bash +export AWS_ACCOUNT_ID= +``` + +Use the AWS CLI to log in to ECR with docker: + +```bash +aws ecr get-login-password --region us-west-2 | docker login --username AWS --password-stdin ${AWS_ACCOUNT_ID}.dkr.ecr.us-west-2.amazonaws.com +``` + +Build the Docker image (note this is referencing the directory in the [previous section](../02_web_app_via_docker)): + +```bash +docker build -t zero2hero_web ../02_web_app_via_docker/src/web +``` + +After the build completes, tag your image so you can push the image to this repository: + +```bash +docker tag zero2hero_web:latest ${AWS_ACCOUNT_ID}.dkr.ecr.us-west-2.amazonaws.com/zero2hero_web:latest +``` + +Run the following command to push this image to your newly created AWS repository: + +```bash +docker push ${AWS_ACCOUNT_ID}.dkr.ecr.us-west-2.amazonaws.com/zero2hero_web:latest +``` + +Repeat the process for the API and the DB containers (you don't need to log in again): + +```bash +# API + +docker build -t zero2hero_api ../02_web_app_via_docker/src/api +docker tag zero2hero_api:latest ${AWS_ACCOUNT_ID}.dkr.ecr.us-west-2.amazonaws.com/zero2hero_api:latest +docker push ${AWS_ACCOUNT_ID}.dkr.ecr.us-west-2.amazonaws.com/zero2hero_api:latest + +# DB + +docker build -t zero2hero_db ../02_web_app_via_docker/src/db +docker tag zero2hero_db:latest ${AWS_ACCOUNT_ID}.dkr.ecr.us-west-2.amazonaws.com/zero2hero_db:latest +docker push ${AWS_ACCOUNT_ID}.dkr.ecr.us-west-2.amazonaws.com/zero2hero_db:latest +``` + +--- + +[Table of Contents](../README.md) + +Next Section: [Launch Web App to ECS Fargate](../04_web_app_to_ecs_fargate) diff --git a/guides/getting-started/zero2hero/html-content.mdx b/guides/getting-started/zero2hero/html-content.mdx new file mode 100644 index 000000000..97787847b --- /dev/null +++ b/guides/getting-started/zero2hero/html-content.mdx @@ -0,0 +1,5 @@ +import AsciiDocContent from "/src/components/AsciiDocContent"; + +# 
Rendering HTML content + + diff --git a/guides/getting-started/zero2hero/setup-for-courses-in-gruntwork-academy.md b/guides/getting-started/zero2hero/setup-for-courses-in-gruntwork-academy.md new file mode 100644 index 000000000..9a5c2fcc6 --- /dev/null +++ b/guides/getting-started/zero2hero/setup-for-courses-in-gruntwork-academy.md @@ -0,0 +1,93 @@ +# Setup for courses in Gruntwork Academy + +## AWS + +Most of the [Gruntwork Academy courses](https://github.com/gruntwork-io/gruntwork-academy) will build infrastructure in AWS. You'll need an [AWS account](https://aws.amazon.com/) and a user for that account with sufficient permissions to create and destroy infrastructure. + +Note that this infrastructure will cost you some money! We will make best efforts to minimize the cost to you by leveraging low cost infrastructure and helping you destroy it afterwards. The responsibility is yours to monitor, pay for, and destroy any lingering infrastructure. + +## AWS Authentication + +The tools used will all need to be authorized and authenticated. See [AWS's documention](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html). + +The examples in this class will assume that you are authenticated via one of the methods described. + +99design's [AWS Vault](https://github.com/99designs/aws-vault) makes it easy to use multiple AWS accounts. 
If you set up an AWS account with the name `sand`, for example, you can do the following to run the AWS cli in that account and get a list of S3 buckets: + +```bash +aws-vault exec sand -- aws s3api list-buckets +``` + + +## CLI Tools + +In order to complete these courses, you'll need some or all the following third party tools: + +TODO: Make sure this list is complete and doesn't list something we don't actually need + +* [Docker](https://www.docker.com/) +* [Packer](https://www.packer.io/) +* [Terraform](https://www.terraform.io/) +* [Terragrunt](https://terragrunt.gruntwork.io/) +* [AWS CLI](https://aws.amazon.com/cli/) +* [AWS Vault](https://github.com/99designs/aws-vault) + +If you have these tools already installed, you can run them natively from your machine. Make sure the versions of each of the tools match what is inside the Dockerfile for best results: + +```bash +# Show the current tool versions defined in the Dockerfile +$ grep 'ARG [A-Z]*_VERSION' Dockerfile | awk '{print $2}' +PACKER_VERSION=1.7.3 +TERRAFORM_VERSION=1.0.2 +TERRAGRUNT_VERSION=0.31.0 +``` + +If you'd prefer, you can use Docker and the included Dockerfile to build a container with the tools installed. + +From the root directory of the course (one directory up from here): + +```bash +# Build the docker container named gw_academy +docker build . -t zero2hero -f 01_setup/Dockerfile +``` + +If you need to change the version of packer, terraform, or terragrunt, you can pass those variables in as `--build-arg`s: + +```bash +# Build the docker container with specific tool versions +docker build . 
--build-arg PACKER_VERSION=1.7.3 --build-arg TERRAFORM_VERSION=1.0.2 --build-arg TERRAGRUNT_VERSION=0.31.0 -t zero2hero -f 01_setup/Dockerfile +``` + +Once the container has been built, you can run the container, mounting the current directory to `/zero2hero`: + +```bash +# Run the container, mounting the course at /gw_academy +docker run -it -v $(pwd):/zero2hero zero2hero /bin/bash +``` + +You should see the courses mounted: + +```bash +# Inside the docker container! + +root@cc8ae6945307:/# ls /zero2hero +01_setup 04_web_app_to_ecs_fargate 07_service_catalog_web_app 10_upgrade_a_module_version +02_web_app_via_docker 05_web_app_production_ready 08_data_store README.md +03_ECR_repo 06_gruntwork_modules_web_app 09_web_app_gruntwork_way +``` + +You will also have the AWS CLI, AWS Vault, aws-vault, Packer, Terraform, and Terragrunt installed: + +```bash +# Inside the docker container! + +root@cc8ae6945307:/# ls /usr/local/bin +aws aws-vault aws_completer packer terraform terragrunt +``` + +You can run the Zero to Hero Gruntwork Academy course from inside this running container. + +--- +[Table of Contents](../README.md) + +Next Section: [A Web App via Docker](../02_web_app_via_docker) diff --git a/guides/getting-started/zero2hero/zero-to-hero.md b/guides/getting-started/zero2hero/zero-to-hero.md new file mode 100644 index 000000000..70e6b645e --- /dev/null +++ b/guides/getting-started/zero2hero/zero-to-hero.md @@ -0,0 +1,54 @@ +--- +sidebar_position: 1 +title: Zero To Hero +--- + +# Zero To Hero + +## Overview + +This course is designed to incrementally take you from the terraforming you do today to the Gruntwork way of managing infrastructure. We'll start with a simple web application in Docker, and deploy it with Terraform. We'll then show you how to deploy it with our Gruntwork modules and our Service Catalog. + +We recommend you start with the first section and work your way through the course. 
However, if you read a section and know that you already comprehend all of the concepts explained in it, feel free to skip the section and move forward. We'll note any pre-requisites at the top of each section. + +## Table of Contents + +1. [Setup for the Course](01_setup) + + Make sure you're set up with the tools needed to go through the rest of the hands-on examples. + +1. [Create a Web App via Docker](02_web_app_via_docker) + + We craft a simple web application backed by an API and a persistent database. Later on, we deploy these services in the cloud in a few different ways. + +1. [Create an ECR Repository with Terraform](03_ECR_repo) + + We create a simple Terraform module to provision an [Elastic Container Registry](https://aws.amazon.com/ecr/) for your Docker image, and push your image to that repository. + +1. [Launch Web App to ECS Fargate](04_web_app_to_ecs_fargate) + + We write a more complex Terraform module that deploys your Docker image using [ECS on Fargate](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/AWS_Fargate.html). + +1. [Make a Web App Production Ready](05_web_app_production_ready) + + We discuss features necessary to properly productionize your code and show a production-ready checklist. + +1. [Use Gruntwork Modules to Launch your Web App](06_gruntwork_modules_web_app) + + We deploy the simple web application using a [Gruntwork module](https://gruntwork.io/infrastructure-as-code-library/). + +1. [Use Service Catalog to Launch your Web App](07_service_catalog_web_app) + + We deploy the simple web application using the [Gruntwork Service Catalog](https://blog.gruntwork.io/introducing-the-gruntwork-module-service-and-architecture-catalogs-eb3a21b99f70#122a) + +1. [Add a Data Store to your Web App](08_data_store) + + We add a persistent data store via the [Amazon Relational Database Service (RDS)](https://aws.amazon.com/rds/) to the web application. + +1. 
[Deploy a Web App Production Ready the Gruntwork Way](09_web_app_gruntwork_way) + + We discuss how to leverage Gruntwork to complete other production requirements. + +1. [Upgrade a module](10_upgrade_a_module_version) + + We discuss how to upgrade a Gruntwork module to stay current with features and security. diff --git a/guides/guides-intro.md b/guides/guides-intro.md new file mode 100644 index 000000000..1379525f2 --- /dev/null +++ b/guides/guides-intro.md @@ -0,0 +1,11 @@ +# Main content + +This is where the main content goes + +There's also just regular HTML content + + + + + +
Stuff
diff --git a/package.json b/package.json index 056299c77..85b0fe676 100644 --- a/package.json +++ b/package.json @@ -1,63 +1,48 @@ { - "name": "gruntwork-docs", + "name": "docusaurus-test", + "version": "0.0.0", "private": true, - "description": "The Gruntwork Docs site.", - "version": "0.2.0", - "author": "Rob Morgan ", + "scripts": { + "docusaurus": "docusaurus", + "start": "docusaurus start", + "build": "docusaurus build", + "swizzle": "docusaurus swizzle", + "deploy": "docusaurus deploy", + "clear": "docusaurus clear", + "serve": "docusaurus serve", + "write-translations": "docusaurus write-translations", + "write-heading-ids": "docusaurus write-heading-ids", + "typecheck": "tsc" + }, "dependencies": { - "@fortawesome/fontawesome-svg-core": "1.2.34", - "@fortawesome/free-brands-svg-icons": "5.15.2", - "@fortawesome/free-solid-svg-icons": "5.15.2", - "@fortawesome/react-fontawesome": "0.1.14", - "font-awesome": "4.7.0", - "bootstrap": "4.6.0", - "gatsby": "^3.4.0", - "gatsby-plugin-google-analytics": "^3.4.0", - "gatsby-plugin-google-fonts": "^0.0.4", - "gatsby-plugin-manifest": "^3.4.0", - "gatsby-plugin-nprogress": "3.4.0", - "gatsby-plugin-offline": "^4.4.0", - "gatsby-plugin-react-helmet": "^4.4.0", - "gatsby-plugin-react-svg": "^2.1.1", - "gatsby-plugin-s3": "^0.2.5", - "gatsby-plugin-sass": "^4.4.0", - "gatsby-plugin-sitemap": "4.0.0", - "gatsby-remark-copy-linked-files": "^3.2.0", - "gatsby-remark-prismjs": "^4.2.0", - "gatsby-remark-smartypants": "^3.2.0", - "gatsby-source-filesystem": "^3.4.0", - "gatsby-transformer-remark": "^3.2.0", - "jquery": "3.6.0", - "popper.js": "1.16.1", - "prismjs": "^1.23.0", + "@docusaurus/core": "2.0.0-beta.8", + "@docusaurus/preset-classic": "2.0.0-beta.8", + "@mdx-js/react": "^1.6.21", + "@svgr/webpack": "^5.5.0", + "clsx": "^1.1.1", + "file-loader": "^6.2.0", + "plugin-image-zoom": "ataft/plugin-image-zoom", + "prism-react-renderer": "^1.2.1", + "raw-loader": "^4.0.2", "react": "^17.0.1", "react-dom": "^17.0.1", - 
"react-helmet": "^6.1.0", - "svgo": "^1.2.2", - "sass": "^1.32.12" + "url-loader": "^4.1.1" }, "devDependencies": { - "bootstrap": "4.6.0", - "prettier": "^1.16.4" - }, - "keywords": [ - "gatsby" - ], - "license": "MIT", - "scripts": { - "build": "gatsby build", - "develop": "gatsby develop", - "deploy": "gatsby-plugin-s3 deploy", - "format": "prettier --write src/**/*.{js,jsx}", - "start": "npm run develop", - "serve": "gatsby serve", - "test": "echo \"Write tests! -> https://gatsby.dev/unit-testing\"" - }, - "repository": { - "type": "git", - "url": "https://github.com/gatsbyjs/gatsby-starter-default" + "@docusaurus/module-type-aliases": "2.0.0-beta.8", + "@tsconfig/docusaurus": "^1.0.4", + "typescript": "^4.3.5" }, - "bugs": { - "url": "https://github.com/gatsbyjs/gatsby/issues" + "browserslist": { + "production": [ + ">0.5%", + "not dead", + "not op_mini all" + ], + "development": [ + "last 1 chrome version", + "last 1 firefox version", + "last 1 safari version" + ] } } diff --git a/reference/intro.md b/reference/intro.md new file mode 100644 index 000000000..702b8a818 --- /dev/null +++ b/reference/intro.md @@ -0,0 +1,3 @@ +# Reference + +This is the main descriptor for reference area diff --git a/scripts/push-to-s3-prod.sh b/scripts/push-to-s3-prod.sh new file mode 100755 index 000000000..d427c12e0 --- /dev/null +++ b/scripts/push-to-s3-prod.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +set -e + +yarn build + +aws s3 cp --recursive build/ s3://[PUT_THIS_IN_WHEN_READY] diff --git a/scripts/push-to-s3-stage.sh b/scripts/push-to-s3-stage.sh new file mode 100755 index 000000000..aaab4b035 --- /dev/null +++ b/scripts/push-to-s3-stage.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +set -e + +yarn build + +aws s3 cp --recursive build/ s3://docs.dogfood-stage.com diff --git a/sidebars.js b/sidebars.js new file mode 100644 index 000000000..fd342f2cd --- /dev/null +++ b/sidebars.js @@ -0,0 +1,31 @@ +/** + * Creating a sidebar enables you to: + - create an ordered group of docs + - render a 
sidebar for each doc of that group + - provide next/previous navigation + + The sidebars can be generated from the filesystem, or explicitly defined here. + + Create as many sidebars as you want. + */ + +// @ts-check + +/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ +const sidebars = { + // By default, Docusaurus generates a sidebar from the docs folder structure + tutorialSidebar: [{type: 'autogenerated', dirName: '.'}], + + // But you can create a sidebar manually + /* + tutorialSidebar: [ + { + type: 'category', + label: 'Tutorial', + items: ['hello'], + }, + ], + */ +}; + +module.exports = sidebars; diff --git a/sidebarsCommunity.js b/sidebarsCommunity.js new file mode 100644 index 000000000..1b5ae2a26 --- /dev/null +++ b/sidebarsCommunity.js @@ -0,0 +1,99 @@ +/** + * Creating a sidebar enables you to: + - create an ordered group of docs + - render a sidebar for each doc of that group + - provide next/previous navigation + + The sidebars can be generated from the filesystem, or explicitly defined here. + + Create as many sidebars as you want. + */ + +// @ts-check + +/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ +const sidebars = { + // By default, Docusaurus generates a sidebar from the docs folder structure + // tutorialSidebar: [{ type: "autogenerated", dirName: "." 
}], + + // But you can create a sidebar manually + + tutorialSidebar: [ + { type: "doc", id: "guides-intro", label: "Introduction" }, + { + type: "category", + label: "Getting Started", + collapsed: false, + collapsible: true, + items: [ + { + type: "category", + label: "Zero to Hero", + collapsed: false, + collapsible: true, + items: [ + { + type: "doc", + id: "getting-started/zero2hero/zero-to-hero", + label: "Introduction", + }, + { + type: "doc", + id: "getting-started/zero2hero/setup-for-courses-in-gruntwork-academy", + label: "Setup for the Course", + }, + { + type: "doc", + id: "getting-started/zero2hero/a-web-app-using-docker", + label: "Create a Web App via Docker", + }, + { + type: "doc", + id: "getting-started/zero2hero/create-an-ecr-repository-with-terraform", + label: "Create an ECR Repository with Terraform", + }, + { + type: "doc", + id: "getting-started/zero2hero/html-content", + label: "Foo?", + }, + ], + }, + { + type: "category", + label: "Setting Up Your Workspace", + collapsed: false, + collapsible: true, + items: [ + { + type: "doc", + id: "getting-started/setup-workspace/setup-workspace-intro", + label: "Introduction", + }, + ], + }, + { + type: "category", + label: "Common Tasks", + collapsed: false, + collapsible: true, + items: [ + { + type: "link", + href: "/foo", + label: "Guide as TSX", + }, + { + type: "doc", + id: "getting-started/common-tasks/foo", + label: "Guide as MDX", + }, + ], + }, + ], + }, + // { type: "autogenerated", dirName: "/foo" }, + ], +}; + +module.exports = sidebars; diff --git a/src/assets/glyphs/bag.svg b/src/assets/glyphs/bag.svg deleted file mode 100644 index 1c0b4e439..000000000 --- a/src/assets/glyphs/bag.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - _Icons / 24px / Bag - Created with Sketch. 
- - - - \ No newline at end of file diff --git a/src/assets/glyphs/burger.svg b/src/assets/glyphs/burger.svg deleted file mode 100644 index f82486d90..000000000 --- a/src/assets/glyphs/burger.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - _Icons / 32px / Burger - Created with Sketch. - - - - \ No newline at end of file diff --git a/src/assets/glyphs/close.svg b/src/assets/glyphs/close.svg deleted file mode 100644 index 7be934918..000000000 --- a/src/assets/glyphs/close.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - _Icons / 24px / Close - Created with Sketch. - - - - diff --git a/src/assets/glyphs/facebook-small.svg b/src/assets/glyphs/facebook-small.svg deleted file mode 100644 index dfab69e69..000000000 --- a/src/assets/glyphs/facebook-small.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Icon - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/src/assets/glyphs/facebook.svg b/src/assets/glyphs/facebook.svg deleted file mode 100644 index 2836d73df..000000000 --- a/src/assets/glyphs/facebook.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - _Icons / 32px - Social / Facebook - Created with Sketch. - - - - \ No newline at end of file diff --git a/src/assets/glyphs/hide-off.svg b/src/assets/glyphs/hide-off.svg deleted file mode 100644 index c82fe5118..000000000 --- a/src/assets/glyphs/hide-off.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - Inputs / _/ Black / Hide - Off - Created with Sketch. - - - - \ No newline at end of file diff --git a/src/assets/glyphs/hide-on.svg b/src/assets/glyphs/hide-on.svg deleted file mode 100644 index 6098f0230..000000000 --- a/src/assets/glyphs/hide-on.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - Inputs / _/ Black / Hide - On - Created with Sketch. - - - - \ No newline at end of file diff --git a/src/assets/glyphs/instagram-small.svg b/src/assets/glyphs/instagram-small.svg deleted file mode 100644 index e7aa5d7b2..000000000 --- a/src/assets/glyphs/instagram-small.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Icon - Created with Sketch. 
- - - - - - \ No newline at end of file diff --git a/src/assets/glyphs/instagram.svg b/src/assets/glyphs/instagram.svg deleted file mode 100644 index e6478253a..000000000 --- a/src/assets/glyphs/instagram.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Icon - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/src/assets/glyphs/link.svg b/src/assets/glyphs/link.svg deleted file mode 100644 index 35c714514..000000000 --- a/src/assets/glyphs/link.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - _Icons / 32px - Social / Copy Link - Created with Sketch. - - - - \ No newline at end of file diff --git a/src/assets/glyphs/linkedin-small.svg b/src/assets/glyphs/linkedin-small.svg deleted file mode 100644 index eb58ca774..000000000 --- a/src/assets/glyphs/linkedin-small.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Icon - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/src/assets/glyphs/linkedin.svg b/src/assets/glyphs/linkedin.svg deleted file mode 100644 index eabe95c96..000000000 --- a/src/assets/glyphs/linkedin.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - _Icons / 32px - Social / Linkedin - Created with Sketch. - - - - \ No newline at end of file diff --git a/src/assets/glyphs/pinterest-small.svg b/src/assets/glyphs/pinterest-small.svg deleted file mode 100644 index a3ae1d5ba..000000000 --- a/src/assets/glyphs/pinterest-small.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Icon - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/src/assets/glyphs/pinterest.svg b/src/assets/glyphs/pinterest.svg deleted file mode 100644 index 759e87775..000000000 --- a/src/assets/glyphs/pinterest.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Icon - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/src/assets/glyphs/plane.svg b/src/assets/glyphs/plane.svg deleted file mode 100644 index 01364007a..000000000 --- a/src/assets/glyphs/plane.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - _Icons / 24px / Plane - Created with Sketch. 
- - - - \ No newline at end of file diff --git a/src/assets/glyphs/play.svg b/src/assets/glyphs/play.svg deleted file mode 100644 index fb42b4736..000000000 --- a/src/assets/glyphs/play.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - Icon - Created with Sketch. - - - - - - - - - \ No newline at end of file diff --git a/src/assets/glyphs/profile.svg b/src/assets/glyphs/profile.svg deleted file mode 100644 index a199023b6..000000000 --- a/src/assets/glyphs/profile.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - _Icons / 24px / Profile - Created with Sketch. - - - - \ No newline at end of file diff --git a/src/assets/glyphs/rectangle-1-white.svg b/src/assets/glyphs/rectangle-1-white.svg deleted file mode 100644 index c97d2035b..000000000 --- a/src/assets/glyphs/rectangle-1-white.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - shape-1 - Created with Sketch. - - - - - - - - diff --git a/src/assets/glyphs/rectangle-1.svg b/src/assets/glyphs/rectangle-1.svg deleted file mode 100644 index aef70673c..000000000 --- a/src/assets/glyphs/rectangle-1.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - shape-1 - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/src/assets/glyphs/rectangle-2-white.svg b/src/assets/glyphs/rectangle-2-white.svg deleted file mode 100644 index 85b5ddf14..000000000 --- a/src/assets/glyphs/rectangle-2-white.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - shape-2 - Created with Sketch. - - - - - - - - diff --git a/src/assets/glyphs/rectangle-2.svg b/src/assets/glyphs/rectangle-2.svg deleted file mode 100644 index cad09d310..000000000 --- a/src/assets/glyphs/rectangle-2.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - shape-2 - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/src/assets/glyphs/rectangle-3-white.svg b/src/assets/glyphs/rectangle-3-white.svg deleted file mode 100644 index 499090673..000000000 --- a/src/assets/glyphs/rectangle-3-white.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - shape-3 - Created with Sketch. 
- - - - - - - - diff --git a/src/assets/glyphs/rectangle-3.svg b/src/assets/glyphs/rectangle-3.svg deleted file mode 100644 index 1a865d9ef..000000000 --- a/src/assets/glyphs/rectangle-3.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - shape-3 - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/src/assets/glyphs/rectangle-4-white.svg b/src/assets/glyphs/rectangle-4-white.svg deleted file mode 100644 index b5d5e9fdf..000000000 --- a/src/assets/glyphs/rectangle-4-white.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - shape-4 - Created with Sketch. - - - - - - - - diff --git a/src/assets/glyphs/rectangle-4.svg b/src/assets/glyphs/rectangle-4.svg deleted file mode 100644 index fe5dfcc40..000000000 --- a/src/assets/glyphs/rectangle-4.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - shape-4 - Created with Sketch. - - - - - - - - \ No newline at end of file diff --git a/src/assets/glyphs/rectangle-5-white.svg b/src/assets/glyphs/rectangle-5-white.svg deleted file mode 100644 index eba5ef8cc..000000000 --- a/src/assets/glyphs/rectangle-5-white.svg +++ /dev/null @@ -1,21 +0,0 @@ - - - - shape-5 - Created with Sketch. - - - - - - - - - - - - - - - - diff --git a/src/assets/glyphs/rectangle-5.svg b/src/assets/glyphs/rectangle-5.svg deleted file mode 100644 index 44a12e147..000000000 --- a/src/assets/glyphs/rectangle-5.svg +++ /dev/null @@ -1,21 +0,0 @@ - - - - shape-5 - Created with Sketch. - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/src/assets/glyphs/rectangle-6-white.svg b/src/assets/glyphs/rectangle-6-white.svg deleted file mode 100644 index 298c61c9a..000000000 --- a/src/assets/glyphs/rectangle-6-white.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - shape-6 - Created with Sketch. - - - - - - - - diff --git a/src/assets/glyphs/rectangle-6.svg b/src/assets/glyphs/rectangle-6.svg deleted file mode 100644 index 3288b098b..000000000 --- a/src/assets/glyphs/rectangle-6.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - shape-6 - Created with Sketch. 
- - - - - - - - \ No newline at end of file diff --git a/src/assets/glyphs/search.svg b/src/assets/glyphs/search.svg deleted file mode 100644 index 4ad605f74..000000000 --- a/src/assets/glyphs/search.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - _Icons / 24px / Search - Created with Sketch. - - - - \ No newline at end of file diff --git a/src/assets/glyphs/star-primary.svg b/src/assets/glyphs/star-primary.svg deleted file mode 100644 index c85cb9c36..000000000 --- a/src/assets/glyphs/star-primary.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - _Icons / 24px / Favorite - Created with Sketch. - - - - \ No newline at end of file diff --git a/src/assets/glyphs/thumbler-small.svg b/src/assets/glyphs/thumbler-small.svg deleted file mode 100644 index 2a98811f2..000000000 --- a/src/assets/glyphs/thumbler-small.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Icon - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/src/assets/glyphs/twitter-small.svg b/src/assets/glyphs/twitter-small.svg deleted file mode 100644 index 7e5ec04fd..000000000 --- a/src/assets/glyphs/twitter-small.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Icon - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/src/assets/glyphs/twitter.svg b/src/assets/glyphs/twitter.svg deleted file mode 100644 index c7004184a..000000000 --- a/src/assets/glyphs/twitter.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - _Icons / 32px - Social / Twitter 2 - Created with Sketch. - - - - \ No newline at end of file diff --git a/src/assets/logos/acme.svg b/src/assets/logos/acme.svg deleted file mode 100644 index 15eeb2268..000000000 --- a/src/assets/logos/acme.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - Acme - Created with Sketch. - - - - \ No newline at end of file diff --git a/src/assets/logos/airbnb.svg b/src/assets/logos/airbnb.svg deleted file mode 100644 index 3c64cd3cb..000000000 --- a/src/assets/logos/airbnb.svg +++ /dev/null @@ -1,17 +0,0 @@ - - - - airbnb - Created with Sketch. 
- - - - - - - - - - - - \ No newline at end of file diff --git a/src/assets/logos/android-pay.svg b/src/assets/logos/android-pay.svg deleted file mode 100644 index 8ae716ddf..000000000 --- a/src/assets/logos/android-pay.svg +++ /dev/null @@ -1,21 +0,0 @@ - - - - Logo - Created with Sketch. - - - - - - - - - - - - \ No newline at end of file diff --git a/src/assets/logos/app-store.svg b/src/assets/logos/app-store.svg deleted file mode 100644 index 17f928f8e..000000000 --- a/src/assets/logos/app-store.svg +++ /dev/null @@ -1,16 +0,0 @@ - - - - Buttons / App Store - Created with Sketch. - - - - - - - - - - - \ No newline at end of file diff --git a/src/assets/logos/apple-pay.svg b/src/assets/logos/apple-pay.svg deleted file mode 100644 index aaad13fb3..000000000 --- a/src/assets/logos/apple-pay.svg +++ /dev/null @@ -1,17 +0,0 @@ - - - - Logo - Created with Sketch. - - - - - - - - - - - - \ No newline at end of file diff --git a/src/assets/logos/cisco.svg b/src/assets/logos/cisco.svg deleted file mode 100644 index cca34bef1..000000000 --- a/src/assets/logos/cisco.svg +++ /dev/null @@ -1,17 +0,0 @@ - - - - cisco - Created with Sketch. - - - - - - - - - - - - \ No newline at end of file diff --git a/src/assets/logos/cnn.svg b/src/assets/logos/cnn.svg deleted file mode 100644 index d4a63bb8c..000000000 --- a/src/assets/logos/cnn.svg +++ /dev/null @@ -1,17 +0,0 @@ - - - - cnn - Created with Sketch. - - - - - - - - - - - - \ No newline at end of file diff --git a/src/assets/logos/ebay.svg b/src/assets/logos/ebay.svg deleted file mode 100644 index 73b009989..000000000 --- a/src/assets/logos/ebay.svg +++ /dev/null @@ -1,17 +0,0 @@ - - - - ebay - Created with Sketch. - - - - - - - - - - - - \ No newline at end of file diff --git a/src/assets/logos/google-play.svg b/src/assets/logos/google-play.svg deleted file mode 100644 index 8878f4e12..000000000 --- a/src/assets/logos/google-play.svg +++ /dev/null @@ -1,60 +0,0 @@ - - - - Buttons / Google Play - Created with Sketch. 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/src/assets/logos/google.svg b/src/assets/logos/google.svg deleted file mode 100644 index b3c60e181..000000000 --- a/src/assets/logos/google.svg +++ /dev/null @@ -1,17 +0,0 @@ - - - - google - Created with Sketch. - - - - - - - - - - - - \ No newline at end of file diff --git a/src/assets/logos/paypal.svg b/src/assets/logos/paypal.svg deleted file mode 100644 index 137f164d8..000000000 --- a/src/assets/logos/paypal.svg +++ /dev/null @@ -1,28 +0,0 @@ - - - - Logo - Created with Sketch. - - - - - - - - - - - - \ No newline at end of file diff --git a/src/assets/logos/samsung-pay.svg b/src/assets/logos/samsung-pay.svg deleted file mode 100644 index 3642566d5..000000000 --- a/src/assets/logos/samsung-pay.svg +++ /dev/null @@ -1,17 +0,0 @@ - - - - Logo - Created with Sketch. - - - - - - - - - - - - \ No newline at end of file diff --git a/src/assets/logos/uber.svg b/src/assets/logos/uber.svg deleted file mode 100644 index e17987ec4..000000000 --- a/src/assets/logos/uber.svg +++ /dev/null @@ -1,17 +0,0 @@ - - - - uber - Created with Sketch. 
- - - - - - - - - - - - \ No newline at end of file diff --git a/src/assets/pictures/chart-555x290.png b/src/assets/pictures/chart-555x290.png deleted file mode 100644 index a85cf5252..000000000 Binary files a/src/assets/pictures/chart-555x290.png and /dev/null differ diff --git a/src/assets/pictures/cover-1.png b/src/assets/pictures/cover-1.png deleted file mode 100644 index 7e4caea39..000000000 Binary files a/src/assets/pictures/cover-1.png and /dev/null differ diff --git a/src/assets/pictures/cover-2.png b/src/assets/pictures/cover-2.png deleted file mode 100644 index ed7640b83..000000000 Binary files a/src/assets/pictures/cover-2.png and /dev/null differ diff --git a/src/assets/pictures/cover-3.png b/src/assets/pictures/cover-3.png deleted file mode 100644 index da6c7239a..000000000 Binary files a/src/assets/pictures/cover-3.png and /dev/null differ diff --git a/src/assets/pictures/cover-4.png b/src/assets/pictures/cover-4.png deleted file mode 100644 index c95ae58a2..000000000 Binary files a/src/assets/pictures/cover-4.png and /dev/null differ diff --git a/src/assets/pictures/full-phone-555x380.png b/src/assets/pictures/full-phone-555x380.png deleted file mode 100644 index 4c9121977..000000000 Binary files a/src/assets/pictures/full-phone-555x380.png and /dev/null differ diff --git a/src/assets/pictures/head-phone-555x380.png b/src/assets/pictures/head-phone-555x380.png deleted file mode 100644 index b33a6acab..000000000 Binary files a/src/assets/pictures/head-phone-555x380.png and /dev/null differ diff --git a/src/assets/pictures/image-1110x410.png b/src/assets/pictures/image-1110x410.png deleted file mode 100644 index 56b254eae..000000000 Binary files a/src/assets/pictures/image-1110x410.png and /dev/null differ diff --git a/src/assets/pictures/image-1110x540.png b/src/assets/pictures/image-1110x540.png deleted file mode 100644 index 67f848478..000000000 Binary files a/src/assets/pictures/image-1110x540.png and /dev/null differ diff --git 
a/src/assets/pictures/image-1440x600.png b/src/assets/pictures/image-1440x600.png deleted file mode 100644 index f52624192..000000000 Binary files a/src/assets/pictures/image-1440x600.png and /dev/null differ diff --git a/src/assets/pictures/image-255x230.png b/src/assets/pictures/image-255x230.png deleted file mode 100644 index 2816b4ff2..000000000 Binary files a/src/assets/pictures/image-255x230.png and /dev/null differ diff --git a/src/assets/pictures/image-410x250.png b/src/assets/pictures/image-410x250.png deleted file mode 100644 index c4df3149f..000000000 Binary files a/src/assets/pictures/image-410x250.png and /dev/null differ diff --git a/src/assets/pictures/image-540x295.png b/src/assets/pictures/image-540x295.png deleted file mode 100644 index 6bc55ab4a..000000000 Binary files a/src/assets/pictures/image-540x295.png and /dev/null differ diff --git a/src/assets/pictures/image-540x540.png b/src/assets/pictures/image-540x540.png deleted file mode 100644 index b1922be2b..000000000 Binary files a/src/assets/pictures/image-540x540.png and /dev/null differ diff --git a/src/assets/pictures/image-540x620.png b/src/assets/pictures/image-540x620.png deleted file mode 100644 index 167f67c21..000000000 Binary files a/src/assets/pictures/image-540x620.png and /dev/null differ diff --git a/src/assets/pictures/image-555x380.png b/src/assets/pictures/image-555x380.png deleted file mode 100644 index 9c79525c7..000000000 Binary files a/src/assets/pictures/image-555x380.png and /dev/null differ diff --git a/src/assets/pictures/image-555x540.png b/src/assets/pictures/image-555x540.png deleted file mode 100644 index c01d401a0..000000000 Binary files a/src/assets/pictures/image-555x540.png and /dev/null differ diff --git a/src/assets/pictures/some-555x380.png b/src/assets/pictures/some-555x380.png deleted file mode 100644 index a6e97a0fc..000000000 Binary files a/src/assets/pictures/some-555x380.png and /dev/null differ diff --git a/src/assets/pictures/userpic.png 
b/src/assets/pictures/userpic.png deleted file mode 100644 index 4b2a4914e..000000000 Binary files a/src/assets/pictures/userpic.png and /dev/null differ diff --git a/src/assets/pictures/window-555x380.png b/src/assets/pictures/window-555x380.png deleted file mode 100644 index ba9b7fe3f..000000000 Binary files a/src/assets/pictures/window-555x380.png and /dev/null differ diff --git a/src/components/AsciiDocContent.tsx b/src/components/AsciiDocContent.tsx new file mode 100644 index 000000000..d446cb241 --- /dev/null +++ b/src/components/AsciiDocContent.tsx @@ -0,0 +1,8 @@ +import React from 'react'; + +export default function AsciiDocContent(args:any): JSX.Element { + return ( +
+ ); + } + \ No newline at end of file diff --git a/src/components/Header/index.js b/src/components/Header/index.js deleted file mode 100644 index c887e1aa3..000000000 --- a/src/components/Header/index.js +++ /dev/null @@ -1,33 +0,0 @@ -import { Link } from "gatsby" -import PropTypes from "prop-types" -import React from "react" - -import Logo from "./logo.png" - -const Header = ({ siteTitle }) => ( - <> -
- -
- -) - -Header.propTypes = { - siteTitle: PropTypes.string, -} - -Header.defaultProps = { - siteTitle: ``, -} - -export default Header diff --git a/src/components/Header/logo.png b/src/components/Header/logo.png deleted file mode 100644 index 3f460f767..000000000 Binary files a/src/components/Header/logo.png and /dev/null differ diff --git a/src/components/HomepageFeatures.module.css b/src/components/HomepageFeatures.module.css new file mode 100644 index 000000000..b248eb2e5 --- /dev/null +++ b/src/components/HomepageFeatures.module.css @@ -0,0 +1,11 @@ +.features { + display: flex; + align-items: center; + padding: 2rem 0; + width: 100%; +} + +.featureSvg { + height: 200px; + width: 200px; +} diff --git a/src/components/HomepageFeatures.tsx b/src/components/HomepageFeatures.tsx new file mode 100644 index 000000000..3897078da --- /dev/null +++ b/src/components/HomepageFeatures.tsx @@ -0,0 +1,76 @@ +/** + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ +import React from 'react'; +import clsx from 'clsx'; +import styles from './HomepageFeatures.module.css'; + +type FeatureItem = { + title: string; + image: string; + description: JSX.Element; +}; + +const FeatureList: FeatureItem[] = [ + { + title: 'Easy to Use', + image: '/img/undraw_docusaurus_mountain.svg', + description: ( + <> + Docusaurus was designed from the ground up to be easily installed and + used to get your website up and running quickly. + + ), + }, + { + title: 'Focus on What Matters', + image: '/img/undraw_docusaurus_tree.svg', + description: ( + <> + Docusaurus lets you focus on your docs, and we'll do the chores. Go + ahead and move your docs into the docs directory. + + ), + }, + { + title: 'Powered by React', + image: '/img/undraw_docusaurus_react.svg', + description: ( + <> + Extend or customize your website layout by reusing React. 
Docusaurus can + be extended while reusing the same header and footer. + + ), + }, +]; + +function Feature({title, image, description}: FeatureItem) { + return ( +
+
+ {title} +
+
+

{title}

+

{description}

+
+
+ ); +} + +export default function HomepageFeatures(): JSX.Element { + return ( +
+
+
+ {FeatureList.map((props, idx) => ( + + ))} +
+
+
+ ); +} diff --git a/src/components/Icon/index.js b/src/components/Icon/index.js deleted file mode 100644 index 8e8a8d47d..000000000 --- a/src/components/Icon/index.js +++ /dev/null @@ -1,14 +0,0 @@ -import React from "react" - -const Icon = props => ( - - - -) - -export default Icon diff --git a/src/components/Icon/search.icon b/src/components/Icon/search.icon deleted file mode 100644 index 8ad624ee8..000000000 --- a/src/components/Icon/search.icon +++ /dev/null @@ -1,9 +0,0 @@ - - - - _Icons / 24px / Search - Created with Sketch. - - - - diff --git a/src/components/Sidebar/index.js b/src/components/Sidebar/index.js deleted file mode 100644 index 56fd0073c..000000000 --- a/src/components/Sidebar/index.js +++ /dev/null @@ -1,97 +0,0 @@ -import { Link } from "gatsby" -import React from "react" - -const Sidebar = () => ( - <> -
-
-
-
Introduction
-
-
-
- - What is Gruntwork? - -
-
- - Gruntwork Philosophy - -
-
- - Library Catalog - -
-
-
Guides
-
-
- - Deploying a Dockerized App on GCP/GKE - -
-
- - Deploying a Production Grade EKS Cluster - -
-
- - Upgrading your Reference Architecture Deployment to Terraform - 0.12.x and Terragrunt 0.19.x - -
-
-
Reference
-
-
- - Module Version Compatibility - -
-
- - GCP Reference Architecture - -
-
-
- - How do I get help? - -
-
- -
-
- -) - -export default Sidebar diff --git a/src/components/SupportButton/index.js b/src/components/SupportButton/index.js deleted file mode 100644 index c77f9e8e7..000000000 --- a/src/components/SupportButton/index.js +++ /dev/null @@ -1,21 +0,0 @@ -import React from "react" - -class SupportButton extends React.Component { - render() { - function handleClick(e) { - e.preventDefault() - // do something meaningful, Promises, if/else, whatever, and then - window.location.assign("https://www.gruntwork.io/contact/") - } - - var label = this.props.label || "Talk to a Human" - - return ( - - ) - } -} - -export default SupportButton diff --git a/src/components/layout.js b/src/components/layout.js deleted file mode 100644 index 51f32b582..000000000 --- a/src/components/layout.js +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Layout component that queries for data - * with Gatsby's StaticQuery component - * - * See: https://www.gatsbyjs.org/docs/static-query/ - */ - -import React from "react" -import PropTypes from "prop-types" -import { StaticQuery, graphql } from "gatsby" -import { OutboundLink } from "gatsby-plugin-google-analytics" - -import Header from "./Header" -import Sidebar from "./Sidebar" - -const Layout = ({ children }) => ( - ( - <> -
-
-
- -
-
{children}
-
-
-
- - © {new Date().getFullYear()},{` `} - - Gruntwork Inc. - - -
-
-
-
-
-
- - )} - /> -) - -Layout.propTypes = { - children: PropTypes.node.isRequired, -} - -export default Layout diff --git a/src/components/markdown-page-footer.js b/src/components/markdown-page-footer.js deleted file mode 100644 index 7fbb6ed63..000000000 --- a/src/components/markdown-page-footer.js +++ /dev/null @@ -1,54 +0,0 @@ -import React from "react" -import { graphql } from "gatsby" -import { OutboundLink } from "gatsby-plugin-google-analytics" - -import { FontAwesomeIcon } from "@fortawesome/react-fontawesome" -import { faGithub } from "@fortawesome/free-brands-svg-icons" - -export default class MarkdownPageFooter extends React.Component { - constructor() { - super() - this.state = { feedbackSubmitted: false } - } - render() { - var originUrl - if (this.props.page && this.props.page.frontmatter.origin) { - originUrl = this.props.page.frontmatter.origin - } else { - originUrl = `https://github.com/gruntwork-io/docs/blob/master/content/${ - this.props.page ? this.props.page.parent.relativePath : `` - }` - } - return ( - <> -
-
-
- - - Edit this page on GitHub - - -
-
-
- - ) - } -} - -export const fragment = graphql` - fragment MarkdownPageFooter on MarkdownRemark { - frontmatter { - origin - } - parent { - ... on File { - relativePath - } - } - } -` diff --git a/src/components/seo.js b/src/components/seo.js deleted file mode 100644 index d6ae98254..000000000 --- a/src/components/seo.js +++ /dev/null @@ -1,104 +0,0 @@ -/** - * SEO component that queries for data with - * Gatsby's useStaticQuery React hook - * - * See: https://www.gatsbyjs.org/docs/use-static-query/ - */ - -import React from "react" -import PropTypes from "prop-types" -import Helmet from "react-helmet" -import { useStaticQuery, graphql } from "gatsby" - -function SEO({ description, lang, meta, keywords, title }) { - const { site } = useStaticQuery( - graphql` - query { - site { - siteMetadata { - title - description - author - } - } - } - ` - ) - - const metaDescription = description || site.siteMetadata.description - - return ( - 0 - ? { - name: `keywords`, - content: keywords.join(`, `), - } - : [] - ) - .concat(meta)} - bodyAttributes={{ - class: "light", - }} - > - - - ) -} - -SEO.defaultProps = { - lang: `en`, - meta: [], - keywords: [], - description: ``, -} - -SEO.propTypes = { - description: PropTypes.string, - lang: PropTypes.string, - meta: PropTypes.arrayOf(PropTypes.object), - keywords: PropTypes.arrayOf(PropTypes.string), - title: PropTypes.string.isRequired, -} - -export default SEO diff --git a/src/css/custom.css b/src/css/custom.css new file mode 100644 index 000000000..6abe14854 --- /dev/null +++ b/src/css/custom.css @@ -0,0 +1,28 @@ +/** + * Any CSS included here will be global. The classic template + * bundles Infima by default. Infima is a CSS framework designed to + * work well for content-centric websites. + */ + +/* You can override the default Infima variables here. 
*/ +:root { + --ifm-color-primary: #25c2a0; + --ifm-color-primary-dark: rgb(33, 175, 144); + --ifm-color-primary-darker: rgb(31, 165, 136); + --ifm-color-primary-darkest: rgb(26, 136, 112); + --ifm-color-primary-light: rgb(70, 203, 174); + --ifm-color-primary-lighter: rgb(102, 212, 189); + --ifm-color-primary-lightest: rgb(146, 224, 208); + --ifm-code-font-size: 95%; +} + +.docusaurus-highlight-code-line { + background-color: rgba(0, 0, 0, 0.1); + display: block; + margin: 0 calc(-1 * var(--ifm-pre-padding)); + padding: 0 var(--ifm-pre-padding); +} + +html[data-theme='dark'] .docusaurus-highlight-code-line { + background-color: rgba(0, 0, 0, 0.3); +} diff --git a/src/images/gruntwork-icon.png b/src/images/gruntwork-icon.png deleted file mode 100644 index 631bcc58d..000000000 Binary files a/src/images/gruntwork-icon.png and /dev/null differ diff --git a/src/pages/404.js b/src/pages/404.js deleted file mode 100644 index bc4c31d79..000000000 --- a/src/pages/404.js +++ /dev/null @@ -1,14 +0,0 @@ -import React from "react" - -import Layout from "../components/layout" -import SEO from "../components/seo" - -const NotFoundPage = () => ( - - -

NOT FOUND

-

You just hit a route that doesn't exist... the sadness.

-
-) - -export default NotFoundPage diff --git a/src/pages/another.tsx b/src/pages/another.tsx new file mode 100644 index 000000000..2e6af9a1a --- /dev/null +++ b/src/pages/another.tsx @@ -0,0 +1,13 @@ +import React from 'react'; +import Layout from '@theme/Layout'; + +export default function MyReactPage() { + const htmlContent = "" + return ( + +

My React page

+

This is a React pageasjsdkjfhsdkjfhsdkjf

+
+
+ ); +} \ No newline at end of file diff --git a/src/pages/courses.tsx b/src/pages/courses.tsx new file mode 100644 index 000000000..48f137384 --- /dev/null +++ b/src/pages/courses.tsx @@ -0,0 +1,11 @@ +import React from 'react'; +import Layout from '@theme/Layout'; + +export default function Courses() { + return ( + +

Courses

+

This is a React pageasjsdkjfhsdkjfhsdkjf

+
+ ); +} \ No newline at end of file diff --git a/src/pages/foo.tsx b/src/pages/foo.tsx new file mode 100644 index 000000000..53cc14656 --- /dev/null +++ b/src/pages/foo.tsx @@ -0,0 +1,555 @@ + + import React from 'react'; + import Layout from '@theme/Layout'; + + export default function MyReactPage() { + const htmlContent = `
+

Intro

+
+
+

This is Gruntwork’s style guide for Terraform. It aims to help us ensure that the code we write is +clear, readable, idiomatic Terraform code. The conventions detailed in this guide are our preferences and should be +thought of as guidelines rather than hard rules.

+
+
+
+
+

Starting point

+
+
+

We follow the official HashiCorp style guide for Terraform.

+
+
+

All of these are enforced using terraform fmt. All Gruntwork Terraform repos should enforce this using pre-commit +hooks; please add if missing.

+
+
+

On top of the official guide, Gruntwork follows some additional conventions.

+
+
+
+
+

Additional conventions

+
+
+

General

+
+

2 space indentations

+
+

Block contents should be indented with 2 spaces.

+
+
+
+

120 column limit

+
+

We follow a 120 column line-length limit, except for description strings in variable and output blocks, where single +line strings are preferred.

+
+
+
+

Block Labels, Variables, and Outputs should be snake case

+
+

The label for blocks should be in snake case. E.g. example_instance , not ExampleInstance or example-instance.

+
+
+ + + + + +
+
Note
+
+Labels are the strings that follow block names. For example, in the following, aws_instance and example_instance +are labels for the resource block. +
+
+
+
+
resource "aws_instance" "example_instance" {
+  # Omitted for brevity
+}
+
+
+
+

This includes variables and outputs as well:

+
+
+
+
variable "vpc_id" {}
+output "instance_name" {}
+
+
+
+
+

Module folder conventions

+
+

Each module repo should have the following 3 folders:

+
+
+
    +
  • +

    modules: Terraform modules that are designed to be consumed by users. The intention is that users should pull the +modules in the modules folder in their terraform code using module blocks.

    +
  • +
  • +

    examples: Folder that contains top level Terraform modules that provide an example of how to use the modules in the +modules folder. The examples folder often has subfolders for-learning-and-testing and for-production that contain +corresponding example code. See Testing: Terratest for more info on how these examples should be organized.

    +
  • +
  • +

    test: Terratest Go files for testing the code in the repo. See Testing: Terratest for specific conventions around Terratest.

    +
  • +
+
+
+

Additionally, each module in modules should be organized with the following files:

+
+
+
    +
  • +

    variables.tf: All variable blocks should go in here and they specify the inputs.

    +
  • +
  • +

    outputs.tf: All output blocks should go in here and they specify the outputs.

    +
  • +
  • +

    main.tf: All other logic should be added here.

    +
  • +
  • +

    dependencies.tf (optional): Any external references that are pulled in by a data source block should go in here. +This allows consumers of the module to quickly scan for what resources need to already exist to deploy the module.

    +
  • +
+
+
+

Any nonstandard file structure should be called out in the README (e.g., if main.tf is split up into multiple smaller +terraform files).

+
+
+
+

variables.tf conventions

+
+

Each variable block should always define a description and type, even if it is of the string type (the default), in that order. E.g.:

+
+
+
+
variable "example" {
+  description = "This is an example"
+  type        = string
+  default     = "example"  # NOTE: this is optional
+}
+
+
+
+
Complex types
+
+

Prefer concrete objects (object type) over +free form maps. However, for particularly large objects it is useful to support optional attributes. This is currently +not supported in terraform, so workaround by using any type.

+
+
+

When using any type, always use comments to describe the supported attributes. +Example.

+
+
+
+
+

outputs.tf conventions

+
+

Each output block should always define a description, before the value:

+
+
+
+
output "greeting" {
+  description = "This is a greeting for everyone."
+  value       = "hello world!"
+}
+
+
+
+
+

main.tf conventions

+
+

main.tf should (loosely) be organized by sections that correspond to components. There is no standard on grouping, but +as a rule of thumb each section should be focused on a specific component of the module. For example, an ECS service +module may consist of the following sections:

+
+
+
    +
  • +

    The ECS service resource, and any locals logic for setting up the attributes of the resource.

    +
  • +
  • +

    The ECS task definition resource, and any locals and template logic for setting up the attributes of the resource +(e.g. the container definition).

    +
  • +
  • +

    Any resources related to configuring ELBs to forward traffic to the ECS service (e.g., listeners and target groups).

    +
  • +
  • +

    Any resources related to configuring IAM permissions for the ECS service.

    +
  • +
  • +

    Any resources related to configuring network access (e.g., security group rules) for the ECS service.

    +
  • +
+
+
+

There is no standard on ordering the sections, but as a rule of thumb the following sections should be placed first, in order:

+
+
+
    +
  • +

    Version constraints for the module

    +
  • +
  • +

    Provider blocks, if needed.

    +
  • +
  • +

    The main component of the module (e.g., the aws_ecs_service resource for the ECS service module).

    +
  • +
  • +

    All other sections.

    +
  • +
  • +

    Any data blocks (at the bottom).

    +
  • +
+
+
+
+

Conditionals

+
+

Use () to break up conditionals across multiple lines.

+
+
+

Examples:

+
+
+
+
locals {
+  elb_id = (
+    var.elb_already_exists
+    ? var.elb_id
+    : module.elb.elb_id
+  )
+
+  excluded_child_account_ids = (
+    var.config_create_account_rules
+    ? []
+    : [
+      for account_name, account in module.organization.child_accounts
+      : account.id if lookup(lookup(var.child_accounts, account_name, {}), "enable_config_rules", false) == false
+    ]
+  )
+}
+
+
+
+
+
+

Comments

+
+

This section lists the Gruntwork conventions around comments in Terraform code.

+
+
+

# over //

+
+

Use # for comment strings, not // or /**/.

+
+
+
+

# - over # ~

+
+

Delimit section header comment blocks with # ---- instead of # ~~~~.

+
+
+
+

variables.tf

+
+

variables.tf files should clearly indicate required environment variables, and separate out required variables from +optional variables (with defaults) using block comments.

+
+
+

Example:

+
+
+
+
# ---------------------------------------------------------------------------------------------------------------------
+# ENVIRONMENT VARIABLES
+# Define these secrets as environment variables
+# ---------------------------------------------------------------------------------------------------------------------
+
+# TF_VAR_master_password
+
+# ---------------------------------------------------------------------------------------------------------------------
+# MODULE PARAMETERS
+# These variables are expected to be passed in by the operator
+# ---------------------------------------------------------------------------------------------------------------------
+
+variable "required_var" {
+  description = "This variable must be set in order to create the resource."
+  type        = string
+}
+
+# ---------------------------------------------------------------------------------------------------------------------
+# OPTIONAL PARAMETERS
+# These variables have defaults and may be overridden
+# ---------------------------------------------------------------------------------------------------------------------
+
+variable "optional_var" {
+  description = "This variable has a sensible default so it is not necessary to set it explicitly for this module to work."
+  type        = string
+  default     = "Hello world"
+}
+
+
+
+
+

main.tf

+
+
Section comments
+
+

Each section (as described in main.tf conventions) of main.tf should have block comments describing the component managed in the section.

+
+
+

Example:

+
+
+
+
# ---------------------------------------------------------------------------------------------------------------------
+# ONE LINE SUMMARY DESCRIBING WHAT IS BEING MANAGED IN THIS SECTION IN ALL CAPS
+# The rest of the comments should be in standard casing. This section should contain an overall description of the
+# component that is being managed, and highlight any unconventional workarounds or configurations that are in place.
+# ---------------------------------------------------------------------------------------------------------------------
+
+
+
+
+
+
+

Testing: Terratest

+
+

Gruntwork uses Terratest to write tests for Terraform modules. Terratest is a Go library that provides patterns and helper functions for testing infrastructure code.

+
+
+

Terratest best practices

+
+

Follow all the best practices listed in Terratest best practices.

+
+
+

The rest of the items below are additional conventions on top of the documented best practices that Gruntwork follows when writing tests using Terratest for Terraform modules.

+
+
+
+

Code formatting

+
+

Terratest is a Go library, so each test will be written in Go. All Go source files should be formatted using goimports and go fmt.

+
+
+
+

examples and tests

+
+

In many cases the individual modules in the modules folder are narrowly focused to a specific subset of the overall infrastructure. This means that in many cases you will need to provide dependent resources externally to the module in order to actually deploy them. The Terraform modules in the examples folder serve this purpose, specifying test resources that are injected as dependencies to the modules.

+
+
+

As such, the tests should be written against the examples folder, as opposed to the modules folder directly. In other words:

+
+
+
    +
  • +

    Every module in modules should have a corresponding example module in examples that calls it. (NOTE: you can have a single example call multiple modules).

    +
  • +
  • +

    Every example should have at least one test that calls it.

    +
  • +
  • +

    Tests should not directly call modules in the modules folder. Always go through the examples.

    +
  • +
+
+
+
+

Parallel

+
+

Every test should have the t.Parallel call in the test function unless there is a specific need to run tests serially, e.g. manipulating process global resources, like environment variables. This is so that tests run as quickly as possible.

+
+
+

To facilitate this, every reference to a Terraform example should use test_structure.CopyTerraformFolderToTemp to create a copy of the example module in a temp directory. Then as the test runs, any stateful changes to the example module directory are isolated across tests, so that there’s no conflict on parallel runs.

+
+
+
+

Use TestStages for faster development

+
+

Use test stages in the test code, unless you only have 1 or 2 steps in the test code (e.g. a plan verification test).

+
+
+

It’s very tedious to build and deploy resources over and over when you only want to tweak a validation step. TestStages make it flexible and convenient to skip stages, making development much faster.

+
+
+

For each test stage you introduce, add a commented out series of os.Setenv calls to make it convenient to skip stages as you develop.

+
+
+
+
func TestJenkins(t *testing.T) {
+	t.Parallel()
+
+	// Uncomment the items below to skip certain parts of the test
+	//os.Setenv("SKIP_build_ami", "true")
+	//os.Setenv("SKIP_deploy_terraform", "true")
+	//os.Setenv("SKIP_validate", "true")
+	//os.Setenv("SKIP_cleanup", "true")
+	//os.Setenv("SKIP_cleanup_ami", "true")
+
+	defer test_structure.RunTestStage(t, "cleanup_ami", deleteAMI)
+	defer test_structure.RunTestStage(t, "cleanup", destroyInfra)
+	test_structure.RunTestStage(t, "build_ami", buildAMI)
+	test_structure.RunTestStage(t, "deploy_terraform", deployInfra)
+	test_structure.RunTestStage(t, "validate", validateInfra)
+}
+
+
+
+

To use the stages, here’s an example workflow. The first time you run the test, you’ll want to skip only the cleanup stages:

+
+
+
+
// Uncomment the items below to skip certain parts of the test
+//os.Setenv("SKIP_build_ami", "true")
+//os.Setenv("SKIP_deploy_terraform", "true")
+//os.Setenv("SKIP_validate", "true")
+os.Setenv("SKIP_cleanup", "true")
+os.Setenv("SKIP_cleanup_ami", "true")
+
+
+
+

Let’s say building and deploying were successful, but validation failed. Since resources were not cleaned up, we can run only the validate stage. We skip the resource- and time-intensive build and deploy stages, and also continue to skip the cleanup stages:

+
+
+
+
// Uncomment the items below to skip certain parts of the test
+os.Setenv("SKIP_build_ami", "true")
+os.Setenv("SKIP_deploy_terraform", "true")
+//os.Setenv("SKIP_validate", "true")
+os.Setenv("SKIP_cleanup", "true")
+os.Setenv("SKIP_cleanup_ami", "true")
+
+
+
+

Once you’ve established that validation works, you can then run only the cleanup stages as below. Your workflow may vary.

+
+
+
+
// Uncomment the items below to skip certain parts of the test
+os.Setenv("SKIP_build_ami", "true")
+os.Setenv("SKIP_deploy_terraform", "true")
+os.Setenv("SKIP_validate", "true")
+//os.Setenv("SKIP_cleanup", "true")
+//os.Setenv("SKIP_cleanup_ami", "true")
+
+
+
+

When committing the final version of the test, all should be commented out so all stages run.

+
+
+
+
// Uncomment the items below to skip certain parts of the test
+//os.Setenv("SKIP_build_ami", "true")
+//os.Setenv("SKIP_deploy_terraform", "true")
+//os.Setenv("SKIP_validate", "true")
+//os.Setenv("SKIP_cleanup", "true")
+//os.Setenv("SKIP_cleanup_ami", "true")
+
+
+
+
+

Setup and Teardown pattern

+
+

In some cases you will want to write a group of tests that use a common resource, such as a Docker image or VPC. In this case, you will want to set up the common resource once, run a bunch of tests, and then tear down the resource. To achieve this, you can follow the subtest pattern of Go.

+
+
+

Use table driven tests where possible to make the subtest routines maintainable. Briefly, this means that you group your test cases using a test struct that reflects the unique parameters of the test cases. Then you can conveniently loop over the test cases in parallel, taking advantage of uniformity and speed.

+
+
+

Note that the subtest pattern has gotchas when running tests in parallel:

+
+
+
    +
  • +

    The main test function will not wait for the subtest to run if it uses t.Parallel. To avoid this, you need to wrap the parallel subtests in a synchronous, blocking subtest. In the example below, the group subtest is synchronous (no call to t.Parallel) and thus the main function will wait until that test finishes. The group test does not finish until all the subtests it spawns are finished, even if they are non-blocking and parallel, and thus the tearDownVPC call does not happen until all subtests are done.

    +
  • +
  • +

    If you are using table driven tests, the range variable will be updated to the next iteration before it is used within the subtest. That is, in the example below, if we did not have the testCase := testCase line in the range block, the testCase reference used in the subtest after the t.Parallel call will correspond to the last testCase in the testCases list. To avoid this, we create a new variable in the scope of the range block so that it does not get updated during the loop.

    +
  • +
+
+
+

Example:

+
+
+
+
func TestECS(t *testing.T) {
+    t.Parallel()
+
+    defer tearDownVPC()
+    deployVPC()
+
+    // Wrap the parallel tests in a synchronous test group to ensure that the main test function (the one calling
+    // \`tearDownVPC\` and \`deployVPC\`) waits until all the subtests are done before running the deferred function.
+    t.Run("group", func(t *testing.T) {
+        for _, testCase := range testCases {
+            // To avoid the range variable from getting updated in the parallel tests, we bind a new name that is within
+            // the scope of the for block.
+            testCase := testCase
+            t.Run(testCase.name, func(t *testing.T) {
+                t.Parallel()
+                testCase.testCode()
+            })
+        }
+    })
+}
+
+
+
+
+
+
` + return ( + +
+
+ ); + } diff --git a/src/pages/index.js b/src/pages/index.js deleted file mode 100644 index 35ebe7409..000000000 --- a/src/pages/index.js +++ /dev/null @@ -1,71 +0,0 @@ -import React from "react" -import { Link } from "gatsby" - -import Layout from "../components/layout" -import SEO from "../components/seo" - -const IndexPage = () => ( - - - -

Gruntwork Docs

-

- Welcome to the Gruntwork documentation site. Here you will find guides and - documentation that will get you started with our products. If it is your - first time hearing about Gruntwork, check out{" "} - What is Gruntwork? to - learn more about what we do. -

- -
-
-
-
-

Deploying a Dockerized app on GKE

-

- Learn how to launch a production-grade GKE cluster on GCP, then - take a simple Dockerized Node.js app and deploy it using a Cloud - Load Balancer. -

- - Read more - -
-
-
-
-
-
-

Deploying a Production Grade EKS Cluster

-

- Learn how to use Gruntwork modules to deploy and manage a - production-grade EKS cluster. Includes: network topology, worker - ASG pools, IAM roles and RBAC. -

- - Read more - -
-
-
-
- -

- - Don't see what you are looking for? Check out{" "} - How do I get help? - -

-
-) - -export default IndexPage diff --git a/src/pages/index.module.css b/src/pages/index.module.css new file mode 100644 index 000000000..666feb6a1 --- /dev/null +++ b/src/pages/index.module.css @@ -0,0 +1,23 @@ +/** + * CSS files with the .module.css suffix will be treated as CSS modules + * and scoped locally. + */ + +.heroBanner { + padding: 4rem 0; + text-align: center; + position: relative; + overflow: hidden; +} + +@media screen and (max-width: 966px) { + .heroBanner { + padding: 2rem; + } +} + +.buttons { + display: flex; + align-items: center; + justify-content: center; +} diff --git a/src/pages/index.tsx b/src/pages/index.tsx new file mode 100644 index 000000000..cc4f72112 --- /dev/null +++ b/src/pages/index.tsx @@ -0,0 +1,40 @@ +import React from 'react'; +import clsx from 'clsx'; +import Layout from '@theme/Layout'; +import Link from '@docusaurus/Link'; +import useDocusaurusContext from '@docusaurus/useDocusaurusContext'; +import styles from './index.module.css'; +import HomepageFeatures from '../components/HomepageFeatures'; + +function HomepageHeader() { + const {siteConfig} = useDocusaurusContext(); + return ( +
+
+

{siteConfig.title}

+

{siteConfig.tagline}

+
+ + Docusaurus Tutorial - 5min ⏱️ + +
+
+
+ ); +} + +export default function Home(): JSX.Element { + const {siteConfig} = useDocusaurusContext(); + return ( + + +
+ +
+
+ ); +} diff --git a/src/pages/markdown-page.md b/src/pages/markdown-page.md new file mode 100644 index 000000000..9756c5b66 --- /dev/null +++ b/src/pages/markdown-page.md @@ -0,0 +1,7 @@ +--- +title: Markdown page example +--- + +# Markdown page example + +You don't need React to write simple standalone pages. diff --git a/src/pages/support.js b/src/pages/support.js deleted file mode 100644 index c6d04e3dd..000000000 --- a/src/pages/support.js +++ /dev/null @@ -1,53 +0,0 @@ -import React from "react" -import { OutboundLink } from "gatsby-plugin-google-analytics" - -import Layout from "../components/layout" -import SEO from "../components/seo" - -const SupportPage = () => ( - - -

How do I get help?

-

- Need help getting started with using Gruntwork? Check out the guides on - the sidebar for topics that fit your need. -

-

- If you don't find topics on anything you are looking for, try searching - for your specific question on our community forum or contact us to inquire - about getting support. -

-

- If you are a subscriber, you can ask your question on any of the following - channels available to you: -

-
    -
  1. -

    - Email support@gruntwork.io -

    -
  2. -
  3. -

    - - The Gruntwork Community Forum - -

    -
  4. -
  5. -

    The Gruntwork Community Slack Workspace

    -
  6. -
  7. -

    - Shared Slack Channel (Only available to subscribers of our Dedicated - Support service) -

    -
  8. -
-
-) - -export default SupportPage diff --git a/src/scss/bootstrap/_alert.scss b/src/scss/bootstrap/_alert.scss deleted file mode 100755 index dd43e2376..000000000 --- a/src/scss/bootstrap/_alert.scss +++ /dev/null @@ -1,51 +0,0 @@ -// -// Base styles -// - -.alert { - position: relative; - padding: $alert-padding-y $alert-padding-x; - margin-bottom: $alert-margin-bottom; - border: $alert-border-width solid transparent; - @include border-radius($alert-border-radius); -} - -// Headings for larger alerts -.alert-heading { - // Specified to prevent conflicts of changing $headings-color - color: inherit; -} - -// Provide class for links that match alerts -.alert-link { - font-weight: $alert-link-font-weight; -} - - -// Dismissible alerts -// -// Expand the right padding and account for the close button's positioning. - -.alert-dismissible { - padding-right: ($close-font-size + $alert-padding-x * 2); - - // Adjust close link position - .close { - position: absolute; - top: 0; - right: 0; - padding: $alert-padding-y $alert-padding-x; - color: inherit; - } -} - - -// Alternate styles -// -// Generate contextual modifier classes for colorizing the alert. - -@each $color, $value in $theme-colors { - .alert-#{$color} { - @include alert-variant(theme-color-level($color, $alert-bg-level), theme-color-level($color, $alert-border-level), theme-color-level($color, $alert-color-level)); - } -} diff --git a/src/scss/bootstrap/_badge.scss b/src/scss/bootstrap/_badge.scss deleted file mode 100755 index b87a1b004..000000000 --- a/src/scss/bootstrap/_badge.scss +++ /dev/null @@ -1,47 +0,0 @@ -// Base class -// -// Requires one of the contextual, color modifier classes for `color` and -// `background-color`. 
- -.badge { - display: inline-block; - padding: $badge-padding-y $badge-padding-x; - font-size: $badge-font-size; - font-weight: $badge-font-weight; - line-height: 1; - text-align: center; - white-space: nowrap; - vertical-align: baseline; - @include border-radius($badge-border-radius); - - // Empty badges collapse automatically - &:empty { - display: none; - } -} - -// Quick fix for badges in buttons -.btn .badge { - position: relative; - top: -1px; -} - -// Pill badges -// -// Make them extra rounded with a modifier to replace v3's badges. - -.badge-pill { - padding-right: $badge-pill-padding-x; - padding-left: $badge-pill-padding-x; - @include border-radius($badge-pill-border-radius); -} - -// Colors -// -// Contextual variations (linked badges get darker on :hover). - -@each $color, $value in $theme-colors { - .badge-#{$color} { - @include badge-variant($value); - } -} diff --git a/src/scss/bootstrap/_breadcrumb.scss b/src/scss/bootstrap/_breadcrumb.scss deleted file mode 100755 index 0825c661a..000000000 --- a/src/scss/bootstrap/_breadcrumb.scss +++ /dev/null @@ -1,49 +0,0 @@ -.breadcrumb { - display: flex; - flex-wrap: wrap; - padding: $breadcrumb-padding-y $breadcrumb-padding-x; - margin-bottom: $breadcrumb-margin-bottom; - list-style: none; - background-color: $breadcrumb-bg; - @include border-radius($breadcrumb-border-radius); -} - -.breadcrumb-item { - margin-bottom: 0; - // The separator between breadcrumbs (by default, a forward-slash: "/") - + .breadcrumb-item { - padding-left: $breadcrumb-item-padding; - - &::before { - content: $breadcrumb-divider; - display: inline-block; // Suppress underlining of the separator in modern browsers - margin-right: $breadcrumb-item-padding; - color: $breadcrumb-divider-color; - width: 5.5px; - height: 5.5px; - border-top: 1.5px solid $breadcrumb-divider-color; - border-right: 1.5px solid $breadcrumb-divider-color; - transform: rotate(45deg); - position: relative; - vertical-align: middle; - } - } - - // IE9-11 hack to 
properly handle hyperlink underlines for breadcrumbs built - // without `