diff --git a/.gitignore b/.gitignore index e341201..4d029f1 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,5 @@ */build/ +.vscode + +# GCP ignores. +gcp-deployment-manager/local-override.env diff --git a/.travis.yml b/.travis.yml index fbbdae4..2c4bd95 100644 --- a/.travis.yml +++ b/.travis.yml @@ -22,9 +22,10 @@ install: script: - cd aws-cloudformation && ./scripts/bash2yaml startup-${TEMPLATE_TYPE}.sh tpl-${TEMPLATE_TYPE}.yaml - - mkdir -p ${LOCAL_DIR} && mv -f template.yaml ${LOCAL_DIR}/${TEMPLATE_TYPE}.yaml + - aws cloudformation package --template-file template.yaml --s3-bucket null --output-template-file package.json --use-json + - mkdir -p ${LOCAL_DIR} && mv -f package.json ${LOCAL_DIR}/${TEMPLATE_TYPE}.json - tmp=$(basename $(mktemp -u)) - - aws s3 cp ${LOCAL_DIR}/${TEMPLATE_TYPE}.yaml s3://${S3_BUCKET}/${tmp} --acl public-read + - aws s3 cp ${LOCAL_DIR}/${TEMPLATE_TYPE}.json s3://${S3_BUCKET}/${tmp} --acl public-read - res=$(aws cloudformation validate-template --region ${AWS_REGION} --template-url https://${S3_BUCKET}.s3.amazonaws.com/${tmp} || echo "error") - aws s3 rm s3://${S3_BUCKET}/${tmp} - if [ "${res}" == "error" ]; then exit 1; fi diff --git a/aws-cloudformation/README.md b/aws-cloudformation/README.md index 31eef8a..506d18e 100644 --- a/aws-cloudformation/README.md +++ b/aws-cloudformation/README.md @@ -36,7 +36,7 @@ If you have an existing AWS account (with billing and an SSH key pair), just cli **WARNING:** if you have an existing sandbox server created before Dec 31, 2019 (v1), **DO NOT UPGRADE**. See [v2.0.0](https://github.com/docksal/sandbox-server/releases/tag/v2.0.0) release notes. 
-[![Launch Basic Stack](https://cdn.rawgit.com/buildkite/cloudformation-launch-stack-button-svg/master/launch-stack.svg)](https://console.aws.amazon.com/cloudformation/home#/stacks/new?stackName=docksal-sandbox-server&templateURL=https://s3.us-east-2.amazonaws.com/docksal-aws-templates/sandbox-server/v2/basic.yaml) +[![Launch Basic Stack](https://cdn.rawgit.com/buildkite/cloudformation-launch-stack-button-svg/master/launch-stack.svg)](https://console.aws.amazon.com/cloudformation/home#/stacks/new?stackName=docksal-sandbox-server&templateURL=https://s3.us-east-2.amazonaws.com/docksal-aws-templates/sandbox-server/v3/basic.json) You will be prompted for: @@ -60,20 +60,24 @@ If you have an existing AWS account (with billing and an SSH key pair), just cli **WARNING:** if you have an existing sandbox server created before Dec 31, 2019 (v1), **DO NOT UPGRADE**. See [v2.0.0](https://github.com/docksal/sandbox-server/releases/tag/v2.0.0) release notes. -[![Launch Advanced Stack](https://cdn.rawgit.com/buildkite/cloudformation-launch-stack-button-svg/master/launch-stack.svg)](https://console.aws.amazon.com/cloudformation/home#/stacks/new?stackName=docksal-sandbox-server&templateURL=https://s3.us-east-2.amazonaws.com/docksal-aws-templates/sandbox-server/v2/advanced.yaml) +[![Launch Advanced Stack](https://cdn.rawgit.com/buildkite/cloudformation-launch-stack-button-svg/master/launch-stack.svg)](https://console.aws.amazon.com/cloudformation/home#/stacks/new?stackName=docksal-sandbox-server&templateURL=https://s3.us-east-2.amazonaws.com/docksal-aws-templates/sandbox-server/v3/advanced.json) -You will be prompted for few required and optional settings. +You will be prompted for a few required and optional settings. 
- Basic: Required - Resource type (`ec2` vs `spot`) - - Instance type + - Instance type (primary) + - Instance type 2 (spot only) + - Instance type 3 (spot only) - SSH key - Availability zone -- VPC/Network: Optional** +- VPC/Network: Optional - VPC ID - Subnet ID - Elastic IP - - Access from + - Access from CIDR 1 + - Access from CIDR 2 + - Access from CIDR 3 - Storage: Optional - Persistent data volume - Enable artifacts bucket @@ -85,6 +89,8 @@ You will be prompted for few required and optional settings. - LetsEncrypt settings: Optional - Sandbox domain name - LetsEncrypt configuration +- Docksal settings: Optional + - Docksal version Once provisioned, the IP address of the server will be printed in the **Outputs** section in CloudFormation (``). diff --git a/aws-cloudformation/scripts/find-amis b/aws-cloudformation/scripts/find-amis index e0fe1a9..32929df 100755 --- a/aws-cloudformation/scripts/find-amis +++ b/aws-cloudformation/scripts/find-amis @@ -1,7 +1,7 @@ #!/bin/bash # ubuntu official owner 099720109477 -latest_ami="ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-????????" +latest_ami="ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-????????" 
# latest_ami="ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-20191002" AWS_REGION=$(aws ec2 describe-regions --output text | awk '{print $3}' | xargs) diff --git a/aws-cloudformation/startup-advanced.sh b/aws-cloudformation/startup-advanced.sh index 9caa3bb..e69f79e 100755 --- a/aws-cloudformation/startup-advanced.sh +++ b/aws-cloudformation/startup-advanced.sh @@ -33,7 +33,6 @@ MOUNT_POINT="/data" BUILD_USER="build-agent" BUILD_USER_UID="1100" BUILD_USER_HOME="/home/${BUILD_USER}" -DOCKSAL_VERSION="master" PROJECT_INACTIVITY_TIMEOUT="0.5h" PROJECT_DANGLING_TIMEOUT="168h" PROJECTS_ROOT="${BUILD_USER_HOME}/builds" @@ -159,6 +158,7 @@ do sleep 5 done +# get stack parameters export EIP=$(aws cloudformation describe-stacks --stack-name=${STACK_ID} --query 'Stacks[*].Outputs[?OutputKey==`IPAddress`].OutputValue' --output text) export VOLUME_ID=$(aws cloudformation describe-stacks --stack-name=${STACK_ID} --query 'Stacks[*].Parameters[?ParameterKey==`ExistingDataVolume`].ParameterValue' --output text) export GITHUB_TOKEN=$(aws cloudformation describe-stacks --stack-name=${STACK_ID} --query 'Stacks[*].Parameters[?ParameterKey==`GitHubToken`].ParameterValue' --output text) @@ -167,6 +167,8 @@ export GITHUB_TEAM_SLUG=$(aws cloudformation describe-stacks --stack-name=${STAC export LETSENCRYPT_DOMAIN=$(aws cloudformation describe-stacks --stack-name=${STACK_ID} --query 'Stacks[*].Parameters[?ParameterKey==`LetsEncryptDomain`].ParameterValue' --output text) export LETSENCRYPT_CONFIG=$(aws cloudformation describe-stacks --stack-name=${STACK_ID} --query 'Stacks[*].Parameters[?ParameterKey==`LetsEncryptConfig`].ParameterValue' --output text) export ARTIFACTS_S3_BUCKET=$(aws cloudformation describe-stacks --stack-name=${STACK_ID} --query 'Stacks[*].Outputs[?OutputKey==`ArtifactsBucket`].OutputValue' --output text) +export DOCKSAL_VERSION=$(aws cloudformation describe-stacks --stack-name=${STACK_ID} --query 
'Stacks[*].Parameters[?ParameterKey==`DocksalVersion`].ParameterValue' --output text) +export DOCKSAL_VERSION=${DOCKSAL_VERSION:-"master"} # attach/detach elastic ip if [[ "${EIP}" != "${ATTACHED_IP}" ]] @@ -325,6 +327,9 @@ then sed -i "s|^BACKUP_SSH_PUBLIC_KEY=\".*\"|BACKUP_SSH_PUBLIC_KEY=\"${BACKUP_SSH_PUBLIC_KEY}\"|g" /usr/local/bin/ssh-rake chmod +x /usr/local/bin/ssh-rake /usr/local/bin/ssh-rake install + # Remove ec2-instance-connect as it brakes ssh-rake + # See https://github.com/aws/aws-ec2-instance-connect-config/issues/19 + apt-get purge ec2-instance-connect -y fi if [[ "${old_stack_md5sum}" != "${stack_md5sum}" ]] @@ -385,4 +390,3 @@ fi su - build-agent -c "fin system reset" echo "${stack_md5sum}" >/root/stack_last_update - diff --git a/aws-cloudformation/tpl-advanced.yaml b/aws-cloudformation/tpl-advanced.yaml index f6b8ed9..7946c21 100644 --- a/aws-cloudformation/tpl-advanced.yaml +++ b/aws-cloudformation/tpl-advanced.yaml @@ -1,4 +1,4 @@ -AWSTemplateFormatVersion: 2010-09-09 +AWSTemplateFormatVersion: "2010-09-09" Description: Sandbox server template (advanced) Parameters: @@ -16,35 +16,65 @@ Parameters: - spot InstanceType: Description: | - EC2 instance type. Default: t3.small = 2 vCPU, 2GB RAM. - Instances with < 2GB RAM are not recommended and should only be used for testing the CloudFormation template. + EC2 instance type (default: t3.small = 2 vCPU, 2GB RAM). + t3 (Intel) / t3a (AMD) family instances provide burstable CPU performance and are generally the best choise for a sandbox server. + t3/t3a: CPU/RAM ratio is variable from 1/1 to 1/4 based on instance size (t3.large = 2 vCPU, 8GB RAM). + c5: CPU/RAM ratio is 1/2 (c5.large = 2 vCPU, 4GB RAM). + m5: CPU/RAM ratio is 1/4 (m5.large = 2 vCPU, 8GB RAM). + r5: CPU/RAM ratio is 1/8 (r5.large = 2 vCPU, 16GB RAM). 
Type: String - # Instances with < 2GB RAM should only be used for testing the CloudFormation template - # t3.small: 2 vCPU, 2GB RAM Default: t3.small AllowedValues: - - t3.nano - - t3.micro - t3.small - t3.medium - t3.large - t3.xlarge - t3.2xlarge + - t3a.small + - t3a.medium + - t3a.large + - t3a.xlarge + - t3a.2xlarge + - c5.large + - c5.xlarge + - c5.2xlarge + - c5.4xlarge + - m5.large + - m5.xlarge + - m5.2xlarge + - m5.4xlarge + - r5.large + - r5.xlarge + - r5.2xlarge + InstanceType2: + Description: | + Additional EC2 instance type (spot mode only). AWS will pick the cheapest available option for spot instance. + IMPORTANT: "Instance Type"/"Instance Type 2" must be unique, overwise stack creation/update will fail. + Recomendation: Use a "t3" instance for the primary option and a matching size "t3a" instance type here. + Type: String + Default: t3.medium + AllowedValues: + - t3.small + - t3.medium + - t3.large + - t3.xlarge + - t3.2xlarge + - t3a.small + - t3a.medium + - t3a.large + - t3a.xlarge + - t3a.2xlarge + - c5.large + - c5.xlarge + - c5.2xlarge + - c5.4xlarge - m5.large - m5.xlarge - m5.2xlarge - m5.4xlarge - - m5d.large - - m5d.xlarge - - m5d.2xlarge - - m5d.4xlarge - r5.large - r5.xlarge - r5.2xlarge - - r5d.large - - r5d.xlarge - - r5d.2xlarge - ConstraintDescription: "Must be a valid EC2 instance type" KeyName: Description: "Name of an existing EC2 KeyPair to enable SSH access to the instance" Type: "AWS::EC2::KeyPair::KeyName" @@ -128,6 +158,11 @@ Parameters: Description: "Set name for the artifacts bucket. Leave empty to have the bucket name automatically generated." Type: String Default: "" + DocksalVersion: + Description: | + Specify the version of Docksal to install (e.g., v1.14.0). Leave empty to get the latest stable version (master). 
+ Type: String + Default: "" Metadata: AWS::CloudFormation::Interface: @@ -137,6 +172,7 @@ Metadata: Parameters: - ResourceType - InstanceType + - InstanceType2 - KeyName - ManualAZ - Label: @@ -165,11 +201,17 @@ Metadata: Parameters: - LetsEncryptDomain - LetsEncryptConfig + - Label: + default: "Docksal settings: Optional" + Parameters: + - DocksalVersion ParameterLabels: ResourceType: default: "Resource type" InstanceType: - default: "Instance type" + default: "Instance type (primary)" + InstanceType2: + default: "Instance type 2 (spot only)" KeyName: default: "SSH key" ManualAZ: @@ -181,11 +223,11 @@ Metadata: ExistingEIP: default: "Elastic IP" AccessFrom1: - default: "Access from cidr 1" + default: "Access from CIDR 1" AccessFrom2: - default: "Access from cidr 2" + default: "Access from CIDR 2" AccessFrom3: - default: "Access from cidr 3" + default: "Access from CIDR 3" ExistingDataVolume: default: "Persistent data volume" EnableArtifactsBucket: @@ -202,41 +244,43 @@ Metadata: default: "Sandbox domain name" LetsEncryptConfig: default: "LetsEncrypt configuration" + DocksalVersion: + default: "Docksal version" Mappings: Region2AMI: eu-north-1: - AMI: ami-005bc7d72deb72a3d + AMI: ami-0d4e2b57f569e9daa ap-south-1: - AMI: ami-0245841fc4b40e22f + AMI: ami-0c5b1a88222ac79cb eu-west-3: - AMI: ami-0b70d1460d5c7a299 + AMI: ami-0e60c6afa19d896ee eu-west-2: - AMI: ami-00622b440d92e55c0 + AMI: ami-0917237b4e71c5759 eu-west-1: - AMI: ami-04c58523038d79132 + AMI: ami-0dad359ff462124ca ap-northeast-2: - AMI: ami-02b4a5559ce53a570 + AMI: ami-0de407404c33d1671 ap-northeast-1: - AMI: ami-0f6b4f4104d26f399 + AMI: ami-0c1ac8728ef7f87a4 sa-east-1: - AMI: ami-049f5d88d2d436431 + AMI: ami-001c16a3a4f5d85f1 ca-central-1: - AMI: ami-0972a0d3135cf1fc0 + AMI: ami-08a6203f59f9df866 ap-southeast-1: - AMI: ami-07febfdfb4080320e + AMI: ami-0b8cf0f359b1335e1 ap-southeast-2: - AMI: ami-04a0f7552cff370ba + AMI: ami-0a1a4d97d4af3009b eu-central-1: - AMI: ami-09356619876445425 + AMI: 
ami-05c26ae4789875080 us-east-1: - AMI: ami-00a208c7cdba991ea + AMI: ami-068663a3c619dd892 us-east-2: - AMI: ami-059d836af932792c3 + AMI: ami-0e84e211558a022c0 us-west-1: - AMI: ami-0f42d8c4eb586ccf7 + AMI: ami-075fd582acf0c0128 us-west-2: - AMI: ami-0a7d051a1c4b54f65 + AMI: ami-09dd2e08d601bff67 Conditions: CreateEC2: !Equals [!Ref ResourceType, "ec2"] @@ -557,7 +601,8 @@ Resources: TargetCapacity: 1 IamFleetRole: !GetAtt FleetRole.Arn LaunchSpecifications: - - InstanceType: !Ref InstanceType + - &instance-profile + InstanceType: !Ref InstanceType IamInstanceProfile: Arn: Fn::GetAtt: @@ -586,10 +631,13 @@ Resources: - - | # add to the end of init script echo line with template variable values, for recreate instance on every variable change - !Sub - - echo ${eip}-${ExistingDataVolume}-${s3name}-${GitHubToken}-${GitHubOrgName}-${GitHubTeamSlug}-${LetsEncryptDomain}-${LetsEncryptConfig} >/dev/null + - echo ${eip}-${ExistingDataVolume}-${s3name}-${GitHubToken}-${GitHubOrgName}-${GitHubTeamSlug}-${LetsEncryptDomain}-${LetsEncryptConfig}-${DocksalVersion} >/dev/null - s3name: !If [ ArtifactsEnabled, !Ref ArtifactsBucket, "" ] eip: !If [ ExistingEIPNotDefined, !Ref EIP, !Ref ExistingEIP ] + - <<: *instance-profile + InstanceType: !Ref InstanceType2 + ec2Instance: Type: 'AWS::EC2::Instance' Condition: CreateEC2 @@ -616,7 +664,7 @@ Resources: - - | # add to the end of init script echo line with template variable values, for recreate instance on every variable change - !Sub - - echo ${eip}-${ExistingDataVolume}-${s3name}-${GitHubToken}-${GitHubOrgName}-${GitHubTeamSlug}-${LetsEncryptDomain}-${LetsEncryptConfig} >/dev/null + - echo ${eip}-${ExistingDataVolume}-${s3name}-${GitHubToken}-${GitHubOrgName}-${GitHubTeamSlug}-${LetsEncryptDomain}-${LetsEncryptConfig}-${DocksalVersion} >/dev/null - s3name: !If [ ArtifactsEnabled, !Ref ArtifactsBucket, "" ] eip: !If [ ExistingEIPNotDefined, !Ref EIP, !Ref ExistingEIP ] diff --git a/aws-cloudformation/tpl-basic.yaml 
b/aws-cloudformation/tpl-basic.yaml index 2ef432d..3fc401e 100644 --- a/aws-cloudformation/tpl-basic.yaml +++ b/aws-cloudformation/tpl-basic.yaml @@ -1,16 +1,12 @@ -AWSTemplateFormatVersion: 2010-09-09 +AWSTemplateFormatVersion: "2010-09-09" Description: Sandbox server template (basic) Parameters: InstanceType: Description: "EC2 instance type (default: t3.small = 2 vCPU, 2GB RAM)" Type: String - # Instances with < 2GB RAM should only be used for testing the CloudFormation template - # t3.small: 2 vCPU, 2GB RAM Default: t3.small AllowedValues: - - t3.nano - - t3.micro - t3.small - t3.medium - t3.large @@ -65,37 +61,37 @@ Metadata: Mappings: Region2AMI: eu-north-1: - AMI: ami-005bc7d72deb72a3d + AMI: ami-0d4e2b57f569e9daa ap-south-1: - AMI: ami-0245841fc4b40e22f + AMI: ami-0c5b1a88222ac79cb eu-west-3: - AMI: ami-0b70d1460d5c7a299 + AMI: ami-0e60c6afa19d896ee eu-west-2: - AMI: ami-00622b440d92e55c0 + AMI: ami-0917237b4e71c5759 eu-west-1: - AMI: ami-04c58523038d79132 + AMI: ami-0dad359ff462124ca ap-northeast-2: - AMI: ami-02b4a5559ce53a570 + AMI: ami-0de407404c33d1671 ap-northeast-1: - AMI: ami-0f6b4f4104d26f399 + AMI: ami-0c1ac8728ef7f87a4 sa-east-1: - AMI: ami-049f5d88d2d436431 + AMI: ami-001c16a3a4f5d85f1 ca-central-1: - AMI: ami-0972a0d3135cf1fc0 + AMI: ami-08a6203f59f9df866 ap-southeast-1: - AMI: ami-07febfdfb4080320e + AMI: ami-0b8cf0f359b1335e1 ap-southeast-2: - AMI: ami-04a0f7552cff370ba + AMI: ami-0a1a4d97d4af3009b eu-central-1: - AMI: ami-09356619876445425 + AMI: ami-05c26ae4789875080 us-east-1: - AMI: ami-00a208c7cdba991ea + AMI: ami-068663a3c619dd892 us-east-2: - AMI: ami-059d836af932792c3 + AMI: ami-0e84e211558a022c0 us-west-1: - AMI: ami-0f42d8c4eb586ccf7 + AMI: ami-075fd582acf0c0128 us-west-2: - AMI: ami-0a7d051a1c4b54f65 + AMI: ami-09dd2e08d601bff67 Resources: InstanceSecurityGroup: diff --git a/gcp-deployment-manager/Docksal.jinja b/gcp-deployment-manager/Docksal.jinja deleted file mode 100644 index 7b6ee8e..0000000 --- 
a/gcp-deployment-manager/Docksal.jinja +++ /dev/null @@ -1,88 +0,0 @@ -{# - -Docksal Sandbox Server template - -#} - -{% set project = env["project"] %} -{% set deployment = env["deployment"] %} -{% set instanceName = "%s-vm" % deployment %} -{% set zone = properties["zone"] %} -{% set machineType = properties["machineType"] %} -{% set dataDisk = "%s-data" % deployment %} -{% set dataDiskSizeGb = properties["dataDiskSizeGb"] %} - -resources: - -# Firewall -resources: -- name: {{ deployment }}-firewall - type: compute.v1.firewall - properties: - network: https://www.googleapis.com/compute/v1/projects/{{ project }}/global/networks/default - sourceRanges: ["0.0.0.0/0"] - allowed: - - IPProtocol: TCP - ports: - - "80" - - "443" - - "22" - -# Data disk -- type: compute.v1.disk - name: {{ dataDisk }} - properties: - zone: {{ zone }} - sizeGb: {{ dataDiskSizeGb }} - # Disk type is a full URI. Example uses pd-standard, but pd-ssd can be used as well - type: https://www.googleapis.com/compute/v1/projects/{{ project }}/zones/{{ zone }}/diskTypes/pd-standard - -# VM instance -- type: compute.v1.instance - name: {{ instanceName }} - properties: - zone: {{ zone }} - machineType: https://www.googleapis.com/compute/v1/projects/{{ project }}/zones/{{ zone }}/machineTypes/{{ machineType }} - - disks: - - deviceName: {{ dataDisk }}-boot - type: PERSISTENT - boot: true - autoDelete: true - initializeParams: - sourceImage: https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-minimal-1804-bionic-v20191113 - - deviceName: {{ dataDisk }}-data - type: PERSISTENT - source: $(ref.{{ dataDisk }}.selfLink) - autoDelete: true - - networkInterfaces: - - network: https://www.googleapis.com/compute/v1/projects/{{ project }}/global/networks/default - # Access Config required to give the instance a public IP address - accessConfigs: - - name: External NAT - type: ONE_TO_ONE_NAT - - metadata: - items: - {% for key, value in properties['metadata-from-file'].iteritems() 
%} - - key: {{ key }} - value: | - {{ imports[value]|indent(10) }} - {% endfor %} - -outputs: - - name: deployment - value: {{ deployment }} - - name: project - value: {{ project }} - - name: vmId - value: $(ref.{{ instanceName }}.id) - - name: vmExternalIP - value: $(ref.{{ instanceName }}.networkInterfaces[0].accessConfigs[0].natIP) - - name: vmInternalIP - value: $(ref.{{ instanceName }}.networkInterfaces[0].networkIP) - - name: vmName - value: {{ instanceName }} - - name: vmSelfLink - value: $(ref.{{ instanceName }}.selfLink) diff --git a/gcp-deployment-manager/Makefile b/gcp-deployment-manager/Makefile new file mode 100644 index 0000000..af6c83e --- /dev/null +++ b/gcp-deployment-manager/Makefile @@ -0,0 +1,85 @@ +# Default project variables. +include project.env +# Project-specific overrides. These should be version controlled. +PROJECT_OVERRIDES:=project-override.env +# Untracked local overrides file, for development purposes. +LOCAL_OVERRIDES:=local-override.env +DOCKER_ENV_FILE:=--env-file=project.env +# GCLOUD:=docker run -it --rm --volumes-from gcloud-config -v `pwd`:/opt/deployment $(DOCKER_ENV_FILE) google/cloud-sdk:$(CLOUD_SDK_IMAGE_VERSION) gcloud + +ifneq ("$(wildcard $(PROJECT_OVERRIDES))","") + include $(PROJECT_OVERRIDES) + DOCKER_ENV_FILE+=--env-file=$(PROJECT_OVERRIDES) +endif + +ifneq ("$(wildcard $(LOCAL_OVERRIDES))","") + include $(LOCAL_OVERRIDES) + DOCKER_ENV_FILE+=--env-file=$(LOCAL_OVERRIDES) +endif + +GCLOUD:=docker run -it --rm \ + --volumes-from gcloud-config \ + -v `pwd`:/opt/deployment \ + $(DOCKER_ENV_FILE) \ + google/cloud-sdk:$(CLOUD_SDK_IMAGE_VERSION) \ + gcloud + +export + +login: + docker run -ti --name gcloud-config google/cloud-sdk:$(CLOUD_SDK_IMAGE_VERSION) gcloud auth login + +guard-%: + @ if [ "${${*}}" = "" ]; then \ + echo "Environment variable $* not set"; \ + exit 1; \ + fi + +bash: + docker run -it --rm \ + --volumes-from gcloud-config \ + -v `pwd`:/opt/deployment \ + $(DOCKER_ENV_FILE) \ + -w /opt/deployment \ + 
google/cloud-sdk:$(CLOUD_SDK_IMAGE_VERSION) /bin/bash + +clean-container: + docker rm -v gcloud-config + +create: + $(GCLOUD) deployment-manager deployments create $(DISK_DEPLOYMENT) \ + --template=/opt/deployment/templates/docksal_disk.jinja \ + --properties="diskSizeGb:$(DISK_SIZE),zone:'$(ZONE)'" + + $(GCLOUD) deployment-manager deployments create $(TEMPLATE_DEPLOYMENT) \ + --template=/opt/deployment/templates/docksal_template.jinja \ + --properties="templateId:'$(TEMPLATE_ID)',region:'$(REGION)',machineType:'$(DEFAULT_MACHINE_SIZE)',preemptive:$(PREEMPTIVE),docksalVersion:'$(DOCKSAL_VERSION)'" + + $(GCLOUD) deployment-manager deployments create $(GROUP_DEPLOYMENT) \ + --template=/opt/deployment/templates/docksal_group.py \ + --properties="templateId:'$(TEMPLATE_ID)',zone:'$(ZONE)'" + +update: + $(GCLOUD) deployment-manager deployments update $(DISK_DEPLOYMENT) \ + --template=/opt/deployment/templates/docksal_disk.jinja \ + --properties="diskSizeGb:$(DISK_SIZE)" + + $(GCLOUD) deployment-manager deployments update $(TEMPLATE_DEPLOYMENT) \ + --template=/opt/deployment/templates/docksal_template.jinja \ + --properties="templateId:'$(TEMPLATE_ID)',region:'$(REGION)',machineType:'$(DEFAULT_MACHINE_SIZE)',preemptive:$(PREEMPTIVE),docksalVersion:'$(DOCKSAL_VERSION)'" \ + --delete-policy=abandon + + $(GCLOUD) deployment-manager deployments update $(GROUP_DEPLOYMENT) \ + --template=/opt/deployment/templates/docksal_group.py \ + --properties="templateId:'$(TEMPLATE_ID)',zone:'$(ZONE)'" + + $(GCLOUD) compute instance-groups managed rolling-action start-update $(GROUP_NAME) \ + --version template=$(TEMPLATE_ID) \ + --zone $(ZONE) + +delete: + $(GCLOUD) deployment-manager deployments delete $(GROUP_DEPLOYMENT) --quiet + + $(GCLOUD) deployment-manager deployments delete $(TEMPLATE_DEPLOYMENT) --quiet + + $(GCLOUD) deployment-manager deployments delete $(DISK_DEPLOYMENT) --quiet diff --git a/gcp-deployment-manager/README.md b/gcp-deployment-manager/README.md index 
cc92621..d27d58c 100644 --- a/gcp-deployment-manager/README.md +++ b/gcp-deployment-manager/README.md @@ -2,10 +2,10 @@ This is a Docksal Sandbox Server template for Google Cloud Deployment Manager. -Google Cloud Deployment Manager is an infrastructure management service that makes it simple to create, deploy, -and manage Google Cloud Platform resources. With Deployment Manager, you can create a static or dynamic template -that describes the configuration of your Google Cloud environment and then use Deployment Manager to create these -resources as a single deployment. +Google Cloud Deployment Manager is an infrastructure management service that makes it simple to create, deploy, +and manage Google Cloud Platform resources. With Deployment Manager, you can create a static or dynamic template +that describes the configuration of your Google Cloud environment and then use Deployment Manager to create these +resources as a single or collection of deployments. For an overview of Deployment Manager, see https://cloud.google.com/deployment-manager/docs. @@ -17,20 +17,13 @@ For an overview of Deployment Manager, see https://cloud.google.com/deployment-m 1. [Enable the Deployment Manager and Compute APIs](https://console.cloud.google.com/flows/enableapi?apiid=deploymentmanager,compute_component). -1. Clone this repo +1. Setup SSH keys. - Clone this repository locally or with [Cloud Shell](https://cloud.google.com/shell/). - With Cloud Shell, you can manage your GCP project and resources without installing anything. +1. Proceed with Local setup below. - [![Open in Cloud Shell](http://gstatic.com/cloudssh/images/open-btn.svg)](https://console.cloud.google.com/cloudshell/open?git_repo=https%3A%2F%2Fgithub.com%2Fdocksal%2Fsandbox-server&page=editor) +1. Deploy the sandbox. -1. Configure `gcloud` tool to use your project. 
Replace `` with the project ID - - ``` - gcloud config set project - ``` - -## Set up SSH keys in the GCP project +### Set up SSH keys in the GCP project Generate a new SSH key pair: @@ -40,38 +33,120 @@ Replace `` with something meaningful to identify the key, e.g. `docksal **Note**: The `-C build-agent@docksal-sandbox` part is important. -GCP uses the comment in the key to map the key to a Linux user. It will update the `build-agent` user's +GCP uses the comment in the key to map the key to a Linux user. It will update the `build-agent` user's `~/.ssh/authorized_keys` automatically, when you follow the steps below. View and copy the public key: cat ~/.ssh/.pub - **Note**: they key is a single line. If using Cloud Shell, it may break it into multiple lines. If that is the case, you will have yo manually fix the string to be one line. - Use the copied string to set a [project-wide](https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys#project-wide) public SSH key on GCP. -## Deploy the sandbox server +### Local Setup + +You must have Docker installed. Local setup uses the image `google/cloud-sdk`. See the [image's Docker Hub page](https://hub.docker.com/r/google/cloud-sdk) for additional details on using this image. + +1. Run `make login`. This will ask you to copy/paste a URL into your browser and copy/paste a token. See [`gcloud auth login`](https://cloud.google.com/sdk/gcloud/reference/auth/login) for additional details. + +1. Run `make bash` for container CLI and run `gcloud config set project `. Future `make` commands will automatically run with the context of your project. 
+ +### Deploying the sandbox + +After deployment you will have the following resources requisitioned (resources you pay for denoted by ($)): + +- VM instance (defaults to preemptible) ($) +- Standard disk (150GB default) ($) +- Static IP address ($) +- Managed instance group +- VM instance template + +Assuming you have already logged in and set the project as directed above, you simply need to run the following to setup your sandbox: + + make create + +Then, in your GCP Console: + +1. navigate to Compute Engine > VM instances. +1. Find the IP address assigned to your VM. +1. Log into your VM via `build-agent@x.x.x.x`, where x.x.x.x is the IP identified in step 2. +1. Run `fin system status`. If the Docksal system containers all appear as `Up` then everything is ready to go. +1. If the Docksal system status is either not returned, or you get an error of some kind, you will want to: log out, wait a few minutes, then log back in. Run `fin system status` again and it should come back as up. + +**NOTE**: You will want to wait ~5 minutes after running `make create` to ensure the VM has had time to fully assemble itself. This is only true for first time setups. + +## Managing user access to the machine + +You can associate multiple SSH keys with the project to control user access, see [working with SSH keys](https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys) for details on how to add and remove SSH keys from the project. + +Project SSH keys, and their associated user, are automatically added to all VM instances in your project. If you add an SSH key to the project metadata with user id `user-a`, then after the machine has been provisioned, or restarted, that user will be able to login to the machine via `ssh user-a@x.x.x.x` where `x.x.x.x` is the sandbox VM's IP address. + +### Running commands as `build-agent` + +Sandboxes are stored in `/home/build-agent/builds`. 
Therefore, if you need to interact with a sandbox, you must be the user `build-agent`. All users with project-level SSH keys have passwordless sudo access. So, if you are logging into the machine as a user besides `build-agent`, to interact with the builds you must do the following: + +1. Run `ssh my-user-id@x.x.x.x`. +1. Run `sudo su - build-agent` to assume root. + +## Customizing VM instance properties + +Reasonable defaults are assigned to the sandbox server out of the box: + +| Property | Value | +|--------------|---------------| +| Machine size | n1-standard-2 | +| Disk size | 150GB | +| Preemptible | true | +| Region | us-east4 | +| Zone | us-east4-c | + +These properties are represented in `project.env`. You may override these values in one of two files: `project-override.env` and `local-override.env`. `project-override.env` is intended to be a version controlled file. `local-override.env` is, conversely, not version controlled. The files are included in the `Makefile` in the following order: + +1. project.env +1. project-override.env +1. local-override.env + +This project supports the customization of the following variables: + +1. `ZONE` - See [Regions and Zones](https://cloud.google.com/compute/docs/regions-zones) for acceptable values. +1. `REGION` - See [Regions and Zones](https://cloud.google.com/compute/docs/regions-zones) for acceptable values. +1. `DEFAULT_MACHINE_SIZE` - See [Machine Types](https://cloud.google.com/compute/docs/machine-types) for the various types of machines you can use. We have found that the `n1-standard` series provides optimum computing power for sandbox purposes. +1. `DISK_SIZE` - the size of the machine in GB (integer only). +1. `TEMPLATE_ID` - unique identifier for a template. If any other properties of the template changes, you MUST change the template ID as well. You will most frequently need to change this if you decide to toggle the instance's preemptive setting. +1. `PREEMPTIVE` - (boolean) true or false. 
Determines whether or not a machine should be preemptive. Preemptive machines are far cheaper, and the uptime is usually adequate enough as to not provide disruptions to your daily operations. See [Preemptible VM instances](https://cloud.google.com/compute/docs/instances/preemptible) for more information. + +**NOTE:** Never surround the values of overridden variables with quotes. Reference `project.env` for the proper way to format values. + +### Examples + +Lets say you need a bigger disk and a bigger machine than comes standard. You'll want: + +1. Add `project-override.env` and set its contents to: + + DEFAULT_MACHINE_SIZE=n1-standard-4 + DISK_SIZE=250 -Navigate to the `gcp-deployment-manager` folder: +1. Run `make create`. That will create a sandbox server with machine type `n1-standard-4` instead of `n1-standard-2` and a disk with size `250GB` instead of `150GB`. - cd gcp-deployment-manager +If you have already created the sandbox instance and you need to upsize the machine because you are running more active sandboxes, you should: -Launch the deployment: +1. Update/create `project-override.env` to include: - gcloud deployment-manager deployments create docksal-sandbox-server --config config.yaml + - A bigger machine size. + - A DIFFERENT value for TEMPLATE_ID. This is because the machine size is a property of the instance template, and GCP does not allow updating properties of a template. A NEW template must be created with the new values. The `Makefile` handles this for you automatically, but you DO need to set a `TEMPLATE_ID` value that is different from what was last deployed. -In the output of the command you'll find the server public IP address: + Note that, any updates to the following variables requires you also update the current value of `TEMPLATE_ID` to something different: - OUTPUTS VALUE - ip + - ZONE + - PREEMPTIVE + - DEFAULT_MACHINE_SIZE -There is a startup script that will do server provisioning. It will take 2-5 minutes from this point. +1. 
With the above in mind, lets say the current machine type is `n1-standard-4`, and you want to upsize it to `n1-standard-8` to get more processors and memory. Update `project-override.env` to look like: -You can now proceed to [Access the sandbox server](/README.md#server-access) + DEFAULT_MACHINE_SIZE=n1-standard-8 + TEMPLATE_ID=custom-docksal-sandbox-template-002 -## Delete the sandbox server +1. Run `make update`. If you forget to change `TEMPLATE_ID` to a new, unique, value, then `make update` will error. -To delete the deployment and all the resources that were created: +1. Observe the GCP Console and verify that your machine size has been updated. If there is a sandbox site present on the server, visit its sandbox URL to verify that the site is still functioning. - gcloud deployment-manager deployments delete docksal-sandbox-server +The machine instance is intended to be stateless, but the disk is stateful. So, you can recreate the machine instance as many times as you'd like, but it will keep using the same data disk which preserves all of the existing sandboxes and settings in the `build-agent` home directory. diff --git a/gcp-deployment-manager/config.yaml b/gcp-deployment-manager/config.yaml deleted file mode 100644 index 02506c7..0000000 --- a/gcp-deployment-manager/config.yaml +++ /dev/null @@ -1,17 +0,0 @@ -imports: -- path: Docksal.jinja -- path: startup.sh - -resources: -- name: Docksal - type: Docksal.jinja - properties: - zone: us-east4-c - machineType: n1-standard-2 - dataDiskSizeGb: 100 - metadata-from-file: - startup-script: startup.sh - -outputs: -- name: ip - value: $(ref.Docksal.vmExternalIP) diff --git a/gcp-deployment-manager/project.env b/gcp-deployment-manager/project.env new file mode 100644 index 0000000..551b914 --- /dev/null +++ b/gcp-deployment-manager/project.env @@ -0,0 +1,19 @@ +# Project constants. DO NOT OVERRIDE. 
+CLOUD_SDK_IMAGE_VERSION=298.0.0-alpine +DISK_DEPLOYMENT=docksal-sandbox-disk +GROUP_DEPLOYMENT=docksal-sandbox-group +GROUP_NAME=mig-docksal-group-py +TEMPLATE_DEPLOYMENT=docksal-sandbox-template + +# Overridable properties. +# NOTE: If changing PREEMPTIVE, ZONE, or DEFAULT_MACHINE_SIZE then you MUST +# also change TEMPLATE_ID to be something different as well. We recommend +# changing it to e.g. custom-docksal-sandbox-template-002. Always increment the +# last number as a matter of convention. +ZONE=us-east4-c +REGION=us-east4 +DEFAULT_MACHINE_SIZE=n1-standard-2 +DISK_SIZE=150 +TEMPLATE_ID=docksal-sandbox-template-002 +PREEMPTIVE=true +DOCKSAL_VERSION=master diff --git a/gcp-deployment-manager/startup.sh b/gcp-deployment-manager/startup.sh deleted file mode 100755 index 4459a08..0000000 --- a/gcp-deployment-manager/startup.sh +++ /dev/null @@ -1,116 +0,0 @@ -#!/bin/bash - -# This is a startup script for a Docksal Sandbox server in GCP. -# It installs and configures Docksal on a bare Ubuntu machine (tested with Ubuntu 18.04 Minimal). -# -# The startup script log can be views via "gcloud compute ssh vm-sandbox-test -- tail -f /var/log/syslog" - -set -x # Print commands -set -e # Fail on errors - -# Persistent disk settings -DATA_DISK="/dev/sdb" -MOUNT_POINT="/data" -BUILD_USER="build-agent" -BUILD_USER_UID="1100" -BUILD_USER_HOME="/home/${BUILD_USER}" -DATA_BUILD_USER_HOME="${MOUNT_POINT}${BUILD_USER_HOME}" -DOCKSAL_VERSION="master" -PROJECT_INACTIVITY_TIMEOUT="0.5h" -PROJECT_DANGLING_TIMEOUT="168h" -PROJECTS_ROOT="${BUILD_USER_HOME}/builds" - -# Create build-agent user with no-password sudo access -# Forcing the uid to avoid race conditions with GCP creating project level users at the same time. 
-# (Otherwise, we may run into something like "useradd: UID 1001 is not unique") -if [[ "$(id -u ${BUILD_USER})" != "${BUILD_USER_UID}" ]]; then - adduser --disabled-password --gecos "" --uid ${BUILD_USER_UID} ${BUILD_USER} - usermod -aG sudo ${BUILD_USER} - echo "${BUILD_USER} ALL=(ALL) NOPASSWD:ALL" >/etc/sudoers.d/101-${BUILD_USER} -fi - -# Mount the persistent data disk if it was attached -if lsblk ${DATA_DISK} &>/dev/null; then - echo "Using persistent disk: ${DATA_DISK} for data storage: ${MOUNT_POINT}" - - # Format the disk if necessary - if [[ $(lsblk -f ${DATA_DISK}) != *ext4* ]]; then - mkfs.ext4 -m 0 -F -E lazy_itable_init=0,lazy_journal_init=0,discard ${DATA_DISK} - fi - - # Mount the data disk - mkdir -p ${MOUNT_POINT} - cp /etc/fstab /etc/fstab.backup - # Write disk mount to /etc/fstab (so that it persists on reboots) - # Equivalent of `mount /dev/sdb /mnt/data` - echo "${DATA_DISK} ${MOUNT_POINT} ext4 defaults,nofail 0 2" | tee -a /etc/fstab - mount -a - - # Move BUILD_USER_HOME to the data disk - # E.g. /home/build-agent => /mnt/data/home/build-agent - if [[ ! -d ${DATA_BUILD_USER_HOME} ]]; then - mkdir -p $(dirname ${DATA_BUILD_USER_HOME}) - mv ${BUILD_USER_HOME} $(dirname ${DATA_BUILD_USER_HOME}) - else - rm -rf ${BUILD_USER_HOME} - fi - ln -s ${DATA_BUILD_USER_HOME} ${BUILD_USER_HOME} - - # Symlink /var/lib/docker (should not yet exist when this script runs) to the data volume - mkdir -p ${MOUNT_POINT}/var/lib/docker - ln -s ${MOUNT_POINT}/var/lib/docker /var/lib/docker -else - echo "WARNING: data volume not found. Using instance-only storage" -fi - -# Create the projects/builds directory -mkdir -p ${PROJECTS_ROOT} - -# SSH settings: ensure ~/.ssh exists for the build user -mkdir -p ${BUILD_USER_HOME}/.ssh - -# SSH settings: authorized_keys -# If ~/.ssh/authorized_keys does not exist for the build user, reuse the one from the default user account (ubuntu) -if [[ ! 
-f "${BUILD_USER_HOME}/.ssh/authorized_keys" ]]; then - cp "/home/ubuntu/.ssh/authorized_keys" "${BUILD_USER_HOME}/.ssh/authorized_keys" - chown ${BUILD_USER}:${BUILD_USER} "${BUILD_USER_HOME}/.ssh/authorized_keys" -fi - -# SSH settings: disable the host key check -if [[ ! -f "${BUILD_USER_HOME}/.ssh/config" ]]; then - tee "${BUILD_USER_HOME}/.ssh/config" < + description: | + Disk deployment meant to be used with Docksal sandbox infrastructure. + +additionalProperties: false + +properties: + zone: + type: string + description: The zone which this resource resides. + default: us-east4-c + diskSizeGb: + type: integer + description: The size of the disk, in gigabytes (GB). + default: 150 diff --git a/gcp-deployment-manager/templates/docksal_group.py b/gcp-deployment-manager/templates/docksal_group.py new file mode 100644 index 0000000..5f2cd97 --- /dev/null +++ b/gcp-deployment-manager/templates/docksal_group.py @@ -0,0 +1,35 @@ +def generate_config(context): + properties = context.properties + resource_name = 'mig-' + context.env['name'] + project = context.env['project'] + zone = properties['zone'] + template_id = properties['templateId'] + instance_template = 'projects/' + project + '/global/instanceTemplates/' + template_id + + outputs = [] + resources = [{ + 'name': resource_name, + 'type': 'gcp-types/compute-v1:instanceGroupManagers', + 'properties': { + 'instanceTemplate': instance_template, + 'name': resource_name, + 'zone': zone, + 'targetSize': 1, + 'updatePolicy': { + 'maxSurge': { + 'calculated': 0, + 'fixed': 0 + }, + 'maxUnavailable': { + 'calculated': 1, + 'fixed': 1 + }, + 'minReadySec': 0, + 'minimalAction': 'REPLACE', + 'replacementMethod': 'RECREATE', + 'type': 'PROACTIVE' + } + } + }] + + return {'resources': resources, 'outputs': outputs} diff --git a/gcp-deployment-manager/templates/docksal_group.py.schema b/gcp-deployment-manager/templates/docksal_group.py.schema new file mode 100644 index 0000000..49b3ac9 --- /dev/null +++ 
b/gcp-deployment-manager/templates/docksal_group.py.schema @@ -0,0 +1,22 @@ +info: + title: Docksal Managed Instance Group + author: Les Peabody + description: | + Creates a MIG meant to control a singular, preemptible VM. + +additionalProperties: false + +required: + - templateId + +properties: + zone: + type: string + description: The zone which this MIG occupies. + default: us-east4-c + templateId: + type: string + description: | + The current instance template resource name that should be used to + generate the sandbox virtual machine. + default: docksal-sandbox-template-001 diff --git a/gcp-deployment-manager/templates/docksal_template.jinja b/gcp-deployment-manager/templates/docksal_template.jinja new file mode 100644 index 0000000..e214532 --- /dev/null +++ b/gcp-deployment-manager/templates/docksal_template.jinja @@ -0,0 +1,59 @@ +{% set staticIpName = "static-ip-%s" % env["deployment"] %} +{% set project = env["project"] %} +{% set region = properties["region"] %} +{% set templateId = properties["templateId"] %} +{% set deployment = env["deployment"] %} +{% set preemptive = properties["preemptive"] %} +{% set docksalVersion = properties["docksalVersion"] %} + +resources: +- name: {{ staticIpName }} + properties: + addressType: EXTERNAL + description: | + The static IP address used for VMs in the deployment + {{ deployment }}. 
+ region: {{ region }} + resourceType: addresses + type: gcp-types/compute-v1:addresses +- name: {{ templateId }} + properties: + name: {{ templateId }} + properties: + machineType: {{ properties["machineType"] }} + disks: + - deviceName: boot + boot: true + autoDelete: true + initializeParams: + sourceImage: https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-minimal-2004-focal-v20200610 + - deviceName: docksal-sandbox-disk-data + source: docksal-sandbox-disk + {% if properties["startup"] or properties["docksalDnsDomain"] %} + metadata: + items: + - key: docksalVersion + value: {{ properties["docksalVersion"] }} + {% if properties["startup"] %} + - key: startup-script + value: | + {{ imports[properties["startup"]]|indent(12) }} + {% endif %} + {% if properties["docksalDnsDomain"] %} + - key: DOCKSAL_DNS_DOMAIN + value: {{ properties["docksalDnsDomain"] }} + {% endif %} + {% endif %} + networkInterfaces: + - network: https://www.googleapis.com/compute/v1/projects/{{ project }}/global/networks/default + accessConfigs: + - type: ONE_TO_ONE_NAT + name: "External NAT" + natIP: $(ref.{{ staticIpName }}.address) + scheduling: + preemptible: {{ properties["preemptive"] }} + tags: + items: + - http-server + - https-server + type: gcp-types/compute-v1:instanceTemplates diff --git a/gcp-deployment-manager/templates/docksal_template.jinja.schema b/gcp-deployment-manager/templates/docksal_template.jinja.schema new file mode 100644 index 0000000..7e9c231 --- /dev/null +++ b/gcp-deployment-manager/templates/docksal_template.jinja.schema @@ -0,0 +1,47 @@ +info: + title: Docksal Instance Template + author: Les Peabody + description: | + Creates a Docksal VM instance template and static IP resource. + +additionalProperties: false + +imports: +- path: startup.sh + name: startup.sh + +properties: + region: + type: string + description: The region which this resource resides. 
+ default: us-east4 + machineType: + type: string + description: | + The type of machine instance to use. Defaults to n1-standard-2. + default: n1-standard-2 + startup: + type: string + description: | + The name of the script processed every time the server starts up. + default: startup.sh + docksalDnsDomain: + type: string + description: | + A custom domain that points at the sandbox server's IP address. + dataDiskName: + type: string + description: The name of the disk containing instance data. + default: docksal-sandbox-disk + templateId: + type: string + description: The resource name attached to the instance template. + default: docksal-sandbox-template-001 + preemptive: + type: boolean + description: Whether or not the machine should be preemptible. Preemptible instances are cheaper overall. + default: true + docksalVersion: + type: string + description: The version of Docksal to install on the sandbox server. + default: develop diff --git a/gcp-deployment-manager/templates/startup.sh b/gcp-deployment-manager/templates/startup.sh new file mode 100755 index 0000000..55ddc6d --- /dev/null +++ b/gcp-deployment-manager/templates/startup.sh @@ -0,0 +1,133 @@ +#!/bin/bash + +# This is a startup script for a Docksal Sandbox server in GCP. +# It installs and configures Docksal on a bare Ubuntu machine (tested with Ubuntu 20.04 Minimal). +# +# The startup script log can be viewed via "gcloud compute ssh vm-sandbox-test -- tail -f /var/log/syslog" + +set -x # Print commands +set -e # Fail on errors + +# Helper function for getting metadata. 
+get_metadata() { + curl http://metadata.google.internal/computeMetadata/v1/instance/attributes/$1 -H "Metadata-Flavor: Google" 2>/dev/null +} + +# Persistent disk settings +DATA_DISK="/dev/sdb" +MOUNT_POINT="/data" +BUILD_USER="build-agent" +BUILD_USER_UID="1100" +BUILD_USER_HOME="/home/${BUILD_USER}" +DATA_BUILD_USER_HOME="${MOUNT_POINT}${BUILD_USER_HOME}" +DOCKSAL_VERSION="$(get_metadata docksalVersion)" +PROJECT_INACTIVITY_TIMEOUT="0.5h" +PROJECT_DANGLING_TIMEOUT="168h" +PROJECTS_ROOT="${BUILD_USER_HOME}/builds" + +set_docksal_dns_domain() { + local base_metadata="http://metadata.google.internal/computeMetadata/v1/instance/attributes/" + local dns_attr="DOCKSAL_DNS_DOMAIN" + local flavor_header="Metadata-Flavor: Google" + local metadata=$(curl "$base_metadata" -H "$flavor_header" 2>/dev/null) + if [[ "$metadata" == *"$dns_attr"* ]] ; then + # We assume that if the attribute is present in the metadata directory then + # it has an accompanying value. + local dns_attr_value=$(curl "${base_metadata}${dns_attr}" -H "$flavor_header" 2>/dev/null) + sed -i "/^$dns_attr/d" "/home/$BUILD_USER/.docksal/docksal.env" + echo "$dns_attr=\"$dns_attr_value\"" | tee -a "/home/$BUILD_USER/.docksal/docksal.env" >/dev/null + fi +} + +apt update +apt install -y vim + +# Mount the persistent data disk if it was attached +if lsblk ${DATA_DISK} &>/dev/null; then + echo "Using persistent disk: ${DATA_DISK} for data storage: ${MOUNT_POINT}" + + # Format the disk if necessary + if [[ $(lsblk -f ${DATA_DISK}) != *ext4* ]]; then + mkfs.ext4 -m 0 -F -E lazy_itable_init=0,lazy_journal_init=0,discard ${DATA_DISK} + fi + + # Mount the data disk + mkdir -p ${MOUNT_POINT} + cp /etc/fstab /etc/fstab.backup + # Write disk mount to /etc/fstab (so that it persists on reboots) + # Equivalent of `mount /dev/sdb /mnt/data` + echo "${DATA_DISK} ${MOUNT_POINT} ext4 defaults,nofail 0 2" | tee -a /etc/fstab + mount -a + + # Move BUILD_USER_HOME to the data disk + # E.g. 
/home/build-agent => /mnt/data/home/build-agent + if [[ ! -d ${DATA_BUILD_USER_HOME} ]]; then + mkdir -p $(dirname ${DATA_BUILD_USER_HOME}) + mv ${BUILD_USER_HOME} $(dirname ${DATA_BUILD_USER_HOME}) + else + rm -rf ${BUILD_USER_HOME} + fi + ln -s ${DATA_BUILD_USER_HOME} ${BUILD_USER_HOME} + + # Symlink /var/lib/docker (should not yet exist when this script runs) to the data volume + mkdir -p ${MOUNT_POINT}/var/lib/docker + ln -s ${MOUNT_POINT}/var/lib/docker /var/lib/docker +else + echo "WARNING: data volume not found. Using instance-only storage" +fi + +# Create the projects/builds directory +mkdir -p ${PROJECTS_ROOT} + +# SSH settings: ensure ~/.ssh exists for the build user +mkdir -p ${BUILD_USER_HOME}/.ssh + +# SSH settings: authorized_keys +# If ~/.ssh/authorized_keys does not exist for the build user, reuse the one from the default user account (ubuntu) +if [[ ! -f "${BUILD_USER_HOME}/.ssh/authorized_keys" ]]; then + cp "/home/ubuntu/.ssh/authorized_keys" "${BUILD_USER_HOME}/.ssh/authorized_keys" + chown ${BUILD_USER}:${BUILD_USER} "${BUILD_USER_HOME}/.ssh/authorized_keys" +fi + +# SSH settings: disable the host key check +if [[ ! -f "${BUILD_USER_HOME}/.ssh/config" ]]; then + tee "${BUILD_USER_HOME}/.ssh/config" <