You'll need Python, the Terraform CLI, and, depending on which scripts you run, jq and dig.
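A quick way to confirm they're all on your PATH (command -v prints the path of each tool it finds and nothing for a missing one):

# prints a path per tool; a missing line means a missing tool
command -v python3 terraform jq dig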
# from root of project
VENVDIR=.kubespray-venv
KUBESPRAYDIR=kubespray
python3 -m venv $VENVDIR
source $VENVDIR/bin/activate
cd $KUBESPRAYDIR
pip install -U -r requirements.txt
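Once the requirements are installed, a quick version check confirms the venv's Ansible is the one you'll be running:

# should report the Ansible release pinned by kubespray's requirements.txt
ansible-playbook --version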
Create a ./terraform/inputs/vars.tfvars file with the following content:
cp_count = 3 # how many control plane nodes, 3 is default if not specified
worker_count = 1 # how many worker nodes, 1 is default if not specified
project = "<gcp project id>" # your gcp project id e.g. name-123456
public_ip = "0.0.0.0/32" # your public ip in CIDR format, use ./scripts/whoami.sh if you don't know your public ip address
Other overridable variables are available in ./terraform/variables.tf.
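If ./scripts/whoami.sh isn't handy, any external IP echo service gives you the same value. For example, assuming curl and ifconfig.me are available:

# append /32 to turn the bare address into the CIDR notation public_ip expects
echo "$(curl -s https://ifconfig.me)/32"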
Create a service account in GCP, download its JSON key file, and place it at ./terraform/inputs/credentials.json (or wherever your gcp_credentials variable points).
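If you'd rather stay on the CLI, something like the following creates the account and key. This is a sketch: the account name is made up, and roles/editor is a deliberately broad grant that you should narrow to what the Terraform config actually needs.

# hypothetical account name; pick your own
gcloud iam service-accounts create k8s-terraform
# broad grant for illustration only; scope the role down for real use
gcloud projects add-iam-policy-binding <gcp project id> \
  --member="serviceAccount:k8s-terraform@<gcp project id>.iam.gserviceaccount.com" \
  --role="roles/editor"
# write the JSON key where terraform expects it
gcloud iam service-accounts keys create ./terraform/inputs/credentials.json \
  --iam-account="k8s-terraform@<gcp project id>.iam.gserviceaccount.com"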
# from project root
cd ./terraform
terraform init
terraform apply -var-file="./inputs/vars.tfvars"
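To preview what apply will create before committing, terraform plan takes the same var file:

# dry run: lists the resources apply would create, without touching GCP
terraform plan -var-file="./inputs/vars.tfvars"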
Make sure ansible-playbook resolves to the binary in your venv:
which ansible-playbook
The result should be /<project path>/.kubespray-venv/bin/ansible-playbook. If not, restart your shell and reactivate the venv with source .kubespray-venv/bin/activate.
# from project root with venv activated
cd ./kubespray
ansible-playbook -i ../inventory/hosts.yaml cluster.yml --become \
-e "{\"supplementary_addresses_in_ssl_keys\":[\"$(terraform -chdir=../terraform output -raw jumpbox_ip)\"]}"
# from project root
cd ./playbooks
ansible-playbook -i ../inventory/hosts.yaml load-balancer.yaml
The load balancer splits kube-api traffic among the control-plane nodes through the jumpbox (the bastion node in the Ansible inventory). It also splits HTTP traffic across node port 32359 on all nodes, in case you want to set up a service.
# from project root
cd ./terraform
JUMPBOX_HOST=$(terraform output -raw jumpbox_ip)
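# read the ssh user off the first ansible_host resource in the terraform state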
ANSIBLE_USER=$(terraform state pull | jq 'first(.resources[] | select(.type=="ansible_host")).instances[0].attributes.variables.ansible_user' -r)
ssh -J $ANSIBLE_USER@$JUMPBOX_HOST $ANSIBLE_USER@k8s-node-cp-0.internal.
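If you hop through the jumpbox often, a ProxyJump entry in ~/.ssh/config saves the retyping. A sketch, with the placeholders filled in by hand from the values above:

# ~/.ssh/config
Host k8s-node-*
    User <ansible user>
    ProxyJump <ansible user>@<jumpbox ip>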
# from project root: fetch the admin kubeconfig from the cluster control plane for kubectl
./scripts/get-credentials.sh
kubectl get nodes
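As a final smoke test, you can push a toy workload through the HTTP path described above. A sketch, assuming the load balancer listens on port 80 on the jumpbox; the nginx deployment and its name are arbitrary choices:

# hypothetical test workload; nginx serves on port 80 by default
kubectl create deployment echo-test --image=nginx
# expose it on the node port the load balancer forwards HTTP traffic to
kubectl create service nodeport echo-test --tcp=80:80 --node-port=32359
# hit it through the jumpbox's public address (run from project root)
curl "http://$(terraform -chdir=./terraform output -raw jumpbox_ip)/"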