Setting up a production cluster for Pair
Create a kind cluster
kind create cluster
Export the Equinix Metal (Packet) credentials for the Cluster API Packet provider, running in kind, to use
read -p 'PACKET_PROJECT_ID: ' PACKET_PROJECT_ID && \
export PACKET_PROJECT_ID && \
read -p 'PACKET_API_KEY: ' PACKET_API_KEY && \
export PACKET_API_KEY && \
clusterctl init --infrastructure=packet
kubectl wait -n cluster-api-provider-packet-system --for=condition=ready pod --selector=control-plane=controller-manager --timeout=90s
kubectl -n cluster-api-provider-packet-system logs -l control-plane=controller-manager -c manager
Set the environment variables that the cluster template will be rendered with:
export CLUSTER_NAME="iisharingiopair"
export PROJECT_ID=7a44b778-41d2-49fa-9c92-99148516c600
export PACKET_PROJECT_ID=$PROJECT_ID
export FACILITY=sv15
export KUBERNETES_VERSION=v1.22.11
export POD_CIDR=10.244.0.0/16
export SERVICE_CIDR=10.96.0.0/12
export NODE_OS=ubuntu_20_04
export CONTROLPLANE_NODE_TYPE=c3.small.x86
export CONTROL_PLANE_MACHINE_COUNT=3
export WORKER_NODE_TYPE=c3.small.x86
export WORKER_MACHINE_COUNT=0
export SSH_KEY="gh:BobyMCbobs"
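The generate step below renders a local cluster-template-packet.yaml. If you don't already have Pair's template checked out, one starting point (an assumption here, not necessarily Pair's exact template; the asset name follows the usual Cluster API provider release convention) is the default template published with cluster-api-provider-packet releases:
curl -s -L -o ./cluster-template-packet.yaml https://github.com/kubernetes-sigs/cluster-api-provider-packet/releases/latest/download/cluster-template.yaml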
clusterctl generate cluster "${CLUSTER_NAME}" --from ./cluster-template-packet.yaml -n sharingio > cluster-packet-"${CLUSTER_NAME}".yaml
kubectl create ns sharingio
kubectl -n sharingio apply -f cluster-packet-${CLUSTER_NAME}.yaml
kubectl -n sharingio get machines,packetmachines
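For a higher-level view of provisioning progress, clusterctl can summarise the cluster and its machines:
clusterctl describe cluster "${CLUSTER_NAME}" -n sharingio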
kubectl -n sharingio get secrets ${CLUSTER_NAME}-kubeconfig -o=jsonpath='{.data.value}' | base64 -d > ~/.kube/packet-${CLUSTER_NAME}
ssh root@$(kubectl -n sharingio get cluster ${CLUSTER_NAME} -o=jsonpath='{.spec.controlPlaneEndpoint.host}') tail -f /var/log/cloud-init-output.log
export KUBECONFIG="$HOME/.kube/packet-${CLUSTER_NAME}"
kubectl get pods -A
Install Weave Net as the CNI, so that nodes become Ready:
curl -s -L -o ./weavenet.yaml "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')&env.IPALLOC_RANGE=192.168.0.0/16"
kubectl apply -f ./weavenet.yaml
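Weave Net's pods carry a name=weave-net label, so their readiness can be waited on much like the provider's earlier:
kubectl -n kube-system wait --for=condition=ready pod -l name=weave-net --timeout=120s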
Prepare the new cluster to manage itself:
kubectl create namespace sharingio
clusterctl init --infrastructure=packet
Note: this also installs cert-manager.
Move the management from kind to the Pair cluster
KUBECONFIG= clusterctl move -n sharingio --kubeconfig "$HOME/.kube/config" --to-kubeconfig "$HOME/.kube/packet-${CLUSTER_NAME}"
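After the move completes, the Cluster API resources should be served by the Pair cluster itself (KUBECONFIG still points at it):
kubectl -n sharingio get clusters,machines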
Install helm-operator, which will manage the HelmRelease resources used below:
kubectl create namespace helm-operator -o yaml --dry-run=client | \
kubectl apply -f -
kubectl -n helm-operator apply \
-f https://github.com/sharingio/.sharing.io/raw/main/cluster-api/manifests/helm-operator-crds.yaml \
-f https://github.com/sharingio/.sharing.io/raw/main/cluster-api/manifests/helm-operator.yaml
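Before applying any HelmRelease resources, confirm the operator is running and its CRD is registered:
kubectl -n helm-operator get pods
kubectl get crd helmreleases.helm.fluxcd.io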
Install MetalLB to provide LoadBalancer Services:
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.10.2/manifests/namespace.yaml
kubectl get configmap kube-proxy -n kube-system -o yaml | sed -e "s/strictARP: false/strictARP: true/" | kubectl apply -f - -n kube-system
kubectl -n metallb-system create secret generic memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.10.2/manifests/metallb.yaml
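MetalLB v0.10 won't allocate addresses until it is given a pool. Pair's address configuration isn't shown here; as a sketch, a layer2 pool with a placeholder range (replace 192.0.2.10/32 with an elastic IP reserved in your Equinix Metal project) would look like:
kubectl apply -f - <<EOF
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - 192.0.2.10/32
EOF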
Create nginx-ingress.yaml, a HelmRelease for ingress-nginx:
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
name: nginx-ingress
namespace: nginx-ingress
spec:
releaseName: nginx-ingress
chart:
repository: https://kubernetes.github.io/ingress-nginx
name: ingress-nginx
version: 4.1.4
values:
controller:
ingressClassResource:
default: true
autoscaling:
enabled: true
minReplicas: 3
maxReplicas: 5
targetCPUUtilizationPercentage: 80
service:
externalTrafficPolicy: Local
annotations:
metallb.universe.tf/allow-shared-ip: nginx-ingress
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- ingress-nginx
topologyKey: "kubernetes.io/hostname"
kubectl create namespace nginx-ingress -o yaml --dry-run=client | \
kubectl apply -f -
kubectl -n nginx-ingress apply -f ./nginx-ingress.yaml
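The controller's Service should shortly be assigned an address from MetalLB; watch for EXTERNAL-IP to populate:
kubectl -n nginx-ingress get svc nginx-ingress-ingress-nginx-controller -w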
Set up external-dns, with credentials for Route53:
kubectl create namespace external-dns -o yaml --dry-run=client | \
kubectl apply -f -
read -p 'DOMAIN_FILTER: ' DOMAIN_FILTER && export DOMAIN_FILTER && \
read -p 'TXT_OWNER_ID: ' TXT_OWNER_ID && export TXT_OWNER_ID && \
read -p 'AWS_ACCESS_KEY_ID: ' AWS_ACCESS_KEY_ID && export AWS_ACCESS_KEY_ID && \
read -p 'AWS_SECRET_ACCESS_KEY: ' AWS_SECRET_ACCESS_KEY && export AWS_SECRET_ACCESS_KEY && \
kubectl -n external-dns create secret generic external-dns-aws \
--from-literal=domain-filter=$DOMAIN_FILTER \
--from-literal=txt-owner-id=$TXT_OWNER_ID \
--from-literal=aws-access-key-id=$AWS_ACCESS_KEY_ID \
--from-literal=aws-secret-access-key=$AWS_SECRET_ACCESS_KEY
Create external-dns-crd.yaml, containing the DNSEndpoint CRD:
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.5.0
api-approved.kubernetes.io: "https://github.com/kubernetes-sigs/external-dns/pull/2007"
creationTimestamp: null
name: dnsendpoints.externaldns.k8s.io
spec:
group: externaldns.k8s.io
names:
kind: DNSEndpoint
listKind: DNSEndpointList
plural: dnsendpoints
singular: dnsendpoint
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: DNSEndpointSpec defines the desired state of DNSEndpoint
properties:
endpoints:
items:
description: Endpoint is a high-level way of a connection between a service and an IP
properties:
dnsName:
description: The hostname of the DNS record
type: string
labels:
additionalProperties:
type: string
description: Labels stores labels defined for the Endpoint
type: object
providerSpecific:
description: ProviderSpecific stores provider specific config
items:
description: ProviderSpecificProperty holds the name and value of a configuration which is specific to individual DNS providers
properties:
name:
type: string
value:
type: string
type: object
type: array
recordTTL:
description: TTL for the record
format: int64
type: integer
recordType:
description: RecordType type of record, e.g. CNAME, A, SRV, TXT etc
type: string
setIdentifier:
description: Identifier to distinguish multiple records with the same name and type (e.g. Route53 records with routing policies other than 'simple')
type: string
targets:
description: The targets the DNS record points to
items:
type: string
type: array
type: object
type: array
type: object
status:
description: DNSEndpointStatus defines the observed state of DNSEndpoint
properties:
observedGeneration:
description: The generation observed by the external-dns controller.
format: int64
type: integer
type: object
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
Create external-dns.yaml, holding the ServiceAccount, RBAC, and Deployment for external-dns:
apiVersion: v1
kind: ServiceAccount
metadata:
name: external-dns
namespace: external-dns
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: external-dns
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- pods
verbs:
- get
- watch
- list
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- watch
- list
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- externaldns.k8s.io
resources:
- dnsendpoints
verbs:
- get
- watch
- list
- apiGroups:
- externaldns.k8s.io
resources:
- dnsendpoints/status
verbs:
- get
- update
- patch
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: external-dns-viewer
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: external-dns
subjects:
- kind: ServiceAccount
name: external-dns
namespace: external-dns
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns
namespace: external-dns
spec:
strategy:
type: Recreate
selector:
matchLabels:
app: external-dns
template:
metadata:
labels:
app: external-dns
spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: k8s.gcr.io/external-dns/external-dns:v0.10.0
args:
- --source=crd
- --crd-source-apiversion=externaldns.k8s.io/v1alpha1
- --crd-source-kind=DNSEndpoint
- --provider=aws
# - --policy=upsert-only # would prevent ExternalDNS from deleting any records, omit to enable full synchronization
- --aws-zone-type=public # only look at public hosted zones (valid values are public, private or no value for both)
- --registry=txt
- --log-level=debug
- --aws-batch-change-size=99
- --managed-record-types=A
- --managed-record-types=CNAME
- --managed-record-types=NS
env:
- name: EXTERNAL_DNS_DOMAIN_FILTER
valueFrom:
secretKeyRef:
name: external-dns-aws
key: domain-filter
- name: EXTERNAL_DNS_TXT_OWNER_ID
valueFrom:
secretKeyRef:
name: external-dns-aws
key: txt-owner-id
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: external-dns-aws
key: aws-access-key-id
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: external-dns-aws
key: aws-secret-access-key
securityContext:
fsGroup: 65534 # For ExternalDNS to be able to read Kubernetes and AWS token files
kubectl apply -f ./external-dns-crd.yaml -f ./external-dns.yaml
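The Deployment labels its pod app=external-dns, so reconciliation activity (verbose, given --log-level=debug) can be followed with:
kubectl -n external-dns logs -l app=external-dns -f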
Install metrics-server, which the ingress controller's autoscaling above relies on:
kubectl -n kube-system apply -f https://github.com/sharingio/.sharing.io/raw/main/cluster-api/manifests/metrics-server.yaml
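Once metrics-server is up, the resource metrics API should answer:
kubectl top nodes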
kubectl create namespace kube-prometheus -o yaml --dry-run=client | \
kubectl apply -f -
Create kube-prometheus.yaml, a HelmRelease for kube-prometheus-stack:
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
name: kube-prometheus
namespace: kube-prometheus
spec:
releaseName: kube-prometheus
chart:
repository: https://prometheus-community.github.io/helm-charts
name: kube-prometheus-stack
version: 16.1.2
values:
prometheus:
prometheusSpec:
storageSpec:
emptyDir: {}
grafana:
dashboards:
default:
node-exporter:
gnetId: 1860
revision: 15
datasource: Prometheus
deployments:
gnetId: 8588
revision: 1
datasource: Prometheus
adminPassword: "admin"
ingress:
enabled: true
hosts:
- grafana.sharing.io
tls:
- secretName: letsencrypt-prod
hosts:
- grafana.sharing.io
kubectl apply -f ./kube-prometheus.yaml
# print the Grafana credentials as user: password
kubectl -n kube-prometheus get secrets kube-prometheus-grafana -o json | jq -r '.data["admin-user"]' | base64 -d
printf ": "
kubectl -n kube-prometheus get secrets kube-prometheus-grafana -o json | jq -r '.data["admin-password"]' | base64 -d
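Grafana will be reachable at grafana.sharing.io once DNS is configured below; in the meantime a port-forward works (the Service name follows the release name, like the secret above):
kubectl -n kube-prometheus port-forward svc/kube-prometheus-grafana 3000:80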
With DNS in place, we can finish setting up management for Pair. The following records will be created:
- sharing.io
- *.sharing.io
- *.pair.sharing.io
The ingress controller's load balancer IP, returned by:
kubectl -n nginx-ingress get svc nginx-ingress-ingress-nginx-controller -o=jsonpath='{.status.loadBalancer.ingress[0].ip}'
should be assigned as an A record for each of the names above.
Create dnsendpoint-sharingio.yaml (the apex sharing.io entry is left commented out here):
apiVersion: externaldns.k8s.io/v1alpha1
kind: DNSEndpoint
metadata:
name: sharingio
namespace: sharingio
spec:
endpoints:
# - dnsName: sharing.io
# recordTTL: 60
# recordType: A
# targets:
# - ${LOAD_BALANCER_IP}
- dnsName: '*.sharing.io'
recordTTL: 60
recordType: A
targets:
- ${LOAD_BALANCER_IP}
- dnsName: '*.pair.sharing.io'
recordTTL: 60
recordType: A
targets:
- ${LOAD_BALANCER_IP}
export LOAD_BALANCER_IP=$(kubectl -n nginx-ingress get svc nginx-ingress-ingress-nginx-controller -o=jsonpath='{.status.loadBalancer.ingress[0].ip}')
envsubst < ./dnsendpoint-sharingio.yaml | kubectl apply -f -
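Once external-dns has reconciled the DNSEndpoint (allow a minute or two, plus the 60-second TTL), the wildcard records should resolve to the load balancer; any label under the wildcard will do:
dig +short test.pair.sharing.io
# expected: the value of ${LOAD_BALANCER_IP}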
From here, the remaining steps are:
- Deploying Pair
- CI setup (maintainers of Pair)