diff --git a/Day-1/demo-traefik-docker/front-test/package-lock.json b/Day-1/demo-traefik-docker/front-test/package-lock.json
index 50da317..bd098d7 100644
--- a/Day-1/demo-traefik-docker/front-test/package-lock.json
+++ b/Day-1/demo-traefik-docker/front-test/package-lock.json
@@ -8498,9 +8498,9 @@
       "dev": true
     },
     "set-value": {
-      "version": "2.0.0",
-      "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.0.tgz",
-      "integrity": "sha512-hw0yxk9GT/Hr5yJEYnHNKYXkIA8mVJgd9ditYZCe16ZczcaELYYcfvaXesNACk2O8O0nTiPQcQhGUQj8JLzeeg==",
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.1.tgz",
+      "integrity": "sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==",
       "dev": true,
       "requires": {
         "extend-shallow": "^2.0.1",
@@ -8512,7 +8512,7 @@
       "extend-shallow": {
         "version": "2.0.1",
         "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
-        "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+        "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==",
        "dev": true,
        "requires": {
          "is-extendable": "^0.1.0"
        }
      },
@@ -9231,38 +9231,15 @@
       "dev": true
     },
     "union-value": {
-      "version": "1.0.0",
-      "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.0.tgz",
-      "integrity": "sha1-XHHDTLW61dzr4+oM0IIHulqhrqQ=",
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.1.tgz",
+      "integrity": "sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==",
       "dev": true,
       "requires": {
         "arr-union": "^3.1.0",
         "get-value": "^2.0.6",
         "is-extendable": "^0.1.1",
-        "set-value": "^0.4.3"
-      },
-      "dependencies": {
-        "extend-shallow": {
-          "version": "2.0.1",
-          "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
-          "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
-          "dev": true,
-          "requires": {
-            "is-extendable": "^0.1.0"
-          }
-        },
-        "set-value": {
-          "version": "0.4.3",
-          "resolved": "https://registry.npmjs.org/set-value/-/set-value-0.4.3.tgz",
-          "integrity": "sha1-fbCPnT0i3H945Trzw79GZuzfzPE=",
-          "dev": true,
-          "requires": {
-            "extend-shallow": "^2.0.1",
-            "is-extendable": "^0.1.1",
-            "is-plain-object": "^2.0.1",
-            "to-object-path": "^0.3.0"
-          }
-        }
+        "set-value": "^2.0.1"
       }
     },
     "uniq": {
diff --git a/solutions/Day-1/exercise-1/README.md b/solutions/Day-1/exercise-1/README.md
deleted file mode 100644
index e22852d..0000000
--- a/solutions/Day-1/exercise-1/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-## Start the Redis container
-
-Pull then start the Redis server:
-```sh
- docker run -d --name redis redis
-```
-
-Run the following command:
-```
- docker run -d --name nodeapp --network my-net -p 8080:8080 myrepo/nodeapp
-```
-
-Enter the nodeapp container and show the /etc/hosts file.
-Enter the nodeapp container and show the /etc/resolv.conf file.
-
-If needed, start an Ubuntu container connected to the network, then run `dig` to illustrate the DNS resolution.
diff --git a/solutions/Day-1/exercise-2/docker-compose.yml b/solutions/Day-1/exercise-2/docker-compose.yml
deleted file mode 100644
index c9898a7..0000000
--- a/solutions/Day-1/exercise-2/docker-compose.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-version: '3'
-services:
-  web:
-    build:
-      context: .
-    ports:
-    - 8080
-    depends_on:
-    - redis
-  redis:
-    image: redis
\ No newline at end of file
diff --git a/solutions/Day-1/exercise-3/docker-compose.yml b/solutions/Day-1/exercise-3/docker-compose.yml
deleted file mode 100644
index 5d4c103..0000000
--- a/solutions/Day-1/exercise-3/docker-compose.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-version: '3'
-services:
-  chat:
-    build:
-      context: .
-    ports:
-    - 8000:8080
-    volumes:
-    - ./client:/usr/src/my-app/client
diff --git a/solutions/Day-1/hands-on-buildah/README.md b/solutions/Day-1/hands-on-buildah/README.md
deleted file mode 100644
index db9f0f4..0000000
--- a/solutions/Day-1/hands-on-buildah/README.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# Hands-on: build images with Buildah
-
-
-## Run the simple image
-
-
-2. Run the container using Podman or Docker
-
-```sh
-podman run --rm demo-wescale-training:latest
-docker run --rm demo-wescale-training:latest
-```
-
-## Build with a Dockerfile
-
-```sh
-buildah bud -t demo-wescale-training-bud .
-podman run --rm demo-wescale-training-bud
-```
\ No newline at end of file
diff --git a/solutions/Day-1/hands-on-podman/README.md b/solutions/Day-1/hands-on-podman/README.md
deleted file mode 100644
index b80f45d..0000000
--- a/solutions/Day-1/hands-on-podman/README.md
+++ /dev/null
@@ -1,8 +0,0 @@
-
-```sh
-podman run --name nginx -p 8080:80 -v ./src:/usr/share/nginx/html/ nginx
-
-podman logs --latest
-
-docker ps -a
-```
\ No newline at end of file
diff --git a/solutions/Day-2/Deployment/README.md b/solutions/Day-2/Deployment/README.md
deleted file mode 100644
index 9e4667c..0000000
--- a/solutions/Day-2/Deployment/README.md
+++ /dev/null
@@ -1,96 +0,0 @@
-# exercise-3: deployments to ensure pods are running
-
-In this exercise, you will deploy pods via deployment resources.
-You will see the benefits of using a deployment:
-* easy updates of pods
-* horizontal scalability
-* maintaining *n* replicas of pods
-
-## Deploy version 1.0 with 2 replicas
-
-Create a deployment file with the following content (be careful, the provided `.yaml` file may be incorrect!
See [the documentation if needed](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#deployment-v1-apps)):
-```
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: hello-dep
-  namespace: default
-spec:
-  replicas-numberOf: 2
-  selector:
-    matchLabels:
-      app: hello-dep
-  template:
-    metadata:
-      labels:
-        app: hello-dep
-    spec:
-      containers:
-      - image: gcr.io/google-samples/hello-app:1.0
-        imagePullPolicy: Always
-        name: hello-dep
-        ports:
-        - containerPort: 8080
-```
-
-Create the deployment:
-```sh
-kubectl apply -f hello-v1.yml
-```
-
-Ensure you have 2 running pods:
-```sh
-kubectl get deployment,pods
-```
-
-Now delete one of the 2 created pods:
-```sh
-kubectl delete pod <pod-name>
-```
-
-Wait a few seconds to see a replacement pod for the one you deleted:
-```sh
-kubectl get deployment,pods
-```
-
-## Deploy the version 2.0
-```
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: hello-dep
-  namespace: default
-spec:
-  replicas: 2
-  selector:
-    matchLabels:
-      app: hello-dep
-  template:
-    metadata:
-      labels:
-        app: hello-dep
-    spec:
-      containers:
-      - image: gcr.io/google-samples/hello-app:2.0
-        imagePullPolicy: Always
-        name: hello-dep
-        ports:
-        - containerPort: 8080
- ```
-
-Apply the changes:
-```
-kubectl apply -f hello-v2.yml & kubectl get po -w
-```
-
-## "Scale up" the application
-
-You will change the number of replicas:
-```sh
-kubectl scale deployment hello-dep --replicas=3 & kubectl get po -w
-```
-
-## Clean all the resources
-```
-kubectl delete deployment --all
-```
diff --git a/solutions/Day-2/Deployment/exercise-3/hello-v1.yml b/solutions/Day-2/Deployment/exercise-3/hello-v1.yml
deleted file mode 100644
index fe0294e..0000000
--- a/solutions/Day-2/Deployment/exercise-3/hello-v1.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: hello-dep
-  namespace: default
-spec:
-  replicas: 2
-  selector:
-    matchLabels:
-      app: hello-dep
-  template:
-    metadata:
-      labels:
-        app: hello-dep
-    spec:
-      containers:
-      - image: gcr.io/google-samples/hello-app:1.0
-        imagePullPolicy: Always
-        name: hello-dep
-        ports:
-        - containerPort: 8080
diff --git a/solutions/Day-2/Deployment/exercise-3/hello-v2.yml b/solutions/Day-2/Deployment/exercise-3/hello-v2.yml
deleted file mode 100644
index 7e11930..0000000
--- a/solutions/Day-2/Deployment/exercise-3/hello-v2.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: hello-dep
-  namespace: default
-spec:
-  replicas: 2
-  selector:
-    matchLabels:
-      app: hello-dep
-  template:
-    metadata:
-      labels:
-        app: hello-dep
-    spec:
-      containers:
-      - image: gcr.io/google-samples/hello-app:2.0
-        imagePullPolicy: Always
-        name: hello-dep
-        ports:
-        - containerPort: 8080
diff --git a/solutions/Day-2/PVC-PV/exercise-4/pv-claim.yaml b/solutions/Day-2/PVC-PV/exercise-4/pv-claim.yaml
deleted file mode 100644
index 8794e27..0000000
--- a/solutions/Day-2/PVC-PV/exercise-4/pv-claim.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: task-pv-claim
-spec:
-  accessModes:
-  - ReadWriteOnce
-  resources:
-    requests:
-      storage: 1Gi
-
diff --git a/solutions/Day-2/PVC-PV/exercise-4/pv-pod.yaml b/solutions/Day-2/PVC-PV/exercise-4/pv-pod.yaml
deleted file mode 100644
index 9bb4ca4..0000000
--- a/solutions/Day-2/PVC-PV/exercise-4/pv-pod.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-kind: Pod
-apiVersion: v1
-metadata:
-  name: task-pv-pod
-spec:
-  volumes:
-    - name: task-pv-storage
-      persistentVolumeClaim:
-        claimName: task-pv-claim
-  containers:
-    - name: 
task-pv-container
-      image: nginx
-      ports:
-        - containerPort: 80
-          name: "http-server"
-      volumeMounts:
-        - mountPath: "/usr/share/nginx/html"
-          name: task-pv-storage
-
-
diff --git a/solutions/Day-2/Pods/exercise-1/nginx.yml b/solutions/Day-2/Pods/exercise-1/nginx.yml
deleted file mode 100644
index e701710..0000000
--- a/solutions/Day-2/Pods/exercise-1/nginx.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
-  name: nginx
-  labels:
-    env: development
-spec:
-  containers:
-  - name: nginx
-    image: nginx
-    command: ["nginx"]
-    args: ["-g", "daemon off;", "-q"]
-    ports:
-    - containerPort: 80
diff --git a/solutions/Day-2/Pods/exercise-2/README.md b/solutions/Day-2/Pods/exercise-2/README.md
deleted file mode 100644
index a87d13a..0000000
--- a/solutions/Day-2/Pods/exercise-2/README.md
+++ /dev/null
@@ -1,116 +0,0 @@
-# exercise-2: Taints and tolerations
-
-In this hands-on lab, you will get familiar with taints and tolerations.
-
-On your multi-worker cluster, you will deploy a pod which does not tolerate taints.
-Then, you will add taints to all the workers and see their effect.
-
-You will be responsible for splitting up the worker nodes and making:
-* one of the worker nodes a production (`prod`) environment node.
-* one of the worker nodes a development (`dev`) environment node.
-* one of the worker nodes a pre-production (`iso`) environment node.
-
-The purpose of identifying the production type is to not accidentally deploy pods into the production environment. You will use taints and tolerations to achieve this, and then you will deploy two pods: one pod will be scheduled to the dev environment, and one pod will be scheduled to the prod environment.
-
-## Deploy a pod which does not tolerate taints
-
-Get the list of nodes with their current taints:
-```sh
-kubectl get nodes -o custom-columns=NAME:.metadata.name,TAINTS:.spec.taints
-```
-
-Deploy a pod which does not tolerate taints (be careful, the provided `.yaml` file may be incorrect...):
-```sh
-kubectl apply -f no-toleration-pod.yml
-```
-
-Ensure the pod is running and note the worker it is running on.
-
-## For each worker node, apply a taint
-
-*Do not add taints to the master nodes!*:
-```
-kubectl taint node <node-name-1> node-type=prod:NoExecute
-kubectl taint node <node-name-2> node-type=dev:NoExecute
-kubectl taint node <node-name-3> node-type=iso:NoExecute
-```
-
-## Verify the taints are OK
-
-```sh
-kubectl get nodes -o custom-columns=NAME:.metadata.name,TAINTS:.spec.taints
-```
-
-Is the no-toleration-pod still running? Why?
-
-## Schedule a pod to the dev environment.
-
-Complete the given `dev-pod-busybox.yml` file to tolerate the taint `node-type` with value `dev` and effect `NoExecute`.
-
-Create the pod:
-```sh
-kubectl create -f dev-pod-busybox.yml
-```
-
-Verify it is running:
-```sh
-kubectl get pod -o wide
-```
-
-On which node is it running? Why?
-
-## Allow a pod to be scheduled to the prod environment.
-
-Create a yaml file containing the pod spec and a production taint toleration:
-```
-apiVersion: v1
-kind: Pod
-metadata:
-  name: prod-pod
-  labels:
-    app: busybox
-spec:
-  containers:
-  - name: prod
-    image: busybox
-    args:
-    - sleep
-    - "3600"
-    command: ['sh', '-c', 'echo Hello Kubernetes! && sleep 3600']
-  tolerations:
-  - key: node-type
-    operator: Equal
-    value: prod
-    effect: NoSchedule
-```
-
-Create the pod:
-```sh
-kubectl create -f prod-deployment.yml
-```
-
-Verify each pod has been scheduled and verify the tolerations:
-```
-kubectl get pods -o wide
-```
-
-Is the prod pod running? Why?
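-
-If the answer is not obvious, you can ask the scheduler directly. A minimal check — a sketch, assuming the pod names used above:
-```sh
-# The prod pod only tolerates NoSchedule while the workers carry NoExecute
-# taints, so it will likely stay Pending: the events explain the decision.
-kubectl describe pod prod-pod | grep -A 5 Events
-
-# Compare the tolerations of all pods side by side:
-kubectl get pods -o custom-columns=NAME:.metadata.name,TOLERATIONS:.spec.tolerations
-```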
-
-## Clean
-
-Remove the taint from all the worker nodes.
-For that, use the `taint node` subcommand and append a `-` to the taint key:
-```
-kubectl taint node <node-name-1> node-type-
-kubectl taint node <node-name-2> node-type-
-```
-
-If your cluster has 3 worker nodes:
-```sh
-kubectl taint node <node-name-3> node-type-
-```
-
-Delete all the created pods:
-```sh
-kubectl delete -f .
-```
diff --git a/solutions/Day-2/Pods/exercise-2/dev-pod-busybox.yml b/solutions/Day-2/Pods/exercise-2/dev-pod-busybox.yml
deleted file mode 100644
index 68aca85..0000000
--- a/solutions/Day-2/Pods/exercise-2/dev-pod-busybox.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
-  name: dev-pod
-  labels:
-    app: busybox
-spec:
-  containers:
-  - name: dev
-    image: busybox
-    command: ['sh', '-c', 'echo Hello Kubernetes! && sleep 3600']
-  tolerations:
-  - key: node-type
-    operator: Equal
-    value: dev
-    effect: NoExecute
\ No newline at end of file
diff --git a/solutions/Day-2/Pods/exercise-2/no-toleration-pod.yml b/solutions/Day-2/Pods/exercise-2/no-toleration-pod.yml
deleted file mode 100644
index 55010f5..0000000
--- a/solutions/Day-2/Pods/exercise-2/no-toleration-pod.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
-  name: no-toleration-pod
-  labels:
-    app: busybox
-spec:
-  containers:
-  - name: dev
-    image: busybox
-    command: ['sh', '-c', 'echo Hello Kubernetes! && sleep 3600']
\ No newline at end of file
diff --git a/solutions/Day-2/Pods/exercise-2/prod-deployment.yml b/solutions/Day-2/Pods/exercise-2/prod-deployment.yml
deleted file mode 100644
index cde4c13..0000000
--- a/solutions/Day-2/Pods/exercise-2/prod-deployment.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
-  name: prod-pod
-  labels:
-    app: busybox
-spec:
-  containers:
-  - name: prod
-    image: busybox
-    args:
-    - sleep
-    - "3600"
-    command: ['sh', '-c', 'echo Hello Kubernetes! && sleep 3600']
-  tolerations:
-  - key: "node-type"
-    operator: "Equal"
-    value: "prod"
-    effect: "NoSchedule"
diff --git a/solutions/Day-2/RBAC/README.md b/solutions/Day-2/RBAC/README.md
deleted file mode 100644
index 6ba2fb8..0000000
--- a/solutions/Day-2/RBAC/README.md
+++ /dev/null
@@ -1,67 +0,0 @@
-# exercise-2: RBAC
-
-In this exercise, you will create a configmap and try to get the configmap from a pod, using a service account.
-
-
-## Create a pod using a service account
-```sh
-kubectl create serviceaccount myapp
-```
-
-Modify the `pod-sa.yaml` file to have the pod use the `myapp` service account.
-
-Then create the pod:
-```sh
-kubectl create -f pod-sa.yaml
-```
-
-## Create a configmap and try to access it from the pod
-
-Create a configmap:
-```sh
-kubectl create configmap myconfig --from-literal data_1=foo
-```
-
-Get inside the pod and execute:
-```sh
-kubectl exec -it pod-sa -- bash
-# then display the configmap
-kubectl get configmap myconfig
-```
-
-Can you explain what happened?
-
-## Create a Role and a RoleBinding for the service account
-
-Complete the `role.yaml` file to allow read access to configmaps:
-```yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: configmap-reader
-rules:
-- apiGroups: [""]
-  # Complete the file
-  ...
-```
-
-Create a RoleBinding:
-```sh
-kubectl create -f binding.yaml
-```
-
-Try again to display the configmap inside the pod.
-
-Then try to change the configmap.
-
-Can you explain what happened?
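-
-To check the answer without guessing, you can query the RBAC layer directly. A minimal check — a sketch, assuming the `default` namespace and the names used above:
-```sh
-# Ask the API server what the pod's service account may do:
-kubectl auth can-i get configmaps --as=system:serviceaccount:default:myapp
-kubectl auth can-i update configmaps --as=system:serviceaccount:default:myapp
-```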
-
-## Clean
-
-```sh
-kubectl delete -f pod-sa.yaml
-kubectl delete -f binding.yaml
-kubectl delete -f role.yaml
-kubectl delete cm/myconfig
-kubectl delete sa/myapp
-```
\ No newline at end of file
diff --git a/solutions/Day-2/RBAC/binding.yaml b/solutions/Day-2/RBAC/binding.yaml
deleted file mode 100644
index bbef74c..0000000
--- a/solutions/Day-2/RBAC/binding.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: configmap-role-binding
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: configmap-reader
-subjects:
-- apiGroup: ""
-  kind: ServiceAccount
-  name: myapp
\ No newline at end of file
diff --git a/solutions/Day-2/RBAC/pod-sa.yaml b/solutions/Day-2/RBAC/pod-sa.yaml
deleted file mode 100644
index c70c122..0000000
--- a/solutions/Day-2/RBAC/pod-sa.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
-  name: pod-sa
-spec:
-  serviceAccountName: myapp
-  containers:
-  - image: roffe/kubectl
-    imagePullPolicy: Always
-    name: kubectl
-    command: [ "bash", "-c", "--" ]
-    args: [ "while true; do sleep 30; done;" ]
\ No newline at end of file
diff --git a/solutions/Day-2/RBAC/role.yaml b/solutions/Day-2/RBAC/role.yaml
deleted file mode 100644
index a201599..0000000
--- a/solutions/Day-2/RBAC/role.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: configmap-reader
-rules:
-- apiGroups: [""]
-  resources: ["configmaps"]
-  verbs: ["get", "watch", "list"]
\ No newline at end of file
diff --git a/solutions/Day-2/Service/exercise-5/ClusterIP/README.md b/solutions/Day-2/Service/exercise-5/ClusterIP/README.md
deleted file mode 100644
index c039fba..0000000
--- a/solutions/Day-2/Service/exercise-5/ClusterIP/README.md
+++ /dev/null
@@ -1,79 +0,0 @@
-# exercise-5: ClusterIP
-
-You will create a ClusterIP service and access it.
-
-## Create a Deployment
-
-Here is the deployment file:
-```
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: my-deployment
-spec:
-  selector:
-    matchLabels:
-      app: metrics
-      department: sales
-  replicas: 3
-  template:
-    metadata:
-      labels:
-        app: metrics
-        department: sales
-    spec:
-      containers:
-      - name: hello
-        image: "gcr.io/google-samples/hello-app:2.0"
-```
-
-```sh
-kubectl apply -f deployment.yaml
-```
-
-## Create a ClusterIP service
-
-Here is the service file:
-```
-apiVersion: v1
-kind: Service
-metadata:
-  name: my-cip-service
-spec:
-  type: ClusterIP
-  selector:
-    app: metrics
-    department: ingeneering
-  ports:
-  - protocol: TCP
-    port: 80
-    targetPort: 8080
-```
-
-```sh
-kubectl apply -f service.yaml
-```
-
-Describe the service and ensure the service has entries in its `endpoints`... if not, correct the `service.yaml` because it may be incorrect!
-
-## Test the service connectivity
-
-Determine the CLUSTER-IP of the service.
-
-Access the service: try [CLUSTER_IP]:80. This does not work. Why? How can you access the service?
->> It doesn't work because a ClusterIP service can only be reached from inside the cluster.
-
-Execute a shell inside a pod of the service:
-```sh
-kubectl exec -it <pod-name> -- /bin/sh
-# Install curl
-apk update && apk add --no-cache curl
-```
-
-Try curl on http://my-cip-service.default.svc.cluster.local:80
-
-## Clean all resources
-
-```sh
-kubectl delete -f .
-``` \ No newline at end of file diff --git a/solutions/Day-2/Service/exercise-5/ClusterIP/deployment.yaml b/solutions/Day-2/Service/exercise-5/ClusterIP/deployment.yaml deleted file mode 100644 index 1e516b8..0000000 --- a/solutions/Day-2/Service/exercise-5/ClusterIP/deployment.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: my-deployment -spec: - selector: - matchLabels: - app: metrics - department: sales - replicas: 3 - template: - metadata: - labels: - app: metrics - department: sales - spec: - containers: - - name: hello - image: "gcr.io/google-samples/hello-app:2.0" - readinessProbe: - httpGet: - path: / - port: 8080 - initialDelaySeconds: 30 - timeoutSeconds: 10 \ No newline at end of file diff --git a/solutions/Day-2/Service/exercise-5/ClusterIP/service.yaml b/solutions/Day-2/Service/exercise-5/ClusterIP/service.yaml deleted file mode 100644 index b2e407e..0000000 --- a/solutions/Day-2/Service/exercise-5/ClusterIP/service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: my-cip-service -spec: - type: ClusterIP - selector: - app: metrics - department: sales - ports: - - protocol: TCP - port: 80 - targetPort: 8080 \ No newline at end of file diff --git a/solutions/Day-2/Service/exercise-5/LoadBalancer/my-deployment-50001.yaml b/solutions/Day-2/Service/exercise-5/LoadBalancer/my-deployment-50001.yaml deleted file mode 100644 index 477d256..0000000 --- a/solutions/Day-2/Service/exercise-5/LoadBalancer/my-deployment-50001.yaml +++ /dev/null @@ -1,28 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: my-deployment -spec: - selector: - matchLabels: - app: metrics - department: sales - replicas: 3 - template: - metadata: - labels: - app: metrics - department: sales - spec: - containers: - - name: hello - image: "gcr.io/google-samples/hello-app:2.0" - env: - - name: "PORT" - value: "50001" - readinessProbe: - httpGet: - path: / - port: 50001 - initialDelaySeconds: 30 - timeoutSeconds: 10 diff --git a/solutions/Day-2/Service/exercise-5/NodePort/service.yaml b/solutions/Day-2/Service/exercise-5/NodePort/service.yaml deleted file mode 100644 index 82c7179..0000000 --- a/solutions/Day-2/Service/exercise-5/NodePort/service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: my-np-service -spec: - type: NodePort - selector: - app: metrics - department: engineering - ports: - - protocol: TCP - port: 80 - targetPort: 50000 \ No newline at end of file diff --git a/solutions/Day-2/configMap&Secret/README.md b/solutions/Day-2/configMap&Secret/README.md deleted file mode 100644 index dcb4317..0000000 --- a/solutions/Day-2/configMap&Secret/README.md +++ /dev/null @@ -1,134 +0,0 @@ -# exercise-1: A secret and a configmap for a database. - -In this exercise, you will create a k8s secret as well as a k8s configmap to correctly configure a MariaDB database. - -# Secrets - -## Create the MYSQL_ROOT_PASSWORD secret - -Generate a base-64 encoded string: -``` -echo -n 'KubernetesTraining!' 
| base64
-```
-
-Note the value and put it in the secret definition:
-```
-apiVersion: v1
-kind: Secret
-metadata:
-  name: mariadb-root-password
-type: Opaque
-data:
-  password: YOUR_VALUE
-```
-
-Then, create the `mariadb-root-password` secret:
-```sh
-kubectl apply -f mysql-secret.yaml
-```
-
-## View the secret:
-
-```sh
-kubectl describe secret mariadb-root-password
-kubectl get secret mariadb-root-password -o jsonpath='{.data.password}' | base64 -d
-```
-
-## Create a secret for the db user - a second way to create a secret
-
-```sh
-kubectl create secret generic mariadb-user-creds \
-  --from-literal=MYSQL_USER=kubeuser \
-  --from-literal=MYSQL_PASSWORD=KubernetesTraining
-```
-
-## View the secret:
-
-You are a k8s ninja!
-You know how to do that.
-
-```sh
-kubectl get secret mariadb-user-creds -o jsonpath='{.data.MYSQL_PASSWORD}' | base64 -d
-```
-
-# ConfigMap
-
-## Create a configMap to configure the mariadb application
-
-Create the configmap `mariadb-config` from the `max_allowed_packet.cnf` file:
-```sh
-kubectl create configmap # Complete arguments
-```
-
-Edit the configMap to change the `max_allowed_packet` value to 32M:
-
-```sh
-kubectl edit configmap mariadb-config
-```
-
-# Use the secrets and configMap
-
-## Add 2 secrets as environment variables to the Deployment:
-
-* `mariadb-root-password`: key/value pair
-* `mariadb-user-creds`: key/value pair
-
-Both elements must be added to the deployment:
-
-```
-env:
-  - name: MYSQL_ROOT_PASSWORD
-    valueFrom:
-      secretKeyRef:
-        name: mariadb-root-password
-        key: password
-```
-
-and
-
-```
-envFrom:
-- secretRef:
-    name: mariadb-user-creds
-```
-
-## Add your configMap to the deployment
-
-Add your ConfigMap as a source in the `volumes` entry of the pod spec. Then add a `volumeMount` to the container definition.
-
-Use the configMap as a `volumeMount` to `/etc/mysql/conf.d`.
-
-See the documentation for help.
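-
-If you prefer to stay in the terminal, the relevant API fields can be inspected directly. A minimal pointer — a sketch; these field paths are standard, but verify them against your cluster version:
-```sh
-# Field-level documentation for the two pieces you need to wire together:
-kubectl explain deployment.spec.template.spec.volumes.configMap
-kubectl explain deployment.spec.template.spec.containers.volumeMounts
-```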
-
-## Create the deployment
-
-```sh
-kubectl create -f mariadb-deployment.yaml
-```
-
-Verify the pod uses the Secrets and ConfigMap:
-```
-kubectl exec -it [pod-id] -- env | grep MYSQL
-kubectl exec -it [pod-id] -- ls /etc/mysql/conf.d
-
-kubectl exec -it [pod-id] -- cat /etc/mysql/conf.d/max_allowed_packet.cnf
-```
-
-## Check if it works
-```sh
-kubectl exec -it [pod-id] -- /bin/sh
-
-mysql -uroot -p${MYSQL_ROOT_PASSWORD} -e 'show databases;'
-mysql -uroot -p${MYSQL_ROOT_PASSWORD} -e "SHOW VARIABLES LIKE 'max_allowed_packet';"
-```
-
-# Clean
-```
-kubectl delete deployment mariadb-deployment
-kubectl delete cm mariadb-config
-kubectl delete secret mariadb-root-password mariadb-user-creds
-```
-
-
-
-
diff --git a/solutions/Day-2/configMap&Secret/mariadb-deployment.yaml b/solutions/Day-2/configMap&Secret/mariadb-deployment.yaml
deleted file mode 100644
index dad6138..0000000
--- a/solutions/Day-2/configMap&Secret/mariadb-deployment.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  labels:
-    app: mariadb
-  name: mariadb-deployment
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: mariadb
-  template:
-    metadata:
-      labels:
-        app: mariadb
-    spec:
-      containers:
-      - image: docker.io/mariadb:10.4
-        name: mariadb
-        env:
-        - name: MYSQL_ROOT_PASSWORD
-          valueFrom:
-            secretKeyRef:
-              name: mariadb-root-password
-              key: password
-        envFrom:
-        - secretRef:
-            name: mariadb-user-creds
-        ports:
-        - containerPort: 3306
-          protocol: TCP
-        volumeMounts:
-        - mountPath: /var/lib/mysql
-          name: mariadb-volume-1
-        - mountPath: /etc/mysql/conf.d
-          name: mariadb-config-volume
-      volumes:
-      - emptyDir: {}
-        name: mariadb-volume-1
-      - configMap:
-          name: mariadb-config
-          items:
-          - key: max_allowed_packet.cnf
-            path: max_allowed_packet.cnf
-        name: mariadb-config-volume
\ No newline at end of file
diff --git a/solutions/Day-2/configMap&Secret/max_allowed_packet.cnf b/solutions/Day-2/configMap&Secret/max_allowed_packet.cnf
deleted file mode 100644
index 5ccc57f..0000000
--- a/solutions/Day-2/configMap&Secret/max_allowed_packet.cnf
+++ /dev/null
@@ -1,2 +0,0 @@
-[mysqld]
-max_allowed_packet = 64M
diff --git a/solutions/Day-2/configMap&Secret/mysql-secret.yaml b/solutions/Day-2/configMap&Secret/mysql-secret.yaml
deleted file mode 100644
index 5a5aee5..0000000
--- a/solutions/Day-2/configMap&Secret/mysql-secret.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-apiVersion: v1
-kind: Secret
-metadata:
-  name: mariadb-root-password
-type: Opaque
-data:
-  password: S3ViZXJuZXRlc1RyYWluaW5nIQ==
\ No newline at end of file
diff --git a/solutions/Day-2/ingressController/exercise-6/README.md b/solutions/Day-2/ingressController/exercise-6/README.md
deleted file mode 100644
index 32253b2..0000000
--- a/solutions/Day-2/ingressController/exercise-6/README.md
+++ /dev/null
@@ -1,110 +0,0 @@
-# exercise-6: IngressRules
-
-In this exercise, you will create an application pod (`/v1`) and expose it through an Ingress targeting a Service.
-
-
-Then you will create a second version of the application (`/v2`) and manage the routing via another Ingress.
-
-## Deploy version /v1 of the application
-
-Instead of creating a YAML file, use the imperative command `kubectl run` to create a pod with:
-* name: `web`
-* image: `gcr.io/google-samples/hello-app:1.0`
-* declared port: 8080
->> kubectl run web --image=gcr.io/google-samples/hello-app:1.0 --port=8080
-
-Instead of creating a YAML file, use the imperative command `kubectl expose` to create a NodePort service targeting the pod above.
->>> kubectl expose pod web --type=NodePort --target-port=8080 --port=80
-
-Ensure the pod and the service are OK.
->> kubectl get pod web
->> kubectl get service web
->> kubectl describe service web
-
-
-## Create an Ingress resource without path based routing rules
-
-Here is the Ingress definition file:
-```
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: basic-ingress
-spec:
-  defaultBackend:
-    service:
-      name: web
-      port:
-        number: 80
-```
-
-Create the ingress. Be careful, it may be incorrect regarding the service we want to target...
-```sh
-kubectl apply -f basic-ingress.yaml
-```
-
-Ensure the ingress is correctly created (it can take some time):
-```sh
-kubectl get ingress basic-ingress
-kubectl describe ingress basic-ingress
-```
-
-Test the connectivity - what is the IP to connect to?
-
-## Deploy a second version of the application
-
-Create the Pod and service for version 2:
-```sh
-kubectl create deployment web2 --image=gcr.io/google-samples/hello-app:2.0 --port=8080
-kubectl expose deployment web2 --target-port=8080 --port=8080 --type=NodePort
-```
-
-Complete the given `fanout-ingress.yaml` file to add a `/v2` path which targets the web2 service:
-```
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: fanout-ingress
-spec:
-  rules:
-  - http:
-      paths:
-      - path: /v1
-        pathType: ImplementationSpecific
-        backend:
-          service:
-            name: web
-            port:
-              number: 80
-      - path: /v2
-        pathType: ImplementationSpecific
-        backend:
-          service:
-            name: web2
-            port:
-              number: 8080
-  ...
-```
-
-```sh
-kubectl apply -f fanout-ingress.yaml
-```
-
-Ensure the ingress is correctly created (it can take some time):
-
-```sh
-kubectl get ingress fanout-ingress
-```
-
-Connect to the services via the new Ingress.
-
-Test URLs `/v1` and `/v2`.
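-
-For a quick check from the command line — a sketch, assuming your ingress controller publishes an external IP in the Ingress status:
-```sh
-INGRESS_IP=$(kubectl get ingress fanout-ingress -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
-# Each path should answer with a different application version:
-curl http://$INGRESS_IP/v1   # expect a "Version: 1.0.0" response
-curl http://$INGRESS_IP/v2   # expect a "Version: 2.0.0" response
-```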
-
-## Clean
-```
-kubectl delete ingress basic-ingress
-kubectl delete ingress fanout-ingress
-kubectl delete pod web
-kubectl delete deployment web2
-kubectl delete service web web2
-```
diff --git a/solutions/Day-2/ingressController/exercise-6/basic-ingress.yaml b/solutions/Day-2/ingressController/exercise-6/basic-ingress.yaml
deleted file mode 100644
index ca8697a..0000000
--- a/solutions/Day-2/ingressController/exercise-6/basic-ingress.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: basic-ingress
-spec:
-  defaultBackend:
-    service:
-      name: web
-      port:
-        number: 80
diff --git a/solutions/Day-2/ingressController/exercise-6/fanout-ingress.yaml b/solutions/Day-2/ingressController/exercise-6/fanout-ingress.yaml
deleted file mode 100644
index 7a591ad..0000000
--- a/solutions/Day-2/ingressController/exercise-6/fanout-ingress.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: fanout-ingress
-spec:
-  rules:
-  - http:
-      paths:
-      - path: /v1
-        pathType: ImplementationSpecific
-        backend:
-          service:
-            name: web
-            port:
-              number: 80
-      - path: /v2
-        pathType: ImplementationSpecific
-        backend:
-          service:
-            name: web2
-            port:
-              number: 8080
diff --git a/solutions/Day-3/GitOps/application.yaml b/solutions/Day-3/GitOps/application.yaml
deleted file mode 100644
index cc13159..0000000
--- a/solutions/Day-3/GitOps/application.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-apiVersion: argoproj.io/v1alpha1
-kind: Application
-metadata:
-  name: declarative-gitops-chart
-  namespace: argocd
-spec:
-  project: default
-  source:
-    repoURL: 'https://github.com/wescale/k8s-advanced-training.git'
-    targetRevision: HEAD
-    path: 'Correction/Helm/exercice 2/sample-demo'
-  syncPolicy:
-    syncOptions:
-    - CreateNamespace=true
-  destination:
-    server: https://kubernetes.default.svc
-    namespace: 'declarative-gitops'
diff --git a/solutions/Day-3/Helm/exercice 1/README.md b/solutions/Day-3/Helm/exercice 1/README.md
deleted file mode 100644
index 3c63ec2..0000000
--- a/solutions/Day-3/Helm/exercice 1/README.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# Install Helm 3 and try it
-
-
-- Install Helm: https://helm.sh/docs/intro/install/
-- Create a new chart => simpleapp. Use Helm commands:
-```sh
-helm create simpleapp
-```
-- Can you browse through all the generated files (templates, values...) and try to understand how it works?
-- What is the default application in the generated chart?
-=> nginx
-- Install the application. And voilà, you have an application up and running in the Kubernetes cluster. Check it out.
-```sh
-$ helm install simpleapp ./simpleapp
-```
-- Check out the revision and version saved by Helm, using the Helm API:
-```sh
-$ helm ls
-$ helm get manifest simpleapp
-$ kubectl get all
-```
-- Clean up this installation using Helm commands:
-```sh
-$ helm uninstall simpleapp
-```
-
-
-
-
-
diff --git a/solutions/Day-3/Helm/exercice 2/README.md b/solutions/Day-3/Helm/exercice 2/README.md
deleted file mode 100644
index 0b8fa03..0000000
--- a/solutions/Day-3/Helm/exercice 2/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Install an application from a predefined chart
-
-The objective of this exercise is to migrate the k8s configuration of an application to Helm.
-This is a simple two-tier application: a frontend and a backend.
-
-Rewrite the application's YAML files as a Helm chart. You will find here:
-
-- _helpers.tpl
-This file is a place where you store all the reusable parts.
Typically something like the application name, deployment name, service name, etc. In this file, we can also use a template function: for example, here we defined `simplebackend.fullname` with a value taken from `.Release.Name` and `.Chart.Name`, suffixed with "backend".
-
-
-- values.yaml:
-We put the image repository and container ports for both the backend and the frontend. This way, we can use the chart later to deploy a different application image and maybe publish it on different ports.
-
-- deployment.yaml
-
-We can create our template for deployments in templates/deployment.yaml. This template will be used to generate both backend-deployment.yaml and frontend-deployment.yaml.
-
-- service.yaml
-This template will be used to generate both backend-service.yaml and frontend-service.yaml.
-
-- Install the application. And voilà, you have an application up and running in the Kubernetes cluster. Check it out.
-```sh
-$ helm install simpleapp ./sample-demo
-```
\ No newline at end of file
diff --git a/solutions/Day-3/Helm/exercice 2/namespaces.yaml b/solutions/Day-3/Helm/exercice 2/namespaces.yaml
deleted file mode 100644
index a82638b..0000000
--- a/solutions/Day-3/Helm/exercice 2/namespaces.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: helm-demo
-  labels:
-    stage: test
\ No newline at end of file
diff --git a/solutions/Day-3/Helm/exercice 2/sample-demo/Chart.yaml b/solutions/Day-3/Helm/exercice 2/sample-demo/Chart.yaml
deleted file mode 100644
index d8e3841..0000000
--- a/solutions/Day-3/Helm/exercice 2/sample-demo/Chart.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-apiVersion: v2
-name: simpleapp
-description: A very simple helloworld application
-type: application
-# This is the chart version. This version number should be
-# incremented each time you make changes
-# to the chart and its templates, including the app version.
-version: 1.0.0
-# This is the version number of the application being deployed.
-# This version number should be
-# incremented each time you make changes to the application.
-appVersion: 1.0.0
\ No newline at end of file
diff --git a/solutions/Day-3/Helm/exercice 2/sample-demo/NOTES.txt b/solutions/Day-3/Helm/exercice 2/sample-demo/NOTES.txt
deleted file mode 100644
index 42c133f..0000000
--- a/solutions/Day-3/Helm/exercice 2/sample-demo/NOTES.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-{{- if .Values.frontend.enabled }}
-  Get the Frontend application URL by running these commands:
-{{- if .Values.frontend.ingress.enabled }}
-{{- range $host := .Values.frontend.ingress.hosts }}
-  {{- range .paths }}
-  http{{ if $.Values.frontend.ingress.tls }}s{{ end }}://
-  {{ $host.host }}{{ . }}
-  {{- end }}
-{{- end }}
-{{- else if contains "NodePort" .Values.frontend.service.type }}
-  export NODE_PORT=$(kubectl get
-  --namespace {{ .Release.Namespace }} -o jsonpath="
-  {.spec.ports[0].nodePort}" services
-  {{ include "simplefrontend-service.fullname" . }})
-  export NODE_IP=$(kubectl get nodes --namespace
-  {{ .Release.Namespace }} -o
-  jsonpath="{.items[0].status.addresses[0].address}")
-  echo http://$NODE_IP:$NODE_PORT
-{{- else if contains "LoadBalancer" .Values.frontend.service.type }}
-  NOTE: It may take a few minutes for the LoadBalancer IP to be
-  available.
-  You can watch the status of by running
-  'kubectl get --namespace {{ .Release.Namespace }} svc -w
-  {{ include "simplefrontend-service.fullname" . }}'
-  export SERVICE_IP=$(kubectl get svc --namespace
-  {{ .Release.Namespace }}
-  {{ include "simplefrontend-service.fullname" . 
}} - --template "{{"{{ range (index .status.loadBalancer.ingress 0) }} - {{.}}{{ end }}"}}") - - echo http://$SERVICE_IP:{{ .Values.frontend.service.port }} -{{- else if contains "ClusterIP" .Values.frontend.service.type }} - export POD_NAME=$(kubectl get pods --namespace - {{ .Release.Namespace }} -l - app={{ include "simplefrontend.fullname" . }} - -o jsonpath="{.items[0].metadata.name}") - echo "Visit http://127.0.0.1:80 to use your application" - kubectl --namespace {{ .Release.Namespace }} - port-forward $POD_NAME 80:80 -{{- end }} -{{- end }} -{{- if .Values.backend.enabled }} - Get the application Backend URL by running these commands: - export POD_NAME=$(kubectl get pods --namespace - {{ .Release.Namespace }} - -l app={{ include "simplebackend.fullname" . }} - -o jsonpath="{.items[0].metadata.name}") - - echo "Visit http://127.0.0.1:8080 to use your application" - - kubectl --namespace {{ .Release.Namespace }} - port-forward $POD_NAME 80:8080 -{{- end }} \ No newline at end of file diff --git a/solutions/Day-3/Helm/exercice 2/sample-demo/templates/_helpers.tpl b/solutions/Day-3/Helm/exercice 2/sample-demo/templates/_helpers.tpl deleted file mode 100644 index 8198314..0000000 --- a/solutions/Day-3/Helm/exercice 2/sample-demo/templates/_helpers.tpl +++ /dev/null @@ -1,73 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* Expand the name of the chart. */}} -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "simpleapp.name" -}} -{{- default .Chart.Name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- define "simpleapp.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} -{{- define "simplebackend.fullname" -}} -{{- printf "%s-%s-backend" .Release.Name .Chart.Name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- define "simplefrontend.fullname" -}} -{{- printf "%s-%s-frontend" .Release.Name .Chart.Name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- define "simplebackend-deployment.fullname" -}} -{{- printf "%s-%s-backend-dpl" .Release.Name .Chart.Name| trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- define "simplebackend-service.url" -}} -{{- if .Values.frontend.env.backend -}} -{{- .Values.frontend.env.backend | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "http://%s-%s-backend-svc:8080" .Release.Name .Chart.Name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- define "simplefrontend-deployment.fullname" -}} -{{- printf "%s-%s-frontend-dpl" .Release.Name .Chart.Name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- define "simplebackend-service.fullname" -}} -{{- printf "%s-%s-backend-svc" .Release.Name .Chart.Name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- define "simplefrontend-service.fullname" -}} -{{- printf "%s-%s-frontend-svc" .Release.Name .Chart.Name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{/* Create chart name and version as used by the chart label. 
*/}} -{{- define "simpleapp.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{/* Common labels */}} -{{- define "simpleapp.labels" -}} -helm.sh/chart: {{ include "simpleapp.chart" . }} -{{ include "simpleapp.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end -}} -{{/* Selector labels */}} -{{- define "simpleapp.selectorLabels" -}} -app.kubernetes.io/name: {{ include "simpleapp.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end -}} -{{/* Create the name of the service account to use */}} -{{- define "simpleapp.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (include "simpleapp.fullname" .) .Values.serviceAccount.name }} -{{- else -}} -{{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} \ No newline at end of file diff --git a/solutions/Day-3/Helm/exercice 2/sample-demo/templates/deployment.yaml b/solutions/Day-3/Helm/exercice 2/sample-demo/templates/deployment.yaml deleted file mode 100644 index a751f44..0000000 --- a/solutions/Day-3/Helm/exercice 2/sample-demo/templates/deployment.yaml +++ /dev/null @@ -1,66 +0,0 @@ -{{- if .Values.backend.enabled -}} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "simplebackend-deployment.fullname" . }} - labels: - app.kubernetes.io/stack: - {{ include "simplebackend.fullname" . }} - {{- include "simpleapp.labels" . | nindent 4 }} -spec: - replicas: {{ .Values.backend.replicaCount }} - selector: - matchLabels: - app: {{ include "simplebackend.fullname" . }} - template: - metadata: - labels: - app: {{ include "simplebackend.fullname" . }} - spec: - containers: - - name: {{ include "simplebackend.fullname" . }} - image: "{{ .Values.backend.image.repository }}:{{ .Values.backend.image.tag | default "latest" }}" - imagePullPolicy: {{ .Values.backend.image.pullPolicy }} - ports: - - name: http - containerPort: 8080 - protocol: TCP - resources: - {{- toYaml .Values.backend.resources | nindent 12 }} -{{- end }} ---- -{{- if .Values.frontend.enabled -}} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "simplefrontend-deployment.fullname" . }} - labels: - app.kubernetes.io/stack: - {{ include "simplefrontend.fullname" . }} - {{- include "simpleapp.labels" . | nindent 4 }} -spec: - replicas: {{ .Values.frontend.replicaCount }} - selector: - matchLabels: - app: {{ include "simplefrontend.fullname" . }} - template: - metadata: - labels: - app: {{ include "simplefrontend.fullname" . }} - spec: - containers: - - name: {{ include "simplefrontend.fullname" . 
}} - image: - "{{ .Values.frontend.image.repository }}:{{ .Values.frontend.image.tag | default "latest" }}" - imagePullPolicy: {{ .Values.frontend.image.pullPolicy }} - env: - - name: BACKEND_URL - value: {{ include "simplebackend-service.url" .}} - ports: - - name: http - containerPort: - {{ .Values.frontend.image.ports.containerPort }} - protocol: TCP - resources: - {{- toYaml .Values.frontend.resources | nindent 12 }} -{{- end }} \ No newline at end of file diff --git a/solutions/Day-3/Helm/exercice 2/sample-demo/templates/service.yaml b/solutions/Day-3/Helm/exercice 2/sample-demo/templates/service.yaml deleted file mode 100644 index fe95703..0000000 --- a/solutions/Day-3/Helm/exercice 2/sample-demo/templates/service.yaml +++ /dev/null @@ -1,39 +0,0 @@ -{{- if .Values.backend.enabled -}} -apiVersion: v1 -kind: Service -metadata: - name: {{ include "simplebackend-service.fullname" . }} - labels: - app.kubernetes.io/stack: - {{ include "simplebackend.fullname" . }} - {{- include "simpleapp.labels" . | nindent 4 }} -spec: - type: ClusterIP - ports: - - port: 8080 - targetPort: http - protocol: TCP - name: http - selector: - app: {{ include "simplebackend.fullname" . }} -{{- end }} ---- -{{- if .Values.frontend.enabled -}} -apiVersion: v1 -kind: Service -metadata: - name: {{ include "simplefrontend-service.fullname" . }} - labels: - app.kubernetes.io/stack: - {{ include "simplefrontend.fullname" . }} - {{- include "simpleapp.labels" . | nindent 4 }} -spec: - type: {{ .Values.frontend.service.type }} - ports: - - port: {{ .Values.frontend.service.port }} - targetPort: http - protocol: TCP - name: http - selector: - app: {{ include "simplefrontend.fullname" . }} -{{- end }} diff --git a/solutions/Day-3/Helm/exercice 2/sample-demo/values.yaml b/solutions/Day-3/Helm/exercice 2/sample-demo/values.yaml deleted file mode 100644 index db0dcae..0000000 --- a/solutions/Day-3/Helm/exercice 2/sample-demo/values.yaml +++ /dev/null @@ -1,47 +0,0 @@ -# Default values for simpleapp. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. -fullnameOverride: "" -nameOverride: "" -serviceAccount: - # Specifies whether a service account should be created - create: true - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. 
-  # If not set and create is true,
-  # a name is generated using the fullname template
-  name:
-frontend:
-  enabled: true
-  replicaCount: 1
-  image:
-    repository: eu.gcr.io/sandbox-training-225413/simplefrontend
-    pullPolicy: Always
-    ports:
-      containerPort: 80
-  env:
-    backend: ""
-  service:
-    type: NodePort
-    port: 80
-  ingress:
-    enabled: false
-    annotations:
-      kubernetes.io/ingress.class: nginx
-    hosts:
-      - host: chart-example.local
-        paths:
-          - /
-    tls: []
-    # - secretName: chart-example-tls
-    #   hosts:
-    #     - chart-example.local
-  resources: {}
-backend:
-  enabled: true
-  replicaCount: 1
-  image:
-    repository: lmnzr/simplebackend
-    pullPolicy: Always
-  resources: {}
\ No newline at end of file
diff --git a/solutions/Day-3/autoscaler/exercise-8/README.md b/solutions/Day-3/autoscaler/exercise-8/README.md
deleted file mode 100644
index 947b807..0000000
--- a/solutions/Day-3/autoscaler/exercise-8/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-```sh
-kubectl autoscale deployment php-apache --cpu-percent=50 --min=1 --max=10
-```
\ No newline at end of file
diff --git a/solutions/Day-3/deployment_strategies/exercise-5/README.md b/solutions/Day-3/deployment_strategies/exercise-5/README.md
deleted file mode 100644
index 8df0cd4..0000000
--- a/solutions/Day-3/deployment_strategies/exercise-5/README.md
+++ /dev/null
@@ -1,70 +0,0 @@
-# exercise-5: Rolling update
-
-In this exercise, you will deploy a version *v1* of your application.
-
-After performing a rolling update to version *v1.1*, you will roll back to *v1*.
-
-## Deploy the version v1
-
-Complete the provided `deployment-v1.0.yaml` file to indicate the deployment strategy:
-```
-strategy:
-  type: RollingUpdate
-  rollingUpdate:
-    maxSurge: 1
-    maxUnavailable: 1
-```
-
-Then create the deployment:
-```sh
-kubectl apply -f deployment-v1.0.yaml
-```
-
-## Ensure everything is fine
-
-```sh
-kubectl get deployments
-kubectl describe deployments kdemo-dep
-kubectl get pods -o wide
-```
-
-## Create a service to externally expose the deployment
-```sh
-kubectl expose deployment kdemo-dep \
---type=NodePort \
---name=kdemo-svc \
---port=80 \
---target-port=8080
-```
-or
-```
-kubectl create -f service.yaml
-```
-
-Retrieve the external IP/port:
-```sh
-kubectl get node -o wide
-kubectl describe svc kdemo-svc
-```
-
-Then display the service in your web browser.
-
-## Deploy the new version of the site
-```sh
-kubectl apply -f deployment-v1.1.yaml & watch kubectl get pods -o wide
-```
-
-Verify you get a new version of the website in your browser.
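-
-To see exactly what the rolling update did — a minimal check, assuming the deployment name used above:
-```sh
-# Follow the rollout until it completes, then list the recorded revisions:
-kubectl rollout status deployment kdemo-dep
-kubectl rollout history deployment kdemo-dep
-```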
-
-## Roll back to v1.0
-```sh
-kubectl rollout undo deployment kdemo-dep
-
-kubectl rollout status deployment kdemo-dep
-```
-## Clean
-
-```sh
-kubectl delete services kdemo-svc
-kubectl delete -f deployment-v1.1.yaml
-```
diff --git a/solutions/Day-3/deployment_strategies/exercise-5/deployment-v1.0.yaml b/solutions/Day-3/deployment_strategies/exercise-5/deployment-v1.0.yaml
deleted file mode 100644
index c915a8c..0000000
--- a/solutions/Day-3/deployment_strategies/exercise-5/deployment-v1.0.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-kind: Deployment
-apiVersion: apps/v1
-metadata:
-  name: kdemo-dep
-spec:
-  replicas: 3
-  selector:
-    matchLabels:
-      name: kdemo
-  strategy:
-    type: RollingUpdate
-    rollingUpdate:
-      maxSurge: 1
-      maxUnavailable: 1
-  minReadySeconds: 20
-  template:
-    metadata:
-      labels:
-        name: kdemo
-    spec:
-      containers:
-      - name: kdemo
-        image: apprenda/kdemo:v1
-        resources:
-          requests:
-            cpu: 100m
-            memory: 100Mi
-        ports:
-        - containerPort: 8080
-      terminationGracePeriodSeconds: 1
diff --git a/solutions/Day-3/deployment_strategies/exercise-5/deployment-v1.1.yaml b/solutions/Day-3/deployment_strategies/exercise-5/deployment-v1.1.yaml
deleted file mode 100644
index 1962c34..0000000
--- a/solutions/Day-3/deployment_strategies/exercise-5/deployment-v1.1.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-kind: Deployment
-apiVersion: apps/v1
-metadata:
-  name: kdemo-dep
-spec:
-  replicas: 3
-  selector:
-    matchLabels:
-      name: kdemo
-  strategy:
-    rollingUpdate:
-      maxSurge: 1
-      maxUnavailable: 1
-    type: RollingUpdate
-  minReadySeconds: 20
-  template:
-    metadata:
-      labels:
-        name: kdemo
-    spec:
-      containers:
-      - name: kdemo
-        image: apprenda/kdemo:v2
-        resources:
-          requests:
-            cpu: 100m
-            memory: 100Mi
-        ports:
-        - containerPort: 8080
-      terminationGracePeriodSeconds: 1
diff --git a/solutions/Day-3/troubleshooting/README.md b/solutions/Day-3/troubleshooting/README.md
deleted file mode 100644
index 9dc0b80..0000000
--- a/solutions/Day-3/troubleshooting/README.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# List of errors
-
-Look for `# HERE` inside [k8s-troubleshooting.yml](../../../kubernetes-ressources/terraform/k8s-troubleshooting.yml):
-* invalid selector in the wordpress service
-* invalid env var MARIA_DB_HOST in the wordpress deployment
-* invalid registry name for the mariadb deployment: should be `docker.io/bitnami/mariadb:10.5.15-debian-10-r40`
-* invalid readiness probe for mariadb
-
-# Strategy
-
-At first glance, some pods are in undesired states: `ImagePullBackOff` and `CrashLoopBackOff`.
-A valid state is to get all pods `Running` and `Ready`.
-
-## Let's start with the database
-
-1. First start by fixing the `ImagePullBackOff` for the mariadb pods.
-   * fix the value `docker.io/bitnami/mariadb:10.5.15-debian-10-r40` in the statefulset, then delete `pod/wordpress-mariadb-0` to force a new creation, as a pod managed by a statefulset is not automatically recreated.
-2. Now, you see `pod/wordpress-mariadb-0` never becomes ready. If you describe it, you understand the readiness probe fails because of an `exit 1`. Fix the probe in the statefulset, then delete the pod again to trigger a new creation.
-3. To ensure everything is fine for the DB, run `kubectl describe service/wordpress-mariadb -n application` to check the endpoints.
-
-## Let's start with the wordpress container
-
-1. If you describe the `wordpress` pod, you see its readiness probe fails. Looking at the logs, you see a Warning related to a `WRONG_DNS_ENTRY`. This value is in the container environment variable that defines the `MARIADB_HOST`. 
Fix the value to use `wordpress-mariadb` in the `wordpress` deployment. Wait for a new pod to be created, or delete the old pod.
-
-## Fix the wordpress service
-
-1. If you describe the services, you see the `wordpress` service has no endpoints. This is due to an invalid selector. Fix the `wordpress` service to use the following selector:
-```yaml
-  selector:
-    app.kubernetes.io/name: wordpress
-    app.kubernetes.io/instance: wordpress
-```
-
-## Test the wordpress app
-
-Access the service via the NodePort service named `wordpress`.
\ No newline at end of file
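-
-A quick way to confirm the fix worked — a sketch, assuming the `application` namespace used above:
-```sh
-# The service should now list pod endpoints instead of <none>:
-kubectl get endpoints wordpress -n application
-# Retrieve the NodePort to test from outside the cluster:
-kubectl get svc wordpress -n application -o jsonpath='{.spec.ports[0].nodePort}'
-```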