From fa0df728ee5e7c89d7be0f2fd5389221cc68883e Mon Sep 17 00:00:00 2001
From: Nathaniel
Date: Thu, 21 Apr 2022 14:03:24 -0400
Subject: [PATCH 1/8] Initial commit and README

---
 charts/k8s-job/README.md | 1174 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 1174 insertions(+)
 create mode 100644 charts/k8s-job/README.md

diff --git a/charts/k8s-job/README.md b/charts/k8s-job/README.md
new file mode 100644
index 00000000..e14a1ff5
--- /dev/null
+++ b/charts/k8s-job/README.md
@@ -0,0 +1,1174 @@
+# Kubernetes Job Helm Chart
+
+This Helm Chart can be used to deploy your job container under a
+[Job](https://kubernetes.io/docs/concepts/workloads/controllers/job/) resource onto your Kubernetes
+cluster. You can use this Helm Chart to run and deploy a one-time job or periodic task such as a security scanner application or data science pipeline job.
+
+
+## How to use this chart?
+
+* See the [root README](/README.adoc) for general instructions on using Gruntwork Helm Charts.
+* See the [examples](/examples) folder for example usage.
+* See the provided [values.yaml](./values.yaml) file for the required and optional configuration values that you can set
+  on this chart.
+
+back to [root README](/README.adoc#core-concepts)
+
+## What resources does this Helm Chart deploy?
+
+The following resources will be deployed with this Helm Chart, depending on which configuration values you use:
+
+- `Job`: A standalone `Job` running the image specified in the
+  `containerImage` input value.
+
+back to [root README](/README.adoc#core-concepts)
+
+## How do I deploy additional services not managed by the chart?
+
+You can create custom Kubernetes resources that are not directly managed by the chart within the `customResources`
+key. You provide each resource manifest directly as a value under `customResources.resources` and set
+`customResources.enabled` to `true`. For examples of custom resources, take a look at the examples in
+[test/fixtures/custom_resources_values.yaml](../../test/fixtures/custom_resources_values.yaml) and
+[test/fixtures/multiple_custom_resources_values.yaml](../../test/fixtures/multiple_custom_resources_values.yaml).
+
+back to [root README](/README.adoc#day-to-day-operations)
+
+## How do I expose my application internally to the cluster?
+
+In general, `Pods` are considered ephemeral in Kubernetes. `Pods` can come and go at any point in time, either because
+containers fail or the underlying instances crash. In either case, the dynamic nature of `Pods` makes it difficult to
+consistently access your application if you are individually addressing the `Pods` directly.
+
+Traditionally, this is solved using service discovery, where you have a stateful system that the `Pods` would register
+to when they are available. Then, your other applications can query the system to find all the available `Pods` and
+access one of them.
+
+Kubernetes provides a built-in mechanism for service discovery in the `Service` resource. `Services` are an abstraction
+that groups a set of `Pods` behind a consistent, stable endpoint to address them. By creating a `Service` resource, you
+can provide a single endpoint to other applications to connect to the `Pods` behind the `Service`, and not worry about
+the dynamic nature of the `Pods`.
+
+You can read a more detailed description of `Services` in [the official
+documentation](https://kubernetes.io/docs/concepts/services-networking/service/). Here we will cover just enough to
+understand how to access your app.
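+For orientation, a minimal `Service` manifest looks roughly like the following. This is an illustrative sketch rather
+than the exact manifest rendered by this chart, and the name, labels, and ports shown here are hypothetical:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: edge-service-nginx            # illustrative name
+spec:
+  selector:
+    # Traffic is routed to any Pod carrying these labels
+    app.kubernetes.io/name: nginx
+    app.kubernetes.io/instance: edge-service
+  ports:
+    - name: app
+      port: 80        # the stable port exposed by the Service
+      targetPort: 80  # the port the container listens on
+```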
+ +By default, this Helm Chart will deploy your application container in a `Pod` that exposes ports 80. These will +be exposed to the Kubernetes cluster behind the `Service` resource, which exposes port 80. You can modify this behavior +by overriding the `containerPorts` input value and the `service` input value. See the corresponding section in the +`values.yaml` file for more details. + +Once the `Service` is created, you can check what endpoint the `Service` provides by querying Kubernetes using +`kubectl`. First, retrieve the `Service` name that is outputted in the install summary when you first install the Helm +Chart. If you forget, you can get the same information at a later point using `helm status`. For example, if you had +previously installed this chart under the name `edge-service`, you can run the following command to see the created +resources: + +```bash +$ helm status edge-service +LAST DEPLOYED: Fri Feb 8 16:25:49 2019 +NAMESPACE: default +STATUS: DEPLOYED + +RESOURCES: +==> v1/Service +NAME AGE +edge-service-nginx 24m + +==> v1/Deployment +edge-service-nginx 24m + +==> v1/Pod(related) + +NAME READY STATUS RESTARTS AGE +edge-service-nginx-844c978df7-f5wc4 1/1 Running 0 24m +edge-service-nginx-844c978df7-mln26 1/1 Running 0 24m +edge-service-nginx-844c978df7-rdsr8 1/1 Running 0 24m +``` + +This will show you some metadata about the release, the deployed resources, and any notes provided by the Helm Chart. In +this example, the service name is `edge-service-nginx` so we will use that to query the `Service`: + +```bash +$ kubectl get service edge-service-nginx +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +edge-service-nginx ClusterIP 172.20.186.176 80/TCP 27m +``` + +Here you can see basic information about the `Service`. The important piece of information is the `CLUSTER-IP` and +`PORT` fields, which tell you the available endpoint for the `Service`, and any exposed ports. Given that, any `Pod` in +your Kubernetes cluster can access the `Pods` of this application by hitting `{CLUSTER-IP}:{PORT}`. So for this example, +that will be `172.20.186.176:80`. + +But what if you want to automatically find a `Service` by name? The name of the `Service` created by this Helm Chart is +always `{RELEASE_NAME}-{applicationName}`, where `applicationName` is provided in the input value and `RELEASE_NAME` is +set when you install the Helm Chart. This means that the name is predictable, while the allocated IP address may not be. + +To address the `Service` by name, Kubernetes provides two ways: + +- environment variables +- DNS + +### Addressing Service by Environment Variables + +For each active `Service` that a `Pod` has access to, Kubernetes will automatically set a set of environment variables +in the container. These are `{SVCNAME}_SERVICE_HOST` and `{SVCNAME}_SERVICE_PORT` to get the host address (ip address) +and port respectively, where `SVCNAME` is the name of the `Service`. Note that `SVCNAME` will be the all caps version +with underscores of the `Service` name. + +Using the previous example where we installed this chart with a release name `edge-service` and `applicationName` +`nginx`, we get the `Service` name `edge-service-nginx`. Kubernetes will expose the following environment variables to +all containers that can access the `Service`: + +``` +EDGE_SERVICE_NGINX_SERVICE_HOST=172.20.186.176 +EDGE_SERVICE_NGINX_SERVICE_PORT=80 +``` + +Note that environment variables are set when the container first boots up. 
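+As a sketch of how these variables might be consumed, another container in the cluster could simply interpolate them in
+a shell command (the variable names below come from the hypothetical `edge-service-nginx` example above):
+
+```bash
+# Query the Service using the environment variables injected by Kubernetes
+curl "http://${EDGE_SERVICE_NGINX_SERVICE_HOST}:${EDGE_SERVICE_NGINX_SERVICE_PORT}/"
+```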
This means that if you already had `Pods` +deployed in your system before the `Service` was created, you will have to cycle the `Pods` in order to get the +environment variables. If you wish to avoid ordering issues, you can use the DNS method to address the `Service` +instead, if that is available. + +### Addressing Service by DNS + +If your Kubernetes cluster is deployed with the DNS add-on (this is automatically installed for EKS and GKE), then you +can rely on DNS to address your `Service`. Every `Service` in Kubernetes will register the domain +`{SVCNAME}.{NAMESPACE}.svc.cluster.local` to the DNS service of the cluster. This means that all your `Pods` in the +cluster can get the `Service` host by hitting that domain. + +The `NAMESPACE` in the domain refers to the `Namespace` where the `Service` was created. By default, all resources are +created in the `default` namespace. This is configurable at install time of the Helm Chart using the `--namespace` +option. + +In our example, we deployed the chart to the `default` `Namespace`, and the `Service` name is `edge-service-nginx`. So in +this case, the domain of the `Service` will be `edge-service-nginx.default.svc.cluster.local`. When any `Pod` addresses +that domain, it will get the address `172.20.186.176`. + +Note that DNS does not resolve ports, so in this case, you will have to know which port the `Service` uses. So in your +`Pod`, you will have to know that the `Service` exposes port `80` when you address it in your code for the container as +`edge-service-nginx.default.svc.cluster.local:80`. However, like the `Service` name, this should be predictable since it +is specified in the Helm Chart input value. + +back to [root README](/README.adoc#day-to-day-operations) + +## How do I expose my application externally, outside of the cluster? + +Similar to the previous section ([How do I expose my application internally to the +cluster?](#how-do-i-expose-my-application-internally-to-the-cluster), you can use a `Service` resource to expose your +application externally. The primary service type that facilitates external access is the `NodePort` `Service` type. + +The `NodePort` `Service` type will expose the `Service` by binding an available port on the network interface of the +physical machines running the `Pod`. This is different from a network interface internal to Kubernetes, which is only +accessible within the cluster. Since the port is on the host machine network interface, you can access the `Service` by +hitting that port on the node. + +For example, suppose you had a 2 node Kubernetes cluster deployed on EC2. Suppose further that all your EC2 instances +have public IP addresses that you can access. For the sake of this example, we will assign random IP addresses to the +instances: + +- 54.219.117.250 +- 38.110.235.198 + +Now let's assume you deployed this helm chart using the `NodePort` `Service` type. You can do this by setting the +`service.type` input value to `NodePort`: + +```yaml +service: + enabled: true + type: NodePort + ports: + app: + port: 80 + targetPort: 80 + protocol: TCP +``` + +When you install this helm chart with this input config, helm will deploy the `Service` as a `NodePort`, binding an +available port on the host machine to access the `Service`. 
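+For instance, assuming the snippet above is saved in `values.yaml` and you are using the same release and chart names
+as the other examples in this README, the install command would look roughly like this:
+
+```bash
+$ helm install -f values.yaml --name edge-service gruntwork/k8s-service
+```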
You can confirm this by querying the `Service` using +`kubectl`: + +```bash +$ kubectl get service edge-service-nginx +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +edge-service-nginx NodePort 10.99.244.96 80:31035/TCP 33s +``` + +In this example, you can see that the `Service` type is `NodePort` as expected. Additionally, you can see that the there +is a port binding between port 80 and 31035. This port binding refers to the binding between the `Service` port (80 in +this case) and the host port (31035 in this case). + +One thing to be aware of about `NodePorts` is that the port binding will exist on all nodes in the cluster. This means +that, in our 2 node example, both nodes now have a port binding of 31035 on the host network interface that routes to +the `Service`, regardless of whether or not the node is running the `Pods` backing the `Service` endpoint. This means +that you can reach the `Service` on both of the following endpoints: + +- `54.219.117.250:31035` +- `38.110.235.198:31035` + +This means that no two `Service` can share the same `NodePort`, as the port binding is shared across the cluster. +Additionally, if you happen to hit a node that is not running a `Pod` backing the `Service`, Kubernetes will +automatically hop to one that is. + +You might use the `NodePort` if you do not wish to manage load balancers through Kubernetes, or if you are running +Kubernetes on prem where you do not have native support for managed load balancers. + +To summarize: + +- `NodePort` is the simplest way to expose your `Service` to externally to the cluster. +- You have a limit on the number of `NodePort` `Services` you can have in your cluster, imposed by the number of open ports + available on your host machines. +- You have potentially inefficient hopping if you happen to route to a node that is not running the `Pod` backing the + `Service`. + +Additionally, Kubernetes provides two mechanisms to manage an external load balancer that routes to the `NodePort` for +you. The two ways are: + +- [Using a `LoadBalancer` `Service` type](#loadbalancer-service-type) +- [Using `Ingress` resources with an `Ingress Controller`](#ingress-and-ingress-controllers) + +### LoadBalancer Service Type + +The `LoadBalancer` `Service` type will expose the `Service` by allocating a managed load balancer in the cloud that is +hosting the Kubernetes cluster. On AWS, this will be an ELB, while on GCP, this will be a Cloud Load Balancer. When the +`LoadBalancer` `Service` is created, Kubernetes will automatically create the underlying load balancer resource in the +cloud for you, and create all the target groups so that they route to the `Pods` backing the `Service`. + +You can deploy this helm chart using the `LoadBalancer` `Service` type by setting the `service.type` input value to +`LoadBalancer`: + +```yaml +service: + enabled: true + type: LoadBalancer + ports: + app: + port: 80 + targetPort: 80 + protocol: TCP +``` + +When you install this helm chart with this input config, helm will deploy the `Service` as a `LoadBalancer`, allocating +a managed load balancer in the cloud hosting your Kubernetes cluster. You can get the attached load balancer by querying +the `Service` using `kubectl`. In this example, we will assume we are using EKS: + +``` +$ kubectl get service edge-service-nginx +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +edge-service-nginx LoadBalancer 172.20.7.35 a02fef4d02e41... 80:32127/TCP 1m +``` + +Now, in this example, we have an entry in the `EXTERNAL-IP` field. 
This is truncated here, but you can get the actual +output when you describe the service: + +``` +$ kubectl describe service edge-service-nginx +Name: edge-service-nginx +Namespace: default +Labels: app.kubernetes.io/instance=edge-service + app.kubernetes.io/managed-by=helm + app.kubernetes.io/name=nginx + gruntwork.io/app-name=nginx + helm.sh/chart=k8s-service-0.1.0 +Annotations: +Selector: app.kubernetes.io/instance=edge-service,app.kubernetes.io/name=nginx,gruntwork.io/app-name=nginx +Type: LoadBalancer +IP: 172.20.7.35 +LoadBalancer Ingress: a02fef4d02e4111e9891806271fc7470-173030870.us-west-2.elb.amazonaws.com +Port: app 80/TCP +TargetPort: 80/TCP +NodePort: app 32127/TCP +Endpoints: 10.0.3.19:80 +Session Affinity: None +External Traffic Policy: Cluster +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal EnsuringLoadBalancer 2m service-controller Ensuring load balancer + Normal EnsuredLoadBalancer 2m service-controller Ensured load balancer +``` + +In the describe output, there is a field named `LoadBalancer Ingress`. When you have a `LoadBalancer` `Service` type, +this field contains the public DNS endpoint of the associated load balancer resource in the cloud provider. In this +case, we have an AWS ELB instance, so this endpoint is the public endpoint of the associated ELB resource. + +**Note:** Eagle eyed readers might also notice that there is an associated `NodePort` on the resource. This is because under the +hood, `LoadBalancer` `Services` utilize `NodePorts` to handle the connection between the managed load balancer of the +cloud provider and the Kubernetes `Pods`. This is because at this time, there is no portable way to ensure that the +network between the cloud load balancers and Kubernetes can be shared such that the load balancers can route to the +internal network of the Kubernetes cluster. Therefore, Kubernetes resorts to using `NodePort` as an abstraction layer to +connect the `LoadBalancer` to the `Pods` backing the `Service`. This means that `LoadBalancer` `Services` share the same +drawbacks as using a `NodePort` `Service`. + +To summarize: + +- `LoadBalancer` provides a way to set up a cloud load balancer resource that routes to the provisioned `NodePort` on + each node in your Kubernetes cluster. +- `LoadBalancer` can be used to provide a persistent endpoint that is robust to the ephemeral nature of nodes in your + cluster. E.g it is able to route to live nodes in the face of node failures. +- `LoadBalancer` does not support weighted balancing. This means that you cannot balance the traffic so that it prefers + nodes that have more instances of the `Pod` running. +- Note that under the hood, `LoadBalancer` utilizes a `NodePort` `Service`, and thus shares the same limits as `NodePort`. + +### Ingress and Ingress Controllers + +`Ingress` is a mechanism in Kubernetes that abstracts externally exposing a `Service` from the `Service` config itself. +`Ingress` resources support: + +- assigning an externally accessible URL to a `Service` +- perform hostname and path based routing of `Services` +- load balance traffic using customizable balancing rules +- terminate SSL + +You can read more about `Ingress` resources in [the official +documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/). Here, we will cover the basics to +understand how `Ingress` can be used to externally expose the `Service`. + +At a high level, the `Ingress` resource is used to specify the configuration for a particular `Service`. 
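+As a rough sketch, a standalone `Ingress` resource for the example `Service` used throughout this README might look
+like the following (the API version and fields depend on your Kubernetes version, and the host and path are
+hypothetical):
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: edge-service-nginx
+spec:
+  rules:
+    - host: app.yourco.com
+      http:
+        paths:
+          - path: /app
+            pathType: Prefix
+            backend:
+              service:
+                name: edge-service-nginx   # route matching traffic to this Service
+                port:
+                  number: 80
+```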
In turn, the +`Ingress Controller` is responsible for fulfilling those configurations in the cluster. This means that the first +decision to make in using `Ingress` resources, is selecting an appropriate `Ingress Controller` for your cluster. + +#### Choosing an Ingress Controller + +Before you can use an `Ingress` resource, you must install an `Ingress Controller` in your Kubernetes cluster. There are +many kinds of `Ingress Controllers` available, each with different properties. You can see [a few examples listed in the +official documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-controllers). + +When you use an external cloud `Ingress Controller` such as the [GCE Ingress +Controller](https://github.com/kubernetes/ingress-gce/blob/master/README.md) or [AWS ALB Ingress +Controller](https://github.com/kubernetes-sigs/aws-alb-ingress-controller), Kubernetes will allocate an externally +addressable load balancer (for GCE this will be a Cloud Load Balancer and for AWS this will be an ALB) that fulfills the +`Ingress` rules. This includes routing the domain names and paths to the right `Service` as configured by the `Ingress` +rules. Additionally, Kubernetes will manage the target groups of the load balancer so that they are up to date with +the latest `Ingress` configuration. However, in order for this to work, there needs to be some way for the load balancer +to connect to the `Pods` servicing the `Service`. Since the `Pods` are internal to the Kubernetes network and the load +balancers are external to the network, there must be a `NodePort` that links the two together. As such, like the +`LoadBalancer` `Service` type, these `Ingress Controllers` also require a `NodePort` under the hood. + + + +Alternatively, you can use an internal `Ingress Controller` that runs within Kubernetes as `Pods`. For example, the +official `nginx Ingress Controller` will launch `nginx` as `Pods` within your Kubernetes cluster. These `nginx` `Pods` +are then configured using `Ingress` resources, which then allows `nginx` to route to the right `Pods`. Since the `nginx` +`Pods` are internal to the Kubernetes network, there is no need for your `Services` to be `NodePorts` as they are +addressable within the network by the `Pods`. However, this means that you need some other mechanism to expose `nginx` +to the outside world, which will require a `NodePort`. The advantage of this approach, despite still requiring a +`NodePort`, is that you can have a single `NodePort` that routes to multiple services using hostnames or paths as +managed by `nginx`, as opposed to requiring a `NodePort` per `Service` you wish to expose. + +Which `Ingress Controller` type you wish to use depends on your infrastructure needs. If you have relatively few +`Services`, and you want the simplicity of a managed cloud load balancer experience, you might opt for the external +`Ingress Controllers` such as GCE and AWS ALB controllers. On the other hand, if you have thousands of micro services +that push you to the limits of the available number of ports on a host machine, you might opt for an internal `Ingress +Controller` approach. Whichever approach you decide, be sure to document your decision where you install the particular +`Ingress Controller` so that others in your team know and understand the tradeoffs you made. 
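+As one example of the internal approach, the community `nginx` `Ingress Controller` ships with its own Helm chart, and
+installing it typically looks something like the commands below (repository URL, chart name, and flags may vary with
+your Helm version and environment, so treat this as a sketch):
+
+```bash
+$ helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
+$ helm repo update
+$ helm install ingress-nginx ingress-nginx/ingress-nginx
+```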
+ +#### Configuring Ingress for your Service + +Once you have an `Ingress Controller` installed and configured on your Kuberentes cluster, you can now start creating +`Ingress` resources to add routes to it. This helm chart supports configuring an `Ingress` resource to complement the +`Service` resource that is created in the chart. + +To add an `Ingress` resource, first make sure you have a `Service` enabled on the chart. Depending on the chosen +`Ingress Controller`, the `Service` type should be `NodePort` or `ClusterIP`. Here, we will create a `NodePort` +`Service` exposing port 80: + +```yaml +service: + enabled: true + type: NodePort + ports: + app: + port: 80 + targetPort: 80 + protocol: TCP +``` + +Then, we will add the configuration for the `Ingress` resource by specifying the `ingress` input value. For this +example, we will assume that we want to route `/app` to our `Service`, with the domain hosted on `app.yourco.com`: + +```yaml +ingress: + enabled: true + path: /app + servicePort: 80 + hosts: + - app.yourco.com +``` + +This will configure the load balancer backing the `Ingress Controller` that will route any traffic with host and path +prefix `app.yourco.com/app` to the `Service` on port 80. If `app.yourco.com` is configured to point to the `Ingress +Controller` load balancer, then once you deploy the helm chart you should be able to start accessing your app on that +endpoint. + +#### Registering additional paths + +Sometimes you might want to add additional path rules beyond the main service rule that is injected to the `Ingress` +resource. For example, you might want a path that routes to the sidecar containers, or you might want to reuse a single +`Ingress` for multiple different `Service` endpoints because to share load balancers. For these situations, you can use +the `additionalPaths` and `additionalPathsHigherPriority` input values. + +Consider the following `Service`, where we have the `app` served on port 80, and the `sidecarMonitor` served on port +3000: + +```yaml +service: + enabled: true + type: NodePort + ports: + app: + port: 80 + targetPort: 80 + protocol: TCP + sidecarMonitor: + port: 3000 + targetPort: 3000 + protocol: TCP +``` + +To route `/app` to the `app` service endpoint and `/sidecar` to the `sidecarMonitor` service endpoint, we will configure +the `app` service path rules as the main service route and the `sidecarMonitor` as an additional path rule: + +```yaml +ingress: + enabled: true + path: /app + servicePort: 80 + additionalPaths: + - path: /sidecar + servicePort: 3000 +``` + +Now suppose you had a sidecar service that will return a fixed response indicating server maintainance and you want to +temporarily route all requests to that endpoint without taking down the pod. You can do this by creating a route that +catches all paths as a higher priority path using the `additionalPathsHigherPriority` input value. 
+ +Consider the following `Service`, where we have the `app` served on port 80, and the `sidecarFixedResponse` served on +port 3000: + +```yaml +service: + enabled: true + type: NodePort + ports: + app: + port: 80 + targetPort: 80 + protocol: TCP + sidecarFixedResponse: + port: 3000 + targetPort: 3000 + protocol: TCP +``` + +To route all traffic to the fixed response port: + +```yaml +ingress: + enabled: true + path: /app + servicePort: 80 + additionalPathsHigherPriority: + - path: /* + servicePort: 3000 +``` + +The `/*` rule which routes to port 3000 will always be used even when accessing the path `/app` because it will be +evaluated first when routing requests. + +back to [root README](/README.adoc#day-to-day-operations) + +### How do I expose additional ports? + +By default, this Helm Chart will deploy your application container in a Pod that exposes ports 80. Sometimes you might +want to expose additional ports in your application - for example a separate port for Prometheus metrics. You can expose +additional ports for your application by overriding `containerPorts` and `service` input values: + +```yaml + +containerPorts: + http: + port: 80 + protocol: TCP + prometheus: + port: 2020 + protocol: TCP + +service: + enabled: true + type: NodePort + ports: + app: + port: 80 + targetPort: 80 + protocol: TCP + prometheus: + port: 2020 + targetPort: 2020 + protocol: TCP + +``` + + +## How do I deploy a worker service? + +Worker services typically do not have a RPC or web server interface to access it. Instead, worker services act on their +own and typically reach out to get the data they need. These services should be deployed without any ports exposed. +However, by default `k8s-service` will deploy an internally exposed service with port 80 open. + +To disable the default port, you can use the following `values.yaml` inputs: + +``` +containerPorts: + http: + disabled: true + +service: + enabled: false +``` + +This will override the default settings such that only the `Deployment` resource is created, with no ports exposed on +the container. + +back to [root README](/README.adoc#day-to-day-operations) + +## How do I check the status of the rollout? + +This Helm Chart packages your application into a `Deployment` controller. The `Deployment` controller will be +responsible with managing the `Pods` of your application, ensuring that the Kubernetes cluster matches the desired state +configured by the chart inputs. + +When the Helm Chart installs, `helm` will mark the installation as successful when the resources are created. Under the +hood, the `Deployment` controller will do the work towards ensuring the desired number of `Pods` are up and running. + +For example, suppose you set the `replicaCount` variable to 3 when installing this chart. This will configure the +`Deployment` resource to maintain 3 replicas of the `Pod` at any given time, launching new ones if there is a deficit or +removing old ones if there is a surplus. + +To see the current status of the `Deployment`, you can query Kubernetes using `kubectl`. The `Deployment` resource of +the chart are labeled with the `applicationName` input value and the release name provided by helm. 
So for example, +suppose you deployed this chart using the following `values.yaml` file and command: + +```yaml +applicationName: nginx +containerImage: + repository: nginx + tag: stable +``` + +```bash +$ helm install -n edge-service gruntwork/k8s-service +``` + +In this example, the `applicationName` is set to `nginx`, while the release name is set to `edge-service`. This chart +will then install a `Deployment` resource in the default `Namespace` with the following labels that uniquely identifies +it: + +``` +app.kubernetes.io/name: nginx +app.kubernetes.io/instance: edge-service +``` + +So now you can query Kubernetes for that `Deployment` resource using these labels to see the state: + +```bash +$ kubectl get deployments -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=edge-service" +NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE +edge-service-nginx 3 3 3 1 24s +``` + +This includes a few useful information: + +- `DESIRED` lists the number of `Pods` that should be running in your cluster. +- `CURRENT` lists how many `Pods` are currently created in the cluster. +- `UP-TO-DATE` lists how many `Pods` are running the desired image. +- `AVAILABLE` lists how many `Pods` are currently ready to serve traffic, as defined by the `readinessProbe`. + +When all the numbers are in sync and equal, that means the `Deployment` was rolled out successfully and all the `Pods` +are passing the readiness healthchecks. + +In the example output above, note how the `Available` count is `1`, but the others are `3`. This means that all 3 `Pods` +were successfully created with the latest image, but only `1` of them successfully came up. You can dig deeper into the +individual `Pods` to check the status of the unavailable `Pods`. The `Pods` are labeled the same way, so you can pass in +the same label query to get the `Pods` managed by the deployment: + +```bash +$ kubectl get pods -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=edge-service" +NAME READY STATUS RESTARTS AGE +edge-service-nginx-844c978df7-f5wc4 1/1 Running 0 52s +edge-service-nginx-844c978df7-mln26 0/1 Pending 0 52s +edge-service-nginx-844c978df7-rdsr8 0/1 Pending 0 52s +``` + +This will show you the status of each individual `Pod` in your deployment. In this example output, there are 2 `Pods` +that are in the `Pending` status, meaning that they have not been scheduled yet. We can look into why the `Pod` failed +to schedule by getting detailed information about the `Pod` with the `describe` command. 
Unlike `get pods`, `describe +pod` requires a single `Pod` so we will grab the name of one of the failing `Pods` above and feed it to `describe pod`: + +```bash +$ kubectl describe pod edge-service-nginx-844c978df7-mln26 +Name: edge-service-nginx-844c978df7-mln26 +Namespace: default +Priority: 0 +PriorityClassName: +Node: +Labels: app.kubernetes.io/instance=edge-service + app.kubernetes.io/name=nginx + gruntwork.io/app-name=nginx + pod-template-hash=4007534893 +Annotations: +Status: Pending +IP: +Controlled By: ReplicaSet/edge-service-nginx-844c978df7 +Containers: + nginx: + Image: nginx:stable + Ports: 80/TCP + Host Ports: 0/TCP + Environment: + Mounts: + /var/run/secrets/kubernetes.io/serviceaccount from default-token-mgkr9 (ro) +Conditions: + Type Status + PodScheduled False +Volumes: + default-token-mgkr9: + Type: Secret (a volume populated by a Secret) + SecretName: default-token-mgkr9 + Optional: false +QoS Class: BestEffort +Node-Selectors: +Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s + node.kubernetes.io/unreachable:NoExecute for 300s +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning FailedScheduling 1m (x25 over 3m) default-scheduler 0/2 nodes are available: 2 Insufficient pods. +``` + +This will output detailed information about the `Pod`, including an event log. In this case, the roll out failed because +there is not enough capacity in the cluster to schedule the `Pod`. + +back to [root README](/README.adoc#day-to-day-operations) + +## How do I set and share configurations with the application? + +While you can bake most application configuration values into the application container, you might need to inject +dynamic configuration variables into the container. These are typically values that change depending on the environment, +such as the MySQL database endpoint. Additionally, you might also want a way to securely share secrets with the +container such that they are not hard coded in plain text in the container or in the Helm Chart values yaml file. To +support these use cases, this Helm Chart provides three ways to share configuration values with the application +container: + +- [Directly setting environment variables](#directly-setting-environment-variables) +- [Using ConfigMaps](#using-configmaps) +- [Using Secrets](#using-secrets) + +### Directly setting environment variables + +The simplest way to set a configuration value for the container is to set an environment variable for the container +runtime. These variables are set by Kubernetes before the container application is booted, which can then be looked up +using the standard OS lookup functions for environment variables. + +You can use the `envVars` input value to set an environment variable at deploy time. For example, the following entry in +a `values.yaml` file will set the `DB_HOST` environment variable to `mysql.default.svc.cluster.local` and the `DB_PORT` +environment variable to `3306`: + +```yaml +envVars: + DB_HOST: "mysql.default.svc.cluster.local" + DB_PORT: 3306 +``` + +One thing to be aware of when using environment variables is that they are set at start time of the container. This +means that updating the environment variables require restarting the containers so that they propagate. + +### Using ConfigMaps + +While environment variables are an easy way to inject configuration values, what if you want to share the configuration +across multiple deployments? 
If you wish to use the direct environment variables approach, you would have no choice but +to copy paste the values across each deployment. When this value needs to change, you are now faced with going through +each deployment and updating the reference. + +For this situation, `ConfigMaps` would be a better option. `ConfigMaps` help decouple configuration values from the +`Deployment` and `Pod` config, allowing you to share the values across the deployments. `ConfigMaps` are dedicated +resources in Kubernetes that store configuration values as key value pairs. + +For example, suppose you had a `ConfigMap` to store the database information. You might store the information as two key +value pairs: one for the host (`dbhost`) and one for the port (`dbport`). You can create a `ConfigMap` directly using +`kubectl`, or by using a resource file. + +To directly create the `ConfigMap`: + +``` +kubectl create configmap my-config --from-literal=dbhost=mysql.default.svc.cluster.local --from-literal=dbport=3306 +``` + +Alternatively, you can manage the `ConfigMap` as code using a kubernetes resource config: + +```yaml +# my-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: my-config +data: + dbhost: mysql.default.svc.cluster.local + dbport: 3306 +``` + +You can then apply this resource file using `kubectl`: + +``` +kubectl apply -f my-config.yaml +``` + +`kubectl` supports multiple ways to seed the `ConfigMap`. You can read all the different ways to create a `ConfigMap` in +[the official +documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#create-a-configmap). + +Once the `ConfigMap` is created, you can access the `ConfigMap` within the `Pod` by configuring the access during +deployment. This Helm Chart provides the `configMaps` input value to configure what `ConfigMaps` should be shared with +the application container. There are two ways to inject the `ConfigMap`: + +- [Accessing the `ConfigMap` as Environment Variables](#accessing-the-configmap-as-environment-variables) +- [Accessing the `ConfigMap` as Files](#accessing-the-configmap-as-files) + +**NOTE**: It is generally not recommended to use `ConfigMaps` to store sensitive data. For those use cases, use +`Secrets` or an external secret store. + +##### Accessing the ConfigMap as Environment Variables + +You can set the values of the `ConfigMap` as environment variables in the application container. To do so, you set the +`as` attribute of the `configMaps` input value to `environment`. For example, to share the `my-config` `ConfigMap` above +using the same environment variables as the example in [Directly setting environment +variables](#directly-settings-environment-variables), you would set the `configMaps` as follows: + +```yaml +configMaps: + my-config: + as: environment + items: + dbhost: + envVarName: DB_HOST + dbport: + envVarName: DB_PORT +``` + +In this configuration for the Helm Chart, we specify that we want to share the `my-config` `ConfigMap` as environment +variables with the main application container. Additionally, we want to map the `dbhost` config value to the `DB_HOST` +environment variable, and similarly map the `dbport` config value to the `DB_PORT` environment variable. + +Note that like directly setting environment variables, these are set at container start time, and thus the containers +need to be restarted when the `ConfigMap` is updated for the new values to be propagated. You can use files instead if +you wish the `ConfigMap` changes to propagate immediately. 
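+Under the hood, this style of injection maps to standard Kubernetes `configMapKeyRef` environment entries on the
+container spec, roughly like the sketch below (the exact output rendered by this chart may differ):
+
+```yaml
+env:
+  - name: DB_HOST
+    valueFrom:
+      configMapKeyRef:
+        name: my-config   # the ConfigMap created earlier
+        key: dbhost
+  - name: DB_PORT
+    valueFrom:
+      configMapKeyRef:
+        name: my-config
+        key: dbport
+```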
+ +##### Accessing the ConfigMap as Files + +You can mount the `ConfigMap` values as files on the container filesystem. To do so, you set the `as` attribute of the +`configMaps` input value to `volume`. + +For example, suppose you wanted to share the `my-config` `ConfigMap` above as the files `/etc/db/host` and +`/etc/db/port`. For this case, you would set the `configMaps` input value to: + +```yaml +configMaps: + my-config: + as: volume + mountPath: /etc/db + items: + dbhost: + filePath: host + dbport: + filePath: port +``` + +In the container, now the values for `dbhost` is stored as a text file at the path `/etc/db/host` and `dbport` is stored +at the path `/etc/db/port`. You can then read these files in in your application to get the values. + +Unlike environment variables, using files has the advantage of immediately reflecting changes to the `ConfigMap`. For +example, when you update `my-config`, the files at `/etc/db` are updated automatically with the new values, without +needing a redeployment to propagate the new values to the container. + +### Using Secrets + +In general, it is discouraged to store sensitive information such as passwords in `ConfigMaps`. Instead, Kubernetes +provides `Secrets` as an alternative resource to store sensitive data. Similar to `ConfigMaps`, `Secrets` are key value +pairs that store configuration values that can be managed independently of the `Pod` and containers. However, unlike +`ConfigMaps`, `Secrets` have the following properties: + +- A secret is only sent to a node if a pod on that node requires it. They are automatically garbage collected when there + are no more `Pods` referencing it on the node. +- A secret is stored in `tmpfs` on the node, so that it is only available in memory. +- Starting with Kubernetes 1.7, they can be encrypted at rest in `etcd` (note: this feature was in alpha state until + Kubernetes 1.13). + +You can read more about the protections and risks of using `Secrets` in [the official +documentation](https://kubernetes.io/docs/concepts/configuration/secret/#security-properties). + +Creating a `Secret` is very similar to creating a `ConfigMap`. For example, suppose you had a `Secret` to store the +database password. Like `ConfigMaps`, you can create a `Secret` directly using `kubectl`: + +``` +kubectl create secret generic my-secret --from-literal=password=1f2d1e2e67df +``` + +The `generic` keyword indicates the `Secret` type. Almost all use cases for your application should use this type. Other +types include `docker-registry` for specifying credentials for accessing a private docker registry, and `tls` for +specifying TLS certificates to access the Kubernetes API. + +You can also manage the `Secret` as code, although you may want to avoid this for `Secrets` to avoid leaking them in +unexpected locations (e.g source control). Unlike `ConfigMaps`, `Secrets` require values to be stored as base64 encoded +values when using resource files. So the configuration for the above example will be: + +```yaml +# my-secret.yaml +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: my-secret +data: + password: MWYyZDFlMmU2N2Rm +``` + +Note that `MWYyZDFlMmU2N2Rm` is the base 64 encoded version of `1f2d1e2e67df`. You can then apply this resource config +using `kubectl`: + +``` +kubectl apply -f my-secret.yaml +``` + +Similar to `ConfigMaps`, this Helm Chart supports two ways to inject `Secrets` into the application container: as +environment variables, or as files. 
The syntax to share the values is very similar to the `configMaps` input value, only +you use the `secrets` input value. The properties of each approach is very similar to `ConfigMaps`. Refer to [the +previous section](#using-configmaps) for more details on each approach. Here, we show you examples of the input values +to use for each approach. + +**Mounting secrets as environment variables**: In this example, we mount the `my-secret` `Secret` created above as the +environment variable `DB_PASSWORD`. + +```yaml +secrets: + my-secret: + as: environment + items: + password: + envVarName: DB_PASSWORD +``` + +**Mounting secrets as files**: In this example, we mount the `my-secret` `Secret` as the file `/etc/db/password`. + +```yaml +secrets: + my-secret: + as: volume + mountPath: /etc/db + items: + password: + filePath: password +``` + +**NOTE**: The volumes are different between `secrets` and `configMaps`. This means that if you use the same `mountPath` +for different secrets and config maps, you can end up with only one. It is undefined which `Secret` or `ConfigMap` ends +up getting mounted. To be safe, use a different `mountPath` for each one. + +**NOTE**: If you want mount the volumes created with `secrets` or `configMaps` on your init or sidecar containers, you will +have to append `-volume` to the volume name in . In the example above, the resulting volume will be `my-secret-volume`. + +```yaml +sideCarContainers: + sidecar: + image: sidecar/container:latest + volumeMounts: + - name: my-secret-volume + mountPath: /etc/db +``` + +### Which configuration method should I use? + +Which configuration method you should use depends on your needs. Here is a summary of the pro and con of each +approach: + +##### Directly setting environment variables + +**Pro**: + +- Simple setup +- Manage configuration values directly with application deployment config +- Most application languages support looking up environment variables + +**Con**: + +- Tightly couple configuration settings with application deployment +- Requires redeployment to update values +- Must store in plain text, and easy to leak into VCS + +**Best for**: + +- Iterating different configuration values during development +- Sotring non-sensitive values that are unique to each environment / deployment + +##### Using ConfigMaps + +**Pro**: + +- Keep config DRY by sharing a common set of configurations +- Independently update config values from the application deployment +- Automatically propagate new values when stored as files + +**Con**: + +- More overhead to manage the configuration +- Stored in plain text +- Available on all nodes automatically + +**Best for**: + +- Storing non-sensitive common configuration that are shared across environments +- Storing non-sensitive dynamic configuration values that change frequently + +##### Using Secrets + +**Pro**: + +- All the benefits of using `ConfigMaps` +- Can be encrypted at rest +- Opaque by default when viewing the values (harder to remember base 64 encoded version of "admin") +- Only available to nodes that use it, and only in memory + +**Con**: + +- All the challenges of using `ConfigMaps` +- Configured in plain text, making it difficult to manage as code securely +- Less safe than using dedicated secrets manager / store like HashiCorp Vault. + +**Best for**: + +- Storing sensitive configuration values + +back to [root README](/README.adoc#day-to-day-operations) + +## How do you update the application to a new version? 
+ +To update the application to a new version, you can upgrade the Helm Release using updated values. For example, suppose +you deployed `nginx` version 1.15.4 using this Helm Chart with the following values: + +```yaml +containerImage: + repository: nginx + tag: 1.15.4 + +applicationName: nginx +``` + +In this example, we will further assume that you deployed this chart with the above values using the release name +`edge-service`, using a command similar to below: + +```bash +$ helm install -f values.yaml --name edge-service gruntwork/k8s-service +``` + +Now let's try upgrading `nginx` to version 1.15.8. To do so, we will first update our values file: + +```yaml +containerImage: + repository: nginx + tag: 1.15.8 + +applicationName: nginx +``` + +The only difference here is the `tag` of the `containerImage`. + +Next, we will upgrade our release using the updated values. To do so, we will use the `helm upgrade` command: + +```bash +$ helm upgrade -f values.yaml edge-service gruntwork/k8s-service +``` + +This will update the created resources with the new values provided by the updated `values.yaml` file. For this example, +the only resource that will be updated is the `Deployment` resource, which will now have a new `Pod` spec that points to +`nginx:1.15.8` as opposed to `nginx:1.15.4`. This automatically triggers a rolling deployment internally to Kubernetes, +which will launch new `Pods` using the latest image, and shut down old `Pods` once those are ready. + +You can read more about how changes are rolled out on `Deployment` resources in [the official +documentation](https://kubernetes.io/docs/concepts/workloads/controllers/deployment). + +Note that certain changes will lead to a replacement of the `Deployment` resource. For example, updating the +`applicationName` will cause the `Deployment` resource to be deleted, and then created. This can lead to down time +because the resources are replaced in an uncontrolled fashion. + +## How do I create a canary deployment? + +You may optionally configure a [canary deployment](https://martinfowler.com/bliki/CanaryRelease.html) of an arbitrary tag that will run as an individual deployment behind your configured service. This is useful for ensuring a new application tag runs without issues prior to fully rolling it out. + +To configure a canary deployment, set `canary.enabled = true` and define the `containerImage` values. Typically, you will want to specify the tag of your next release candidate: + +```yaml +canary: + enabled: true + containerImage: + repository: nginx + tag: 1.15.9 +``` +Once deployed, your service will route traffic across both your stable and canary deployments, allowing you to monitor for and catch any issues early. + +back to [root README](/README.adoc#major-changes) + +## How do I verify my canary deployment? 
+ +Canary deployment pods have the same name as your stable deployment pods, with the additional `-canary` appended to the end, like so: + +```bash +$ kubectl get pods -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=edge-service" +NAME READY STATUS RESTARTS AGE +edge-service-nginx-844c978df7-f5wc4 1/1 Running 0 52s +edge-service-nginx-844c978df7-mln26 0/1 Pending 0 52s +edge-service-nginx-844c978df7-rdsr8 0/1 Pending 0 52s +edge-service-nginx-canary-844c978df7-bsr8 0/1 Pending 0 52s +``` + +Therefore, in this example, you could monitor your canary by running `kubectl logs -f edge-service-nginx-canary-844c978df7-bsr8` + +back to [root README](/README.adoc#day-to-day-operations) + +## How do I roll back a canary deployment? + +Update your values.yaml file, setting `canary.enabled = false` and then upgrade your helm installation: + +```bash +$ helm upgrade -f values.yaml edge-service gruntwork/k8s-service +``` +Following this update, Kubernetes will determine that your canary deployment is no longer desired and will delete it. + +back to [root README](/README.adoc#day-to-day-operations) + +## How do I ensure a minimum number of Pods are available across node maintenance? + +Sometimes, you may want to ensure that a specific number of `Pods` are always available during [voluntary +maintenance](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#voluntary-and-involuntary-disruptions). +This chart exposes an input value `minPodsAvailable` that can be used to specify a minimum number of `Pods` to maintain +during a voluntary maintenance activity. Under the hood, this chart will create a corresponding `PodDisruptionBudget` to +ensure that a certain number of `Pods` are up before attempting to terminate additional ones. + +You can read more about `PodDisruptionBudgets` in [our blog post covering the +topic](https://blog.gruntwork.io/avoiding-outages-in-your-kubernetes-cluster-using-poddisruptionbudgets-ef6a4baa5085) +and in [the official +documentation](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#how-disruption-budgets-work). + + +back to [root README](/README.adoc#major-changes) + +## Why does the Pod have a preStop hook with a Shutdown Delay? + +When a `Pod` is removed from a Kubernetes cluster, the control plane notifies all nodes to remove the `Pod` from +registered addresses. This includes removing the `Pod` from the list of available `Pods` to service a `Service` +endpoint. However, because Kubernetes is a distributed system, there is a delay between the shutdown sequence and the +`Pod` being removed from available addresses. As a result, the `Pod` could still get traffic despite it having already +been shutdown on the node it was running on. + +Since there is no way to guarantee that the deletion has propagated across the cluster, we address this eventual +consistency issue by adding an arbitrary delay between the `Pod` being deleted and the initiation of the `Pod` shutdown +sequence. This is accomplished by adding a `sleep` command in the `preStop` hook. + +You can control the length of time to delay with the `shutdownDelay` input value. You can also disable this behavior by +setting the `shutdownDelay` to 0. + +You can read more about this topic in [our blog post +"Delaying Shutdown to Wait for Pod Deletion +Propagation"](https://blog.gruntwork.io/delaying-shutdown-to-wait-for-pod-deletion-propagation-445f779a8304). + + +back to [root README](/README.adoc#day-to-day-operations) + +## What is a sidecar container? 
+ +In Kubernetes, `Pods` are one or more tightly coupled containers that are deployed together. The containers in the `Pod` +share, amongst other things, the network stack, the IPC namespace, and in some cases the PID namespace. You can read +more about the resources that the containers in a `Pod` share in [the official +documentation](https://kubernetes.io/docs/concepts/workloads/pods/pod/#what-is-a-pod). + +Sidecar Containers are additional containers that you wish to deploy in the `Pod` housing your application container. +This helm chart supports deploying these containers by configuring the `sideCarContainers` input value. This input value +is a map between the side car container name and the values of the container spec. The spec is rendered directly into +the `Deployment` resource, with the `name` being set to the key. For example: + +```yaml +sideCarContainers: + datadog: + image: datadog/agent:latest + env: + - name: DD_API_KEY + value: ASDF-1234 + - name: SD_BACKEND + value: docker + nginx: + image: nginx:1.15.4 +``` + +This input will be rendered in the `Deployment` resource as: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + ... Snipped for brevity ... +spec: + ... Snipped for brevity ... + template: + spec: + containers: + ... The first entry relates to the application ... + - name: datadog + image: datadog/agent:latest + env: + - name: DD_API_KEY + value: ASDF-1234 + - name: SD_BACKEND + value: docker + - name: nginx + image: nginx:1.15.4 +``` + +In this config, the side car containers are rendered as additional containers to deploy alongside the main application +container configured by the `containerImage`, `ports`, `livenessProbe`, etc input values. Note that the +`sideCarContainers` variable directly renders the spec, meaning that the additional values for the side cars such as +`livenessProbe` should be rendered directly within the `sideCarContainers` input value. + +back to [root README](/README.adoc#core-concepts) + +## How do I use a private registry? + +To pull container images from a private registry, the Kubernetes cluster needs to be able to authenticate to the docker +registry with a registry key. On managed Kubernetes clusters (e.g EKS, GKE, AKS), this is automated through the server +IAM roles that are assigned to the instance VMs. In most cases, if the instance VM IAM role has the permissions to +access the registry, the Kubernetes cluster will automatically be able to pull down images from the respective managed +registry (e.g ECR on EKS or GCR on GKE). + +Alternatively, you can specify docker registry keys in the Kubernetes cluster as `Secret` resources. This is helpful in +situations where you do not have the ability to assign registry access IAM roles to the node itself, or if you are +pulling images off of a different registry (e.g accessing GCR from EKS cluster). + +You can use `kubectl` to create a `Secret` in Kubernetes that can be used as a docker registry key: + +``` +kubectl create secret docker-registry NAME \ + --docker-server=DOCKER_REGISTRY_SERVER \ + --docker-username=DOCKER_USER \ + --docker-password=DOCKER_PASSWORD \ + --docker-email=DOCKER_EMAIL +``` + +This command will create a `Secret` resource named `NAME` that holds the specified docker registry credentials. 
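+For example, a hypothetical key for a private registry might be created like this, where the server, user, password,
+email, and secret name are all placeholders you would substitute with your own values:
+
+```bash
+$ kubectl create secret docker-registry my-registry-key \
+    --docker-server=registry.example.com \
+    --docker-username=my-user \
+    --docker-password='my-password' \
+    --docker-email=me@example.com
+```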
You can +then specify the cluster to use this `Secret` when pulling down images for the service `Deployment` in this chart by +using the `imagePullSecrets` input value: + +``` +imagePullSecrets: + - NAME +``` + +You can learn more about using private registries with Kubernetes in [the official +documentation](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry). + +back to [root README](/README.adoc#day-to-day-operations) From 3a18e5ac27a76402e0cda47c649846dab1040043 Mon Sep 17 00:00:00 2001 From: Nathaniel Date: Thu, 21 Apr 2022 16:52:45 -0400 Subject: [PATCH 2/8] Added Job Helm chart with template test, integration test, and example. NOT VALIDATED YET --- charts/k8s-job/.helmignore | 21 ++ charts/k8s-job/Chart.yaml | 11 + charts/k8s-job/linter_values.yaml | 42 +++ .../templates/_capabilities_helpers.tpl | 4 + charts/k8s-job/templates/_helpers.tpl | 73 +++++ charts/k8s-job/templates/_job_spec.tpl | 207 ++++++++++++++ charts/k8s-job/templates/job.yaml | 5 + charts/k8s-job/values.yaml | 153 ++++++++++ examples/k8s-job-busybox/README.md | 0 examples/k8s-job-busybox/values.yaml | 59 ++++ test/k8s_job_template_test.go | 265 ++++++++++++++++++ test/k8s_job_test.go | 90 ++++++ 12 files changed, 930 insertions(+) create mode 100644 charts/k8s-job/.helmignore create mode 100644 charts/k8s-job/Chart.yaml create mode 100644 charts/k8s-job/linter_values.yaml create mode 100644 charts/k8s-job/templates/_capabilities_helpers.tpl create mode 100644 charts/k8s-job/templates/_helpers.tpl create mode 100644 charts/k8s-job/templates/_job_spec.tpl create mode 100644 charts/k8s-job/templates/job.yaml create mode 100644 charts/k8s-job/values.yaml create mode 100644 examples/k8s-job-busybox/README.md create mode 100644 examples/k8s-job-busybox/values.yaml create mode 100644 test/k8s_job_template_test.go create mode 100644 test/k8s_job_test.go diff --git a/charts/k8s-job/.helmignore b/charts/k8s-job/.helmignore new file mode 100644 index 00000000..f0c13194 --- /dev/null +++ b/charts/k8s-job/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/k8s-job/Chart.yaml b/charts/k8s-job/Chart.yaml new file mode 100644 index 00000000..b6ec5ff0 --- /dev/null +++ b/charts/k8s-job/Chart.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +name: k8s-job +description: A Helm chart to package a job for Kubernetes +# This will be updated with the release tag in the CI/CD pipeline before publishing. This has to be a valid semver for +# the linter to accept. +version: 0.0.1-replace +home: https://github.com/gruntwork-io/helm-kubernetes-services +maintainers: + - name: Gruntwork + email: info@gruntwork.io + url: https://gruntwork.io diff --git a/charts/k8s-job/linter_values.yaml b/charts/k8s-job/linter_values.yaml new file mode 100644 index 00000000..f800b2d7 --- /dev/null +++ b/charts/k8s-job/linter_values.yaml @@ -0,0 +1,42 @@ +#---------------------------------------------------------------------------------------------------------------------- +# CHART PARAMETERS TO USE WITH HELM LINT +# This file declares a complete configuration value for this chart, with required values defined so that it can be used +# with helm lint to lint the chart. 
This should only specify the required values of the chart, and be combined with the +# default values of the chart. +# This is a YAML-formatted file. +#---------------------------------------------------------------------------------------------------------------------- + +#---------------------------------------------------------------------------------------------------------------------- +# REQUIRED VALUES +# These values are expected to be defined and passed in by the operator when deploying this helm chart. +#---------------------------------------------------------------------------------------------------------------------- + +# containerImage is a map that describes the container image that should be used to serve the application managed by +# this chart. +# The expected keys are: +# - repository (string) (required) : The container image repository that should be used. +# E.g `nginx` ; `gcr.io/kubernetes-helm/tiller` +# - tag (string) (required) : The tag of the image (e.g `latest`) that should be used. We recommend using a +# fixed tag or the SHA of the image. Avoid using the tags `latest`, `head`, +# `canary`, or other tags that are designed to be “floating”. +# - pullPolicy (string) : The image pull policy to employ. Determines when the image will be pulled in. See +# the official Kubernetes docs for more info. If undefined, this will default to +# `IfNotPresent`. +# +# The following example deploys the `nginx:stable` image with a `IfNotPresent` image pull policy, which indicates that +# the image should only be pulled if it has not been pulled previously. +# +# EXAMPLE: +# +# containerImage: +# repository: nginx +# tag: stable +# pullPolicy: IfNotPresent +containerImage: + repository: nginx + tag: stable + pullPolicy: IfNotPresent + +# applicationName is a string that names the application. This is used to label the pod and to name the main application +# container in the pod spec. The label is keyed under "gruntwork.io/app-name" +applicationName: "linter" diff --git a/charts/k8s-job/templates/_capabilities_helpers.tpl b/charts/k8s-job/templates/_capabilities_helpers.tpl new file mode 100644 index 00000000..33f0bafe --- /dev/null +++ b/charts/k8s-job/templates/_capabilities_helpers.tpl @@ -0,0 +1,4 @@ +{{/* Allow KubeVersion to be overridden. This is mostly used for testing purposes. */}} +{{- define "gruntwork.kubeVersion" -}} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersionOverride -}} +{{- end -}} diff --git a/charts/k8s-job/templates/_helpers.tpl b/charts/k8s-job/templates/_helpers.tpl new file mode 100644 index 00000000..adc013e8 --- /dev/null +++ b/charts/k8s-job/templates/_helpers.tpl @@ -0,0 +1,73 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "k8s-job.name" -}} + {{- .Values.applicationName | required "applicationName is required" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "k8s-job.fullname" -}} + {{- $name := required "applicationName is required" .Values.applicationName -}} + {{- if .Values.fullnameOverride -}} + {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} + {{- else if contains $name .Release.Name -}} + {{- .Release.Name | trunc 63 | trimSuffix "-" -}} + {{- else -}} + {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} + {{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "k8s-job.chart" -}} + {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Convert octal to decimal (e.g 644 => 420). For file permission modes, many people are more familiar with octal notation. +However, due to yaml/json limitations, all the Kubernetes resources require file modes to be reported in decimal. +*/}} +{{- define "k8s-job.fileModeOctalToDecimal" -}} + {{- $digits := splitList "" (toString .) -}} + + {{/* Make sure there are exactly 3 digits */}} + {{- if ne (len $digits) 3 -}} + {{- fail (printf "File mode octal expects exactly 3 digits: %s" .) -}} + {{- end -}} + + {{/* Go Templates do not support variable updating, so we simulate it using dictionaries */}} + {{- $accumulator := dict "res" 0 -}} + {{- range $idx, $digit := $digits -}} + {{- $digitI := atoi $digit -}} + + {{/* atoi from sprig swallows conversion errors, so we double check to make sure it is a valid conversion */}} + {{- if and (eq $digitI 0) (ne $digit "0") -}} + {{- fail (printf "Digit %d of %s is not a number: %s" $idx . $digit) -}} + {{- end -}} + + {{/* Make sure each digit is less than 8 */}} + {{- if ge $digitI 8 -}} + {{- fail (printf "%s is not a valid octal digit" $digit) -}} + {{- end -}} + + {{/* Since we don't have math.Pow, we hard code */}} + {{- if eq $idx 0 -}} + {{/* 8^2 */}} + {{- $_ := set $accumulator "res" (add (index $accumulator "res") (mul $digitI 64)) -}} + {{- else if eq $idx 1 -}} + {{/* 8^1 */}} + {{- $_ := set $accumulator "res" (add (index $accumulator "res") (mul $digitI 8)) -}} + {{- else -}} + {{/* 8^0 */}} + {{- $_ := set $accumulator "res" (add (index $accumulator "res") (mul $digitI 1)) -}} + {{- end -}} + {{- end -}} + {{- "res" | index $accumulator | toString | printf -}} +{{- end -}} diff --git a/charts/k8s-job/templates/_job_spec.tpl b/charts/k8s-job/templates/_job_spec.tpl new file mode 100644 index 00000000..bfa70f47 --- /dev/null +++ b/charts/k8s-job/templates/_job_spec.tpl @@ -0,0 +1,207 @@ +{{- /* +Common job spec. This template requires the +context: +- Values +- Release +- Chart +You can construct this context using dict: +(dict "Values" .Values "Release" .Release "Chart" .Chart "isCanary" true) +*/ -}} +{{- define "k8s-job.jobSpec" -}} +{{- /* +We must decide whether or not there are volumes to inject. The logic to decide whether or not to inject is based on +whether or not there are configMaps OR secrets that are specified as volume mounts (`as: volume` attributes). We do this +by using a map to track whether or not we have seen a volume type. We have to use a map because we can't update a +variable in helm chart templates. + +Similarly, we need to decide whether or not there are environment variables to add + +We need this because certain sections are omitted if there are no volumes or environment variables to add. 
+*/ -}} + +{{/* Go Templates do not support variable updating, so we simulate it using dictionaries */}} +{{- $hasInjectionTypes := dict "hasVolume" false "hasEnvVars" false "exposePorts" false -}} +{{- if .Values.envVars -}} + {{- $_ := set $hasInjectionTypes "hasEnvVars" true -}} +{{- end -}} +{{- if .Values.additionalContainerEnv -}} + {{- $_ := set $hasInjectionTypes "hasEnvVars" true -}} +{{- end -}} +{{- $allContainerPorts := values .Values.containerPorts -}} +{{- range $allContainerPorts -}} + {{/* We are exposing ports if there is at least one key in containerPorts that is not disabled (disabled = false or + omitted) + */}} + {{- if or (not (hasKey . "disabled")) (not .disabled) -}} + {{- $_ := set $hasInjectionTypes "exposePorts" true -}} + {{- end -}} +{{- end -}} +{{- $allSecrets := values .Values.secrets -}} +{{- range $allSecrets -}} + {{- if eq (index . "as") "volume" -}} + {{- $_ := set $hasInjectionTypes "hasVolume" true -}} + {{- else if eq (index . "as") "environment" -}} + {{- $_ := set $hasInjectionTypes "hasEnvVars" true -}} + {{- else if eq (index . "as") "envFrom" }} + {{- $_ := set $hasInjectionTypes "hasEnvFrom" true -}} + {{- else if eq (index . "as") "none" -}} + {{- /* noop */ -}} + {{- else -}} + {{- fail printf "secrets config has unknown type: %s" (index . "as") -}} + {{- end -}} +{{- end -}} +{{- $allConfigMaps := values .Values.configMaps -}} +{{- range $allConfigMaps -}} + {{- if eq (index . "as") "volume" -}} + {{- $_ := set $hasInjectionTypes "hasVolume" true -}} + {{- else if eq (index . "as") "environment" -}} + {{- $_ := set $hasInjectionTypes "hasEnvVars" true -}} + {{- else if eq (index . "as") "envFrom" }} + {{- $_ := set $hasInjectionTypes "hasEnvFrom" true -}} + {{- else if eq (index . "as") "none" -}} + {{- /* noop */ -}} + {{- else -}} + {{- fail printf "configMaps config has unknown type: %s" (index . "as") -}} + {{- end -}} +{{- end -}} +{{- if gt (len .Values.persistentVolumes) 0 -}} + {{- $_ := set $hasInjectionTypes "hasVolume" true -}} +{{- end -}} +{{- if gt (len .Values.scratchPaths) 0 -}} + {{- $_ := set $hasInjectionTypes "hasVolume" true -}} +{{- end -}} +{{- if gt (len .Values.emptyDirs) 0 -}} + {{- $_ := set $hasInjectionTypes "hasVolume" true -}} +{{- end -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "k8s-job.fullname" . }} + labels: + # These labels are required by helm. You can read more about required labels in the chart best practices guide: + # https://docs.helm.sh/chart_best_practices/#standard-labels + helm.sh/chart: {{ include "k8s-job.chart" . }} + app.kubernetes.io/name: {{ include "k8s-job.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- range $key, $value := .Values.additionalDeploymentLabels }} + {{ $key }}: {{ $value }} + {{- end}} +{{- with .Values.deploymentAnnotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ include "k8s-job.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .isCanary }} + gruntwork.io/deployment-type: canary + {{- else }} + gruntwork.io/deployment-type: main + {{- end }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "k8s-job.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + gruntwork.io/deployment-type: main + {{- end }} + {{- range $key, $value := .Values.additionalPodLabels }} + {{ $key }}: {{ $value }} + {{- end }} + + {{- with .Values.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} + {{- end }} + spec: + {{- if .Values.podSecurityContext }} + securityContext: +{{ toYaml .Values.podSecurityContext | indent 8 }} + {{- end}} + + restartPolicy: {{ toYaml .Values.restartPolicy | indent 12 }} + containers: + - name: {{ .Values.applicationName }} + {{- $repo := required ".Values.containerImage.repository is required" .Values.containerImage.repository }} + {{- $tag := required ".Values.containerImage.tag is required" .Values.containerImage.tag }} + image: "{{ $repo }}:{{ $tag }}" + imagePullPolicy: {{ .Values.containerImage.pullPolicy | default "IfNotPresent" }} + {{- end }} + {{- if .Values.containerCommand }} + command: +{{ toYaml .Values.containerCommand | indent 12 }} + {{- if .Values.containerArgs }} + args: +{{ toYaml .Values.containerArgs | indent 12 }} + {{- end }} + securityContext: +{{ toYaml .Values.securityContext | indent 12 }} + {{- end}} + resources: +{{ toYaml .Values.containerResources | indent 12 }} + {{- end }} + + {{- /* START ENV VAR LOGIC */ -}} + {{- if index $hasInjectionTypes "hasEnvVars" }} + env: + {{- end }} + {{- range $key, $value := .Values.envVars }} + - name: {{ $key }} + value: {{ quote $value }} + {{- end }} + {{- if .Values.additionalContainerEnv }} +{{ toYaml .Values.additionalContainerEnv | indent 12 }} + {{- end }} + {{- range $name, $value := .Values.configMaps }} + {{- if eq $value.as "environment" }} + {{- range $configKey, $keyEnvVarConfig := $value.items }} + - name: {{ required "envVarName is required on configMaps items when using environment" $keyEnvVarConfig.envVarName | quote }} + valueFrom: + configMapKeyRef: + name: {{ $name }} + key: {{ $configKey }} + {{- end }} + {{- end }} + {{- end }} + {{- range $name, $value := .Values.secrets }} + {{- if eq $value.as "environment" }} + {{- range $secretKey, $keyEnvVarConfig := $value.items }} + - name: {{ required "envVarName is required on secrets items when using environment" $keyEnvVarConfig.envVarName | quote }} + valueFrom: + secretKeyRef: + name: {{ $name }} + key: {{ $secretKey }} + {{- end }} + {{- end }} + {{- end }} + {{- if index $hasInjectionTypes "hasEnvFrom" }} + envFrom: + {{- range $name, $value := .Values.configMaps }} + {{- if eq $value.as "envFrom" }} + - configMapRef: + name: {{ $name }} + {{- end }} + {{- end }} + {{- range $name, $value := .Values.secrets }} + {{- if eq $value.as "envFrom" }} + - secretRef: + name: {{ $name }} + {{- end }} + {{- end }} + {{- end }} + {{- /* END ENV VAR LOGIC */ -}} + + {{- /* START IMAGE PULL SECRETS LOGIC */ -}} + {{- if gt (len .Values.imagePullSecrets) 0 }} + imagePullSecrets: + {{- range $secretName := .Values.imagePullSecrets }} + - name: {{ $secretName }} + {{- end }} + {{- end }} + {{- /* END IMAGE PULL SECRETS LOGIC */ -}} + + +{{- end -}} diff --git a/charts/k8s-job/templates/job.yaml b/charts/k8s-job/templates/job.yaml new file mode 100644 index 00000000..907f8fff --- /dev/null +++ b/charts/k8s-job/templates/job.yaml @@ -0,0 +1,5 @@ +{{- /* +The standalone Job to be deployed. This resource manages the creation and replacement +of Jobs you schedule. 
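+
+To render just this manifest locally for debugging, a command along these lines can be used (the release name and
+values file below are placeholders):
+
+  helm template my-job charts/k8s-job --values charts/k8s-job/linter_values.yaml --show-only templates/job.yaml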
+*/ -}}
+{{ include "k8s-job.jobSpec" . }}
diff --git a/charts/k8s-job/values.yaml b/charts/k8s-job/values.yaml
new file mode 100644
index 00000000..65a8adad
--- /dev/null
+++ b/charts/k8s-job/values.yaml
@@ -0,0 +1,153 @@
+#----------------------------------------------------------------------------------------------------------------------
+# CHART PARAMETERS
+# This file declares the configuration input values for the k8s-job Helm chart.
+# This is a YAML-formatted file.
+#----------------------------------------------------------------------------------------------------------------------
+
+#----------------------------------------------------------------------------------------------------------------------
+# REQUIRED VALUES
+# These values are expected to be defined and passed in by the operator when deploying this helm chart.
+#----------------------------------------------------------------------------------------------------------------------
+
+# containerImage is a map that describes the container image that should be used to serve the application managed by
+# this chart.
+# The expected keys are:
+#   - repository (string) (required) : The container image repository that should be used.
+#                                      E.g `nginx` ; `gcr.io/kubernetes-helm/tiller`
+#   - tag        (string) (required) : The tag of the image (e.g `latest`) that should be used. We recommend using a
+#                                      fixed tag or the SHA of the image. Avoid using the tags `latest`, `head`,
+#                                      `canary`, or other tags that are designed to be “floating”.
+#   - pullPolicy (string)            : The image pull policy to employ. Determines when the image will be pulled in. See
+#                                      the official Kubernetes docs for more info. If undefined, this will default to
+#                                      `IfNotPresent`.
+#
+# The following example deploys the `nginx:stable` image with a `IfNotPresent` image pull policy, which indicates that
+# the image should only be pulled if it has not been pulled previously.
+#
+# EXAMPLE:
+#
+# containerImage:
+#   repository: nginx
+#   tag: stable
+#   pullPolicy: IfNotPresent
+
+# applicationName is a string that names the application. This is used to label the pod and to name the main application
+# container in the pod spec. The label is keyed under "gruntwork.io/app-name"
+
+
+#----------------------------------------------------------------------------------------------------------------------
+# OPTIONAL VALUES
+# These values have defaults, but may be overridden by the operator
+#----------------------------------------------------------------------------------------------------------------------
+
+# containerCommand is a list of strings that indicate a custom command to run for the container in place of the default
+# configured on the image. Omit to run the default command configured on the image.
+#
+# Example (run echo "Hello World"):
+#
+# containerCommand:
+#   - "echo"
+#   - "Hello World"
+containerCommand: null
+
+# containerArgs is a list of strings that indicate custom arguments when a pod is created. Omit and no arguments will be injected.
+#
+# Example (run echo "Hello World"):
+#
+# containerArgs:
+#   - "echo"
+#   - "Hello World"
+containerArgs: null
+
+# restartPolicy is a container and pod configuration option which decides which action to take if a container's process
+# exits with a non-zero code. The default option "Never" will not attempt to restart the container. The "OnFailure" option
+# will re-run the container.
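+# NOTE: for the pods of a Job, Kubernetes only accepts "Never" or "OnFailure" here; "Always" (the default used for
+# long-running services) is rejected by the API server.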
+# +# Read more: https://kubernetes.io/docs/concepts/workloads/controllers/job/#handling-pod-and-container-failures +# +# Example (restart on failure) +# +# spec: +# restartPolicy: OnFailure +# containers: +# - name: busybox +# ... +restartPolicy: Never + +# envVars is a map of strings to strings that specifies hard coded environment variables that should be set on the +# application container. The keys will be mapped to environment variable keys, with the values mapping to the +# environment variable values. +# +# NOTE: If you wish to set environment variables using Secrets, see the `secrets` setting in this file. +# +# The following example configures two environment variables, DB_HOST and DB_PORT: +# +# EXAMPLE: +# +# envVars: +# DB_HOST: "mysql.default.svc.cluster.local" +# DB_PORT: 3306 +envVars: {} + +# additionalContainerEnv is a list of additional environment variables +# definitions that will be inserted into the Container's environment YAML. +# +# Example: +# additionalContainerEnv: +# - name: DD_AGENT_HOST +# valueFrom: +# fieldRef: +# fieldPath: status.hostIP +# - name: DD_ENTITY_ID +# valueFrom: +# fieldRef: +# fieldPath: metadata.uid +additionalContainerEnv: {} + +# containerResources specifies the amount of resources the application container will require. Only specify if you have +# specific resource needs. +# NOTE: This variable is injected directly into the pod spec. See the official documentation for what this might look +# like: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ +containerResources: {} + +# imagePullSecrets lists the Secret resources that should be used for accessing private registries. Each item in the +# list is a string that corresponds to the Secret name. +imagePullSecrets: [] + +# customResources is a map that lets you define Kubernetes resources you want installed and configured as part of this chart. +# The expected keys of customResources are: +# - enabled (bool) : Whether or not the provided custom resource definitions should be created. +# - resources (map) : A map of custom Kubernetes resources you want to install during the installation of the chart. +# +# NOTE: By default enabled = false, and no custom resources will be created. If you provide any resources, be sure to +# provide them as quoted using "|", and set enabled: true. +# +# The following example creates a custom ConfigMap and a Secret. +# +# EXAMPLE: +# +# customResources: +# enabled: true +# resources: +# custom_configmap: | +# apiVersion: v1 +# kind: ConfigMap +# metadata: +# name: example +# data: +# key: value +# custom_secret: | +# apiVersion: v1 +# kind: Secret +# metadata: +# name: example +# type: Opaque +# data: +# key: dmFsdWU= +customResources: + enabled: false + resources: {} + +# fullnameOverride is a string that allows overriding the default fullname that appears as the +# application name and is used as the application name by kubernetes. 
+fullnameOverride: ""
diff --git a/examples/k8s-job-busybox/README.md b/examples/k8s-job-busybox/README.md
new file mode 100644
index 00000000..e69de29b
diff --git a/examples/k8s-job-busybox/values.yaml b/examples/k8s-job-busybox/values.yaml
new file mode 100644
index 00000000..a6e5a059
--- /dev/null
+++ b/examples/k8s-job-busybox/values.yaml
@@ -0,0 +1,59 @@
+#----------------------------------------------------------------------------------------------------------------------
+# CHART PARAMETERS FOR BUSYBOX EXAMPLE
+# This file declares the required values for the k8s-job helm chart to deploy busybox.
+# This is a YAML-formatted file.
+#----------------------------------------------------------------------------------------------------------------------
+
+#----------------------------------------------------------------------------------------------------------------------
+# REQUIRED VALUES OF CHART
+# These are the required values defined by the k8s-job chart. Here we will set them to deploy a busybox container.
+#----------------------------------------------------------------------------------------------------------------------
+
+# containerImage is a map that describes the container image that should be used to serve the application managed by
+# the k8s-job chart.
+# The expected keys are:
+#   - repository (string) (required) : The container image repository that should be used.
+#                                      E.g `nginx` ; `gcr.io/kubernetes-helm/tiller`
+#   - tag        (string) (required) : The tag of the image (e.g `latest`) that should be used. We recommend using a
+#                                      fixed tag or the SHA of the image. Avoid using the tags `latest`, `head`,
+#                                      `canary`, or other tags that are designed to be “floating”.
+#   - pullPolicy (string)            : The image pull policy to employ. Determines when the image will be pulled in. See
+#                                      the official Kubernetes docs for more info. If undefined, this will default to
+#                                      `IfNotPresent`.
+#
+# The following example deploys the `busybox:1.34` image with a `IfNotPresent` image pull policy, which indicates that
+# the image should only be pulled if it has not been pulled previously. We deploy a specific, locked tag so that we
+# don't inadvertently upgrade busybox during a deployment that changes some other unrelated input value.
+containerImage:
+  repository: busybox
+  tag: 1.34
+  pullPolicy: IfNotPresent
+
+# applicationName is a string that names the application. This is used to label the pod and to name the main application
+# container in the pod spec. Here we use busybox as the name since we are deploying busybox.
+applicationName: "busybox"
+
+#----------------------------------------------------------------------------------------------------------------------
+# OVERRIDE OPTIONAL VALUES
+# These values have defaults in the k8s-job chart, but we override a few of them for the purposes of this demo.
+#----------------------------------------------------------------------------------------------------------------------
+# containerCommand is a list of strings that indicate a custom command to run for the container in place of the default
+# configured on the image. Omit to run the default command configured on the image.
+#
+# Example (run echo "Hello World"):
+#
+# containerCommand:
+#   - "echo"
+#   - "Hello World"
+containerCommand:
+  - "/bin/sh"
+
+# containerArgs is a list of strings that indicate custom arguments when a pod is created. Omit and no arguments will be injected.
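+# NOTE: combined with the containerCommand above, the container in this example effectively runs
+#   /bin/sh -c "while true; do echo hello; sleep 10;done"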
+# +# Example (run echo "Hello World"): +# +# containerArgs: +# - "echo" +# - "Hello World" +containerArgs: + - "-c" + - "while true; do echo hello; sleep 10;done" \ No newline at end of file diff --git a/test/k8s_job_template_test.go b/test/k8s_job_template_test.go new file mode 100644 index 00000000..6c8f08ad --- /dev/null +++ b/test/k8s_job_template_test.go @@ -0,0 +1,265 @@ +//go:build all || tpl +// +build all tpl + +// NOTE: We use build flags to differentiate between template tests and integration tests so that you can conveniently +// run just the template tests. See the test README for more information. + +package test + +import ( + "path/filepath" + "strings" + "testing" + + "github.com/gruntwork-io/terratest/modules/helm" + "github.com/gruntwork-io/terratest/modules/random" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Test each of the required values. Here, we take advantage of the fact that linter_values.yaml is supposed to define +// all the required values, so we check the template rendering by nulling out each field. +func TestK8SJobRequiredValuesAreRequired(t *testing.T) { + t.Parallel() + + helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-job")) + require.NoError(t, err) + + eachRequired := []string{ + "containerImage.repository", + "containerImage.tag", + "applicationName", + } + for _, requiredVal := range eachRequired { + // Capture the range value and force it into this scope. Otherwise, it is defined outside this block so it can + // change when the subtests parallelize and switch contexts. + requiredVal := requiredVal + t.Run(requiredVal, func(t *testing.T) { + t.Parallel() + + // We make sure to pass in the linter_values.yaml values file, which we assume has all the required values defined. + // We then use SetValues to null out the value. + options := &helm.Options{ + ValuesFiles: []string{filepath.Join("..", "charts", "k8s-job", "linter_values.yaml")}, + SetValues: map[string]string{requiredVal: "null"}, + } + _, err := helm.RenderTemplateE(t, options, helmChartPath, strings.ToLower(t.Name()), []string{}) + assert.Error(t, err) + }) + } +} + +// Test each of the optional values defined in linter_values.yaml. Here, we take advantage of the fact that +// linter_values.yaml is supposed to define all the required values, so we check the template rendering by nulling out +// each field. +func TestK8SJobOptionalValuesAreOptional(t *testing.T) { + t.Parallel() + + helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-job")) + require.NoError(t, err) + + eachOptional := []string{ + "containerImage.pullPolicy", + } + for _, optionalVal := range eachOptional { + // Capture the range value and force it into this scope. Otherwise, it is defined outside this block so it can + // change when the subtests parallelize and switch contexts. + optionalVal := optionalVal + t.Run(optionalVal, func(t *testing.T) { + t.Parallel() + + // We make sure to pass in the linter_values.yaml values file, which we assume has all the required values defined. + // We then use SetValues to null out the value. 
+			options := &helm.Options{
+				ValuesFiles: []string{filepath.Join("..", "charts", "k8s-job", "linter_values.yaml")},
+				SetValues:   map[string]string{optionalVal: "null"},
+			}
+			// Make sure it renders without error
+			helm.RenderTemplate(t, options, helmChartPath, "all", []string{})
+		})
+	}
+}
+
+// Test that annotations render correctly to annotate the Job resource
+func TestK8SJobAnnotationsRenderCorrectly(t *testing.T) {
+	t.Parallel()
+
+	uniqueID := random.UniqueId()
+	job := renderK8SJobWithSetValues(t, map[string]string{"jobAnnotations.unique-id": uniqueID})
+
+	assert.Equal(t, len(job.Annotations), 1)
+	assert.Equal(t, job.Annotations["unique-id"], uniqueID)
+}
+
+func TestK8SJobSecurityContextAnnotationRenderCorrectly(t *testing.T) {
+	t.Parallel()
+	job := renderK8SJobWithSetValues(
+		t,
+		map[string]string{
+			"securityContext.privileged": "true",
+			"securityContext.runAsUser":  "1000",
+		},
+	)
+	renderedContainers := job.Spec.Template.Spec.Containers
+	require.Equal(t, len(renderedContainers), 1)
+	testContainer := renderedContainers[0]
+	assert.NotNil(t, testContainer.SecurityContext)
+	assert.True(t, *testContainer.SecurityContext.Privileged)
+	assert.Equal(t, *testContainer.SecurityContext.RunAsUser, int64(1000))
+}
+
+// Test that the allowPrivilegeEscalation option under securityContext renders correctly on the container spec.
+func TestK8SJobAllowPrivilegeEscalationRenderCorrectly(t *testing.T) {
+	t.Parallel()
+
+	job := renderK8SJobWithSetValues(
+		t,
+		map[string]string{
+			"securityContext.allowPrivilegeEscalation": "false",
+		},
+	)
+	renderedContainers := job.Spec.Template.Spec.Containers
+	require.Equal(t, len(renderedContainers), 1)
+	testContainer := renderedContainers[0]
+	assert.NotNil(t, testContainer.SecurityContext)
+	assert.False(t, *testContainer.SecurityContext.AllowPrivilegeEscalation)
+}
+
+// Test that default imagePullSecrets do not render any
+func TestK8SJobNoImagePullSecrets(t *testing.T) {
+	t.Parallel()
+
+	job := renderK8SJobWithSetValues(
+		t,
+		map[string]string{},
+	)
+
+	renderedImagePullSecrets := job.Spec.Template.Spec.ImagePullSecrets
+	require.Equal(t, len(renderedImagePullSecrets), 0)
+}
+
+func TestK8SJobMultipleImagePullSecrets(t *testing.T) {
+	t.Parallel()
+
+	job := renderK8SJobWithSetValues(
+		t,
+		map[string]string{
+			"imagePullSecrets[0]": "docker-private-registry-key",
+			"imagePullSecrets[1]": "gcr-registry-key",
+		},
+	)
+
+	renderedImagePullSecrets := job.Spec.Template.Spec.ImagePullSecrets
+	require.Equal(t, len(renderedImagePullSecrets), 2)
+	assert.Equal(t, renderedImagePullSecrets[0].Name, "docker-private-registry-key")
+	assert.Equal(t, renderedImagePullSecrets[1].Name, "gcr-registry-key")
+}
+
+// Test that omitting containerCommand does not set command attribute on the Job container spec.
+func TestK8SJobDefaultHasNullCommandSpec(t *testing.T) {
+	t.Parallel()
+
+	job := renderK8SJobWithSetValues(t, map[string]string{})
+	renderedContainers := job.Spec.Template.Spec.Containers
+	require.Equal(t, len(renderedContainers), 1)
+	appContainer := renderedContainers[0]
+	assert.Nil(t, appContainer.Command)
+}
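+
+// NOTE: these template tests rely on a renderK8SJobWithSetValues helper that is not shown in this patch. A minimal
+// sketch of such a helper, assuming it renders only templates/job.yaml against linter_values.yaml (mirroring the
+// k8s-service template test helpers) and that batchv1 is "k8s.io/api/batch/v1", could look like:
+//
+//	func renderK8SJobWithSetValues(t *testing.T, setValues map[string]string) batchv1.Job {
+//		helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-job"))
+//		require.NoError(t, err)
+//		options := &helm.Options{
+//			ValuesFiles: []string{filepath.Join("..", "charts", "k8s-job", "linter_values.yaml")},
+//			SetValues:   setValues,
+//		}
+//		// Render just the Job manifest and unmarshal it into a typed batch/v1 Job object.
+//		rendered := helm.RenderTemplate(t, options, helmChartPath, "k8s-job", []string{"templates/job.yaml"})
+//		var job batchv1.Job
+//		helm.UnmarshalK8SYaml(t, rendered, &job)
+//		return job
+//	}
+
+// Test that setting containerCommand sets the command attribute on the Job container spec.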
+func TestK8SJobWithContainerCommandHasCommandSpec(t *testing.T) { + t.Parallel() + + job := renderK8SJobWithSetValues( + t, + map[string]string{ + "containerCommand[0]": "echo", + "containerCommand[1]": "Hello world", + }, + ) + renderedContainers := job.Spec.Template.Spec.Containers + require.Equal(t, len(renderedContainers), 1) + appContainer := renderedContainers[0] + assert.Equal(t, appContainer.Command, []string{"echo", "Hello world"}) +} + +func TestK8SJobMainJobContainersLabeledCorrectly(t *testing.T) { + t.Parallel() + job := renderK8SJobWithSetValues( + t, + map[string]string{ + "containerImage.repository": "nginx", + "containerImage.tag": "1.16.0", + }, + ) + // Ensure a "main" type job is properly labeled as such + assert.Equal(t, job.Spec.Selector.MatchLabels["gruntwork.io/job-type"], "main") +} + +func TestK8SJobAddingAdditionalLabels(t *testing.T) { + t.Parallel() + first_custom_job_label_value := "first-custom-value" + second_custom_job_label_value := "second-custom-value" + job := renderK8SJobWithSetValues(t, + map[string]string{"additionalJobLabels.first-label": first_custom_job_label_value, + "additionalJobLabels.second-label": second_custom_job_label_value}) + + assert.Equal(t, job.Labels["first-label"], first_custom_job_label_value) + assert.Equal(t, job.Labels["second-label"], second_custom_job_label_value) +} + +func TestK8SJobFullnameOverride(t *testing.T) { + t.Parallel() + + overiddenName := "overidden-name" + + job := renderK8SJobWithSetValues(t, + map[string]string{ + "fullnameOverride": overiddenName, + }, + ) + + assert.Equal(t, job.Name, overiddenName) +} + +func TestK8SJobEnvFrom(t *testing.T) { + t.Parallel() + + t.Run("BothConfigMapsAndSecretsEnvFrom", func(t *testing.T) { + job := renderK8SJobWithSetValues(t, + map[string]string{ + "configMaps.test-configmap.as": "envFrom", + "secrets.test-secret.as": "envFrom", + }, + ) + + assert.NotNil(t, job.Spec.Template.Spec.Containers[0].EnvFrom) + assert.Equal(t, len(job.Spec.Template.Spec.Containers[0].EnvFrom), 2) + assert.Equal(t, job.Spec.Template.Spec.Containers[0].EnvFrom[0].ConfigMapRef.Name, "test-configmap") + assert.Equal(t, job.Spec.Template.Spec.Containers[0].EnvFrom[1].SecretRef.Name, "test-secret") + }) + + t.Run("OnlyConfigMapsEnvFrom", func(t *testing.T) { + job := renderK8SJobWithSetValues(t, + map[string]string{ + "configMaps.test-configmap.as": "envFrom", + }, + ) + + assert.NotNil(t, job.Spec.Template.Spec.Containers[0].EnvFrom) + assert.Equal(t, len(job.Spec.Template.Spec.Containers[0].EnvFrom), 1) + assert.Equal(t, job.Spec.Template.Spec.Containers[0].EnvFrom[0].ConfigMapRef.Name, "test-configmap") + }) + + t.Run("OnlySecretsEnvFrom", func(t *testing.T) { + job := renderK8SJobWithSetValues(t, + map[string]string{ + "secrets.test-secret.as": "envFrom", + }, + ) + + assert.NotNil(t, job.Spec.Template.Spec.Containers[0].EnvFrom) + assert.Equal(t, len(job.Spec.Template.Spec.Containers[0].EnvFrom), 1) + assert.Equal(t, job.Spec.Template.Spec.Containers[0].EnvFrom[0].SecretRef.Name, "test-secret") + }) + +} diff --git a/test/k8s_job_test.go b/test/k8s_job_test.go new file mode 100644 index 00000000..2056373e --- /dev/null +++ b/test/k8s_job_test.go @@ -0,0 +1,90 @@ +//go:build all || integration +// +build all integration + +// NOTE: We use build flags to differentiate between template tests and integration tests so that you can conveniently +// run just the template tests. See the test README for more information. 
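+//
+// For reference, these integration tests might be run with something along the lines of (the package path and
+// timeout are assumptions):
+//
+//	go test -v -timeout 60m -tags integration ./test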
+ +package test + +import ( + "fmt" + "path/filepath" + "strings" + "testing" + + "github.com/gruntwork-io/terratest/modules/helm" + http_helper "github.com/gruntwork-io/terratest/modules/http-helper" + "github.com/gruntwork-io/terratest/modules/k8s" + "github.com/gruntwork-io/terratest/modules/random" + test_structure "github.com/gruntwork-io/terratest/modules/test-structure" + "github.com/stretchr/testify/require" + "golang.org/x/mod/semver" +) + +// Test that: +// +// 1. We can deploy the example Job +// 2. The Job succeeds without errors + +func TestK8SJobBusyboxExample(t *testing.T) { + t.Parallel() + + workingDir := filepath.Join(".", "stages", t.Name()) + + //os.Setenv("SKIP_setup", "true") + //os.Setenv("SKIP_create_namespace", "true") + //os.Setenv("SKIP_install", "true") + //os.Setenv("SKIP_validate_initial_deployment", "true") + //os.Setenv("SKIP_upgrade", "true") + //os.Setenv("SKIP_validate_upgrade", "true") + //os.Setenv("SKIP_delete", "true") + //os.Setenv("SKIP_delete_namespace", "true") + + helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-job")) + require.NoError(t, err) + examplePath, err := filepath.Abs(filepath.Join("..", "examples", "k8s-job-busybox")) + require.NoError(t, err) + + // Create a test namespace to deploy resources into, to avoid colliding with other tests + test_structure.RunTestStage(t, "setup", func() { + kubectlOptions := k8s.NewKubectlOptions("", "", "") + test_structure.SaveKubectlOptions(t, workingDir, kubectlOptions) + + uniqueID := random.UniqueId() + test_structure.SaveString(t, workingDir, "uniqueID", uniqueID) + }) + kubectlOptions := test_structure.LoadKubectlOptions(t, workingDir) + uniqueID := test_structure.LoadString(t, workingDir, "uniqueID") + testNamespace := fmt.Sprintf("k8s-job-busybox-%s", strings.ToLower(uniqueID)) + + defer test_structure.RunTestStage(t, "delete_namespace", func() { + k8s.DeleteNamespace(t, kubectlOptions, testNamespace) + }) + + test_structure.RunTestStage(t, "create_namespace", func() { + k8s.CreateNamespace(t, kubectlOptions, testNamespace) + }) + + kubectlOptions.Namespace = testNamespace + + // Use the values file in the example and deploy the chart in the test namespace + // Set a random release name + releaseName := fmt.Sprintf("k8s-job-busybox-%s", strings.ToLower(uniqueID)) + options := &helm.Options{ + KubectlOptions: kubectlOptions, + ValuesFiles: []string{filepath.Join(examplePath, "values.yaml")}, + } + + defer test_structure.RunTestStage(t, "delete", func() { + helm.Delete(t, options, releaseName, true) + }) + + test_structure.RunTestStage(t, "install", func() { + helm.Install(t, options, helmChartPath, releaseName) + }) + + test_structure.RunTestStage(t, "validate_job_deployment", func() { + verifyPodsCreatedSuccessfully(t, kubectlOptions, "busybox", releaseName, NumPodsExpected) + + }) +} From b87d83dc16bc8da06f61b80c072b3f1b1c9a1863 Mon Sep 17 00:00:00 2001 From: "N. G. 
Perrin" Date: Thu, 21 Apr 2022 17:16:44 -0400 Subject: [PATCH 3/8] Update charts/k8s-job/README.md Co-authored-by: Yoriyasu Yano <430092+yorinasub17@users.noreply.github.com> --- charts/k8s-job/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/k8s-job/README.md b/charts/k8s-job/README.md index e14a1ff5..7285c4a4 100644 --- a/charts/k8s-job/README.md +++ b/charts/k8s-job/README.md @@ -1,4 +1,4 @@ -# Kubernetes Service Helm Chart +# Kubernetes Job Helm Chart This Helm Chart can be used to deploy your job container under a [Job](https://kubernetes.io/docs/concepts/workloads/controllers/job/) resource onto your Kubernetes From 049649ff9d7495e1acde50081a1aafa0b9d1770e Mon Sep 17 00:00:00 2001 From: "N. G. Perrin" Date: Thu, 21 Apr 2022 17:16:51 -0400 Subject: [PATCH 4/8] Update charts/k8s-job/README.md Co-authored-by: Yoriyasu Yano <430092+yorinasub17@users.noreply.github.com> --- charts/k8s-job/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/k8s-job/README.md b/charts/k8s-job/README.md index 7285c4a4..f0a9b13f 100644 --- a/charts/k8s-job/README.md +++ b/charts/k8s-job/README.md @@ -23,7 +23,7 @@ The following resources will be deployed with this Helm Chart, depending on whic back to [root README](/README.adoc#core-concepts) -## How do I deploy additional services not managed by the chart? +## How do I deploy additional resources not managed by the chart? You can create custom Kubernetes resources, that are not directly managed by the chart, within the `customResources` key. You provide each resource manifest directly as a value under `customResources.resources` and set From 0513ea36360d9c95b1cf35f151e19451f06dc5f3 Mon Sep 17 00:00:00 2001 From: Nathaniel Date: Thu, 21 Apr 2022 17:27:37 -0400 Subject: [PATCH 5/8] Commit suggestions --- charts/k8s-job/README.md | 819 +------------------------ charts/k8s-job/templates/_job_spec.tpl | 9 +- charts/k8s-job/values.yaml | 11 + 3 files changed, 15 insertions(+), 824 deletions(-) diff --git a/charts/k8s-job/README.md b/charts/k8s-job/README.md index e14a1ff5..557e3dc7 100644 --- a/charts/k8s-job/README.md +++ b/charts/k8s-job/README.md @@ -23,7 +23,7 @@ The following resources will be deployed with this Helm Chart, depending on whic back to [root README](/README.adoc#core-concepts) -## How do I deploy additional services not managed by the chart? +## How do I deploy additional resources not managed by the chart? You can create custom Kubernetes resources, that are not directly managed by the chart, within the `customResources` key. You provide each resource manifest directly as a value under `customResources.resources` and set @@ -33,623 +33,6 @@ key. You provide each resource manifest directly as a value under `customResourc back to [root README](/README.adoc#day-to-day-operations) -## How do I expose my application internally to the cluster? - -In general, `Pods` are considered ephemeral in Kubernetes. `Pods` can come and go at any point in time, either because -containers fail or the underlying instances crash. In either case, the dynamic nature of `Pods` make it difficult to -consistently access your application if you are individually addressing the `Pods` directly. - -Traditionally, this is solved using service discovery, where you have a stateful system that the `Pods` would register -to when they are available. Then, your other applications can query the system to find all the available `Pods` and -access one of the available ones. 
- -Kubernetes provides a built in mechanism for service discovery in the `Service` resource. `Services` are an abstraction -that groups a set of `Pods` behind a consistent, stable endpoint to address them. By creating a `Service` resource, you -can provide a single endpoint to other applications to connect to the `Pods` behind the `Service`, and not worry about -the dynamic nature of the `Pods`. - -You can read a more detailed description of `Services` in [the official -documentation](https://kubernetes.io/docs/concepts/services-networking/service/). Here we will cover just enough to -understand how to access your app. - -By default, this Helm Chart will deploy your application container in a `Pod` that exposes ports 80. These will -be exposed to the Kubernetes cluster behind the `Service` resource, which exposes port 80. You can modify this behavior -by overriding the `containerPorts` input value and the `service` input value. See the corresponding section in the -`values.yaml` file for more details. - -Once the `Service` is created, you can check what endpoint the `Service` provides by querying Kubernetes using -`kubectl`. First, retrieve the `Service` name that is outputted in the install summary when you first install the Helm -Chart. If you forget, you can get the same information at a later point using `helm status`. For example, if you had -previously installed this chart under the name `edge-service`, you can run the following command to see the created -resources: - -```bash -$ helm status edge-service -LAST DEPLOYED: Fri Feb 8 16:25:49 2019 -NAMESPACE: default -STATUS: DEPLOYED - -RESOURCES: -==> v1/Service -NAME AGE -edge-service-nginx 24m - -==> v1/Deployment -edge-service-nginx 24m - -==> v1/Pod(related) - -NAME READY STATUS RESTARTS AGE -edge-service-nginx-844c978df7-f5wc4 1/1 Running 0 24m -edge-service-nginx-844c978df7-mln26 1/1 Running 0 24m -edge-service-nginx-844c978df7-rdsr8 1/1 Running 0 24m -``` - -This will show you some metadata about the release, the deployed resources, and any notes provided by the Helm Chart. In -this example, the service name is `edge-service-nginx` so we will use that to query the `Service`: - -```bash -$ kubectl get service edge-service-nginx -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -edge-service-nginx ClusterIP 172.20.186.176 80/TCP 27m -``` - -Here you can see basic information about the `Service`. The important piece of information is the `CLUSTER-IP` and -`PORT` fields, which tell you the available endpoint for the `Service`, and any exposed ports. Given that, any `Pod` in -your Kubernetes cluster can access the `Pods` of this application by hitting `{CLUSTER-IP}:{PORT}`. So for this example, -that will be `172.20.186.176:80`. - -But what if you want to automatically find a `Service` by name? The name of the `Service` created by this Helm Chart is -always `{RELEASE_NAME}-{applicationName}`, where `applicationName` is provided in the input value and `RELEASE_NAME` is -set when you install the Helm Chart. This means that the name is predictable, while the allocated IP address may not be. - -To address the `Service` by name, Kubernetes provides two ways: - -- environment variables -- DNS - -### Addressing Service by Environment Variables - -For each active `Service` that a `Pod` has access to, Kubernetes will automatically set a set of environment variables -in the container. 
These are `{SVCNAME}_SERVICE_HOST` and `{SVCNAME}_SERVICE_PORT` to get the host address (ip address) -and port respectively, where `SVCNAME` is the name of the `Service`. Note that `SVCNAME` will be the all caps version -with underscores of the `Service` name. - -Using the previous example where we installed this chart with a release name `edge-service` and `applicationName` -`nginx`, we get the `Service` name `edge-service-nginx`. Kubernetes will expose the following environment variables to -all containers that can access the `Service`: - -``` -EDGE_SERVICE_NGINX_SERVICE_HOST=172.20.186.176 -EDGE_SERVICE_NGINX_SERVICE_PORT=80 -``` - -Note that environment variables are set when the container first boots up. This means that if you already had `Pods` -deployed in your system before the `Service` was created, you will have to cycle the `Pods` in order to get the -environment variables. If you wish to avoid ordering issues, you can use the DNS method to address the `Service` -instead, if that is available. - -### Addressing Service by DNS - -If your Kubernetes cluster is deployed with the DNS add-on (this is automatically installed for EKS and GKE), then you -can rely on DNS to address your `Service`. Every `Service` in Kubernetes will register the domain -`{SVCNAME}.{NAMESPACE}.svc.cluster.local` to the DNS service of the cluster. This means that all your `Pods` in the -cluster can get the `Service` host by hitting that domain. - -The `NAMESPACE` in the domain refers to the `Namespace` where the `Service` was created. By default, all resources are -created in the `default` namespace. This is configurable at install time of the Helm Chart using the `--namespace` -option. - -In our example, we deployed the chart to the `default` `Namespace`, and the `Service` name is `edge-service-nginx`. So in -this case, the domain of the `Service` will be `edge-service-nginx.default.svc.cluster.local`. When any `Pod` addresses -that domain, it will get the address `172.20.186.176`. - -Note that DNS does not resolve ports, so in this case, you will have to know which port the `Service` uses. So in your -`Pod`, you will have to know that the `Service` exposes port `80` when you address it in your code for the container as -`edge-service-nginx.default.svc.cluster.local:80`. However, like the `Service` name, this should be predictable since it -is specified in the Helm Chart input value. - -back to [root README](/README.adoc#day-to-day-operations) - -## How do I expose my application externally, outside of the cluster? - -Similar to the previous section ([How do I expose my application internally to the -cluster?](#how-do-i-expose-my-application-internally-to-the-cluster), you can use a `Service` resource to expose your -application externally. The primary service type that facilitates external access is the `NodePort` `Service` type. - -The `NodePort` `Service` type will expose the `Service` by binding an available port on the network interface of the -physical machines running the `Pod`. This is different from a network interface internal to Kubernetes, which is only -accessible within the cluster. Since the port is on the host machine network interface, you can access the `Service` by -hitting that port on the node. - -For example, suppose you had a 2 node Kubernetes cluster deployed on EC2. Suppose further that all your EC2 instances -have public IP addresses that you can access. 
For the sake of this example, we will assign random IP addresses to the -instances: - -- 54.219.117.250 -- 38.110.235.198 - -Now let's assume you deployed this helm chart using the `NodePort` `Service` type. You can do this by setting the -`service.type` input value to `NodePort`: - -```yaml -service: - enabled: true - type: NodePort - ports: - app: - port: 80 - targetPort: 80 - protocol: TCP -``` - -When you install this helm chart with this input config, helm will deploy the `Service` as a `NodePort`, binding an -available port on the host machine to access the `Service`. You can confirm this by querying the `Service` using -`kubectl`: - -```bash -$ kubectl get service edge-service-nginx -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -edge-service-nginx NodePort 10.99.244.96 80:31035/TCP 33s -``` - -In this example, you can see that the `Service` type is `NodePort` as expected. Additionally, you can see that the there -is a port binding between port 80 and 31035. This port binding refers to the binding between the `Service` port (80 in -this case) and the host port (31035 in this case). - -One thing to be aware of about `NodePorts` is that the port binding will exist on all nodes in the cluster. This means -that, in our 2 node example, both nodes now have a port binding of 31035 on the host network interface that routes to -the `Service`, regardless of whether or not the node is running the `Pods` backing the `Service` endpoint. This means -that you can reach the `Service` on both of the following endpoints: - -- `54.219.117.250:31035` -- `38.110.235.198:31035` - -This means that no two `Service` can share the same `NodePort`, as the port binding is shared across the cluster. -Additionally, if you happen to hit a node that is not running a `Pod` backing the `Service`, Kubernetes will -automatically hop to one that is. - -You might use the `NodePort` if you do not wish to manage load balancers through Kubernetes, or if you are running -Kubernetes on prem where you do not have native support for managed load balancers. - -To summarize: - -- `NodePort` is the simplest way to expose your `Service` to externally to the cluster. -- You have a limit on the number of `NodePort` `Services` you can have in your cluster, imposed by the number of open ports - available on your host machines. -- You have potentially inefficient hopping if you happen to route to a node that is not running the `Pod` backing the - `Service`. - -Additionally, Kubernetes provides two mechanisms to manage an external load balancer that routes to the `NodePort` for -you. The two ways are: - -- [Using a `LoadBalancer` `Service` type](#loadbalancer-service-type) -- [Using `Ingress` resources with an `Ingress Controller`](#ingress-and-ingress-controllers) - -### LoadBalancer Service Type - -The `LoadBalancer` `Service` type will expose the `Service` by allocating a managed load balancer in the cloud that is -hosting the Kubernetes cluster. On AWS, this will be an ELB, while on GCP, this will be a Cloud Load Balancer. When the -`LoadBalancer` `Service` is created, Kubernetes will automatically create the underlying load balancer resource in the -cloud for you, and create all the target groups so that they route to the `Pods` backing the `Service`. 
- -You can deploy this helm chart using the `LoadBalancer` `Service` type by setting the `service.type` input value to -`LoadBalancer`: - -```yaml -service: - enabled: true - type: LoadBalancer - ports: - app: - port: 80 - targetPort: 80 - protocol: TCP -``` - -When you install this helm chart with this input config, helm will deploy the `Service` as a `LoadBalancer`, allocating -a managed load balancer in the cloud hosting your Kubernetes cluster. You can get the attached load balancer by querying -the `Service` using `kubectl`. In this example, we will assume we are using EKS: - -``` -$ kubectl get service edge-service-nginx -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -edge-service-nginx LoadBalancer 172.20.7.35 a02fef4d02e41... 80:32127/TCP 1m -``` - -Now, in this example, we have an entry in the `EXTERNAL-IP` field. This is truncated here, but you can get the actual -output when you describe the service: - -``` -$ kubectl describe service edge-service-nginx -Name: edge-service-nginx -Namespace: default -Labels: app.kubernetes.io/instance=edge-service - app.kubernetes.io/managed-by=helm - app.kubernetes.io/name=nginx - gruntwork.io/app-name=nginx - helm.sh/chart=k8s-service-0.1.0 -Annotations: -Selector: app.kubernetes.io/instance=edge-service,app.kubernetes.io/name=nginx,gruntwork.io/app-name=nginx -Type: LoadBalancer -IP: 172.20.7.35 -LoadBalancer Ingress: a02fef4d02e4111e9891806271fc7470-173030870.us-west-2.elb.amazonaws.com -Port: app 80/TCP -TargetPort: 80/TCP -NodePort: app 32127/TCP -Endpoints: 10.0.3.19:80 -Session Affinity: None -External Traffic Policy: Cluster -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal EnsuringLoadBalancer 2m service-controller Ensuring load balancer - Normal EnsuredLoadBalancer 2m service-controller Ensured load balancer -``` - -In the describe output, there is a field named `LoadBalancer Ingress`. When you have a `LoadBalancer` `Service` type, -this field contains the public DNS endpoint of the associated load balancer resource in the cloud provider. In this -case, we have an AWS ELB instance, so this endpoint is the public endpoint of the associated ELB resource. - -**Note:** Eagle eyed readers might also notice that there is an associated `NodePort` on the resource. This is because under the -hood, `LoadBalancer` `Services` utilize `NodePorts` to handle the connection between the managed load balancer of the -cloud provider and the Kubernetes `Pods`. This is because at this time, there is no portable way to ensure that the -network between the cloud load balancers and Kubernetes can be shared such that the load balancers can route to the -internal network of the Kubernetes cluster. Therefore, Kubernetes resorts to using `NodePort` as an abstraction layer to -connect the `LoadBalancer` to the `Pods` backing the `Service`. This means that `LoadBalancer` `Services` share the same -drawbacks as using a `NodePort` `Service`. - -To summarize: - -- `LoadBalancer` provides a way to set up a cloud load balancer resource that routes to the provisioned `NodePort` on - each node in your Kubernetes cluster. -- `LoadBalancer` can be used to provide a persistent endpoint that is robust to the ephemeral nature of nodes in your - cluster. E.g it is able to route to live nodes in the face of node failures. -- `LoadBalancer` does not support weighted balancing. This means that you cannot balance the traffic so that it prefers - nodes that have more instances of the `Pod` running. 
-- Note that under the hood, `LoadBalancer` utilizes a `NodePort` `Service`, and thus shares the same limits as `NodePort`. - -### Ingress and Ingress Controllers - -`Ingress` is a mechanism in Kubernetes that abstracts externally exposing a `Service` from the `Service` config itself. -`Ingress` resources support: - -- assigning an externally accessible URL to a `Service` -- perform hostname and path based routing of `Services` -- load balance traffic using customizable balancing rules -- terminate SSL - -You can read more about `Ingress` resources in [the official -documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/). Here, we will cover the basics to -understand how `Ingress` can be used to externally expose the `Service`. - -At a high level, the `Ingress` resource is used to specify the configuration for a particular `Service`. In turn, the -`Ingress Controller` is responsible for fulfilling those configurations in the cluster. This means that the first -decision to make in using `Ingress` resources, is selecting an appropriate `Ingress Controller` for your cluster. - -#### Choosing an Ingress Controller - -Before you can use an `Ingress` resource, you must install an `Ingress Controller` in your Kubernetes cluster. There are -many kinds of `Ingress Controllers` available, each with different properties. You can see [a few examples listed in the -official documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-controllers). - -When you use an external cloud `Ingress Controller` such as the [GCE Ingress -Controller](https://github.com/kubernetes/ingress-gce/blob/master/README.md) or [AWS ALB Ingress -Controller](https://github.com/kubernetes-sigs/aws-alb-ingress-controller), Kubernetes will allocate an externally -addressable load balancer (for GCE this will be a Cloud Load Balancer and for AWS this will be an ALB) that fulfills the -`Ingress` rules. This includes routing the domain names and paths to the right `Service` as configured by the `Ingress` -rules. Additionally, Kubernetes will manage the target groups of the load balancer so that they are up to date with -the latest `Ingress` configuration. However, in order for this to work, there needs to be some way for the load balancer -to connect to the `Pods` servicing the `Service`. Since the `Pods` are internal to the Kubernetes network and the load -balancers are external to the network, there must be a `NodePort` that links the two together. As such, like the -`LoadBalancer` `Service` type, these `Ingress Controllers` also require a `NodePort` under the hood. - - - -Alternatively, you can use an internal `Ingress Controller` that runs within Kubernetes as `Pods`. For example, the -official `nginx Ingress Controller` will launch `nginx` as `Pods` within your Kubernetes cluster. These `nginx` `Pods` -are then configured using `Ingress` resources, which then allows `nginx` to route to the right `Pods`. Since the `nginx` -`Pods` are internal to the Kubernetes network, there is no need for your `Services` to be `NodePorts` as they are -addressable within the network by the `Pods`. However, this means that you need some other mechanism to expose `nginx` -to the outside world, which will require a `NodePort`. The advantage of this approach, despite still requiring a -`NodePort`, is that you can have a single `NodePort` that routes to multiple services using hostnames or paths as -managed by `nginx`, as opposed to requiring a `NodePort` per `Service` you wish to expose. 
- -Which `Ingress Controller` type you wish to use depends on your infrastructure needs. If you have relatively few -`Services`, and you want the simplicity of a managed cloud load balancer experience, you might opt for the external -`Ingress Controllers` such as GCE and AWS ALB controllers. On the other hand, if you have thousands of micro services -that push you to the limits of the available number of ports on a host machine, you might opt for an internal `Ingress -Controller` approach. Whichever approach you decide, be sure to document your decision where you install the particular -`Ingress Controller` so that others in your team know and understand the tradeoffs you made. - -#### Configuring Ingress for your Service - -Once you have an `Ingress Controller` installed and configured on your Kuberentes cluster, you can now start creating -`Ingress` resources to add routes to it. This helm chart supports configuring an `Ingress` resource to complement the -`Service` resource that is created in the chart. - -To add an `Ingress` resource, first make sure you have a `Service` enabled on the chart. Depending on the chosen -`Ingress Controller`, the `Service` type should be `NodePort` or `ClusterIP`. Here, we will create a `NodePort` -`Service` exposing port 80: - -```yaml -service: - enabled: true - type: NodePort - ports: - app: - port: 80 - targetPort: 80 - protocol: TCP -``` - -Then, we will add the configuration for the `Ingress` resource by specifying the `ingress` input value. For this -example, we will assume that we want to route `/app` to our `Service`, with the domain hosted on `app.yourco.com`: - -```yaml -ingress: - enabled: true - path: /app - servicePort: 80 - hosts: - - app.yourco.com -``` - -This will configure the load balancer backing the `Ingress Controller` that will route any traffic with host and path -prefix `app.yourco.com/app` to the `Service` on port 80. If `app.yourco.com` is configured to point to the `Ingress -Controller` load balancer, then once you deploy the helm chart you should be able to start accessing your app on that -endpoint. - -#### Registering additional paths - -Sometimes you might want to add additional path rules beyond the main service rule that is injected to the `Ingress` -resource. For example, you might want a path that routes to the sidecar containers, or you might want to reuse a single -`Ingress` for multiple different `Service` endpoints because to share load balancers. For these situations, you can use -the `additionalPaths` and `additionalPathsHigherPriority` input values. - -Consider the following `Service`, where we have the `app` served on port 80, and the `sidecarMonitor` served on port -3000: - -```yaml -service: - enabled: true - type: NodePort - ports: - app: - port: 80 - targetPort: 80 - protocol: TCP - sidecarMonitor: - port: 3000 - targetPort: 3000 - protocol: TCP -``` - -To route `/app` to the `app` service endpoint and `/sidecar` to the `sidecarMonitor` service endpoint, we will configure -the `app` service path rules as the main service route and the `sidecarMonitor` as an additional path rule: - -```yaml -ingress: - enabled: true - path: /app - servicePort: 80 - additionalPaths: - - path: /sidecar - servicePort: 3000 -``` - -Now suppose you had a sidecar service that will return a fixed response indicating server maintainance and you want to -temporarily route all requests to that endpoint without taking down the pod. 
You can do this by creating a route that -catches all paths as a higher priority path using the `additionalPathsHigherPriority` input value. - -Consider the following `Service`, where we have the `app` served on port 80, and the `sidecarFixedResponse` served on -port 3000: - -```yaml -service: - enabled: true - type: NodePort - ports: - app: - port: 80 - targetPort: 80 - protocol: TCP - sidecarFixedResponse: - port: 3000 - targetPort: 3000 - protocol: TCP -``` - -To route all traffic to the fixed response port: - -```yaml -ingress: - enabled: true - path: /app - servicePort: 80 - additionalPathsHigherPriority: - - path: /* - servicePort: 3000 -``` - -The `/*` rule which routes to port 3000 will always be used even when accessing the path `/app` because it will be -evaluated first when routing requests. - -back to [root README](/README.adoc#day-to-day-operations) - -### How do I expose additional ports? - -By default, this Helm Chart will deploy your application container in a Pod that exposes ports 80. Sometimes you might -want to expose additional ports in your application - for example a separate port for Prometheus metrics. You can expose -additional ports for your application by overriding `containerPorts` and `service` input values: - -```yaml - -containerPorts: - http: - port: 80 - protocol: TCP - prometheus: - port: 2020 - protocol: TCP - -service: - enabled: true - type: NodePort - ports: - app: - port: 80 - targetPort: 80 - protocol: TCP - prometheus: - port: 2020 - targetPort: 2020 - protocol: TCP - -``` - - -## How do I deploy a worker service? - -Worker services typically do not have a RPC or web server interface to access it. Instead, worker services act on their -own and typically reach out to get the data they need. These services should be deployed without any ports exposed. -However, by default `k8s-service` will deploy an internally exposed service with port 80 open. - -To disable the default port, you can use the following `values.yaml` inputs: - -``` -containerPorts: - http: - disabled: true - -service: - enabled: false -``` - -This will override the default settings such that only the `Deployment` resource is created, with no ports exposed on -the container. - -back to [root README](/README.adoc#day-to-day-operations) - -## How do I check the status of the rollout? - -This Helm Chart packages your application into a `Deployment` controller. The `Deployment` controller will be -responsible with managing the `Pods` of your application, ensuring that the Kubernetes cluster matches the desired state -configured by the chart inputs. - -When the Helm Chart installs, `helm` will mark the installation as successful when the resources are created. Under the -hood, the `Deployment` controller will do the work towards ensuring the desired number of `Pods` are up and running. - -For example, suppose you set the `replicaCount` variable to 3 when installing this chart. This will configure the -`Deployment` resource to maintain 3 replicas of the `Pod` at any given time, launching new ones if there is a deficit or -removing old ones if there is a surplus. - -To see the current status of the `Deployment`, you can query Kubernetes using `kubectl`. The `Deployment` resource of -the chart are labeled with the `applicationName` input value and the release name provided by helm. 
So for example, -suppose you deployed this chart using the following `values.yaml` file and command: - -```yaml -applicationName: nginx -containerImage: - repository: nginx - tag: stable -``` - -```bash -$ helm install -n edge-service gruntwork/k8s-service -``` - -In this example, the `applicationName` is set to `nginx`, while the release name is set to `edge-service`. This chart -will then install a `Deployment` resource in the default `Namespace` with the following labels that uniquely identifies -it: - -``` -app.kubernetes.io/name: nginx -app.kubernetes.io/instance: edge-service -``` - -So now you can query Kubernetes for that `Deployment` resource using these labels to see the state: - -```bash -$ kubectl get deployments -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=edge-service" -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -edge-service-nginx 3 3 3 1 24s -``` - -This includes a few useful information: - -- `DESIRED` lists the number of `Pods` that should be running in your cluster. -- `CURRENT` lists how many `Pods` are currently created in the cluster. -- `UP-TO-DATE` lists how many `Pods` are running the desired image. -- `AVAILABLE` lists how many `Pods` are currently ready to serve traffic, as defined by the `readinessProbe`. - -When all the numbers are in sync and equal, that means the `Deployment` was rolled out successfully and all the `Pods` -are passing the readiness healthchecks. - -In the example output above, note how the `Available` count is `1`, but the others are `3`. This means that all 3 `Pods` -were successfully created with the latest image, but only `1` of them successfully came up. You can dig deeper into the -individual `Pods` to check the status of the unavailable `Pods`. The `Pods` are labeled the same way, so you can pass in -the same label query to get the `Pods` managed by the deployment: - -```bash -$ kubectl get pods -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=edge-service" -NAME READY STATUS RESTARTS AGE -edge-service-nginx-844c978df7-f5wc4 1/1 Running 0 52s -edge-service-nginx-844c978df7-mln26 0/1 Pending 0 52s -edge-service-nginx-844c978df7-rdsr8 0/1 Pending 0 52s -``` - -This will show you the status of each individual `Pod` in your deployment. In this example output, there are 2 `Pods` -that are in the `Pending` status, meaning that they have not been scheduled yet. We can look into why the `Pod` failed -to schedule by getting detailed information about the `Pod` with the `describe` command. 
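
Before zooming in on a single `Pod`, it can also help to scan recent warning events in the `Namespace`, since
scheduling failures are reported there as well. The commands below are a sketch that reuses the `nginx` /
`edge-service` labels from the example above.

```bash
# Recent warning events in the namespace, oldest first (FailedScheduling shows up here)
kubectl get events --field-selector type=Warning --sort-by=.lastTimestamp

# Print just the Pod names managed by the Deployment, ready to feed into `kubectl describe pod`
kubectl get pods -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=edge-service" \
  -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}'
```
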
Unlike `get pods`, `describe -pod` requires a single `Pod` so we will grab the name of one of the failing `Pods` above and feed it to `describe pod`: - -```bash -$ kubectl describe pod edge-service-nginx-844c978df7-mln26 -Name: edge-service-nginx-844c978df7-mln26 -Namespace: default -Priority: 0 -PriorityClassName: -Node: -Labels: app.kubernetes.io/instance=edge-service - app.kubernetes.io/name=nginx - gruntwork.io/app-name=nginx - pod-template-hash=4007534893 -Annotations: -Status: Pending -IP: -Controlled By: ReplicaSet/edge-service-nginx-844c978df7 -Containers: - nginx: - Image: nginx:stable - Ports: 80/TCP - Host Ports: 0/TCP - Environment: - Mounts: - /var/run/secrets/kubernetes.io/serviceaccount from default-token-mgkr9 (ro) -Conditions: - Type Status - PodScheduled False -Volumes: - default-token-mgkr9: - Type: Secret (a volume populated by a Secret) - SecretName: default-token-mgkr9 - Optional: false -QoS Class: BestEffort -Node-Selectors: -Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s - node.kubernetes.io/unreachable:NoExecute for 300s -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Warning FailedScheduling 1m (x25 over 3m) default-scheduler 0/2 nodes are available: 2 Insufficient pods. -``` - -This will output detailed information about the `Pod`, including an event log. In this case, the roll out failed because -there is not enough capacity in the cluster to schedule the `Pod`. - -back to [root README](/README.adoc#day-to-day-operations) - ## How do I set and share configurations with the application? While you can bake most application configuration values into the application container, you might need to inject @@ -728,10 +111,9 @@ documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configur Once the `ConfigMap` is created, you can access the `ConfigMap` within the `Pod` by configuring the access during deployment. This Helm Chart provides the `configMaps` input value to configure what `ConfigMaps` should be shared with -the application container. There are two ways to inject the `ConfigMap`: +the application container. With a single-standing Job there is one way to access a `ConfigMap`: - [Accessing the `ConfigMap` as Environment Variables](#accessing-the-configmap-as-environment-variables) -- [Accessing the `ConfigMap` as Files](#accessing-the-configmap-as-files) **NOTE**: It is generally not recommended to use `ConfigMaps` to store sensitive data. For those use cases, use `Secrets` or an external secret store. @@ -762,32 +144,7 @@ Note that like directly setting environment variables, these are set at containe need to be restarted when the `ConfigMap` is updated for the new values to be propagated. You can use files instead if you wish the `ConfigMap` changes to propagate immediately. -##### Accessing the ConfigMap as Files - -You can mount the `ConfigMap` values as files on the container filesystem. To do so, you set the `as` attribute of the -`configMaps` input value to `volume`. - -For example, suppose you wanted to share the `my-config` `ConfigMap` above as the files `/etc/db/host` and -`/etc/db/port`. For this case, you would set the `configMaps` input value to: - -```yaml -configMaps: - my-config: - as: volume - mountPath: /etc/db - items: - dbhost: - filePath: host - dbport: - filePath: port -``` - -In the container, now the values for `dbhost` is stored as a text file at the path `/etc/db/host` and `dbport` is stored -at the path `/etc/db/port`. 
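
As a minimal sketch, assuming the `mountPath` shown above, a shell based entrypoint could consume the mounted values
like this (the script itself is hypothetical and not part of the chart):

```bash
#!/bin/sh
# Read configuration injected by the ConfigMap volume mount
DB_HOST="$(cat /etc/db/host)"
DB_PORT="$(cat /etc/db/port)"

echo "Connecting to ${DB_HOST}:${DB_PORT}"
```
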
You can then read these files in in your application to get the values. -Unlike environment variables, using files has the advantage of immediately reflecting changes to the `ConfigMap`. For -example, when you update `my-config`, the files at `/etc/db` are updated automatically with the new values, without -needing a redeployment to propagate the new values to the container. ### Using Secrets @@ -948,101 +305,6 @@ approach: back to [root README](/README.adoc#day-to-day-operations) -## How do you update the application to a new version? - -To update the application to a new version, you can upgrade the Helm Release using updated values. For example, suppose -you deployed `nginx` version 1.15.4 using this Helm Chart with the following values: - -```yaml -containerImage: - repository: nginx - tag: 1.15.4 - -applicationName: nginx -``` - -In this example, we will further assume that you deployed this chart with the above values using the release name -`edge-service`, using a command similar to below: - -```bash -$ helm install -f values.yaml --name edge-service gruntwork/k8s-service -``` - -Now let's try upgrading `nginx` to version 1.15.8. To do so, we will first update our values file: - -```yaml -containerImage: - repository: nginx - tag: 1.15.8 - -applicationName: nginx -``` - -The only difference here is the `tag` of the `containerImage`. - -Next, we will upgrade our release using the updated values. To do so, we will use the `helm upgrade` command: - -```bash -$ helm upgrade -f values.yaml edge-service gruntwork/k8s-service -``` - -This will update the created resources with the new values provided by the updated `values.yaml` file. For this example, -the only resource that will be updated is the `Deployment` resource, which will now have a new `Pod` spec that points to -`nginx:1.15.8` as opposed to `nginx:1.15.4`. This automatically triggers a rolling deployment internally to Kubernetes, -which will launch new `Pods` using the latest image, and shut down old `Pods` once those are ready. - -You can read more about how changes are rolled out on `Deployment` resources in [the official -documentation](https://kubernetes.io/docs/concepts/workloads/controllers/deployment). - -Note that certain changes will lead to a replacement of the `Deployment` resource. For example, updating the -`applicationName` will cause the `Deployment` resource to be deleted, and then created. This can lead to down time -because the resources are replaced in an uncontrolled fashion. - -## How do I create a canary deployment? - -You may optionally configure a [canary deployment](https://martinfowler.com/bliki/CanaryRelease.html) of an arbitrary tag that will run as an individual deployment behind your configured service. This is useful for ensuring a new application tag runs without issues prior to fully rolling it out. - -To configure a canary deployment, set `canary.enabled = true` and define the `containerImage` values. Typically, you will want to specify the tag of your next release candidate: - -```yaml -canary: - enabled: true - containerImage: - repository: nginx - tag: 1.15.9 -``` -Once deployed, your service will route traffic across both your stable and canary deployments, allowing you to monitor for and catch any issues early. - -back to [root README](/README.adoc#major-changes) - -## How do I verify my canary deployment? 
- -Canary deployment pods have the same name as your stable deployment pods, with the additional `-canary` appended to the end, like so: - -```bash -$ kubectl get pods -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=edge-service" -NAME READY STATUS RESTARTS AGE -edge-service-nginx-844c978df7-f5wc4 1/1 Running 0 52s -edge-service-nginx-844c978df7-mln26 0/1 Pending 0 52s -edge-service-nginx-844c978df7-rdsr8 0/1 Pending 0 52s -edge-service-nginx-canary-844c978df7-bsr8 0/1 Pending 0 52s -``` - -Therefore, in this example, you could monitor your canary by running `kubectl logs -f edge-service-nginx-canary-844c978df7-bsr8` - -back to [root README](/README.adoc#day-to-day-operations) - -## How do I roll back a canary deployment? - -Update your values.yaml file, setting `canary.enabled = false` and then upgrade your helm installation: - -```bash -$ helm upgrade -f values.yaml edge-service gruntwork/k8s-service -``` -Following this update, Kubernetes will determine that your canary deployment is no longer desired and will delete it. - -back to [root README](/README.adoc#day-to-day-operations) - ## How do I ensure a minimum number of Pods are available across node maintenance? Sometimes, you may want to ensure that a specific number of `Pods` are always available during [voluntary @@ -1059,83 +321,6 @@ documentation](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#h back to [root README](/README.adoc#major-changes) -## Why does the Pod have a preStop hook with a Shutdown Delay? - -When a `Pod` is removed from a Kubernetes cluster, the control plane notifies all nodes to remove the `Pod` from -registered addresses. This includes removing the `Pod` from the list of available `Pods` to service a `Service` -endpoint. However, because Kubernetes is a distributed system, there is a delay between the shutdown sequence and the -`Pod` being removed from available addresses. As a result, the `Pod` could still get traffic despite it having already -been shutdown on the node it was running on. - -Since there is no way to guarantee that the deletion has propagated across the cluster, we address this eventual -consistency issue by adding an arbitrary delay between the `Pod` being deleted and the initiation of the `Pod` shutdown -sequence. This is accomplished by adding a `sleep` command in the `preStop` hook. - -You can control the length of time to delay with the `shutdownDelay` input value. You can also disable this behavior by -setting the `shutdownDelay` to 0. - -You can read more about this topic in [our blog post -"Delaying Shutdown to Wait for Pod Deletion -Propagation"](https://blog.gruntwork.io/delaying-shutdown-to-wait-for-pod-deletion-propagation-445f779a8304). - - -back to [root README](/README.adoc#day-to-day-operations) - -## What is a sidecar container? - -In Kubernetes, `Pods` are one or more tightly coupled containers that are deployed together. The containers in the `Pod` -share, amongst other things, the network stack, the IPC namespace, and in some cases the PID namespace. You can read -more about the resources that the containers in a `Pod` share in [the official -documentation](https://kubernetes.io/docs/concepts/workloads/pods/pod/#what-is-a-pod). - -Sidecar Containers are additional containers that you wish to deploy in the `Pod` housing your application container. -This helm chart supports deploying these containers by configuring the `sideCarContainers` input value. 
This input value -is a map between the side car container name and the values of the container spec. The spec is rendered directly into -the `Deployment` resource, with the `name` being set to the key. For example: - -```yaml -sideCarContainers: - datadog: - image: datadog/agent:latest - env: - - name: DD_API_KEY - value: ASDF-1234 - - name: SD_BACKEND - value: docker - nginx: - image: nginx:1.15.4 -``` - -This input will be rendered in the `Deployment` resource as: - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - ... Snipped for brevity ... -spec: - ... Snipped for brevity ... - template: - spec: - containers: - ... The first entry relates to the application ... - - name: datadog - image: datadog/agent:latest - env: - - name: DD_API_KEY - value: ASDF-1234 - - name: SD_BACKEND - value: docker - - name: nginx - image: nginx:1.15.4 -``` - -In this config, the side car containers are rendered as additional containers to deploy alongside the main application -container configured by the `containerImage`, `ports`, `livenessProbe`, etc input values. Note that the -`sideCarContainers` variable directly renders the spec, meaning that the additional values for the side cars such as -`livenessProbe` should be rendered directly within the `sideCarContainers` input value. - -back to [root README](/README.adoc#core-concepts) ## How do I use a private registry? diff --git a/charts/k8s-job/templates/_job_spec.tpl b/charts/k8s-job/templates/_job_spec.tpl index bfa70f47..b07645e6 100644 --- a/charts/k8s-job/templates/_job_spec.tpl +++ b/charts/k8s-job/templates/_job_spec.tpl @@ -84,10 +84,10 @@ metadata: app.kubernetes.io/name: {{ include "k8s-job.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} - {{- range $key, $value := .Values.additionalDeploymentLabels }} + {{- range $key, $value := .Values.additionalJobLabels }} {{ $key }}: {{ $value }} {{- end}} -{{- with .Values.deploymentAnnotations }} +{{- with .Values.jobAnnotations }} annotations: {{ toYaml . | indent 4 }} {{- end }} @@ -96,11 +96,6 @@ spec: matchLabels: app.kubernetes.io/name: {{ include "k8s-job.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} - {{- if .isCanary }} - gruntwork.io/deployment-type: canary - {{- else }} - gruntwork.io/deployment-type: main - {{- end }} template: metadata: labels: diff --git a/charts/k8s-job/values.yaml b/charts/k8s-job/values.yaml index 65a8adad..ed4a7953 100644 --- a/charts/k8s-job/values.yaml +++ b/charts/k8s-job/values.yaml @@ -151,3 +151,14 @@ customResources: # fullnameOverride is a string that allows overriding the default fullname that appears as the # application name and is used as the application name by kubernetes. fullnameOverride: "" + +# jobAnnotations will add the provided map to the annotations for the Job resource created by this chart. +# The keys and values are free form, but subject to the limitations of Kubernetes resource annotations. +# NOTE: This variable is injected directly into the Job spec. +jobAnnotations: {} + +# additionalJobLabels will add the provided map to the labels for the Job resource created by this chart. +# this is in addition to the helm template related labels created by the chart +# The keys and values are free form, but subject to the limitations of Kubernetes labelling. +# NOTE: This variable is injected directly into the Job spec. +additionalJobLabels: {} From b6fe8efcc3380b713e1ccd70aacabb8914b37e03 Mon Sep 17 00:00:00 2001 From: "N. G. 
Perrin" Date: Thu, 21 Apr 2022 17:50:54 -0400 Subject: [PATCH 6/8] Update test/k8s_job_template_test.go Co-authored-by: Rho <13165182+rhoboat@users.noreply.github.com> --- test/k8s_job_template_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/k8s_job_template_test.go b/test/k8s_job_template_test.go index 6c8f08ad..d4b6046b 100644 --- a/test/k8s_job_template_test.go +++ b/test/k8s_job_template_test.go @@ -81,7 +81,7 @@ func TestK8SJobOptionalValuesAreOptional(t *testing.T) { } // Test that annotations render correctly to annotate the Job resource -func TestK8SJobtAnnotationsRenderCorrectly(t *testing.T) { +func TestK8SJobAnnotationsRenderCorrectly(t *testing.T) { t.Parallel() uniqueID := random.UniqueId() From e2d36d074c613820307c64dc819e70a73657f7cb Mon Sep 17 00:00:00 2001 From: "N. G. Perrin" Date: Thu, 21 Apr 2022 17:51:00 -0400 Subject: [PATCH 7/8] Update test/k8s_job_test.go Co-authored-by: Rho <13165182+rhoboat@users.noreply.github.com> --- test/k8s_job_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/k8s_job_test.go b/test/k8s_job_test.go index 2056373e..f148369f 100644 --- a/test/k8s_job_test.go +++ b/test/k8s_job_test.go @@ -34,7 +34,7 @@ func TestK8SJobBusyboxExample(t *testing.T) { //os.Setenv("SKIP_setup", "true") //os.Setenv("SKIP_create_namespace", "true") //os.Setenv("SKIP_install", "true") - //os.Setenv("SKIP_validate_initial_deployment", "true") + //os.Setenv("SKIP_validate_job_deployment", "true") //os.Setenv("SKIP_upgrade", "true") //os.Setenv("SKIP_validate_upgrade", "true") //os.Setenv("SKIP_delete", "true") From 0293c566e5de969fad2075daa065d68d0963f7f0 Mon Sep 17 00:00:00 2001 From: Nathaniel Date: Thu, 21 Apr 2022 19:02:11 -0400 Subject: [PATCH 8/8] Commit end of day changes --- charts/k8s-job/templates/_job_spec.tpl | 2 -- test/k8s_job_template_test.go | 41 ++++++++------------------ 2 files changed, 13 insertions(+), 30 deletions(-) diff --git a/charts/k8s-job/templates/_job_spec.tpl b/charts/k8s-job/templates/_job_spec.tpl index b07645e6..46d2c26e 100644 --- a/charts/k8s-job/templates/_job_spec.tpl +++ b/charts/k8s-job/templates/_job_spec.tpl @@ -124,7 +124,6 @@ spec: {{- $tag := required ".Values.containerImage.tag is required" .Values.containerImage.tag }} image: "{{ $repo }}:{{ $tag }}" imagePullPolicy: {{ .Values.containerImage.pullPolicy | default "IfNotPresent" }} - {{- end }} {{- if .Values.containerCommand }} command: {{ toYaml .Values.containerCommand | indent 12 }} @@ -137,7 +136,6 @@ spec: {{- end}} resources: {{ toYaml .Values.containerResources | indent 12 }} - {{- end }} {{- /* START ENV VAR LOGIC */ -}} {{- if index $hasInjectionTypes "hasEnvVars" }} diff --git a/test/k8s_job_template_test.go b/test/k8s_job_template_test.go index d4b6046b..a92c30b6 100644 --- a/test/k8s_job_template_test.go +++ b/test/k8s_job_template_test.go @@ -85,7 +85,8 @@ func TestK8SJobAnnotationsRenderCorrectly(t *testing.T) { t.Parallel() uniqueID := random.UniqueId() - job := renderK8SJobWithSetValues(t, map[string]string{"jobAnnotations.unique-id": uniqueID}) + // ERROR: Need to find function that can inject annotations into a job + job := renderK8SServiceDeploymentWithSetValues(t, map[string]string{"jobAnnotations.unique-id": uniqueID}) assert.Equal(t, len(job.Annotations), 1) assert.Equal(t, job.Annotations["unique-id"], uniqueID) @@ -93,7 +94,7 @@ func TestK8SJobAnnotationsRenderCorrectly(t *testing.T) { func TestK8SJobSecurityContextAnnotationRenderCorrectly(t *testing.T) { t.Parallel() - job := 
renderK8SJobWithSetValues( + job := renderK8SServiceDeploymentWithSetValues( t, map[string]string{ "securityContext.privileged": "true", @@ -108,25 +109,11 @@ func TestK8SJobSecurityContextAnnotationRenderCorrectly(t *testing.T) { assert.Equal(t, *testContainer.SecurityContext.RunAsUser, int64(1000)) } -func TestK8SJobSecurityContextAnnotationRenderCorrectly(t *testing.T) { - t.Parallel() - - job := renderK8SJobWithSetValues( - t, - map[string]string{ - "allowPrivilegeEscalation": "false", - }, - ) - renderedContainerSpec := job.Spec.Template.Spec - assert.NotNil(t, renderedContainerSpec.SecurityContext) - assert.Equal(t, *renderedContainerSpec.SecurityContext.allowPrivilegeEscalation, bool(false)) -} - // Test that default imagePullSecrets do not render any func TestK8SJobNoImagePullSecrets(t *testing.T) { t.Parallel() - job := renderK8SJobWithSetValues( + job := renderK8SServiceDeploymentWithSetValues( t, map[string]string{}, ) @@ -138,7 +125,7 @@ func TestK8SJobNoImagePullSecrets(t *testing.T) { func TestK8SJobMultipleImagePullSecrets(t *testing.T) { t.Parallel() - job := renderK8SJobWithSetValues( + job := renderK8SServiceDeploymentWithSetValues( t, map[string]string{ "imagePullSecrets[0]": "docker-private-registry-key", @@ -152,13 +139,11 @@ func TestK8SJobMultipleImagePullSecrets(t *testing.T) { assert.Equal(t, renderedImagePullSecrets[1].Name, "gcr-registry-key") } -// Not-reviewed - // Test that omitting containerCommand does not set command attribute on the Job container spec. func TestK8SJobDefaultHasNullCommandSpec(t *testing.T) { t.Parallel() - job := renderK8SJobWithSetValues(t, map[string]string{}) + job := renderK8SServiceDeploymentWithSetValues(t, map[string]string{}) renderedContainers := job.Spec.Template.Spec.Containers require.Equal(t, len(renderedContainers), 1) appContainer := renderedContainers[0] @@ -169,7 +154,7 @@ func TestK8SJobDefaultHasNullCommandSpec(t *testing.T) { func TestK8SJobWithContainerCommandHasCommandSpec(t *testing.T) { t.Parallel() - job := renderK8SJobWithSetValues( + job := renderK8SServiceDeploymentWithSetValues( t, map[string]string{ "containerCommand[0]": "echo", @@ -184,7 +169,7 @@ func TestK8SJobWithContainerCommandHasCommandSpec(t *testing.T) { func TestK8SJobMainJobContainersLabeledCorrectly(t *testing.T) { t.Parallel() - job := renderK8SJobWithSetValues( + job := renderK8SServiceDeploymentWithSetValues( t, map[string]string{ "containerImage.repository": "nginx", @@ -199,7 +184,7 @@ func TestK8SJobAddingAdditionalLabels(t *testing.T) { t.Parallel() first_custom_job_label_value := "first-custom-value" second_custom_job_label_value := "second-custom-value" - job := renderK8SJobWithSetValues(t, + job := renderK8SServiceDeploymentWithSetValues(t, map[string]string{"additionalJobLabels.first-label": first_custom_job_label_value, "additionalJobLabels.second-label": second_custom_job_label_value}) @@ -212,7 +197,7 @@ func TestK8SJobFullnameOverride(t *testing.T) { overiddenName := "overidden-name" - job := renderK8SJobWithSetValues(t, + job := renderK8SServiceDeploymentWithSetValues(t, map[string]string{ "fullnameOverride": overiddenName, }, @@ -225,7 +210,7 @@ func TestK8SJobEnvFrom(t *testing.T) { t.Parallel() t.Run("BothConfigMapsAndSecretsEnvFrom", func(t *testing.T) { - job := renderK8SJobWithSetValues(t, + job := renderK8SServiceDeploymentWithSetValues(t, map[string]string{ "configMaps.test-configmap.as": "envFrom", "secrets.test-secret.as": "envFrom", @@ -239,7 +224,7 @@ func TestK8SJobEnvFrom(t *testing.T) { }) 
t.Run("OnlyConfigMapsEnvFrom", func(t *testing.T) { - job := renderK8SJobWithSetValues(t, + job := renderK8SServiceDeploymentWithSetValues(t, map[string]string{ "configMaps.test-configmap.as": "envFrom", }, @@ -251,7 +236,7 @@ func TestK8SJobEnvFrom(t *testing.T) { }) t.Run("OnlySecretsEnvFrom", func(t *testing.T) { - job := renderK8SJobWithSetValues(t, + job := renderK8SServiceDeploymentWithSetValues(t, map[string]string{ "secrets.test-secret.as": "envFrom", },