Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add installation resources and script for preview (non-prod) environment #422

Merged
merged 12 commits into from
Feb 27, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
48 changes: 48 additions & 0 deletions install/000-install-dependency-operators.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
#!/bin/bash

# Install the operators required by the console into the target namespace
# using OLM: a Kafka operator (AMQ Streams when its package manifest is
# available, otherwise community Strimzi) and the Prometheus operator.
#
# Usage: 000-install-dependency-operators.sh <namespace>

set -euo pipefail

CONSOLE_INSTALL_PATH="$(cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P)"
RESOURCE_PATH="${CONSOLE_INSTALL_PATH}/resources"

export NAMESPACE="${1?Please provide the deployment namespace}"
source "${CONSOLE_INSTALL_PATH}/_common.sh"

echo -e "${INFO} Create/update operator group in namespace '${NAMESPACE}'"
# NAMESPACE is exported above so yq's strenv() can read it.
${YQ} '.spec.targetNamespaces[0] = strenv(NAMESPACE)' "${RESOURCE_PATH}/prometheus/console-operators.operatorgroup.yaml" | ${KUBE} apply -n "${NAMESPACE}" -f -

# Prefer the Red Hat AMQ Streams operator when its package manifest exists in
# this cluster; otherwise subscribe to the community Strimzi operator.
# stderr is discarded too: a missing manifest is the expected "else" case.
if ${KUBE} get packagemanifests amq-streams >/dev/null 2>&1 ; then
    echo -e "${INFO} Create/update AMQ Streams Kafka Operator"
    ${KUBE} apply -n "${NAMESPACE}" -f - <<'EOF'
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: amq-streams-operator
spec:
  name: amq-streams
  channel: stable
  source: redhat-operators
  sourceNamespace: openshift-marketplace
EOF
else
    echo -e "${INFO} Create/update Strimzi Kafka Operator"
    ${KUBE} apply -n "${NAMESPACE}" -f - <<'EOF'
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: strimzi-kafka-operator
spec:
  name: strimzi-kafka-operator
  channel: stable
  source: community-operators
  sourceNamespace: openshift-marketplace
EOF
fi

echo -e "${INFO} Create/update Prometheus Operator"
${KUBE} apply -n "${NAMESPACE}" -f - <<'EOF'
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: prometheus-operator
spec:
  name: prometheus
  channel: beta
  source: community-operators
  sourceNamespace: openshift-marketplace
EOF
28 changes: 28 additions & 0 deletions install/001-deploy-prometheus.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
#!/bin/bash

# Deploy a Prometheus instance configured to scrape Kubernetes and Kafka
# metrics for the console. Requires the Prometheus operator CRDs
# (see 000-install-dependency-operators.sh).
#
# Usage: 001-deploy-prometheus.sh <namespace>

set -euo pipefail

CONSOLE_INSTALL_PATH="$(cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P)"
RESOURCE_PATH="${CONSOLE_INSTALL_PATH}/resources"

export NAMESPACE="${1?Please provide the deployment namespace}"
source "${CONSOLE_INSTALL_PATH}/_common.sh"

# Bail out early when the Prometheus operator CRDs are not installed.
if ! ${KUBE} get crd prometheuses.monitoring.coreos.com >/dev/null ; then
    echo -e "${ERROR} Prometheus Operator custom resource(s) not found"
    # NOTE(review): display_suggested_subscription is defined in
    # 003-install-console.sh, not in _common.sh — confirm it is actually in
    # scope here, or move the function into _common.sh.
    display_suggested_subscription "prometheus-operator" "prometheus"
    exit 1
fi

### Prometheus
echo -e "${INFO} Apply Prometheus security resources"
${KUBE} apply -n "${NAMESPACE}" -f "${RESOURCE_PATH}/prometheus/console-prometheus-server.clusterrole.yaml"
${KUBE} apply -n "${NAMESPACE}" -f "${RESOURCE_PATH}/prometheus/console-prometheus-server.serviceaccount.yaml"
# Use ${YQ} (resolved in _common.sh) rather than a bare `yq`, consistent with
# the other install scripts.
${YQ} '.subjects[0].namespace = strenv(NAMESPACE)' "${RESOURCE_PATH}/prometheus/console-prometheus-server.clusterrolebinding.yaml" | ${KUBE} apply -n "${NAMESPACE}" -f -

echo -e "${INFO} Apply Prometheus PodMonitor and Kubernetes scrape configurations"
${KUBE} apply -n "${NAMESPACE}" -f "${RESOURCE_PATH}/prometheus/kafka-resources.podmonitor.yaml"
${KUBE} apply -n "${NAMESPACE}" -f "${RESOURCE_PATH}/prometheus/kubernetes-scrape-configs.secret.yaml"

echo -e "${INFO} Apply Prometheus instance"
${KUBE} apply -n "${NAMESPACE}" -f "${RESOURCE_PATH}/prometheus/console-prometheus.prometheus.yaml"
26 changes: 26 additions & 0 deletions install/002-deploy-console-kafka.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
#!/bin/bash

# Deploy the demo Kafka cluster used by the console: a Strimzi Kafka CR, a
# KafkaUser CR, and a ConfigMap that configures metrics export for the
# Prometheus instance created by 001-deploy-prometheus.sh.
#
# Usage: 002-deploy-console-kafka.sh <namespace> <cluster-domain>

set -euo pipefail

CONSOLE_INSTALL_PATH="$(cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P)"
RESOURCE_PATH="${CONSOLE_INSTALL_PATH}/resources/kafka"

export NAMESPACE="${1?Please provide the deployment namespace}"
export CLUSTER_DOMAIN="${2?Please provide the base domain name for Kafka listener ingress}"

source "${CONSOLE_INSTALL_PATH}/_common.sh"

${KUBE} apply -n "${NAMESPACE}" -f "${RESOURCE_PATH}/console-kafka-metrics.configmap.yaml"

# Expose the Kafka listeners via an OpenShift Route when the route API group
# is present, otherwise via a Kubernetes Ingress.
if [ "$(${KUBE} api-resources --api-group=route.openshift.io -o=name)" != "" ] ; then
    LISTENER_TYPE='route'
else
    LISTENER_TYPE='ingress'
fi

export LISTENER_TYPE

# Substitute exported env variables (NAMESPACE, CLUSTER_DOMAIN, LISTENER_TYPE)
# into every string value of the Kafka CR before applying it.
${YQ} '(.. | select(tag == "!!str")) |= envsubst(ne)' "${RESOURCE_PATH}/console-kafka.kafka.yaml" | ${KUBE} apply -n "${NAMESPACE}" -f -

${KUBE} apply -n "${NAMESPACE}" -f "${RESOURCE_PATH}/console-kafka-user1.kafkauser.yaml"
94 changes: 94 additions & 0 deletions install/003-install-console.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,94 @@
#!/bin/bash

# Install the console: RBAC, service, credentials, ingress/route, and the
# console deployment itself.
#
# Usage: 003-install-console.sh <namespace> [cluster-domain]
# The cluster domain is required only on clusters without the OpenShift
# route API, where an Ingress host name must be constructed from it.

set -euo pipefail

CONSOLE_INSTALL_PATH="$(cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P)"
RESOURCE_PATH="${CONSOLE_INSTALL_PATH}/resources"

export NAMESPACE="${1?Please provide the deployment namespace}"

source "${CONSOLE_INSTALL_PATH}/_common.sh"

# Without the route API the console is exposed through an Ingress, so the
# caller must supply the base domain used to build its host name.
if [ "$(${KUBE} api-resources --api-group=route.openshift.io -o=name 2>/dev/null)" == "" ] ; then
    export CLUSTER_DOMAIN="${2?Please provide the base domain name for console ingress}"
fi

#######################################
# List installed PackageManifests whose package name matches a pattern.
# Globals:   KUBE, YQ (read; resolved in _common.sh)
# Arguments: $1 - extended regex matched against the package name
# Outputs:   CSV rows "name,channel,catalogSource,catalogSourceNamespace"
#            (header stripped) to stdout
#######################################
function fetch_available_packages {
    local NAME_PATTERN="${1}"

    # `kubectl get -o name` emits one resource path per line with no spaces,
    # so the unquoted substitution splitting into words here is safe.
    for pm in $(${KUBE} get packagemanifests -o name | grep -Pe '^packagemanifest\.packages\.operators\.coreos\.com/('"${NAME_PATTERN}"')$') ; do
        ${KUBE} get "${pm}" -o yaml | ${YQ} -o=json '{
            "name": .status.packageName,
            "channel": .status.defaultChannel,
            "catalogSource": .status.catalogSource,
            "catalogSourceNamespace": .status.catalogSourceNamespace
        }'
    done | ${YQ} ea -p=json '[.]' | ${YQ} -o=csv | tail -n +2
}

#######################################
# Print example Subscription resources the user could create to install the
# named operator — one numbered option per matching package manifest.
# Globals:   INFO, YQ, NAMESPACE (read)
# Arguments: $1 - operator (Subscription metadata.name) to suggest
#            $2 - extended regex of acceptable package names
# Outputs:   suggested Subscription YAML to stdout
#######################################
function display_suggested_subscription {
    local OPERATOR_NAME="${1}"
    local NAME_PATTERN="${2}"

    # Declare and assign separately so a failure of the command substitution
    # is not masked by `local` (SC2155).
    local AVAILABLE_PKGS
    AVAILABLE_PKGS="$(fetch_available_packages "${NAME_PATTERN}")"

    echo -e "${INFO} ${OPERATOR_NAME} may be installed by creating one of the following resources:"

    local COUNTER=0

    while IFS=, read -r PKG_NAME PKG_CHANNEL PKG_CTLG_SRC PKG_CTLG_SRC_NS; do
        COUNTER=$(( COUNTER + 1 ))
        echo -e "${INFO} ----- Option ${COUNTER} -----"
        # Pipe through yq to pretty-print/validate the generated YAML.
        echo "apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: ${OPERATOR_NAME}
  namespace: ${NAMESPACE}
spec:
  name: ${PKG_NAME}
  channel: ${PKG_CHANNEL}
  source: ${PKG_CTLG_SRC}
  sourceNamespace: ${PKG_CTLG_SRC_NS}" | ${YQ}
    done < <(echo "${AVAILABLE_PKGS}")
}

# APIs provided by operators in this namespace's OperatorGroup, lower-cased
# for comparison. Parsed with yq (-p=json) instead of jq so the scripts only
# depend on the documented prerequisites (kubectl/oc and yq).
# NOTE(review): PROVIDED_APIS is not referenced later in this script — confirm
# it is still needed.
PROVIDED_APIS="$(${KUBE} get operatorgroup -n "${NAMESPACE}" -o json | ${YQ} -p=json '.items[].metadata.annotations["olm.providedAPIs"]' | tr '[:upper:]' '[:lower:]')"

if ! ${KUBE} get crd kafkas.kafka.strimzi.io 1>/dev/null 2>&1 ; then
    echo -e "${ERROR} Strimzi Kafka Operator custom resource(s) not found"
    display_suggested_subscription "strimzi-kafka-operator" "strimzi-kafka-operator|amq-streams"
    exit 1
fi

### Console
${KUBE} apply -n "${NAMESPACE}" -f "${RESOURCE_PATH}/console/console-server.clusterrole.yaml"
${KUBE} apply -n "${NAMESPACE}" -f "${RESOURCE_PATH}/console/console-server.serviceaccount.yaml"
${YQ} '.subjects[0].namespace = strenv(NAMESPACE)' "${RESOURCE_PATH}/console/console-server.clusterrolebinding.yaml" | ${KUBE} apply -n "${NAMESPACE}" -f -
${KUBE} apply -n "${NAMESPACE}" -f "${RESOURCE_PATH}/console/console-ui.service.yaml"

# Generate random session secrets once; never overwrite an existing secret,
# which would invalidate active sessions.
if ! ${KUBE} get secret console-ui-secrets -n "${NAMESPACE}" 1>/dev/null 2>&1 ; then
    echo -e "${INFO} Creating Console Credentials"
    ${KUBE} create secret generic console-ui-secrets -n "${NAMESPACE}" \
        --dry-run=client \
        --from-literal=SESSION_SECRET="$(LC_CTYPE=C tr -dc A-Za-z0-9 </dev/urandom | head -c 32; echo)" \
        --from-literal=NEXTAUTH_SECRET="$(LC_CTYPE=C tr -dc A-Za-z0-9 </dev/urandom | head -c 32; echo)" \
        -o yaml | ${KUBE} apply -n "${NAMESPACE}" -f -
else
    echo -e "${WARN} Console Credential secret console-ui-secrets already exists, nothing applied"
fi

# Scale an existing deployment down so new pods pick up changed configuration.
if ${KUBE} get deployment console -n "${NAMESPACE}" 1>/dev/null 2>&1 ; then
    ${KUBE} scale --replicas=0 deployment/console -n "${NAMESPACE}"
fi

# Expose the console via an OpenShift Route when available, else an Ingress.
if [ "$(${KUBE} api-resources --api-group=route.openshift.io -o=name)" != "" ] ; then
    ${KUBE} apply -n "${NAMESPACE}" -f "${RESOURCE_PATH}/console/console-ui.route.yaml"
    CONSOLE_HOSTNAME=$(${KUBE} get route console-ui-route -n "${NAMESPACE}" -o jsonpath='{.spec.host}')
    export CONSOLE_HOSTNAME
else
    # Export before the yq call: strenv() reads from the environment, so a
    # plain (unexported) shell variable would not be visible to yq.
    export CONSOLE_HOSTNAME="console-ui.${CLUSTER_DOMAIN}"
    ${YQ} '.spec.rules[0].host = strenv(CONSOLE_HOSTNAME)' "${RESOURCE_PATH}/console/console-ui.ingress.yaml" | ${KUBE} apply -n "${NAMESPACE}" -f -
fi

# Substitute exported env variables into the deployment before applying it.
${YQ} '(.. | select(tag == "!!str")) |= envsubst(ne)' "${RESOURCE_PATH}/console/console.deployment.yaml" | ${KUBE} apply -n "${NAMESPACE}" -f -

echo -e "${INFO} Console deployed and available at https://${CONSOLE_HOSTNAME}"
93 changes: 93 additions & 0 deletions install/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,93 @@
# Console Installation

This directory contains several resources that may be used directly or as examples for deploying the
console into a Kubernetes or OpenShift cluster. The scripts contained here may be run using the bash
shell and require that the `kubectl` (or `oc` for OpenShift) and `yq`[1] command line utilities
are available on the `PATH`.

## Prerequisites

### Strimzi & Prometheus

The console requires that the Strimzi Kafka Operator is installed and available in the cluster before
deployment. Strimzi may be installed either using Operator Lifecycle Manager (OLM, preferred) or directly
using Kubernetes resources provided by the Strimzi project.

Prometheus must also be installed and configured to scrape metrics from Kubernetes and Kafka clusters. The
Prometheus instance must be accessible to the console application using HTTP or HTTPS. If a Prometheus instance
is not available, the cluster metrics graphs on the Kafka cluster overview screens will be unavailable.

Users who do not already have Strimzi and Prometheus installed may use the `000-install-dependency-operators.sh`
and `001-deploy-prometheus.sh` scripts to bootstrap the environment. The scripts will install either the community-supported
or commercially supported (i.e. AMQ Streams) version of the two operators using OLM and deploy a Prometheus instance
configured to scrape metrics from any Kafka clusters deployed by Strimzi within the cluster.

```shell
000-install-dependency-operators.sh ${TARGET_NAMESPACE}
001-deploy-prometheus.sh ${TARGET_NAMESPACE}
```

### Apache Kafka Cluster

Once the two prerequisite components have been installed, the demo Kafka cluster may be created using the
`002-deploy-console-kafka.sh` script. This script will create a Strimzi `Kafka` custom resource as well as a
`KafkaUser` custom resource for a user to access the cluster. Additionally, the Kafka cluster will be configured via
a ConfigMap to export metrics in the way expected by the Prometheus instance created earlier.

### Authorization

In order to allow the necessary access for the console to function, a minimum level of authorization must be configured
for the principal in use for each Kafka cluster connection. While the definition of the permissions may vary depending
on the authorization framework in use (e.g. ACLs, Keycloak Authorization, OPA, or custom) the minimum required in terms
of ACL types are:

1. `DESCRIBE`, `DESCRIBE_CONFIGS` for the `CLUSTER` resource
1. `READ`, `DESCRIBE`, `DESCRIBE_CONFIGS` for all `TOPIC` resources
1. `READ`, `DESCRIBE` for all `GROUP` resources

## Installation

With the prerequisites met, the console can be deployed using the `003-install-console.sh` script. This script will
create the role, role binding, service account, services, and ingress (or route in OpenShift) necessary to run the console.
Finally, the console deployment is applied to the Kubernetes/OpenShift cluster. A link to access the application will
be printed to the script's output if no errors are encountered.

The configurations used by the console to connect to Kafka may be customized by altering the environment variables
for the `console-api` container in `resources/console/console.deployment.yaml`. The format used for the variables
is as follows.

Configurations that apply to all Kafka connections should use the format `KAFKA_CONFIG_WITH_UNDERSCORES`. For example,
if all clusters are configured to use `SASL_SSL` for the Kafka `security.protocol` property, you may set env
`KAFKA_SECURITY_PROTOCOL` to `SASL_SSL`.

Each individual cluster must be configured with a variable like `CONSOLE_KAFKA_CLUSTER1` where `CLUSTER1` is a unique
name or identifier for each cluster and the value of the env is the `${namespace}/${name}` of the `Kafka` CR that
represents the cluster.

Configurations that apply to an individual Kafka connection should use the format `CONSOLE_KAFKA_CLUSTER1_CONFIG_WITH_UNDERSCORES`.
Using the example above, if you would like to configure one cluster to use `SASL_SSL` for the Kafka `security.protocol` property,
the following env settings would be needed:

```yaml
- name: CONSOLE_KAFKA_EXAMPLE
value: example-ns/console-kafka
- name: CONSOLE_KAFKA_EXAMPLE_SECURITY_PROTOCOL
value: SASL_SSL
- name: CONSOLE_KAFKA_EXAMPLE_BOOTSTRAP_SERVERS
value: bootstrap.console-kafka.example.com:443
```

As always, configuration properties that contain sensitive information may be mounted from a `Secret`. For example, to
set the `sasl.jaas.config` property, you could use an env entry such as the following.

```yaml
- name: CONSOLE_KAFKA_EXAMPLE_SASL_JAAS_CONFIG
valueFrom:
secretKeyRef:
name: console-kafka-user1
key: sasl.jaas.config
```

## References

[1] yq [releases](https://github.com/mikefarah/yq/releases)
33 changes: 33 additions & 0 deletions install/_common.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
# Shared helpers sourced by the install scripts: colored log prefixes,
# resolution of the kubectl/oc and yq binaries, and creation of the target
# namespace. Expects NAMESPACE to be exported by the sourcing script.

RED='\033[0;31m'
GREEN='\033[0;32m'
NC='\033[0m' # No Color
INFO="[ \033[38;5;33mINFO${NC} ]"
WARN="[ \033[38;5;208mWARN${NC} ]"
ERROR="[ \033[38;5;196mERROR${NC} ]"

# Prefer `oc` when present, falling back to `kubectl`.
# `command -v` is the portable replacement for `which`.
KUBE="$(command -v oc 2>/dev/null || command -v kubectl 2>/dev/null)"

if [ "${KUBE}" == "" ] ; then
    echo -e "${ERROR} Neither 'oc' nor 'kubectl' command line utilities found on the PATH"
    exit 1
fi

YQ="$(command -v yq 2>/dev/null)"

if [ "${YQ}" == "" ] ; then
    # Fixed message: it previously read "found on the PATH", omitting "not".
    echo -e "${ERROR} 'yq' command line utility not found on the PATH"
    exit 1
fi

# Ensure the deployment namespace exists, creating it when necessary.
if ${KUBE} get namespace/"${NAMESPACE}" >/dev/null 2>&1 ; then
    echo -e "${INFO} Namespace '${NAMESPACE}' exists"
else
    echo -e "${WARN} Namespace '${NAMESPACE}' not found... creating"
    ${KUBE} create namespace "${NAMESPACE}" >/dev/null

    # Re-check rather than trusting the create's exit status alone.
    if ${KUBE} get namespace/"${NAMESPACE}" >/dev/null 2>&1 ; then
        echo -e "${INFO} Namespace '${NAMESPACE}' created"
    else
        echo -e "${WARN} Namespace '${NAMESPACE}' could not be created"
    fi
fi
13 changes: 13 additions & 0 deletions install/resources/console/console-server.clusterrole.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# ClusterRole granting the console server read-only access to Strimzi Kafka
# custom resources cluster-wide.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: console-server
rules:
  # Read-only access (get/watch/list) to Kafka CRs in any namespace.
  - verbs:
      - get
      - watch
      - list
    apiGroups:
      - kafka.strimzi.io
    resources:
      - kafkas
12 changes: 12 additions & 0 deletions install/resources/console/console-server.clusterrolebinding.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# Binds the console-server ClusterRole to the console-server ServiceAccount.
# The subject namespace is overwritten at install time by the install script
# (yq: .subjects[0].namespace = strenv(NAMESPACE)).
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: console-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: console-server
subjects:
  - kind: ServiceAccount
    name: console-server
    namespace: ${NAMESPACE}
4 changes: 4 additions & 0 deletions install/resources/console/console-server.serviceaccount.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# ServiceAccount under which the console server runs.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: console-server
23 changes: 23 additions & 0 deletions install/resources/console/console-ui.ingress.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Ingress exposing the console UI service on plain Kubernetes clusters.
# The route.openshift.io/termination annotation enables edge TLS termination
# if an OpenShift Route is ever generated from this Ingress.
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
  name: console-ui-ingress
  annotations:
    nginx.ingress.kubernetes.io/backend-protocol: HTTP
    route.openshift.io/termination: edge
spec:
  defaultBackend:
    service:
      name: console-ui
      port:
        number: 80
  rules:
    # ${CONSOLE_HOSTNAME} is substituted by 003-install-console.sh via yq.
    - host: ${CONSOLE_HOSTNAME}
      http:
        paths:
          - pathType: ImplementationSpecific
            backend:
              service:
                name: console-ui
                port:
                  number: 80
13 changes: 13 additions & 0 deletions install/resources/console/console-ui.route.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# OpenShift Route exposing the console UI service with edge TLS termination;
# insecure (HTTP) requests are redirected to HTTPS.
kind: Route
apiVersion: route.openshift.io/v1
metadata:
  name: console-ui-route
spec:
  to:
    kind: Service
    name: console-ui
    weight: 100
  tls:
    termination: edge
    insecureEdgeTerminationPolicy: Redirect
  wildcardPolicy: None
Loading