#!/bin/bash
set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
source "${DIR}/../../scripts/utils.sh"
verify_memory
verify_installed "docker-compose"
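# Optional first argument: path to an extra docker-compose override file, layered on top of the
# plaintext and kerberos environments below.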
DOCKER_COMPOSE_FILE_OVERRIDE=$1
# Start kerberos (kdc) first.
# Avoid starting all the services at the beginning, so that the keytabs can be generated first.
if [ -f "${DOCKER_COMPOSE_FILE_OVERRIDE}" ]
then
  docker-compose -f ../../environment/plaintext/docker-compose.yml -f ../../environment/kerberos/docker-compose.yml -f ${DOCKER_COMPOSE_FILE_OVERRIDE} down -v
  docker-compose -f ../../environment/plaintext/docker-compose.yml -f ../../environment/kerberos/docker-compose.yml -f ${DOCKER_COMPOSE_FILE_OVERRIDE} build kdc
  docker-compose -f ../../environment/plaintext/docker-compose.yml -f ../../environment/kerberos/docker-compose.yml -f ${DOCKER_COMPOSE_FILE_OVERRIDE} build client
  docker-compose -f ../../environment/plaintext/docker-compose.yml -f ../../environment/kerberos/docker-compose.yml -f ${DOCKER_COMPOSE_FILE_OVERRIDE} up -d kdc
else
  docker-compose -f ../../environment/plaintext/docker-compose.yml -f ../../environment/kerberos/docker-compose.yml down -v
  docker-compose -f ../../environment/plaintext/docker-compose.yml -f ../../environment/kerberos/docker-compose.yml build kdc
  docker-compose -f ../../environment/plaintext/docker-compose.yml -f ../../environment/kerberos/docker-compose.yml build client
  docker-compose -f ../../environment/plaintext/docker-compose.yml -f ../../environment/kerberos/docker-compose.yml up -d kdc
fi
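# Optional sanity check (sketch, not part of the original flow): confirm the kdc container is up
# before issuing the kadmin commands below, e.g.:
# docker ps --filter "name=kdc" --format "{{.Names}}: {{.Status}}"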
### Create the required identities:
# Kafka service principal:
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka/[email protected]" > /dev/null
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka/[email protected]" > /dev/null
# Zookeeper service principal:
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey zookeeper/[email protected]" > /dev/null
# Create a principal with which to connect to Zookeeper from brokers - NB use the same credential on all brokers!
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey [email protected]" > /dev/null
# Create client principals to connect in to the cluster:
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey [email protected]" > /dev/null
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka_producer/[email protected]" > /dev/null
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey [email protected]" > /dev/null
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey [email protected]" > /dev/null
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey [email protected]" > /dev/null
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey [email protected]" > /dev/null
# Create an admin principal for the cluster, which we'll use to set up ACLs.
# Look after this one - it's also declared as a super user in the broker config.
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey admin/[email protected]" > /dev/null
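# Optional verification (sketch): list the principals created above in the KDC database, e.g.:
# docker exec -ti kdc kadmin.local -q "list_principals"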
# Create keytabs to use for Kafka
log "Create keytabs"
docker exec -ti kdc rm -f /var/lib/secret/broker.key > /dev/null 2>&1
docker exec -ti kdc rm -f /var/lib/secret/broker2.key > /dev/null 2>&1
docker exec -ti kdc rm -f /var/lib/secret/zookeeper.key > /dev/null 2>&1
docker exec -ti kdc rm -f /var/lib/secret/zookeeper-client.key > /dev/null 2>&1
docker exec -ti kdc rm -f /var/lib/secret/kafka-client.key > /dev/null 2>&1
docker exec -ti kdc rm -f /var/lib/secret/kafka-admin.key > /dev/null 2>&1
docker exec -ti kdc rm -f /var/lib/secret/kafka-connect.key > /dev/null 2>&1
docker exec -ti kdc rm -f /var/lib/secret/kafka-schemaregistry.key > /dev/null 2>&1
docker exec -ti kdc rm -f /var/lib/secret/kafka-controlcenter.key > /dev/null 2>&1
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/broker.key -norandkey kafka/[email protected] " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/broker2.key -norandkey kafka/[email protected] " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/zookeeper.key -norandkey zookeeper/[email protected] " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/zookeeper-client.key -norandkey [email protected] " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka-client.key -norandkey [email protected] " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka-client.key -norandkey kafka_producer/[email protected] " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka-client.key -norandkey [email protected] " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka-admin.key -norandkey admin/[email protected] " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka-connect.key -norandkey [email protected] " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka-schemaregistry.key -norandkey [email protected] " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka-controlcenter.key -norandkey [email protected] " > /dev/null
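# Optional verification (sketch, assuming klist is available in the kdc image): inspect a keytab
# to confirm it contains the expected entries, e.g.:
# docker exec -ti kdc klist -kt /var/lib/secret/broker.key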
if [[ "$TAG" == *ubi8 ]] || version_gt $TAG_BASE "5.9.0"
then
# https://github.com/vdesabou/kafka-docker-playground/issues/10
# keytabs are created on kdc with root user
# ubi8 images are using appuser user
# starting from 6.0, all images are ubi8
  docker exec -ti kdc chmod a+r /var/lib/secret/broker.key
  docker exec -ti kdc chmod a+r /var/lib/secret/broker2.key
  docker exec -ti kdc chmod a+r /var/lib/secret/zookeeper.key
  docker exec -ti kdc chmod a+r /var/lib/secret/zookeeper-client.key
  docker exec -ti kdc chmod a+r /var/lib/secret/kafka-client.key
  docker exec -ti kdc chmod a+r /var/lib/secret/kafka-admin.key
  docker exec -ti kdc chmod a+r /var/lib/secret/kafka-connect.key
  docker exec -ti kdc chmod a+r /var/lib/secret/kafka-schemaregistry.key
  docker exec -ti kdc chmod a+r /var/lib/secret/kafka-controlcenter.key
fi
# Start zookeeper and kafka now that the keytabs have been created with the required credentials and services
if [ -f "${DOCKER_COMPOSE_FILE_OVERRIDE}" ]
then
  docker-compose -f ../../environment/plaintext/docker-compose.yml -f ../../environment/kerberos/docker-compose.yml -f ${DOCKER_COMPOSE_FILE_OVERRIDE} up -d
else
  docker-compose -f ../../environment/plaintext/docker-compose.yml -f ../../environment/kerberos/docker-compose.yml up -d
fi
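# Drop the docker-compose override file argument (if provided) so that only the remaining
# arguments are forwarded to the wait script below.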
if [ "$#" -ne 0 ]
then
  shift
fi
../../scripts/wait-for-connect-and-controlcenter.sh "$@"
# Add ACLs for the producer and consumer users:
docker exec client bash -c "kinit -k -t /var/lib/secret/kafka-admin.key admin/for-kafka && kafka-acls --bootstrap-server broker:9092 --command-config /etc/kafka/command.properties --add --allow-principal User:kafka_producer --producer --topic=*"
docker exec client bash -c "kinit -k -t /var/lib/secret/kafka-admin.key admin/for-kafka && kafka-acls --bootstrap-server broker:9092 --command-config /etc/kafka/command.properties --add --allow-principal User:kafka_consumer --consumer --topic=* --group=*"
# Add ACLs for the connect user:
docker exec client bash -c "kinit -k -t /var/lib/secret/kafka-admin.key admin/for-kafka && kafka-acls --bootstrap-server broker:9092 --command-config /etc/kafka/command.properties --add --allow-principal User:connect --consumer --topic=* --group=*"
docker exec client bash -c "kinit -k -t /var/lib/secret/kafka-admin.key admin/for-kafka && kafka-acls --bootstrap-server broker:9092 --command-config /etc/kafka/command.properties --add --allow-principal User:connect --producer --topic=*"
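# Optional verification (sketch): list the ACLs that were just added, using the same admin keytab:
# docker exec client bash -c "kinit -k -t /var/lib/secret/kafka-admin.key admin/for-kafka && kafka-acls --bootstrap-server broker:9092 --command-config /etc/kafka/command.properties --list"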
# schemaregistry and controlcenter are super users, so no ACLs are needed for them
# Output example usage:
log "-----------------------------------------"
log "Example configuration to access kafka:"
log "-----------------------------------------"
log "-> docker-compose exec client bash -c 'kinit -k -t /var/lib/secret/kafka-client.key kafka_producer && kafka-console-producer --broker-list broker:9092 --topic test --producer.config /etc/kafka/producer.properties'"
log "-> docker-compose exec client bash -c 'kinit -k -t /var/lib/secret/kafka-client.key kafka_consumer && kafka-console-consumer --bootstrap-server broker:9092 --topic test --consumer.config /etc/kafka/consumer.properties --from-beginning'"