forked from metal3-io/metal3-dev-env
-
Notifications
You must be signed in to change notification settings - Fork 0
/
04_verify.sh
executable file
·312 lines (267 loc) · 10.4 KB
/
04_verify.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
#!/usr/bin/env bash
# 04_verify.sh — verify the metal3 dev-env deployment: networking, the
# ephemeral Kubernetes cluster, CRDs, controller deployments/replicasets,
# BareMetalHost CRs and their backing libvirt VMs, and support containers.
# NOTE: only `set -u` (not -e) — checks must keep running after a failure;
# failures are counted in FAILS and reported at the end.
set -u
# shellcheck disable=SC1091
source lib/logging.sh
# shellcheck disable=SC1091
source lib/common.sh
# shellcheck disable=SC1091
source lib/network.sh
# shellcheck disable=SC1091
source lib/images.sh
# Nothing to verify for a tilt-based ephemeral cluster.
if [ "${EPHEMERAL_CLUSTER}" == "tilt" ]; then
exit 0
fi
#######################################
# Verify one BareMetalHost CR and its backing libvirt VM.
# Globals:   FAILS (read), KUBECONFIG, BRIDGES (read), RESULT_STR (written,
#            consumed by process_status/equals/is_in from lib/common.sh)
# Arguments: $1 host name, $2 BMC address, $3 BMC user, $4 BMC password,
#            $5 boot MAC address
# Returns:   number of checks that failed for this host
#######################################
check_bm_hosts() {
  local FAILS_CHECK="${FAILS}"
  local NAME ADDRESS USER PASSWORD MAC CRED_NAME CRED_SECRET \
    BM_HOSTS BM_HOST BM_VMS BM_VMNAME BM_VM_IFACES
  NAME="${1}"
  ADDRESS="${2}"
  USER="${3}"
  PASSWORD="${4}"
  MAC="${5}"
  BM_HOSTS="$(kubectl --kubeconfig "${KUBECONFIG}" get baremetalhosts \
    -n metal3 -o json)"
  BM_VMS="$(sudo virsh list --all)"
  # libvirt domain names use underscores where the BMH name has dashes
  BM_VMNAME="${NAME//-/_}"

  # Verify the BareMetalHost CR exists
  RESULT_STR="${NAME} Baremetalhost exist"
  echo "${BM_HOSTS}" | grep -w "${NAME}" > /dev/null
  process_status $?

  BM_HOST="$(echo "${BM_HOSTS}" | \
    jq ' .items[] | select(.metadata.name=="'"${NAME}"'" )')"

  # Verify addresses of the host
  RESULT_STR="${NAME} Baremetalhost address correct"
  equals "$(echo "${BM_HOST}" | jq -r '.spec.bmc.address')" "${ADDRESS}"

  RESULT_STR="${NAME} Baremetalhost mac address correct"
  equals "$(echo "${BM_HOST}" | jq -r '.spec.bootMACAddress')" "${MAC}"

  # Verify BM host status
  RESULT_STR="${NAME} Baremetalhost status OK"
  equals "$(echo "${BM_HOST}" | jq -r '.status.operationalStatus')" "OK"

  # Verify credentials secret exists
  RESULT_STR="${NAME} Baremetalhost credentials secret exist"
  CRED_NAME="$(echo "${BM_HOST}" | jq -r '.spec.bmc.credentialsName')"
  # Pass --kubeconfig explicitly for consistency with every other kubectl
  # call in this script (KUBECONFIG may be a shell var, not exported).
  CRED_SECRET="$(kubectl --kubeconfig "${KUBECONFIG}" get secret \
    "${CRED_NAME}" -n metal3 -o json | jq '.data')"
  process_status $?

  # Verify credentials are correct (secret values are base64-encoded)
  RESULT_STR="${NAME} Baremetalhost password correct"
  equals "$(echo "${CRED_SECRET}" | jq -r '.password' | \
    base64 --decode)" "${PASSWORD}"

  RESULT_STR="${NAME} Baremetalhost user correct"
  equals "$(echo "${CRED_SECRET}" | jq -r '.username' | \
    base64 --decode)" "${USER}"

  # Verify the VM was created
  RESULT_STR="${NAME} Baremetalhost VM exist"
  echo "${BM_VMS}" | grep -w "${BM_VMNAME}" > /dev/null
  process_status $?

  # Verify the VM has an interface on every expected bridge
  BM_VM_IFACES="$(sudo virsh domiflist "${BM_VMNAME}")"
  for bridge in ${BRIDGES}; do
    RESULT_STR="${NAME} Baremetalhost VM interface ${bridge} exist"
    echo "${BM_VM_IFACES}" | grep -w "${bridge}" > /dev/null
    process_status $?
  done

  # Verify the introspection completed successfully
  RESULT_STR="${NAME} Baremetalhost introspecting completed"
  is_in "$(echo "${BM_HOST}" | jq -r '.status.provisioning.state')" \
    "ready available"

  echo ""
  return "$((FAILS - FAILS_CHECK))"
}
# Verify that a resource exists in a type
#######################################
# Verify that each "namespace:name" resource of a given type exists and,
# unless a controller runs locally, that all its replicas are ready.
# Globals:   FAILS, KUBECONFIG, BMO_RUN_LOCAL, CAPM3_RUN_LOCAL (read),
#            RESULT_STR (written)
# Arguments: $1 resource type (e.g. deployments); $2..$n "namespace:name"
# Returns:   number of checks that failed
#######################################
check_k8s_entity() {
  local FAILS_CHECK="${FAILS}"
  # NS and NAME were previously leaked as globals; keep them function-local
  local ENTITY NS NAME name
  local TYPE="${1}"
  shift
  for name in "${@}"; do
    # Check the entity exists
    RESULT_STR="${TYPE} ${name} created"
    NS="$(echo "${name}" | cut -d ':' -f1)"
    NAME="$(echo "${name}" | cut -d ':' -f2)"
    ENTITY="$(kubectl --kubeconfig "${KUBECONFIG}" get "${TYPE}" "${NAME}" \
      -n "${NS}" -o json)"
    process_status $?
    # Check the replicas — skipped when the operators run out-of-cluster,
    # presumably because the in-cluster replicas are not expected then
    if [[ "${BMO_RUN_LOCAL}" != true ]] && [[ "${CAPM3_RUN_LOCAL}" != true ]]
    then
      RESULT_STR="${name} ${TYPE} replicas correct"
      equals "$(echo "${ENTITY}" | jq -r '.status.readyReplicas')" \
        "$(echo "${ENTITY}" | jq -r '.status.replicas')"
    fi
  done
  return "$((FAILS - FAILS_CHECK))"
}
# Verify that a resource exists in a type
#######################################
# Verify replicasets selected by label: expected count, and all replicas
# ready (unless a controller runs locally).
# Globals:   FAILS, KUBECONFIG, BMO_RUN_LOCAL, CAPM3_RUN_LOCAL (read),
#            RESULT_STR (written)
# Arguments: $1..$n items of the form "label:value:namespace:expected_count"
# Returns:   number of checks that failed
#######################################
check_k8s_rs() {
  local FAILS_CHECK="${FAILS}"
  # All scratch variables were previously leaked as globals; keep them local
  local ENTITIES NB_ENTITIES LABEL NAME NS NB name i
  for name in "${@}"; do
    # Split "label:value:namespace:count"
    LABEL="$(echo "${name}" | cut -f1 -d:)"
    NAME="$(echo "${name}" | cut -f2 -d:)"
    NS="$(echo "${name}" | cut -d ':' -f3)"
    NB="$(echo "${name}" | cut -d ':' -f4)"
    ENTITIES="$(kubectl --kubeconfig "${KUBECONFIG}" get replicasets \
      -l "${LABEL}"="${NAME}" -n "${NS}" -o json)"
    NB_ENTITIES="$(echo "${ENTITIES}" | jq -r '.items | length')"
    RESULT_STR="Replica sets with label ${LABEL}=${NAME} created"
    equals "${NB_ENTITIES}" "${NB}"
    # Check the replicas of each matching replicaset
    if [[ "${BMO_RUN_LOCAL}" != true ]] && [[ "${CAPM3_RUN_LOCAL}" != true ]]
    then
      for i in $(seq 0 $((NB_ENTITIES - 1))); do
        RESULT_STR="${NAME} replicas correct for replica set ${i}"
        equals "$(echo "${ENTITIES}" | jq -r ".items[${i}].status.readyReplicas")" \
          "$(echo "${ENTITIES}" | jq -r ".items[${i}].status.replicas")"
      done
    fi
  done
  return "$((FAILS - FAILS_CHECK))"
}
# Verify that a resource exists in a type
#######################################
# Verify that a pod matching each "label:value" selector exists.
# Globals:   FAILS, KUBECONFIG (read), RESULT_STR (written)
# Arguments: $1..$n items of the form "label:value"
# Returns:   number of checks that failed
#######################################
check_k8s_pods() {
  local FAILS_CHECK="${FAILS}"
  # LABEL and NAME were previously leaked as globals; keep them local
  local ENTITY LABEL NAME name
  # NOTE(review): ${2} is used here as the namespace, yet the loop below also
  # iterates over it as a "label:value" item — looks unintended; verify
  # against the callers before changing. Behavior preserved as-is.
  local NS="${2:-metal3}"
  for name in "${@}"; do
    # Check a matching pod exists (jq yields "null" when no items match)
    LABEL="$(echo "${name}" | cut -f1 -d:)"
    NAME="$(echo "${name}" | cut -f2 -d:)"
    ENTITY="$(kubectl --kubeconfig "${KUBECONFIG}" get pods \
      -l "${LABEL}"="${NAME}" -n "${NS}" -o json | jq '.items[0]')"
    RESULT_STR="Pod ${NAME} created"
    differs "${ENTITY}" "null"
  done
  return "$((FAILS - FAILS_CHECK))"
}
# Verify a container is running
#######################################
# Verify that a container with the given name is running.
# Globals:   CONTAINER_RUNTIME (read), RESULT_STR (written)
# Arguments: $1 container name
# Returns:   status of process_status for this check
#######################################
check_container() {
  local container_name="$1"
  RESULT_STR="Container ${container_name} running"
  sudo "${CONTAINER_RUNTIME}" ps | grep -w "${container_name}$" > /dev/null
  process_status $?
  return $?
}
# Kubeconfig used by every kubectl call below; defaults to the usual path.
KUBECONFIG="${KUBECONFIG:-${HOME}/.kube/config}"
# CRDs expected regardless of the CAPM3 version.
EXPTD_V1ALPHAX_CRDS="clusters.cluster.x-k8s.io \
kubeadmconfigs.bootstrap.cluster.x-k8s.io \
kubeadmconfigtemplates.bootstrap.cluster.x-k8s.io \
machinedeployments.cluster.x-k8s.io \
machines.cluster.x-k8s.io \
machinesets.cluster.x-k8s.io \
baremetalhosts.metal3.io"
# Metal3 infrastructure-provider CRDs.
EXPTD_V1ALPHA3_CRDS="metal3clusters.infrastructure.cluster.x-k8s.io \
metal3machines.infrastructure.cluster.x-k8s.io \
metal3machinetemplates.infrastructure.cluster.x-k8s.io"
# Expected deployments ("namespace:name") when CAPM3_VERSION == v1alpha3.
EXPTD_V1ALPHA3_DEPLOYMENTS="capm3-system:capm3-controller-manager \
capi-system:capi-controller-manager \
capi-kubeadm-bootstrap-system:capi-kubeadm-bootstrap-controller-manager \
capi-kubeadm-control-plane-system:capi-kubeadm-control-plane-controller-manager \
capi-webhook-system:capi-controller-manager \
capi-webhook-system:capi-kubeadm-bootstrap-controller-manager \
capi-webhook-system:capi-kubeadm-control-plane-controller-manager \
capi-webhook-system:capm3-controller-manager \
baremetal-operator-system:baremetal-operator-controller-manager"
# Expected replicasets ("label:value:namespace:count") for v1alpha3.
EXPTD_V1ALPHA3_RS="cluster.x-k8s.io/provider:infrastructure-metal3:capm3-system:1 \
cluster.x-k8s.io/provider:cluster-api:capi-system:1 \
cluster.x-k8s.io/provider:bootstrap-kubeadm:capi-kubeadm-bootstrap-system:1 \
cluster.x-k8s.io/provider:control-plane-kubeadm:capi-kubeadm-control-plane-system:1 \
cluster.x-k8s.io/provider:infrastructure-metal3:capi-webhook-system:1 \
cluster.x-k8s.io/provider:cluster-api:capi-webhook-system:1 \
cluster.x-k8s.io/provider:bootstrap-kubeadm:capi-webhook-system:1 \
cluster.x-k8s.io/provider:control-plane-kubeadm:capi-webhook-system:1 \
control-plane:controller-manager:baremetal-operator-system:1"
# Expected deployments ("namespace:name") for v1alpha4 and later.
EXPTD_V1ALPHA4_DEPLOYMENTS="capm3-system:capm3-controller-manager \
capi-system:capi-controller-manager \
capi-kubeadm-bootstrap-system:capi-kubeadm-bootstrap-controller-manager \
capi-kubeadm-control-plane-system:capi-kubeadm-control-plane-controller-manager \
capi-webhook-system:capi-controller-manager \
capi-webhook-system:capi-kubeadm-bootstrap-controller-manager \
capi-webhook-system:capi-kubeadm-control-plane-controller-manager \
capi-webhook-system:capm3-controller-manager \
capm3-system:capm3-baremetal-operator-controller-manager"
# Expected replicasets ("label:value:namespace:count") for v1alpha4+.
EXPTD_V1ALPHA4_RS="cluster.x-k8s.io/provider:infrastructure-metal3:capm3-system:3 \
cluster.x-k8s.io/provider:cluster-api:capi-system:1 \
cluster.x-k8s.io/provider:bootstrap-kubeadm:capi-kubeadm-bootstrap-system:1 \
cluster.x-k8s.io/provider:control-plane-kubeadm:capi-kubeadm-control-plane-system:1 \
cluster.x-k8s.io/provider:infrastructure-metal3:capi-webhook-system:2 \
cluster.x-k8s.io/provider:cluster-api:capi-webhook-system:1 \
cluster.x-k8s.io/provider:bootstrap-kubeadm:capi-webhook-system:1 \
cluster.x-k8s.io/provider:control-plane-kubeadm:capi-webhook-system:1"
# Host bridges every BMH VM must have an interface on.
BRIDGES="provisioning baremetal"
# Support containers expected under ${CONTAINER_RUNTIME}.
EXPTD_CONTAINERS="httpd-infra registry vbmc sushy-tools"
# Running failure count, incremented by process_status/equals/differs/is_in.
FAILS=0
BMO_RUN_LOCAL="${BMO_RUN_LOCAL:-false}"
CAPM3_RUN_LOCAL="${CAPM3_RUN_LOCAL:-false}"
# --- Main verification sequence -------------------------------------------
# Each check sets RESULT_STR and calls a helper (process_status, equals,
# iterate, ...) from the sourced libs, which increments FAILS on failure.
# The script exits with the total failure count.

# Verify networking
for bridge in ${BRIDGES}; do
  RESULT_STR="Network ${bridge} exists"
  ip link show dev "${bridge}" > /dev/null
  process_status $? "Network ${bridge} exists"
done

# Verify Kubernetes cluster is reachable.
# Pass --kubeconfig explicitly for consistency with the other kubectl calls
# (KUBECONFIG is a shell variable here, not necessarily exported).
RESULT_STR="Kubernetes cluster reachable"
kubectl --kubeconfig "${KUBECONFIG}" version > /dev/null
process_status $?
echo ""

# Verify that the CRDs exist
RESULT_STR="Fetch CRDs"
CRDS="$(kubectl --kubeconfig "${KUBECONFIG}" get crds)"
process_status $? "Fetch CRDs"

LIST_OF_CRDS=("${EXPTD_V1ALPHAX_CRDS}" "${EXPTD_V1ALPHA3_CRDS}")
# Intentionally unquoted: each element is a space-separated CRD list
# shellcheck disable=SC2068
for name in ${LIST_OF_CRDS[@]}; do
  RESULT_STR="CRD ${name} created"
  echo "${CRDS}" | grep -w "${name}" > /dev/null
  process_status $?
done
echo ""

# Verify v1alpha3+ Operators, Deployments, Replicasets
if [ "${CAPM3_VERSION}" != "v1alpha3" ]; then
  iterate check_k8s_entity deployments "${EXPTD_V1ALPHA4_DEPLOYMENTS}"
  iterate check_k8s_rs "${EXPTD_V1ALPHA4_RS}"
else
  iterate check_k8s_entity deployments "${EXPTD_V1ALPHA3_DEPLOYMENTS}"
  iterate check_k8s_rs "${EXPTD_V1ALPHA3_RS}"
fi

# Verify the baremetal hosts
## Fetch the BM CRs
RESULT_STR="Fetch Baremetalhosts"
kubectl --kubeconfig "${KUBECONFIG}" get baremetalhosts -n metal3 -o json \
  > /dev/null
process_status $?

## Fetch the VMs
RESULT_STR="Fetch Baremetalhosts VMs"
sudo virsh list --all > /dev/null
process_status $?
echo ""

## Verify each node; list_nodes emits "name address user password mac" lines
while read -r name address user password mac; do
  iterate check_bm_hosts "${name}" "${address}" "${user}" \
    "${password}" "${mac}"
  echo ""
done <<< "$(list_nodes)"

# Verify that the operators are running locally when requested
if [[ "${BMO_RUN_LOCAL}" == true ]]; then
  RESULT_STR="Baremetal operator locally running"
  pgrep "operator-sdk" > /dev/null 2> /dev/null
  process_status $?
fi
if [[ "${CAPM3_RUN_LOCAL}" == true ]]; then
  # shellcheck disable=SC2034
  RESULT_STR="CAPI operator locally running"
  pgrep -f "go run ./main.go" > /dev/null 2> /dev/null
  process_status $?
fi
if [[ "${BMO_RUN_LOCAL}" == true ]] || [[ "${CAPM3_RUN_LOCAL}" == true ]]; then
  echo ""
fi

# Verify the support containers are running
for container in ${EXPTD_CONTAINERS}; do
  iterate check_container "${container}"
done

echo -e "\nNumber of failures : $FAILS"
exit "${FAILS}"