---
# NOTE(review): web-scrape residue removed here (page chrome and a 1-246
# line-number gutter). Original header read: ".gitlab-ci.yml — 246 lines
# (216 loc) · 6.97 KB".
# Default image for all jobs that do not override it: Helm 3.8 + kubectl.
image: dtzar/helm-kubectl:3.8.0

# Stage order; jobs within the same stage run in parallel.
stages:
  - build
  - review
  - check
  - staging
  - canary
  - production
  - cleanup
# Build the container image with Kaniko (daemonless) and push it to the
# registry as $CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG (both exported
# by the .auto_devops before_script).
build:
  stage: build
  image:
    name: gcr.io/kaniko-project/executor:debug
    entrypoint: [""]
  script:
    # Registry credentials consumed by Kaniko's pusher.
    - echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json
    - /kaniko/executor --context $CI_PROJECT_DIR --dockerfile $CI_PROJECT_DIR/Dockerfile --destination "$CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG"
  only:
    # NOTE(review): `branches` already covers `master`; the extra entry is
    # redundant but harmless (only-refs are OR-ed). Kept for clarity of intent.
    - master
    - branches
# Deploy a per-branch review environment, announce the URL on Slack, and
# hand the URL to the `linkChecker` job via an artifact.
review:
  stage: review
  script:
    - kubectl config view
    - kubectl config use-context $KUBE_CTX
    # `create_secret` and `deploy` are shell helpers from .auto_devops.
    - create_secret
    - deploy
    - >-
      curl
      -n
      -X POST $SLACK_URL
      --data-urlencode 'payload={"channel": "#blog", "text": "Review branch is available on '"$CI_ENVIRONMENT_URL"' for branch '"$CI_COMMIT_REF_NAME"'"}'
    # Persist the environment URL for the next stage.
    - echo "$CI_ENVIRONMENT_URL" > env-url
  environment:
    name: review/$CI_COMMIT_REF_NAME
    url: https://$CI_PROJECT_PATH_SLUG-$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN
    on_stop: stop_review
  only:
    refs:
      - branches
  except:
    - master
  artifacts:
    paths:
      - env-url
    # FIX: was "1 days" — singular unit is the documented form.
    expire_in: 1 day
# Crawl the freshly deployed review environment and fail on broken links.
# Reads the env-url artifact produced by the `review` job.
linkChecker:
  stage: check
  image:
    # NOTE(review): no tag pins this to a floating `latest`; consider pinning
    # a specific linkchecker release for reproducible pipelines.
    name: ghcr.io/linkchecker/linkchecker
    entrypoint: [""]
  script:
    - export ENV_URL=$(cat env-url)
    # Retry wrapper defined in .auto_devops (the name's typo must match the
    # function definition below).
    - loopLinckChecker $ENV_URL
  only:
    refs:
      - branches
  except:
    - master
# Tear down a review environment. Triggered manually, or automatically via
# the `environment:on_stop` hook of the `review` job.
stop_review:
  stage: cleanup
  variables:
    # No checkout — the branch may already have been deleted.
    GIT_STRATEGY: none
  script:
    - kubectl config use-context $KUBE_CTX
    - mkdir -p ~/.kube/
    # `delete` (no arg) removes the stable-track Helm release.
    - delete
  environment:
    name: review/$CI_COMMIT_REF_NAME
    action: stop
  when: manual
  allow_failure: true
  only:
    refs:
      - branches
  except:
    - master
# This job continuously deploys to production on every push to `master`.
# To make this a manual process, either because you're enabling `staging`
# or `canary` deploys, or you simply want more control over when you deploy
# to production, uncomment the `when: manual` line in the `production` job.
production:
  stage: production
  script:
    - kubectl config use-context $KUBE_CTX
    - mkdir -p ~/.kube/
    - create_secret
    - deploy
    # Remove any canary release once the stable track is live.
    - delete canary
  environment:
    name: production-blog
    url: https://$CI_PROJECT_PATH_SLUG.$AUTO_DEVOPS_DOMAIN
  # when: manual
  only:
    refs:
      - master
# ---------------------------------------------------------------------------
# Shared shell helpers, injected into every job via the global before_script
# (see the bottom of this file). The `|` literal block preserves newlines.
.auto_devops: &auto_devops |
  # Auto DevOps variables and functions
  [[ "$TRACE" ]] && set -x
  export CI_APPLICATION_REPOSITORY=$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG
  export CI_APPLICATION_TAG=$CI_COMMIT_SHA
  export CI_CONTAINER_NAME=ci_job_build_${CI_JOB_ID}
function deploy() {
track="${1-stable}"
name="$CI_ENVIRONMENT_SLUG"
if [[ "$track" != "stable" ]]; then
name="$name-$track"
fi
replicas="1"
service_enabled="false"
postgres_enabled="$POSTGRES_ENABLED"
# canary uses stable db
[[ "$track" == "canary" ]] && postgres_enabled="false"
env_track=$( echo $track | tr -s '[:lower:]' '[:upper:]' )
env_slug=$( echo ${CI_ENVIRONMENT_SLUG//-/_} | tr -s '[:lower:]' '[:upper:]' )
if [[ "$track" == "stable" ]]; then
# for stable track get number of replicas from `PRODUCTION_REPLICAS`
eval new_replicas=\$${env_slug}_REPLICAS
service_enabled="true"
else
# for all tracks get number of replicas from `CANARY_PRODUCTION_REPLICAS`
eval new_replicas=\$${env_track}_${env_slug}_REPLICAS
fi
if [[ -n "$new_replicas" ]]; then
replicas="$new_replicas"
fi
echo "shizzle deploy $KUBE_NAMESPACE"
helm upgrade --debug --dry-run --install \
--wait \
--set service.enabled="$service_enabled" \
--set releaseOverride="$CI_ENVIRONMENT_SLUG" \
--set image.repository="$CI_APPLICATION_REPOSITORY" \
--set image.tag="$CI_APPLICATION_TAG" \
--set image.pullPolicy=IfNotPresent \
--set ingress.host="$CI_ENVIRONMENT_URL" \
--set nameOverride="$name" \
--namespace="$KUBE_NAMESPACE" \
--version="$CI_PIPELINE_ID-$CI_JOB_ID" \
"$name" \
helm/
helm upgrade --install \
--wait \
--set service.enabled="$service_enabled" \
--set releaseOverride="$CI_ENVIRONMENT_SLUG" \
--set image.repository="$CI_APPLICATION_REPOSITORY" \
--set image.tag="$CI_APPLICATION_TAG" \
--set image.pullPolicy=IfNotPresent \
--set ingress.host="$CI_ENVIRONMENT_URL" \
--set nameOverride="$name" \
--namespace="$KUBE_NAMESPACE" \
--version="$CI_PIPELINE_ID-$CI_JOB_ID" \
"$name" \
helm/
}
function create_secret() {
kubectl create secret -n "$KUBE_NAMESPACE" \
docker-registry gitlab-registry \
--docker-server="$CI_REGISTRY" \
--docker-username="$CI_DEPLOY_USER" \
--docker-password="$CI_DEPLOY_PASSWORD" \
--docker-email="$GITLAB_USER_EMAIL" \
-o yaml --dry-run | sed 's/dockercfg/dockerconfigjson/g' | kubectl replace -n "$KUBE_NAMESPACE" --force -f -
}
function delete() {
track="${1-stable}"
name="$CI_ENVIRONMENT_SLUG"
if [[ "$track" != "stable" ]]; then
name="$name-$track"
fi
helm delete "$name" --namespace="$KUBE_NAMESPACE" || true
}
function loopLinckChecker() {
nrOfRetries=10
urlToCheck=$1
oneError="That's it. 1 link in 1 URL checked. 0 warnings found. 1 error found.";
counter=0
while [ $counter -lt $nrOfRetries ]
do
counter=`expr $counter + 1`
echo "--------- Start Loop ... number $counter ------"
doLinkCheck $urlToCheck
countOneErrorMesssages=$(grep -c "$oneError" linkchecker-out.txt)||:
count404ErrorMesssages=$(grep -c "$four404Error" linkchecker-out.txt)||:
if [ $countOneErrorMesssages -eq 1 -a $count404ErrorMesssages -gt 0 ];
then
echo "The site is not up yet! Wait for it..."
sleep 5s
echo "Lets try again."
else
echo "Links were checked"
echo "**********Linkchecker output******************"
cat linkchecker-out.txt
echo "***************************************"
succes=$(grep -c "^That's it.*0 errors found\." linkchecker-out.txt)
if [ $succes -eq 1 ];
then
exit 0
else
exit 1
fi
fi
echo "-----------Loop $counter END----------"
done
echo "Site $urlToCheck not available after $nrOfRetries retries"
exit 404
}
function doLinkCheck(){
urlToCheck=$1
# If running with set -o pipefail, a failure at any stage in a shell pipeline will cause the entire pipeline to be considered failed.
#In order to avoid this we explicitly ignore a single failure: { linkchecker xx || :; }
result=$( (linkchecker --config=linkcheckerrc/linkcheckerrc $urlToCheck) ||:)
echo "*********Checker done******************"
}
# Inject the shared shell helpers (anchored above) into every job's shell.
before_script:
  - *auto_devops