# Upgrade Fleet in the latest Rancher to the dev version and run multi-cluster (MC) tests
name: E2E Upgrade Fleet in Rancher To HEAD
on:
schedule:
    # Run every day at 1:00 PM UTC
- cron: '0 13 * * *'
workflow_dispatch:
inputs:
ref:
description: "checkout git branch/tag"
required: true
default: "main"
push:
tags: [ 'v*' ]
paths-ignore:
- '*.md'
env:
GOARCH: amd64
CGO_ENABLED: 0
SETUP_K3D_VERSION: 'v5.7.1'
SETUP_K3S_VERSION: 'v1.30.2-k3s2'
jobs:
rancher-fleet-integration:
runs-on: ubuntu-latest
steps:
-
uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.event.inputs.ref }}
-
uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
check-latest: true
-
name: Install Ginkgo CLI
run: go install github.com/onsi/ginkgo/v2/ginkgo
-
uses: actions/cache@v4
id: rancher-cli-cache
with:
path: /home/runner/.local/bin
key: ${{ runner.os }}-rancher-cli-2.6.0
-
name: Install Rancher CLI
if: steps.rancher-cli-cache.outputs.cache-hit != 'true'
run: |
# download an older CLI to avoid https://github.com/rancher/rancher/issues/37574
mkdir -p /home/runner/.local/bin
wget -q https://github.com/rancher/cli/releases/download/v2.6.0/rancher-linux-amd64-v2.6.0.tar.gz
tar -xz --strip-components=2 -f rancher-linux-amd64-v2.6.0.tar.gz -C /home/runner/.local/bin
rancher --version
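      # Compile the fleet and fleet-agent binaries that are baked into the images below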
-
name: Build fleet binaries
run: |
./.github/scripts/build-fleet-binaries.sh
-
name: Set up QEMU
uses: docker/setup-qemu-action@v3
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
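      # Dev images are pushed to ttl.sh, an anonymous ephemeral registry: the uuid makes
      # the image names unique per run and the "1h" tag sets their expiry time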
-
name: Get uuid
id: uuid
run: echo "::set-output name=uuid::$(uuidgen)"
-
id: meta-fleet
uses: docker/metadata-action@v5
with:
images: |
ttl.sh/rancher/fleet-${{ steps.uuid.outputs.uuid }}
tags: type=raw,value=1h
-
uses: docker/build-push-action@v6
with:
context: .
file: package/Dockerfile
build-args: |
ARCH=${{ env.GOARCH }}
push: true
tags: ${{ steps.meta-fleet.outputs.tags }}
labels: ${{ steps.meta-fleet.outputs.labels }}
-
id: meta-fleet-agent
uses: docker/metadata-action@v5
with:
images: |
ttl.sh/rancher/fleet-agent-${{ steps.uuid.outputs.uuid }}
tags: type=raw,value=1h
-
uses: docker/build-push-action@v6
with:
context: .
file: package/Dockerfile.agent
build-args: |
ARCH=${{ env.GOARCH }}
push: true
tags: ${{ steps.meta-fleet-agent.outputs.tags }}
labels: ${{ steps.meta-fleet-agent.outputs.labels }}
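      # Two k3d clusters share the "nw01" Docker network: "upstream" hosts Rancher and
      # the Fleet controller, "downstream" is registered as a managed cluster below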
-
name: Set up k3d control-plane cluster
uses: AbsaOSS/k3d-action@v2
with:
k3d-version: ${{ env.SETUP_K3D_VERSION }}
cluster-name: "upstream"
args: >-
-p "80:80@agent:0:direct"
-p "443:443@agent:0:direct"
--api-port 6443
--agents 1
--k3s-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%@agent:*'
--k3s-arg '--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%@agent:*'
--network "nw01"
--image docker.io/rancher/k3s:${{ env.SETUP_K3S_VERSION }}
-
name: Set up k3d downstream cluster
uses: AbsaOSS/k3d-action@v2
with:
k3d-version: ${{ env.SETUP_K3D_VERSION }}
cluster-name: "downstream"
args: >-
-p "81:80@agent:0:direct"
-p "444:443@agent:0:direct"
--api-port 6644
--agents 1
--k3s-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%@agent:*'
--k3s-arg '--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%@agent:*'
--network "nw01"
--image docker.io/rancher/k3s:${{ env.SETUP_K3S_VERSION }}
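      # sslip.io resolves the embedded IP, so 172.18.0.1.sslip.io gives Rancher a stable
      # hostname that is reachable from both k3d clusters on the shared network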
-
name: Set up latest Rancher
env:
public_hostname: "172.18.0.1.sslip.io"
run: |
./.github/scripts/setup-latest-rancher.sh
-
name: Register Rancher's downstream clusters
env:
public_hostname: "172.18.0.1.sslip.io"
run: |
./.github/scripts/wait-for-loadbalancer.sh
./.github/scripts/register-downstream-clusters.sh
# wait for cluster to settle
sleep 30
./.github/scripts/label-downstream-cluster.sh
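      # Create a GitRepo in both the upstream (fleet-local) and downstream (fleet-default)
      # namespaces before upgrading, so the upgrade runs against existing workloads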
-
name: Create example workload
run: |
kubectl apply -n fleet-local -f e2e/assets/fleet-upgrade/gitrepo-simple.yaml
kubectl apply -n fleet-default -f e2e/assets/fleet-upgrade/gitrepo-simple.yaml
# wait for bundle ready
until kubectl get bundles -n fleet-local test-simple-simple-chart -o=jsonpath='{.status.conditions[?(@.type=="Ready")].status}' | grep -q "True"; do sleep 3; done
until kubectl get bundles -n fleet-default test-simple-simple-chart -o=jsonpath='{.status.conditions[?(@.type=="Ready")].status}' | grep -q "True"; do sleep 3; done
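      # Swap the Fleet version bundled with Rancher for the dev images built above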
-
name: Deploy development fleet
run: |
echo "${{ steps.meta-fleet.outputs.tags }} ${{ steps.meta-fleet-agent.outputs.tags }}"
./.github/scripts/upgrade-rancher-fleet-to-dev-fleet.sh ${{ steps.meta-fleet.outputs.tags }} ${{ steps.meta-fleet-agent.outputs.tags }}
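      # Run the multi-cluster E2E suite against the upstream cluster; fleet-local is the
      # local namespace and fleet-default the downstream namespace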
-
name: E2E tests for examples
env:
FLEET_E2E_NS: fleet-local
FLEET_E2E_NS_DOWNSTREAM: fleet-default
run: |
kubectl config use-context k3d-upstream
ginkgo --github-output e2e/multi-cluster
-
name: Dump failed environment
if: failure()
run: |
./.github/scripts/dump-failed-k3ds.sh
-
name: Upload logs
uses: actions/upload-artifact@v4
if: failure()
with:
name: gha-fleet-rancher-logs-${{ github.sha }}-${{ github.run_id }}
path: |
tmp/*.json
tmp/*.log
retention-days: 2