forked from NVIDIA/cuda-quantum
-
Notifications
You must be signed in to change notification settings - Fork 0
183 lines (163 loc) · 6.29 KB
/
ci.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
on:
  workflow_dispatch:
    inputs:
      cache_base:
        required: false
        type: string
        description: 'The name of the branch to use as cache base.'
        default: main
      export_environment:
        type: boolean
        description: Export the build environment as tar artifact that can be imported with Docker.
  # The GitHub application copy-pr-bot copies the source code for every pull request
  # into the repository. Approving such upstream pushes effectively marks code as trusted,
  # and is necessary to use the self-hosted NVIDIA runners.
  push:
    branches:
      - "pull-request/[0-9]+"

name: CI  # do not change name without updating workflow_run triggers

# Cancel any in-flight run for the same PR/ref when a new one starts.
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  # Resolves the PR number/base branch for pushes created by copy-pr-bot
  # (branch name "pull-request/<N>") as well as regular PR events.
  metadata:
    name: Retrieve PR info
    runs-on: ubuntu-latest
    permissions:
      pull-requests: read
    outputs:
      pull_request_number: ${{ steps.pr_info.outputs.pr_number }}
      pull_request_base: ${{ steps.pr_info.outputs.pr_base }}
      cache_base: ${{ steps.pr_info.outputs.pr_base }}
    steps:
      - id: pr_info
        run: |
          pr_number=`echo ${{ github.ref_name }} | grep pull-request/ | (grep -o [0-9]* || true)`
          pr_number=${pr_number:-${{ github.event.pull_request.number }}}
          if [ -n "$pr_number" ]; then
            pr_base=`gh pr view $pr_number -R ${{ github.repository }} --json baseRefName --jq .baseRefName`
            echo "pr_number=$pr_number" >> $GITHUB_OUTPUT
            echo "pr_base=$pr_base" >> $GITHUB_OUTPUT
          fi
        env:
          GH_TOKEN: ${{ github.token }}

  # Builds/loads the dev-dependency images for each platform/toolchain combination.
  devdeps:
    name: Load dependencies
    needs: metadata
    strategy:
      matrix:
        platform: [amd64, arm64]
        toolchain: [llvm, clang16, gcc12]
      fail-fast: false
    uses: ./.github/workflows/dev_environment.yml
    with:
      platforms: linux/${{ matrix.platform }}
      dockerfile: build/devdeps.Dockerfile
      toolchain: ${{ matrix.toolchain }}
      registry_cache_from: ${{ inputs.cache_base || needs.metadata.outputs.cache_base }}
      # needed only for the cloudposse GitHub action
      matrix_key: ${{ matrix.platform }}-${{ matrix.toolchain }}

  # Builds/loads the manylinux-based dependency images used for Python wheels.
  wheeldeps:
    name: Load wheel dependencies
    needs: metadata
    strategy:
      matrix:
        platform: [amd64, arm64]
      fail-fast: false
    uses: ./.github/workflows/dev_environment.yml
    with:
      platforms: linux/${{ matrix.platform }}
      dockerfile: build/devdeps.manylinux.Dockerfile
      toolchain: gcc11
      build_args: |
        manylinux_image=manylinux_2_28
        arch=${{ (matrix.platform == 'arm64' && 'aarch64') || (matrix.platform == 'amd64' && 'x86_64') || 'any' }}
        distro=rhel8
      registry_cache_from: ${{ inputs.cache_base || needs.metadata.outputs.cache_base }}
      # needed only for the cloudposse GitHub action
      matrix_key: ${{ matrix.platform }}-python

  # This job is needed only when using the cloudposse GitHub action to read
  # the output of a matrix job. This is a workaround due to current GitHub
  # limitations that may not be needed if the work started here concludes:
  # https://github.com/actions/runner/pull/2477
  config:
    name: Configure build
    runs-on: ubuntu-latest
    needs: [devdeps, wheeldeps]
    outputs:
      json: "${{ steps.read_json.outputs.result }}"
    steps:
      # NOTE(review): the action ref was garbled in the source ("[email protected]",
      # an email-obfuscation artifact). Reconstructed from the matrix-step-name
      # input below — confirm the exact pinned version against the upstream repo.
      - uses: cloudposse/github-action-matrix-outputs-read@main
        id: read_json
        with:
          matrix-step-name: dev_environment

  # Runs the test suite inside each dev environment produced by `devdeps`.
  build_and_test:
    name: Build and test
    needs: config
    strategy:
      matrix:
        platform: [amd64, arm64]
        toolchain: [llvm, clang16, gcc12]
      fail-fast: false
    uses: ./.github/workflows/test_in_devenv.yml
    with:
      platform: linux/${{ matrix.platform }}
      devdeps_cache: ${{ fromJson(needs.config.outputs.json).cache_key[format('{0}-{1}', matrix.platform, matrix.toolchain)] }}
      devdeps_archive: ${{ fromJson(needs.config.outputs.json).tar_archive[format('{0}-{1}', matrix.platform, matrix.toolchain)] }}
      export_environment: ${{ github.event_name == 'workflow_dispatch' && inputs.export_environment }}

  # Builds the release Docker images on top of the llvm-toolchain dev image.
  docker_image:
    name: Create Docker images
    needs: config
    strategy:
      matrix:
        platform: [amd64, arm64]
      fail-fast: false
    uses: ./.github/workflows/docker_images.yml
    with:
      platforms: linux/${{ matrix.platform }}
      devdeps_cache: ${{ fromJson(needs.config.outputs.json).cache_key[format('{0}-llvm', matrix.platform)] }}
      devdeps_archive: ${{ fromJson(needs.config.outputs.json).tar_archive[format('{0}-llvm', matrix.platform)] }}

  # Builds Python wheels for each platform/Python-version combination.
  python_wheels:
    name: Create Python wheels
    needs: config
    strategy:
      matrix:
        platform: [amd64, arm64]
        python_version: ['3.9', '3.11']
      fail-fast: false
    uses: ./.github/workflows/python_wheels.yml
    with:
      platform: linux/${{ matrix.platform }}
      python_version: ${{ matrix.python_version }}
      devdeps_cache: ${{ fromJson(needs.config.outputs.json).cache_key[format('{0}-python', matrix.platform)] }}
      devdeps_archive: ${{ fromJson(needs.config.outputs.json).tar_archive[format('{0}-python', matrix.platform)] }}

  # Publishes the cache keys and PR metadata consumed by a follow-up
  # (workflow_run) clean-up workflow.
  clean_up:
    name: Prepare cache clean-up
    runs-on: ubuntu-latest
    needs: [metadata, config, build_and_test, docker_image, wheeldeps, python_wheels]
    # We need to clean up even if the workflow is cancelled or fails.
    if: always()
    steps:
      - name: Save cache keys and metadata
        id: workflow_inputs
        run: |
          set -e
          key_matrix='${{ needs.config.outputs.json }}'
          keys=`echo $key_matrix | jq '.cache_key | to_entries | .[].value' --raw-output`
          echo "$keys" >> cache_keys.txt
          echo "pr-number: ${{ needs.metadata.outputs.pull_request_number }}" >> metadata.txt
          echo "pr-base: ${{ needs.metadata.outputs.pull_request_base }}" >> metadata.txt
      # NOTE(review): upload-artifact@v3 is deprecated and disabled by GitHub;
      # upgrade to @v4 in lockstep with any workflow_run consumer that downloads
      # these artifacts (v3 and v4 artifacts are not cross-compatible).
      - name: Upload cache keys
        uses: actions/upload-artifact@v3
        with:
          name: cache_keys_ci
          path: cache_keys.txt
          retention-days: 1
          if-no-files-found: error
      - name: Upload metadata
        uses: actions/upload-artifact@v3
        with:
          name: metadata_ci
          path: metadata.txt
          retention-days: 1
          if-no-files-found: error