# benchmark.yml
# Copyright 2024 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
name: Benchmark
on:
  workflow_dispatch:
  pull_request:
  push:
    branches:
      - main
concurrency:
  # A PR number if a pull request and otherwise the commit hash. This cancels
  # queued and in-progress runs for the same PR (presubmit) or commit
  # (postsubmit). The workflow name is prepended to avoid conflicts between
  # different workflows.
  group: ${{ github.workflow }}-${{ github.event.number || github.sha }}
  cancel-in-progress: true
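  # For example, pull request #1234 resolves to the group "Benchmark-1234",
  # while a push to main resolves to "Benchmark-<commit SHA>", so only runs of
  # this workflow for the same PR or commit cancel each other.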
env:
  # This needs to be in env instead of the outputs of setup because it contains
  # the run attempt and we want that to be the current attempt, not whatever
  # attempt the setup step last ran in.
  GCS_DIR: gs://iree-github-actions-${{ github.event_name == 'pull_request' && 'presubmit' || 'postsubmit' }}-artifacts/${{ github.run_id }}/${{ github.run_attempt }}
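  # For example, attempt 1 of presubmit (pull request) run 123456789 stores its
  # artifacts under gs://iree-github-actions-presubmit-artifacts/123456789/1.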
jobs:
  setup:
    uses: ./.github/workflows/setup.yml
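  # Each job below (benchmark_summary excepted) is gated on two setup outputs:
  # enabled-jobs, which is parsed with fromJson and so is presumably a JSON list
  # of the job names selected for this run, and benchmark-presets, which must be
  # non-empty for any benchmark work to be scheduled.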
  build_for_benchmarks:
    needs: setup
    if: contains(fromJson(needs.setup.outputs.enabled-jobs), 'build_for_benchmarks') && needs.setup.outputs.benchmark-presets != ''
    uses: ./.github/workflows/build_all.yml
    with:
      runner-group: ${{ needs.setup.outputs.runner-group }}
      runner-env: ${{ needs.setup.outputs.runner-env }}
      write-caches: ${{ needs.setup.outputs.write-caches }}
      run-tests: false
  build_benchmark_tools:
    needs: [setup, build_for_benchmarks]
    if: contains(fromJson(needs.setup.outputs.enabled-jobs), 'build_benchmark_tools') && needs.setup.outputs.benchmark-presets != ''
    uses: ./.github/workflows/build_benchmark_tools.yml
    with:
      runner-group: ${{ needs.setup.outputs.runner-group }}
      runner-env: ${{ needs.setup.outputs.runner-env }}
      install-dir: ${{ needs.build_for_benchmarks.outputs.install-dir }}
      install-dir-archive: ${{ needs.build_for_benchmarks.outputs.install-dir-archive }}
      install-dir-gcs-artifact: ${{ needs.build_for_benchmarks.outputs.install-dir-gcs-artifact }}
  build_e2e_test_artifacts:
    needs: [setup, build_for_benchmarks]
    if: contains(fromJson(needs.setup.outputs.enabled-jobs), 'build_e2e_test_artifacts') && needs.setup.outputs.benchmark-presets != ''
    uses: ./.github/workflows/build_e2e_test_artifacts.yml
    with:
      runner-group: ${{ needs.setup.outputs.runner-group }}
      runner-env: ${{ needs.setup.outputs.runner-env }}
      install-dir: ${{ needs.build_for_benchmarks.outputs.install-dir }}
      install-dir-archive: ${{ needs.build_for_benchmarks.outputs.install-dir-archive }}
      install-dir-gcs-artifact: ${{ needs.build_for_benchmarks.outputs.install-dir-gcs-artifact }}
      benchmark-presets: ${{ needs.setup.outputs.benchmark-presets }}
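      # The shard-count value appears to map runner machine types to shard
      # counts (an assumption based on the key=value syntax): two shards on
      # c2-standard-60 runners and a single shard everywhere else.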
      shard-count: "c2-standard-60=2,default=1"
  test_benchmark_suites:
    needs: [setup, build_for_benchmarks, build_e2e_test_artifacts]
    if: contains(fromJson(needs.setup.outputs.enabled-jobs), 'test_benchmark_suites') && needs.setup.outputs.benchmark-presets != ''
    strategy:
      matrix:
        target:
          - platform: linux
            arch: riscv_64
            docker_image: "gcr.io/iree-oss/riscv@sha256:62e87bad3405d691ddba6f9be0ef44eeb60461a467c8d86f0842c81a1f97da79"
            run_scripts: "./build_tools/cmake/build_riscv.sh && ./build_tools/cmake/test_riscv.sh"
          - platform: linux
            arch: riscv_32
            docker_image: "gcr.io/iree-oss/riscv@sha256:62e87bad3405d691ddba6f9be0ef44eeb60461a467c8d86f0842c81a1f97da79"
            run_scripts: "./build_tools/cmake/build_riscv.sh && ./build_tools/cmake/test_riscv.sh"
          - platform: linux
            arch: x86_64
            docker_image: "gcr.io/iree-oss/base@sha256:dc314b4fe30fc1315742512891357bffed4d1b62ffcb46258b1e0761c737b446"
            run_scripts: "./build_tools/cmake/test_benchmark_suites_on_linux.sh"
            # Requires Intel CascadeLake CPU.
            host_machine: c2s601t
    runs-on:
      - self-hosted # must come first
      - runner-group=${{ needs.setup.outputs.runner-group }}
      - environment=${{ needs.setup.outputs.runner-env }}
      - ${{ matrix.target.host_machine || 'cpu' }} # Default to generic x86_64 VM.
    env:
      PLATFORM: ${{ matrix.target.platform }}
      ARCH: ${{ matrix.target.arch }}
      DOCKER_IMAGE: ${{ matrix.target.docker_image }}
      RUN_SCRIPTS: ${{ matrix.target.run_scripts }}
      INSTALL_DIR: ${{ needs.build_for_benchmarks.outputs.install-dir }}
      INSTALL_DIR_ARCHIVE: ${{ needs.build_for_benchmarks.outputs.install-dir-archive }}
      INSTALL_DIR_GCS_ARTIFACT: ${{ needs.build_for_benchmarks.outputs.install-dir-gcs-artifact }}
      TARGET_BUILD_DIR: build-${{ matrix.target.platform }}-${{ matrix.target.arch }}
      E2E_TEST_ARTIFACTS_DIR: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-dir }}
      E2E_TEST_ARTIFACTS_GCS_ARTIFACT_DIR: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-gcs-artifact-dir }}
    steps:
      - name: "Checking out repository"
        uses: actions/[email protected]
      - name: "Checking out runtime submodules"
        run: ./build_tools/scripts/git/update_runtime_submodules.sh
      - name: "Downloading install dir archive"
        run: gcloud storage cp "${INSTALL_DIR_GCS_ARTIFACT}" "${INSTALL_DIR_ARCHIVE}"
      - name: "Extracting install directory"
        run: tar -xf "${INSTALL_DIR_ARCHIVE}"
      # TODO(#11136): Only download the needed artifacts instead of everything.
      - name: "Downloading e2e test artifacts"
        run: |
          mkdir -p ${E2E_TEST_ARTIFACTS_DIR}
          gcloud storage cp -r "${E2E_TEST_ARTIFACTS_GCS_ARTIFACT_DIR}/*" "${E2E_TEST_ARTIFACTS_DIR}"
      - name: "Build iree-run-module and test benchmark suite modules"
        run: |
          ./build_tools/github_actions/docker_run.sh \
            --env "IREE_TARGET_PLATFORM=${PLATFORM}" \
            --env "IREE_TARGET_ARCH=${ARCH}" \
            --env "IREE_TARGET_BUILD_DIR=${TARGET_BUILD_DIR}" \
            --env "BUILD_PRESET=benchmark-suite-test" \
            --env "IREE_HOST_BIN_DIR=${INSTALL_DIR}/bin" \
            --env "E2E_TEST_ARTIFACTS_DIR=${E2E_TEST_ARTIFACTS_DIR}" \
            "${DOCKER_IMAGE}" \
            bash -euo pipefail -c \
              "${RUN_SCRIPTS}"
  compilation_benchmarks:
    needs: [setup, build_e2e_test_artifacts]
    if: contains(fromJson(needs.setup.outputs.enabled-jobs), 'compilation_benchmarks') && needs.setup.outputs.benchmark-presets != ''
    uses: ./.github/workflows/benchmark_compilation.yml
    with:
      runner-group: ${{ needs.setup.outputs.runner-group }}
      runner-env: ${{ needs.setup.outputs.runner-env }}
      e2e-test-artifacts-dir: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-dir }}
      e2e-test-artifacts-gcs-artifact-dir: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-gcs-artifact-dir }}
      e2e-test-artifacts-build-log: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-build-log }}
      e2e-test-artifacts-build-log-gcs-artifact: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-build-log-gcs-artifact }}
  execution_benchmarks:
    needs: [setup, build_benchmark_tools, build_e2e_test_artifacts]
    if: contains(fromJson(needs.setup.outputs.enabled-jobs), 'execution_benchmarks') && needs.setup.outputs.benchmark-presets != ''
    uses: ./.github/workflows/benchmark_execution.yml
    with:
      runner-group: ${{ needs.setup.outputs.runner-group }}
      runner-env: ${{ needs.setup.outputs.runner-env }}
      e2e-test-artifacts-dir: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-dir }}
      e2e-test-artifacts-gcs-artifact-dir: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-gcs-artifact-dir }}
      benchmark-tools-gcs-artifact-dir: ${{ needs.build_benchmark_tools.outputs.benchmark-tools-gcs-artifact-dir }}
  process_benchmark_results:
    needs: [setup, compilation_benchmarks, execution_benchmarks]
    if: contains(fromJson(needs.setup.outputs.enabled-jobs), 'process_benchmark_results') && needs.setup.outputs.benchmark-presets != ''
    runs-on:
      - self-hosted # must come first
      - runner-group=${{ needs.setup.outputs.runner-group }}
      - environment=${{ needs.setup.outputs.runner-env }}
      - cpu
      - os-family=Linux
    env:
      COMPILE_STATS_RESULTS: ${{ needs.compilation_benchmarks.outputs.compile-stats-results }}
      COMPILE_STATS_RESULTS_GCS_ARTIFACT: ${{ needs.compilation_benchmarks.outputs.compile-stats-results-gcs-artifact }}
      # Empty if no execution benchmark runs.
      EXECUTION_BENCHMARK_RESULTS_DIR: ${{ needs.execution_benchmarks.outputs.benchmark-results-dir }}
      # Empty if no execution benchmark runs.
      EXECUTION_BENCHMARK_RESULTS_GCS_ARTIFACT_DIR: ${{ needs.execution_benchmarks.outputs.benchmark-results-gcs-artifact-dir }}
    steps:
      - name: "Checking out repository"
        uses: actions/[email protected]
        with:
          # We need the full history (and main branch) to generate the report.
          fetch-depth: 0
      - name: Downloading compilation benchmark results
        run: |
          gcloud storage cp \
            "${COMPILE_STATS_RESULTS_GCS_ARTIFACT}" \
            "${COMPILE_STATS_RESULTS}"
      - name: Downloading execution benchmark results
        id: download-execution-results
        # Skip the download if there are no execution benchmark results (e.g. no
        # benchmark matches the preset/filter). In that case, no benchmark job is
        # run in benchmark_execution.yml and the output variables are empty.
        if: env.EXECUTION_BENCHMARK_RESULTS_GCS_ARTIFACT_DIR != ''
        run: |
          gcloud storage cp -r \
            "${EXECUTION_BENCHMARK_RESULTS_GCS_ARTIFACT_DIR}/benchmark-results-*.json" \
            "${EXECUTION_BENCHMARK_RESULTS_DIR}"
          echo "execution-benchmark-results-pattern=${EXECUTION_BENCHMARK_RESULTS_DIR}/benchmark-results-*.json" >> "${GITHUB_OUTPUT}"
      - name: Generating comment
        if: fromJson(needs.setup.outputs.is-pr)
        id: generate-comment
        env:
          # Wildcard pattern to match all execution benchmark results. Empty if
          # execution_benchmarks is skipped, which results in no match.
          EXECUTION_BENCHMARK_RESULTS_PATTERN: ${{ steps.download-execution-results.outputs.execution-benchmark-results-pattern }}
          IREE_BUILD_URL: https://github.com/iree-org/iree/actions/runs/${{ github.run_id }}/attempts/${{ github.run_attempt }}
          PR_NUMBER: ${{ github.event.pull_request.number }}
          BENCHMARK_COMMENT_ARTIFACT: benchmark-comment.json
        run: |
          build_tools/github_actions/docker_run.sh \
            gcr.io/iree-oss/benchmark-report@sha256:7498c6f32f63f13faf085463cc38656d4297519c824e63e1c99c8c258147f6ff \
            ./build_tools/benchmarks/generate_benchmark_comment.py \
              --verbose \
              --pr_number="${PR_NUMBER}" \
              --pr_committish="${GITHUB_SHA}" \
              --pr_base_branch="origin/${GITHUB_BASE_REF}" \
              --comment_type="benchmark-summary" \
              --build_url="${IREE_BUILD_URL}" \
              --benchmark_files="${EXECUTION_BENCHMARK_RESULTS_PATTERN}" \
              --compile_stats_files="${COMPILE_STATS_RESULTS}" \
              --output="${BENCHMARK_COMMENT_ARTIFACT}"
          echo "benchmark-comment-artifact=${BENCHMARK_COMMENT_ARTIFACT}" >> "${GITHUB_OUTPUT}"
      - name: Uploading comment artifact
        # For security reasons, instead of posting the comment to the PR directly,
        # we only upload the comment data in the presubmit workflow and trigger the
        # posting workflow on the main branch. See post_benchmark_comment.yaml.
        if: fromJson(needs.setup.outputs.is-pr)
        env:
          BENCHMARK_COMMENT_ARTIFACT: ${{ steps.generate-comment.outputs.benchmark-comment-artifact }}
          BENCHMARK_COMMENT_GCS_ARTIFACT: ${{ env.GCS_DIR }}/${{ steps.generate-comment.outputs.benchmark-comment-artifact }}
        run: |
          gcloud storage cp \
            "${BENCHMARK_COMMENT_ARTIFACT}" \
            "${BENCHMARK_COMMENT_GCS_ARTIFACT}"
      - name: Uploading results to dashboard
        if: github.ref_name == 'main'
        env:
          EXECUTION_BENCHMARK_RESULTS_PATTERN: ${{ steps.download-execution-results.outputs.execution-benchmark-results-pattern }}
          IREE_DASHBOARD_API_TOKEN: ${{ secrets.IREE_DASHBOARD_API_TOKEN }}
        run: |
          build_tools/github_actions/docker_run.sh \
            --env "IREE_DASHBOARD_API_TOKEN=${IREE_DASHBOARD_API_TOKEN}" \
            gcr.io/iree-oss/benchmark-report@sha256:7498c6f32f63f13faf085463cc38656d4297519c824e63e1c99c8c258147f6ff \
            ./build_tools/benchmarks/upload_benchmarks_to_dashboard.py \
              --verbose \
              --benchmark_files="${EXECUTION_BENCHMARK_RESULTS_PATTERN}" \
              --compile_stats_files="${COMPILE_STATS_RESULTS}"
  ##############################################################################
  # Depends on all the other jobs to provide a single anchor that indicates the
  # final status. Status reporting will become more sophisticated in the future
  # and we can hopefully avoid the need to explicitly list every single job...
  benchmark_summary:
    # Even if you have an explicit if condition, you still need to override
    # GitHub's default behavior of not running if any dependencies failed.
    if: always()
    runs-on: ubuntu-20.04
    needs:
      - setup
      - build_for_benchmarks
      # Benchmark pipeline
      - build_benchmark_tools
      - build_e2e_test_artifacts
      - test_benchmark_suites
      - compilation_benchmarks
      - execution_benchmarks
      - process_benchmark_results
    steps:
      - name: "Checking out repository"
        uses: actions/[email protected]
      - name: Getting failed jobs
        id: failed_jobs
        run: |
          echo '${{ toJson(needs) }}'
          FAILED_JOBS="$(echo '${{ toJson(needs) }}' \
            | jq --raw-output \
            'map_values(select(.result!="success" and .result!="skipped")) | keys | join(",")' \
          )"
          echo "failed-jobs=${FAILED_JOBS}" >> $GITHUB_OUTPUT
          if [[ "${FAILED_JOBS}" != "" ]]; then
            echo "The following jobs failed: ${FAILED_JOBS}"
            exit 1
          fi
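      # For illustration: if needs were {"setup": {"result": "success"},
      # "execution_benchmarks": {"result": "failure"}}, the jq filter would keep
      # only execution_benchmarks, so failed-jobs=execution_benchmarks and the
      # step exits 1.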
      - name: Show useful artifact links
        if: always()
        env:
          # If the job that produces an artifact was skipped or failed,
          # "NOT_PRESENT" is shown instead.
          INSTALL_DIR_GCS_ARTIFACT: ${{ needs.build_for_benchmarks.outputs.install-dir-gcs-artifact || 'NOT_PRESENT' }}
          E2E_TEST_ARTIFACTS_GCS_ARTIFACT_DIR: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-gcs-artifact-dir || 'NOT_PRESENT' }}
          BENCHMARK_TOOLS_GCS_ARTIFACT_DIR: ${{ needs.build_benchmark_tools.outputs.benchmark-tools-gcs-artifact-dir || 'NOT_PRESENT' }}
          EXECUTION_BENCHMARK_RESULTS_GCS_ARTIFACT_DIR: ${{ needs.execution_benchmarks.outputs.benchmark-results-gcs-artifact-dir || 'NOT_PRESENT' }}
          COMPILATION_BENCHMARK_RESULTS_GCS_ARTIFACT: ${{ needs.compilation_benchmarks.outputs.compile-stats-results-gcs-artifact || 'NOT_PRESENT' }}
        run: |
          envsubst < ./.github/workflows/ARTIFACT_SUMMARY_TEMPLATE.md >> "${GITHUB_STEP_SUMMARY}"
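      # envsubst fills the ${...} placeholders in ARTIFACT_SUMMARY_TEMPLATE.md
      # from the environment variables above and appends the rendered Markdown
      # to this run's step summary page.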
      - name: Posting to Discord
        uses: sarisia/[email protected]
        if: failure() && github.ref_name == 'main'
        with:
          webhook: ${{ secrets.DISCORD_WEBHOOK }}
          description: "The following jobs failed: ${{ steps.failed_jobs.outputs.failed-jobs }}"
          url: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/attempts/${{ github.run_attempt }}"