-
Notifications
You must be signed in to change notification settings - Fork 54
57 lines (51 loc) · 1.99 KB
/
test_cli_rocm_pytorch.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
# Workflow: run the optimum-benchmark CLI test suite with the PyTorch backend
# on AMD ROCm (Instinct) runners, split into single-GPU and multi-GPU jobs.
name: CLI ROCm Pytorch Tests
# NOTE: a generic YAML 1.1 parser reads the bare `on` key as boolean `true`;
# GitHub's loader handles it correctly — suppress yamllint `truthy` if linting.
on:
  # Allow manual runs from the Actions tab.
  workflow_dispatch:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main
    # `labeled`/`unlabeled` are included so the label-gated `if:` conditions
    # in the jobs below are re-evaluated when a PR's labels change.
    types:
      - opened
      - reopened
      - synchronize
      - labeled
      - unlabeled
# Cancel any in-flight run for the same PR (or ref, for pushes/manual runs)
# when a newer run starts.
concurrency:
  cancel-in-progress: true
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
jobs:
  # Single-GPU CLI tests on a ROCm Instinct runner.
  run_cli_rocm_pytorch_single_gpu_tests:
    # Always run on push / manual dispatch; on PRs, run only when one of the
    # matching labels is present (labels re-trigger via labeled/unlabeled above).
    if: ${{
      (github.event_name == 'push') ||
      (github.event_name == 'workflow_dispatch') ||
      contains( github.event.pull_request.labels.*.name, 'cli') ||
      contains( github.event.pull_request.labels.*.name, 'rocm') ||
      contains( github.event.pull_request.labels.*.name, 'pytorch') ||
      contains( github.event.pull_request.labels.*.name, 'single_gpu') ||
      contains( github.event.pull_request.labels.*.name, 'cli_rocm_pytorch_single_gpu')
      }}
    # Reusable workflow hosted in huggingface/hf-workflows.
    # NOTE(review): pinned to the `testing` ref rather than a stable tag/SHA —
    # confirm this is intentional and not a leftover from debugging.
    uses: huggingface/hf-workflows/.github/workflows/optimum_benchmark_instinct_ci.yaml@testing
    with:
      machine_type: single-gpu
      # Quantization extras (autoawq, auto-gptq) are exercised only here,
      # not in the multi-GPU job below.
      install_extras: testing,diffusers,timm,peft,autoawq,auto-gptq
      test_file: test_cli.py
      # Select single-device tests: exclude distributed modes and bnb.
      pytest_keywords: cli and cuda and pytorch and not (dp or ddp or device_map or deepspeed) and not bnb
  # Multi-GPU CLI tests on a ROCm Instinct runner.
  run_cli_rocm_pytorch_multi_gpu_tests:
    # Same gating as above, with multi-GPU-specific labels.
    if: ${{
      (github.event_name == 'push') ||
      (github.event_name == 'workflow_dispatch') ||
      contains( github.event.pull_request.labels.*.name, 'cli') ||
      contains( github.event.pull_request.labels.*.name, 'rocm') ||
      contains( github.event.pull_request.labels.*.name, 'pytorch') ||
      contains( github.event.pull_request.labels.*.name, 'multi_gpu') ||
      contains( github.event.pull_request.labels.*.name, 'cli_rocm_pytorch_multi_gpu')
      }}
    # NOTE(review): same `testing` ref as the single-GPU job — confirm pinning.
    uses: huggingface/hf-workflows/.github/workflows/optimum_benchmark_instinct_ci.yaml@testing
    with:
      machine_type: multi-gpu
      install_extras: testing,diffusers,timm,peft
      test_file: test_cli.py
      # Select only distributed-mode tests (data-parallel / DDP / device_map).
      pytest_keywords: cli and cuda and pytorch and (dp or ddp or device_map)