# .github/workflows/2-algorithm-evaluation.yml
name: Execute algorithm and evaluate metrics

on:
  push:
    branches: [ "main" ]
    paths:
      - 'algorithms/*.yaml'
      - '.github/workflows/2-algorithm-evaluation.yml'
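# Each algorithms/*.yaml descriptor is expected to name the container image to
# run; a minimal, hypothetical example (only the 'image' key is parsed by the
# steps below):
#
#   image: ghcr.io/example-org/example-detector:v1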
jobs:
  execute:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Configure Git Safe Directory
        run: git config --global --add safe.directory /__w/szcore/szcore
      - name: Find updated algorithm
        id: find-algorithm
        uses: tj-actions/changed-files@v45
        with:
          files: algorithms/**.yaml
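      # changed-files exposes 'any_changed' (whether any matching file changed)
      # and 'all_changed_files' (a space-separated list of paths); both are
      # consumed by the steps below.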
      - name: Execute algorithm container
        if: steps.find-algorithm.outputs.any_changed == 'true'
        run: |
          # Iterate over all EDF recordings stored under datasets/ in S3
          for recording in $(aws s3api list-objects --bucket ${AWS_BUCKET} --prefix datasets --output text --query 'Contents[].[Key]' | grep '\.edf$'); do
            # Save each recording (dataset slice) to a temp file
            aws s3 cp s3://${AWS_BUCKET}/${recording} ./data/tmp.edf
            # Run inference on the recording for every updated algorithm
            for algo in ${{ steps.find-algorithm.outputs.all_changed_files }}; do
              IMAGE=$(grep '^image: ' "$algo" | sed 's/^image: \+//' | tr -d \'\")
              ALGO_NAME=$(echo "$IMAGE" | iconv -t ascii//TRANSLIT | sed -r 's/[^a-zA-Z0-9]+/-/g' | sed -r 's/^-+|-+$//g' | tr A-Z a-z)
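              # ALGO_NAME is a filesystem/S3-safe slug of the image reference,
              # e.g. (hypothetical) "ghcr.io/example-org/detector:v1" becomes
              # "ghcr-io-example-org-detector-v1".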
              # Create prediction directory and set permissions for the output file.
              mkdir -p ./predictions
              touch ./predictions/tmp.tsv
              chmod 776 ./predictions/tmp.tsv
              echo "Running inference for $ALGO_NAME"
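              # The algorithm container is expected to read /data/$INPUT and
              # write its annotations to /output/$OUTPUT; --network none keeps
              # inference offline.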
              docker run \
                -e INPUT=tmp.edf \
                -e OUTPUT=tmp.tsv \
                -v ./predictions:/output:rw \
                -v ./data:/data:ro \
                --network none \
                --name "${ALGO_NAME}" \
                "${IMAGE}"
              # Upload predictions to S3
              subpath=${recording#*/}
              prediction=${subpath%_eeg.edf}_events.tsv
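              # e.g. a (hypothetical) key datasets/dsA/sub-01/sub-01_run-01_eeg.edf
              # uploads as submissions/<ALGO_NAME>/dsA/sub-01/sub-01_run-01_events.tsv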
              aws s3 cp \
                ./predictions/tmp.tsv \
                "s3://${AWS_BUCKET}/submissions/${ALGO_NAME}/${prediction}"
              # Cleanup
              rm ./predictions/tmp.tsv
              docker rm "${ALGO_NAME}"
            done
            # Cleanup
            rm ./data/tmp.edf
          done
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY }}
          AWS_DEFAULT_REGION: ${{ secrets.AWS_REGION }}
          AWS_BUCKET: ${{ secrets.AWS_BUCKET }}
  evaluate:
    runs-on: ubuntu-latest
    needs: [execute]
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Configure Git Safe Directory
        run: git config --global --add safe.directory /__w/szcore/szcore
      - name: Find updated algorithm
        id: find-algorithm
        uses: tj-actions/changed-files@v45
        with:
          files: algorithms/**.yaml
      - name: Log in to GitHub Container Registry
        run: |
          echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
      - name: Evaluate algorithm predictions
        run: |
          mkdir -p ./submissions
          mkdir -p ./results
          # Download reference annotations
          if [ "${{ steps.find-algorithm.outputs.any_changed }}" = "true" ]; then
            aws s3 sync s3://${AWS_BUCKET}/datasets/ ./datasets --exclude "*" --include "*.tsv"
          fi
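          # After the sync, ./datasets/<dataset>/... mirrors the S3 key layout
          # and holds the reference *.tsv annotations for each dataset.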
          # Run evaluation on every updated algorithm
          for algo in ${{ steps.find-algorithm.outputs.all_changed_files }}; do
            IMAGE=$(grep '^image: ' "$algo" | sed 's/^image: \+//' | tr -d \'\")
            ALGO_NAME=$(echo "$IMAGE" | iconv -t ascii//TRANSLIT | sed -r 's/[^a-zA-Z0-9]+/-/g' | sed -r 's/^-+|-+$//g' | tr A-Z a-z)
            # Download algorithm hypothesis annotations
            aws s3 sync s3://${AWS_BUCKET}/submissions/${ALGO_NAME}/ ./submissions/${ALGO_NAME}
            mkdir -p ./results
            for dataset in ./datasets/*; do
              # Create results file
              touch ./results/${dataset##*/}.json
              chmod 776 ./results/${dataset##*/}.json
              # Run evaluation
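              # The evaluator image is expected to compare /ref (reference
              # annotations) with /hyp (this algorithm's predictions) and write
              # the metrics file named by $OUT under /results.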
              docker run \
                -e OUT="${dataset##*/}.json" \
                -v ./datasets/${dataset##*/}:/ref:ro \
                -v ./submissions/${ALGO_NAME}/${dataset##*/}:/hyp:ro \
                -v ./results:/results:rw \
                ghcr.io/${{ github.repository }}-evaluator:main
              # Upload result file
              aws s3 cp ./results/${dataset##*/}.json s3://${AWS_BUCKET}/results/${ALGO_NAME}/${dataset##*/}.json
            done
          done
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY }}
          AWS_DEFAULT_REGION: ${{ secrets.AWS_REGION }}
          AWS_BUCKET: ${{ secrets.AWS_BUCKET }}