Skip to content

Commit

Permalink
improving gui based on feedback
Browse files Browse the repository at this point in the history
  • Loading branch information
gfursin committed Feb 7, 2024
1 parent 7beae91 commit 32a8271
Show file tree
Hide file tree
Showing 26 changed files with 293 additions and 12 deletions.
10 changes: 10 additions & 0 deletions cm-mlops/cfg/benchmark-hardware-compute/_cm.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
{
"alias": "benchmark-hardware-compute",
"automation_alias": "cfg",
"automation_uid": "88dce9c160324c5d",
"tags": [
"benchmark",
"compute"
],
"uid": "ca67f372e7294afd"
}
4 changes: 4 additions & 0 deletions cm-mlops/cfg/benchmark-hardware-compute/amd-gpu.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
{
"uid": "d8f06040f7294319",
"name": "AMD GPU"
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
{
"uid": "357a972e79614903",
"name": "Generic CPU - Arm64"
}
4 changes: 4 additions & 0 deletions cm-mlops/cfg/benchmark-hardware-compute/generic-cpu-x64.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
{
"uid": "cdfd424c32734e38",
"name": "Generic CPU - x64"
}
4 changes: 4 additions & 0 deletions cm-mlops/cfg/benchmark-hardware-compute/google-tpu.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
{
"uid": "b3be7ac9ef954f5a",
"name": "Google TPU"
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
{
"uid": "c7b2a91e4d5f6038",
"name": "Nvidia GPU - Jetson Orin"
}
4 changes: 4 additions & 0 deletions cm-mlops/cfg/benchmark-hardware-compute/nvidia-gpu.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
{
"uid": "fe379ecd1e054a00",
"name": "Nvidia GPU"
}
4 changes: 4 additions & 0 deletions cm-mlops/cfg/benchmark-hardware-compute/qualcomm-ai100.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
{
"uid": "f1e8d4c2a6b59073",
"name": "Qualcomm - AI 100"
}
19 changes: 19 additions & 0 deletions cm-mlops/cfg/benchmark-run-loadgen-dev/_cm.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
alias: benchmark-run-loadgen-dev
uid: 08483639f6d24daf

automation_alias: cfg
automation_uid: 88dce9c160324c5d

tags:
- benchmark
- run
- mlperf
- loadgen
- dev

name: "LoadGen - dev"

supported_compute:
- fe379ecd1e054a00
- cdfd424c32734e38
7 changes: 7 additions & 0 deletions cm-mlops/cfg/benchmark-run-loadgen-dev/any-model-offline.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
uid: 4df38ed8dd804678

name: "Any model - offline"

supported_compute:
- cdfd424c32734e38
- 357a972e79614903
17 changes: 17 additions & 0 deletions cm-mlops/cfg/benchmark-run-mlperf-abtf-dev/_cm.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
alias: benchmark-run-mlperf-abtf-dev
uid: f2ad6b2c6e7b4352

automation_alias: cfg
automation_uid: 88dce9c160324c5d

tags:
- benchmark
- run
- mlperf
- abtf
- dev

name: "MLPerf-ABTF inference - dev"

supported_compute:
- cdfd424c32734e38
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
### TBD
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
uid: "9a4b3c2d1e0f5867"

name: "RetinaNet Reference Python Torch Offline"

supported_compute:
- cdfd424c32734e38
17 changes: 17 additions & 0 deletions cm-mlops/cfg/benchmark-run-mlperf-client-dev/_cm.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
alias: benchmark-run-mlperf-client-dev
uid: 86ef1156149a4357

automation_alias: cfg
automation_uid: 88dce9c160324c5d

tags:
- benchmark
- run
- mlperf
- client
- dev

name: "MLPerf Client (Windows, MacOS and Linux) - dev"

supported_compute:
- cdfd424c32734e38
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
### TBD
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
uid: "5d2e8f1a9c4b7063"

name: "RetinaNet Reference Python ONNX Offline"

supported_compute:
- cdfd424c32734e38
18 changes: 18 additions & 0 deletions cm-mlops/cfg/benchmark-run-mlperf-inference-latest/_cm.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
alias: benchmark-run-mlperf-inference-latest
uid: 6ab99c5e6dda4586

automation_alias: cfg
automation_uid: 88dce9c160324c5d

tags:
- benchmark
- run
- mlperf
- inference
- latest

name: "MLPerf inference - latest"

supported_compute:
- 357a972e79614903
- cdfd424c32734e38
19 changes: 19 additions & 0 deletions cm-mlops/cfg/benchmark-run-mlperf-inference-v3.1/_cm.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
alias: benchmark-run-mlperf-inference-v3.1
uid: 8eb42e27ec984185

automation_alias: cfg
automation_uid: 88dce9c160324c5d

tags:
- benchmark
- run
- mlperf
- inference
- v3.1

name: "MLPerf inference - v3.1"

supported_compute:
- fe379ecd1e054a00
- cdfd424c32734e38
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
uid: 4df38ed8dd804678

name: "BERT Reference Python ONNX Offline"

supported_compute:
- cdfd424c32734e38
- 357a972e79614903
- fe379ecd1e054a00
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
uid: b8a7c6d5e4f30921

name: "GPT-J Reference Python Torch Offline"

supported_compute:
- fe379ecd1e054a00
19 changes: 19 additions & 0 deletions cm-mlops/cfg/benchmark-run-mlperf-inference-v4.0/_cm.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
alias: benchmark-run-mlperf-inference-v4.0
uid: 27c06c35bceb4059

automation_alias: cfg
automation_uid: 88dce9c160324c5d

tags:
- benchmark
- run
- mlperf
- inference
- v4.0

name: "MLPerf inference - v4.0"

supported_compute:
- fe379ecd1e054a00
- cdfd424c32734e38
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
uid: 2e9d0c1b8a7f6453

name: "LLAMA2 Reference Python Torch Offline"

supported_compute:
- fe379ecd1e054a00
14 changes: 14 additions & 0 deletions cm-mlops/cfg/benchmark-run-mlperf-mobile-latest/_cm.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
alias: benchmark-run-mlperf-mobile-latest
uid: 1edb7903eccd436c

automation_alias: cfg
automation_uid: 88dce9c160324c5d

tags:
- benchmark
- run
- mlperf
- mobile
- latest

name: "MLPerf mobile - latest"
14 changes: 14 additions & 0 deletions cm-mlops/cfg/benchmark-run-mlperf-tiny-latest/_cm.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
alias: benchmark-run-mlperf-tiny-latest
uid: 8f2c2dbaf29e4485

automation_alias: cfg
automation_uid: 88dce9c160324c5d

tags:
- benchmark
- run
- mlperf
- tiny
- latest

name: "MLPerf tiny - latest"
14 changes: 14 additions & 0 deletions cm-mlops/cfg/benchmark-run-mlperf-training-latest/_cm.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
alias: benchmark-run-mlperf-training-latest
uid: 30f19848588847e0

automation_alias: cfg
automation_uid: 88dce9c160324c5d

tags:
- benchmark
- run
- mlperf
- training
- latest

name: "MLPerf training - latest"
75 changes: 63 additions & 12 deletions cm-mlops/script/launch-benchmark/customize.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,13 +27,17 @@ def postprocess(i):
##################################################################################
def load_cfg(i):

tags = i['tags']
tags = i.get('tags','')
artifact = i.get('artifact','')

key = i.get('key','')

ii={'action':'find',
'automation':'cfg',
'tags':tags}
'automation':'cfg'}
if artifact!='':
ii['artifact']=artifact
elif tags!='':
ii['tags']=tags

r=cmind.access(ii)
if r['return']>0: return r
Expand All @@ -45,7 +49,7 @@ def load_cfg(i):
prune_list = prune.get('list',[])

# Checking individual files inside CM entry
selection = [{'name':''}]
selection = []

if i.get('skip_files', False):
for l in lst:
Expand Down Expand Up @@ -88,11 +92,14 @@ def load_cfg(i):
if add:
meta['full_path']=full_path

name = meta.get('name','')
if name=='':
name = ' '.join(meta.get('tags',[]))
name = name.strip()
meta['name'] = name

selection.append(meta)

# Sort by tags
selection = sorted(selection, key = lambda v: ','.join(v.get('tags',[])))

return {'return':0, 'lst':lst, 'selection':selection}


Expand All @@ -117,11 +124,12 @@ def gui(i):
if 'compute_id' not in st.session_state: st.session_state['compute_id']=0

##############################################################
# Check first level of benchmarks
# Check the first level of benchmarks
r=load_cfg({'tags':'benchmark,run', 'skip_files':True})
if r['return']>0: return r

bench_selection = r['selection']
selection = sorted(r['selection'], key = lambda v: v['name'])
bench_selection = [{'name':''}] + selection

# Creating compute selector
bench_id = st.selectbox('Select benchmark:',
Expand All @@ -142,7 +150,9 @@ def gui(i):
'prune':{'key':'supported_compute', 'list':bench_supported_compute}})
if r['return']>0: return r

compute_selection = r['selection']
selection = sorted(r['selection'], key = lambda v: v['name'])
compute_selection = [{'name':''}] + selection


# Creating compute selector
compute_id = st.selectbox('Select target hardware:',
Expand All @@ -156,9 +166,50 @@ def gui(i):

st.rerun()

st.markdown('Bench ID: {}'.format(st.session_state['bench_id']))
st.markdown('Compute ID: {}'.format(st.session_state['compute_id']))

##############################################################
# Check tests
ii = {'tags':'benchmark,run'}

if bench_id>0:
bench_uid = bench_selection[bench_id]['uid']
ii['artifact']=bench_uid


r=load_cfg(ii)
if r['return']>0: return r

selection = sorted(r['selection'], key = lambda v: v['name'])


test_selection = [{'name':''}] + selection



# Creating compute selector
test_id = st.selectbox('Select test:',
range(len(test_selection)),
format_func=lambda x: test_selection[x]['name'],
index = 0,
key = 'test')

##############################################################
if test_id >0:
test_meta = test_selection[test_id]

test_path = test_meta['full_path']

test_md = test_meta['full_path'][:-5]+'.md'
if os.path.isfile(test_md):

r = cmind.utils.load_txt(test_md)
if r['return']>0: return r

s = r['string']

st.markdown('---')

st.markdown(s)



Expand Down

0 comments on commit 32a8271

Please sign in to comment.