diff --git a/ck/CONTRIBUTING.md b/ck/CONTRIBUTING.md
index 551cc66a9f..07d85f3537 100644
--- a/ck/CONTRIBUTING.md
+++ b/ck/CONTRIBUTING.md
@@ -69,5 +69,5 @@
* @filven
* @ValouBambou
-See more acknowledgments at the end of this [article](https://arxiv.org/abs/2011.01149)
+See more acknowledgments at the end of this [article](https://doi.org/10.1098/rsta.2020.0211)
describing Collective Knowledge v1 concepts.
diff --git a/ck/README.md b/ck/README.md
index 56ea9ff51a..e69a9947df 100644
--- a/ck/README.md
+++ b/ck/README.md
@@ -2,7 +2,7 @@
-**Note that this directory is in archive mode since the [Collective Knowledge framework (v1 and v2)](https://arxiv.org/abs/2011.01149)
+**Note that this directory is in archive mode since the [Collective Knowledge framework (v1 and v2)](https://doi.org/10.1098/rsta.2020.0211)
is now officially discontinued in favour of the new, light-weight, non-intrusive and technology-agnostic
[Collective Mind workflow automation language](https://doi.org/10.5281/zenodo.8105339) being developed, supported
and maintained by the [MLCommons](https://mlcommons.org), [cTuning.org](https://cTuning.org) and [cKnowledge.org](https://cKnowledge.org).**
@@ -280,5 +280,5 @@ The community provides Docker containers to test CK and components using differe
We would like to thank all [contributors](https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md)
and [collaborators](https://cKnowledge.org/partners.html) for their support, fruitful discussions,
-and useful feedback! See more acknowledgments in the [CK journal article](https://arxiv.org/abs/2011.01149)
+and useful feedback! See more acknowledgments in the [CK journal article](https://doi.org/10.1098/rsta.2020.0211)
and [ACM TechTalk'21](https://www.youtube.com/watch?v=7zpeIVwICa4).
diff --git a/ck/docs/mlperf-automation/tutorials/tvmcon-2021-automating-mlperf-with-tvm-and-ck.md b/ck/docs/mlperf-automation/tutorials/tvmcon-2021-automating-mlperf-with-tvm-and-ck.md
index 7864a30895..190d7dc618 100644
--- a/ck/docs/mlperf-automation/tutorials/tvmcon-2021-automating-mlperf-with-tvm-and-ck.md
+++ b/ck/docs/mlperf-automation/tutorials/tvmcon-2021-automating-mlperf-with-tvm-and-ck.md
@@ -38,7 +38,7 @@ hardware.
* [Apache TVM](https://tvm.apache.org)
* CK "plug&play" automation framework: [GitHub](https://github.com/ctuning/ck),
[Motivation](https://www.youtube.com/watch?v=7zpeIVwICa4),
- [ArXiv](https://arxiv.org/abs/2011.01149),
+ [journal paper](https://doi.org/10.1098/rsta.2020.0211),
[automation actions](https://github.com/mlcommons/ck/tree/master/ck/repo/module),
[MLOps components](https://github.com/mlcommons/ck-mlops)
* [ACM REQUEST-ASPLOS'18: the 1st Reproducible Tournament on Pareto-efficient Image Classification](https://cknow.io/c/event/repro-request-asplos2018)
diff --git a/ck/docs/src/introduction.md b/ck/docs/src/introduction.md
index d708d5d66b..c7d4e28e60 100644
--- a/ck/docs/src/introduction.md
+++ b/ck/docs/src/introduction.md
@@ -2,7 +2,7 @@
## Project overview
-* Philosophical Transactions of the Royal Society: [paper](https://arxiv.org/abs/2011.01149), [shorter pre-print](https://arxiv.org/abs/2006.07161)
+* Philosophical Transactions of the Royal Society: [paper](https://doi.org/10.1098/rsta.2020.0211), [shorter pre-print](https://arxiv.org/abs/2006.07161)
[](https://youtu.be/7zpeIVwICa4)
diff --git a/cm-mlops/automation/cfg/_cm.json b/cm-mlops/automation/cfg/_cm.json
new file mode 100644
index 0000000000..27f80fbd40
--- /dev/null
+++ b/cm-mlops/automation/cfg/_cm.json
@@ -0,0 +1,9 @@
+{
+ "alias": "cfg",
+ "automation_alias": "automation",
+ "automation_uid": "bbeb15d8f0a944a4",
+ "tags": [
+ "automation"
+ ],
+ "uid": "88dce9c160324c5d"
+}
diff --git a/cm-mlops/automation/cfg/module.py b/cm-mlops/automation/cfg/module.py
new file mode 100644
index 0000000000..be8d6e7b1d
--- /dev/null
+++ b/cm-mlops/automation/cfg/module.py
@@ -0,0 +1,52 @@
+import os
+
+from cmind.automation import Automation
+from cmind import utils
+
+class CAutomation(Automation):
+ """
+ Automation actions
+ """
+
+ ############################################################
+ def __init__(self, cmind, automation_file):
+ super().__init__(cmind, __file__)
+
+ ############################################################
+ def test(self, i):
+ """
+ Test automation
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ automation (str): automation as CM string object
+
+ parsed_automation (list): prepared in CM CLI or CM access function
+ [ (automation alias, automation UID) ] or
+ [ (automation alias, automation UID), (automation repo alias, automation repo UID) ]
+
+ (artifact) (str): artifact as CM string object
+
+ (parsed_artifact) (list): prepared in CM CLI or CM access function
+ [ (artifact alias, artifact UID) ] or
+ [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * Output from this automation action
+
+ """
+
+ import json
+ print (json.dumps(i, indent=2))
+
+ return {'return':0}
diff --git a/cm-mlops/automation/script/module.py b/cm-mlops/automation/script/module.py
index 46dd3e6978..7fc9d527e1 100644
--- a/cm-mlops/automation/script/module.py
+++ b/cm-mlops/automation/script/module.py
@@ -1312,6 +1312,27 @@ def run(self, i):
if "add_deps_recursive" in versions_meta:
self._merge_dicts_with_tags(add_deps_recursive, versions_meta['add_deps_recursive'])
+ # Run chain of docker dependencies if current run cmd is from inside a docker container
+ docker_deps = []
+ if i.get('docker_run_deps'):
+ docker_meta = meta.get('docker')
+ if docker_meta:
+            docker_deps = docker_meta.get('deps', [])
+ docker_deps = [ dep for dep in docker_deps if not dep.get('skip_inside_docker', False) ]
+ if len(docker_deps)>0:
+
+ if verbose:
+                print (recursion_spaces + '  - Checking docker run dependencies on other CM scripts:')
+
+ r = self._call_run_deps(docker_deps, self.local_env_keys, local_env_keys_from_meta, env, state, const, const_state, add_deps_recursive,
+ recursion_spaces + extra_recursion_spaces,
+ remembered_selections, variation_tags_string, False, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state)
+ if r['return']>0: return r
+
+ if verbose:
+ print (recursion_spaces + ' - Processing env after docker run dependencies ...')
+
+ update_env_with_values(env)
# Check chain of dependencies on other CM scripts
if len(deps)>0:
diff --git a/cm-mlops/automation/script/module_misc.py b/cm-mlops/automation/script/module_misc.py
index 1394196560..ef5dbd0e8e 100644
--- a/cm-mlops/automation/script/module_misc.py
+++ b/cm-mlops/automation/script/module_misc.py
@@ -1379,13 +1379,15 @@ def dockerfile(i):
i_run_cmd = r['run_cmd']
+ docker_run_cmd_prefix = i.get('docker_run_cmd_prefix', docker_settings.get('run_cmd_prefix', ''))
+
r = regenerate_script_cmd({'script_uid':script_uid,
'script_alias':script_alias,
'run_cmd':i_run_cmd,
'tags':tags,
'fake_run':True,
'docker_settings':docker_settings,
- 'docker_run_cmd_prefix':i.get('docker_run_cmd_prefix','')})
+ 'docker_run_cmd_prefix':docker_run_cmd_prefix})
if r['return']>0: return r
run_cmd = r['run_cmd_string']
@@ -1469,6 +1471,21 @@ def dockerfile(i):
return {'return':0}
+def get_container_path(value):
+ path_split = value.split(os.sep)
+ if len(path_split) == 1:
+ return value
+
+ new_value = ''
+ if "cache" in path_split and "local" in path_split:
+ new_path_split = [ "", "home", "cmuser" ]
+ repo_entry_index = path_split.index("local")
+ new_path_split += path_split[repo_entry_index:]
+ return "/".join(new_path_split)
+
+ return value
+
+
############################################################
def docker(i):
"""
@@ -1629,6 +1646,7 @@ def docker(i):
if c_input in i:
env[docker_input_mapping[c_input]] = i[c_input]
+ container_env_string = '' # env keys corresponding to container mounts are explicitly passed to the container run cmd
for index in range(len(mounts)):
mount = mounts[index]
@@ -1663,7 +1681,8 @@ def docker(i):
if tmp_values:
for tmp_value in tmp_values:
if tmp_value in env:
- new_container_mount = env[tmp_value]
+ new_container_mount = get_container_path(env[tmp_value])
+ container_env_string += "--env.{}={} ".format(tmp_value, new_container_mount)
else:# we skip those mounts
mounts[index] = None
skip = True
@@ -1694,6 +1713,8 @@ def docker(i):
docker_pre_run_cmds = i.get('docker_pre_run_cmds', []) + docker_settings.get('pre_run_cmds', [])
+ docker_run_cmd_prefix = i.get('docker_run_cmd_prefix', docker_settings.get('run_cmd_prefix', ''))
+
all_gpus = i.get('docker_all_gpus', docker_settings.get('all_gpus'))
device = i.get('docker_device', docker_settings.get('device'))
@@ -1702,6 +1723,10 @@ def docker(i):
port_maps = i.get('docker_port_maps', docker_settings.get('port_maps', []))
+ shm_size = i.get('docker_shm_size', docker_settings.get('shm_size', ''))
+
+ extra_run_args = i.get('docker_extra_run_args', docker_settings.get('extra_run_args', ''))
+
if detached == '':
detached = docker_settings.get('detached', '')
@@ -1729,7 +1754,8 @@ def docker(i):
'docker_run_cmd_prefix':i.get('docker_run_cmd_prefix','')})
if r['return']>0: return r
- run_cmd = r['run_cmd_string']
+ run_cmd = r['run_cmd_string'] + ' ' + container_env_string + ' --docker_run_deps '
+
env['CM_RUN_STATE_DOCKER'] = True
if docker_settings.get('mount_current_dir','')=='yes':
@@ -1781,6 +1807,12 @@ def docker(i):
if port_maps:
cm_docker_input['port_maps'] = port_maps
+ if shm_size != '':
+ cm_docker_input['shm_size'] = shm_size
+
+ if extra_run_args != '':
+ cm_docker_input['extra_run_args'] = extra_run_args
+
print ('')
diff --git a/cm-mlops/automation/utils/module.py b/cm-mlops/automation/utils/module.py
index 33c3381c2b..2b479d5362 100644
--- a/cm-mlops/automation/utils/module.py
+++ b/cm-mlops/automation/utils/module.py
@@ -851,3 +851,30 @@ def prune_input(self, i):
return {'return':0, 'new_input':i_run_cmd_arc}
+
+ ##############################################################################
+ def uid(self, i):
+ """
+ Generate CM UID.
+
+ Args:
+ (CM input dict): empty dict
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * uid (str): CM UID
+ """
+
+ console = i.get('out') == 'con'
+
+ r = utils.gen_uid()
+
+ if console:
+ print (r['uid'])
+
+ return r
+
diff --git a/cm-mlops/challenge/add-derived-metrics-to-mlperf-inference-v3.1/README.md b/cm-mlops/challenge/add-derived-metrics-to-mlperf-inference-v3.1/README.md
index 53bb599552..516e9b0695 100644
--- a/cm-mlops/challenge/add-derived-metrics-to-mlperf-inference-v3.1/README.md
+++ b/cm-mlops/challenge/add-derived-metrics-to-mlperf-inference-v3.1/README.md
@@ -5,9 +5,7 @@ and add derived metrics such as result/No of cores, power efficiency, device cos
Add clock speed as a third dimension to graphs and improve Bar graph visualization.
-Join our public [Discord server](https://discord.gg/JjWNWXKxwT) and/or
-our [weekly conf-calls](https://docs.google.com/document/d/1zMNK1m_LhWm6jimZK6YE05hu4VH9usdbKJ3nBy-ZPAw/edit)
-to discuss this challenge with the organizers.
+Join our public [Discord server](https://discord.gg/JjWNWXKxwT) to discuss this challenge with the organizers.
Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
to run reference implementations of MLPerf inference benchmarks
diff --git a/cm-mlops/challenge/connect-mlperf-inference-v3.1-with-openbenchmarking/README.md b/cm-mlops/challenge/connect-mlperf-inference-v3.1-with-openbenchmarking/README.md
index ab3024ba32..30b48b8060 100644
--- a/cm-mlops/challenge/connect-mlperf-inference-v3.1-with-openbenchmarking/README.md
+++ b/cm-mlops/challenge/connect-mlperf-inference-v3.1-with-openbenchmarking/README.md
@@ -2,9 +2,7 @@
Connect CM workflows to run MLPerf inference benchmarks with [OpenBenchmarking.org](https://openbenchmarking.org).
-Join our public [Discord server](https://discord.gg/JjWNWXKxwT) and/or
-our [weekly conf-calls](https://docs.google.com/document/d/1zMNK1m_LhWm6jimZK6YE05hu4VH9usdbKJ3nBy-ZPAw/edit)
-to discuss this challenge with the organizers.
+Join our public [Discord server](https://discord.gg/JjWNWXKxwT) to discuss this challenge with the organizers.
Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
to run reference implementations of MLPerf inference benchmarks
diff --git a/cm-mlops/challenge/connect-mlperf-inference-v3.1-with-openbenchmarking/_cm.json b/cm-mlops/challenge/connect-mlperf-inference-v3.1-with-openbenchmarking/_cm.json
index 72b95bdbe9..aa291ba0e8 100644
--- a/cm-mlops/challenge/connect-mlperf-inference-v3.1-with-openbenchmarking/_cm.json
+++ b/cm-mlops/challenge/connect-mlperf-inference-v3.1-with-openbenchmarking/_cm.json
@@ -2,7 +2,7 @@
"alias": "connect-mlperf-inference-v3.1-with-openbenchmarking",
"automation_alias": "challenge",
"automation_uid": "3d84abd768f34e08",
- "date_open": "20230704",
+ "date_open": "20240204",
"date_close_extension": true,
"points": 2,
"prize_short": "co-authoring white paper",
@@ -15,11 +15,7 @@
"automate",
"openbenchmarking",
"mlperf-inference",
- "mlperf-inference-openbenchmarking",
- "mlperf-inference-openbenchmarking",
- "mlperf-inference-openbenchmarking-v3.1",
- "mlperf-inference-openbenchmarking-v3.1-2023",
- "v3.1"
+ "mlperf-inference-openbenchmarking"
],
"title": "Run MLPerf inference benchmarks via OpenBenchmarking.org",
"trophies": true,
diff --git a/cm-mlops/challenge/connect-mlperf-with-medperf/README.md b/cm-mlops/challenge/connect-mlperf-with-medperf/README.md
index 4277eba2e1..e1d2dfb6bb 100644
--- a/cm-mlops/challenge/connect-mlperf-with-medperf/README.md
+++ b/cm-mlops/challenge/connect-mlperf-with-medperf/README.md
@@ -6,9 +6,7 @@ using MLPerf loadgen and MLCommons CM automation language.
See the [Nature 2023 article about MedPerf](https://www.nature.com/articles/s42256-023-00652-2)
and [ACM REP'23 keynote about CM](https://doi.org/10.5281/zenodo.8105339) to learn more about these projects.
-Join our public [Discord server](https://discord.gg/JjWNWXKxwT) and/or
-our [weekly conf-calls](https://docs.google.com/document/d/1zMNK1m_LhWm6jimZK6YE05hu4VH9usdbKJ3nBy-ZPAw/edit)
-to discuss this challenge with the organizers.
+Join our public [Discord server](https://discord.gg/JjWNWXKxwT) to discuss this challenge with the organizers.
Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
to run reference implementations of MLPerf inference benchmarks
diff --git a/cm-mlops/challenge/optimize-mlperf-inference-scc2023/_cm.json b/cm-mlops/challenge/optimize-mlperf-inference-scc2023/_cm.json
index 335020e0b4..868d404d50 100644
--- a/cm-mlops/challenge/optimize-mlperf-inference-scc2023/_cm.json
+++ b/cm-mlops/challenge/optimize-mlperf-inference-scc2023/_cm.json
@@ -2,7 +2,7 @@
"alias": "optimize-mlperf-inference-scc2023",
"automation_alias": "challenge",
"automation_uid": "3d84abd768f34e08",
- "_date_close": "20231115",
+ "date_close": "20231115",
"date_open": "20230915",
"tags": [
"automate",
diff --git a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-android/README.md b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-android/README.md
deleted file mode 100644
index 1af3d813a0..0000000000
--- a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-android/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
-### Challenge
-
-Some MLPerf inference benchmarks for the Edge (image classification, object detection, etc) are possible to run on Android devices.
-
-Add support to cross-compile our TFLite C++ implementation via CM to run some MLPerf inference benchmarks on any Android mobile phone.
-
-Join our public [Discord server](https://discord.gg/JjWNWXKxwT) and/or
-our [weekly conf-calls](https://docs.google.com/document/d/1zMNK1m_LhWm6jimZK6YE05hu4VH9usdbKJ3nBy-ZPAw/edit)
-to discuss this challenge with the organizers.
-
-Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
-to run reference implementations of MLPerf inference benchmarks
-using the CM automation language and use them as a base for your developments.
-
-Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.
-
-
-### Prizes
-
-* *All contributors will participate in writing a common white paper about running and comparing MLPerf inference benchmarks out-of-the-box.*
-* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*
-* *The top contributors will receive cash prizes from [MLCommons organizations](https://mlcommons.org) and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)*.
-
-
-### Organizers
-
-* [MLCommons](https://cKnowledge.org/mlcommons-taskforce)
-* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
-* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
-
-
-
-### Results
-
-All accepted results will be publicly available in the CM format with derived metrics
-in this [MLCommons repository](https://github.com/mlcommons/ck_mlperf_results),
-in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
-and at official [MLCommons website](https://mlcommons.org).
diff --git a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-android/_cm.json b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-android/_cm.json
deleted file mode 100644
index 8f3ca9f460..0000000000
--- a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-android/_cm.json
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "alias": "optimize-mlperf-inference-v3.1-android",
- "automation_alias": "challenge",
- "automation_uid": "3d84abd768f34e08",
- "date_open": "20230704",
- "date_close_extension": true,
- "experiments": [],
- "points": 2,
- "prize": "300$ for the first implementation",
- "prize_short": "co-authoring white paper , $$$",
- "tags": [
- "modularize",
- "optimize",
- "reproduce",
- "replicate",
- "automate",
- "benchmark",
- "android",
- "mlperf-inference",
- "mlperf-inference-android",
- "mlperf-inference-android",
- "mlperf-inference-android-v3.1",
- "mlperf-inference-android-v3.1-2023",
- "v3.1"
- ],
- "title": "Add support to run some MLPerf inference benchmarks for Edge on Android mobile phone",
- "trophies": true,
- "uid": "3ec574afcc594574"
-}
diff --git a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-create-end-to-end-app/README.md b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-create-end-to-end-app/README.md
index 4dabd3ae3c..843c205e69 100644
--- a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-create-end-to-end-app/README.md
+++ b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-create-end-to-end-app/README.md
@@ -3,9 +3,7 @@
Create any end-to-end AI application with web cam, speech recognition, chat bot, LLM
that uses any MLPerf model and CM automation.
-Join our public [Discord server](https://discord.gg/JjWNWXKxwT) and/or
-our [weekly conf-calls](https://docs.google.com/document/d/1zMNK1m_LhWm6jimZK6YE05hu4VH9usdbKJ3nBy-ZPAw/edit)
-to discuss this challenge with the organizers.
+Join our public [Discord server](https://discord.gg/JjWNWXKxwT) to discuss this challenge with the organizers.
Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.
@@ -16,7 +14,6 @@ Looking forward to your submissions and happy hacking!
* *All submitters will participate in writing a common white paper about running and comparing MLPerf inference benchmarks out-of-the-box.*
* *All submitters will receive 1 point for submitting valid results for 1 complete benchmark on one system.*
* *All submitters will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*
-* *The top contributors will receive cash prizes from [MLCommons organizations](https://mlcommons.org) and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)*.
### Organizers
diff --git a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-intel-2023/README.md b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-intel-2023/README.md
index 40e0949a51..c4d8636579 100644
--- a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-intel-2023/README.md
+++ b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-intel-2023/README.md
@@ -5,9 +5,7 @@ Add CM interface to run MLPerf inference benchmarks on Intel-based platforms.
You can start from reproducing any past MLPerf inference submission from Intel and their partners
and then adding CM automation.
-Join our public [Discord server](https://discord.gg/JjWNWXKxwT) and/or
-our [weekly conf-calls](https://docs.google.com/document/d/1zMNK1m_LhWm6jimZK6YE05hu4VH9usdbKJ3nBy-ZPAw/edit)
-to discuss this challenge with the organizers.
+Join our public [Discord server](https://discord.gg/JjWNWXKxwT) to discuss this challenge with the organizers.
Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
to run reference implementations of MLPerf inference benchmarks
diff --git a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-intel-2023/_cm.json b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-intel-2023/_cm.json
index 11ce75a3c6..ccdd440a70 100644
--- a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-intel-2023/_cm.json
+++ b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-intel-2023/_cm.json
@@ -2,7 +2,7 @@
"alias": "optimize-mlperf-inference-v3.1-intel-2023",
"automation_alias": "challenge",
"automation_uid": "3d84abd768f34e08",
- "date_close_extension": true,
+ "date_close": "20240104",
"date_open": "20230704",
"points": 2,
"prize": "200$ for the first implementation and 200 for the fastest implementation",
diff --git a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-qualcomm-ai100-2023/_cm.json b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-qualcomm-ai100-2023/_cm.json
index 855c66b66b..350749fc6c 100644
--- a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-qualcomm-ai100-2023/_cm.json
+++ b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-qualcomm-ai100-2023/_cm.json
@@ -2,7 +2,7 @@
"alias": "optimize-mlperf-inference-v3.1-qualcomm-ai100-2023",
"automation_alias": "challenge",
"automation_uid": "3d84abd768f34e08",
- "date_close_extension": true,
+ "date_close": "20240104",
"date_open": "20230704",
"points":3,
"trophies":true,
diff --git a/cm-mlops/challenge/optimize-mlperf-inference-v4.0-2024/README.md b/cm-mlops/challenge/optimize-mlperf-inference-v4.0-2024/README.md
new file mode 100644
index 0000000000..3e0a705411
--- /dev/null
+++ b/cm-mlops/challenge/optimize-mlperf-inference-v4.0-2024/README.md
@@ -0,0 +1 @@
+Under preparation. Contact the [MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) for more details.
diff --git a/cm-mlops/challenge/optimize-mlperf-inference-v4.0-2024/_cm.yaml b/cm-mlops/challenge/optimize-mlperf-inference-v4.0-2024/_cm.yaml
new file mode 100644
index 0000000000..63ca123e47
--- /dev/null
+++ b/cm-mlops/challenge/optimize-mlperf-inference-v4.0-2024/_cm.yaml
@@ -0,0 +1,27 @@
+alias: optimize-mlperf-inference-v4.0-2024
+uid: e6b8738383eb46d0
+
+automation_alias: challenge
+automation_uid: 3d84abd768f34e08
+
+title: Run and optimize MLPerf inference v4.0 benchmarks (Intel, Nvidia, Qualcomm, Arm64, TPU ...) and submit official results
+
+date_close: '20240225'
+date_open: '20240205'
+
+tags:
+- modularize
+- optimize
+- reproduce
+- replicate
+- automate
+- benchmark
+- mlperf
+- mlperf-inference
+- mlperf-inference-v4.0
+- mlperf-inference-v4.0-2024
+- v4.0
+
+
+experiments:
+- tags: mlperf-inference,v4.0
diff --git a/cm-mlops/challenge/participate-hipeac-reproducibilty-challenge-2024/_cm.json b/cm-mlops/challenge/participate-hipeac-reproducibilty-challenge-2024/_cm.json
index 9c9774ed6b..36016c454a 100644
--- a/cm-mlops/challenge/participate-hipeac-reproducibilty-challenge-2024/_cm.json
+++ b/cm-mlops/challenge/participate-hipeac-reproducibilty-challenge-2024/_cm.json
@@ -3,6 +3,7 @@
"alias": "participate-hipeac-reproducibilty-challenge-2024",
"automation_alias": "challenge",
"automation_uid": "3d84abd768f34e08",
+ "date_close": "20231115",
"date_open": "20230915",
"tags": [
"participate",
diff --git a/cm-mlops/challenge/repro-micro2023/_cm.json b/cm-mlops/challenge/repro-micro2023/_cm.json
index cbfae1f1fd..77fb5f773b 100644
--- a/cm-mlops/challenge/repro-micro2023/_cm.json
+++ b/cm-mlops/challenge/repro-micro2023/_cm.json
@@ -2,7 +2,7 @@
"alias": "repro-micro2023",
"automation_alias": "challenge",
"automation_uid": "3d84abd768f34e08",
- "_date_close": "20230915",
+ "date_close": "20230915",
"date_open": "20230702",
"tags": [
"reproduce",
diff --git a/cm-mlops/challenge/reproduce-automate-explain-past-mlperf-inference-results-2023/README.md b/cm-mlops/challenge/reproduce-automate-explain-past-mlperf-inference-results-2023/README.md
index dc2339ff49..13c51af648 100644
--- a/cm-mlops/challenge/reproduce-automate-explain-past-mlperf-inference-results-2023/README.md
+++ b/cm-mlops/challenge/reproduce-automate-explain-past-mlperf-inference-results-2023/README.md
@@ -10,9 +10,7 @@ them by the community.
A detailed experience report must be provided - if accepted, you will be able to present your findings
at the upcoming HiPEAC'23 workshop and our upcoming white paper with MLCommons.
-Join our public [Discord server](https://discord.gg/JjWNWXKxwT) and/or
-our [weekly conf-calls](https://docs.google.com/document/d/1zMNK1m_LhWm6jimZK6YE05hu4VH9usdbKJ3nBy-ZPAw/edit)
-to discuss this challenge with the organizers.
+Join our public [Discord server](https://discord.gg/JjWNWXKxwT) to discuss this challenge with the organizers.
Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
to run reference implementations of MLPerf inference benchmarks
@@ -23,7 +21,7 @@ Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn
### Prizes
* *All contributors will be able to present their findings at the HiPEAC workshop on reproducibility and participate in writing a common white paper about running and comparing MLPerf inference benchmarks.*
-* *All contributors will receive 2 point*
+* *All contributors will receive 2 points*
* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*
diff --git a/cm-mlops/challenge/train-llm-for-cm-mlperf-2023/README.md b/cm-mlops/challenge/train-llm-for-cm-mlperf-2023/README.md
index 11d7d330e2..9449b02a60 100644
--- a/cm-mlops/challenge/train-llm-for-cm-mlperf-2023/README.md
+++ b/cm-mlops/challenge/train-llm-for-cm-mlperf-2023/README.md
@@ -3,9 +3,7 @@
Improve the prototype of our LLM-based assistant to suggest users how to run MLPerf inference benchmarks
using the MLCommons CM automation language: https://access.cknowledge.org/assistant .
-Join our public [Discord server](https://discord.gg/JjWNWXKxwT) and/or
-our [weekly conf-calls](https://docs.google.com/document/d/1zMNK1m_LhWm6jimZK6YE05hu4VH9usdbKJ3nBy-ZPAw/edit)
-to discuss this challenge with the organizers.
+Join our public [Discord server](https://discord.gg/JjWNWXKxwT) to discuss this challenge with the organizers.
Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
to run reference implementations of MLPerf inference benchmarks
diff --git a/cm-mlops/challenge/train-llm-for-cm-mlperf-2023/_cm.json b/cm-mlops/challenge/train-llm-for-cm-mlperf-2023/_cm.json
index 287b07664e..7b498af3ed 100644
--- a/cm-mlops/challenge/train-llm-for-cm-mlperf-2023/_cm.json
+++ b/cm-mlops/challenge/train-llm-for-cm-mlperf-2023/_cm.json
@@ -2,6 +2,7 @@
"alias": "train-llm-for-cm-mlperf-2023",
"automation_alias": "challenge",
"automation_uid": "3d84abd768f34e08",
+ "date_close_extension": true,
"date_open": "20230704",
"experiments": [],
"points": 3,
diff --git a/cm-mlops/script/app-mlperf-inference-reference/customize.py b/cm-mlops/script/app-mlperf-inference-reference/customize.py
index c22344df03..c0563ec0d6 100644
--- a/cm-mlops/script/app-mlperf-inference-reference/customize.py
+++ b/cm-mlops/script/app-mlperf-inference-reference/customize.py
@@ -293,6 +293,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, mode_extra_optio
env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
scenario_extra_options + mode_extra_options + \
" --output-log-dir " + env['CM_MLPERF_OUTPUT_DIR'] + \
+ ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'] + \
" --model-path " + env['MODEL_DIR']
elif "3d-unet" in env['CM_MODEL']:
diff --git a/cm-mlops/script/app-mlperf-inference/README.md b/cm-mlops/script/app-mlperf-inference/README.md
index c9688ea042..a4da99e6d8 100644
--- a/cm-mlops/script/app-mlperf-inference/README.md
+++ b/cm-mlops/script/app-mlperf-inference/README.md
@@ -187,6 +187,7 @@ ___
- *CM_MLPERF_IMPLEMENTATION*: `nvidia-original`
- *CM_SQUAD_ACCURACY_DTYPE*: `float16`
- *CM_IMAGENET_ACCURACY_DTYPE*: `int32`
+ - *CM_CNNDM_ACCURACY_DTYPE*: `int32`
- *CM_LIBRISPEECH_ACCURACY_DTYPE*: `int8`
- Workflow:
1. ***Read "deps" on other CM scripts***
diff --git a/cm-mlops/script/app-mlperf-inference/_cm.yaml b/cm-mlops/script/app-mlperf-inference/_cm.yaml
index e517c90d21..994eb72f71 100644
--- a/cm-mlops/script/app-mlperf-inference/_cm.yaml
+++ b/cm-mlops/script/app-mlperf-inference/_cm.yaml
@@ -229,10 +229,13 @@ variations:
tags: _float16
librispeech-accuracy-script:
tags: _int8
+ cnndm-accuracy-script:
+ tags: _int32
env:
CM_MLPERF_IMPLEMENTATION: nvidia-original
CM_SQUAD_ACCURACY_DTYPE: float16
CM_IMAGENET_ACCURACY_DTYPE: int32
+ CM_CNNDM_ACCURACY_DTYPE: int32
CM_LIBRISPEECH_ACCURACY_DTYPE: int8
deps:
- tags: get,cuda-devices
@@ -1032,8 +1035,6 @@ invalid_variation_combinations:
- gptj
- tf
-gui_title: "CM GUI for the MLPerf inference benchmark"
-
input_description:
scenario:
desc: "MLPerf inference scenario"
@@ -1093,3 +1094,6 @@ input_description:
desc: "Generate README with the reproducibility report"
debug:
desc: "Debug MLPerf script"
+
+gui:
+ title: "CM GUI for the MLPerf inference benchmark"
diff --git a/cm-mlops/script/build-mlperf-inference-server-nvidia/_cm.yaml b/cm-mlops/script/build-mlperf-inference-server-nvidia/_cm.yaml
index 2a618e9ba3..f990b93e9a 100644
--- a/cm-mlops/script/build-mlperf-inference-server-nvidia/_cm.yaml
+++ b/cm-mlops/script/build-mlperf-inference-server-nvidia/_cm.yaml
@@ -189,21 +189,21 @@ versions:
nvidia-inference-common-code:
version: r2.1
nvidia-scratch-space:
- tags: version.2_1
+ tags: _version.2_1
r3.0:
add_deps_recursive:
nvidia-inference-common-code:
version: r3.0
nvidia-scratch-space:
- tags: version.3_0
+ tags: _version.3_0
r3.1:
add_deps_recursive:
nvidia-inference-common-code:
version: r3.1
nvidia-scratch-space:
- tags: version.3_1
+ tags: _version.4_0
deps:
- tags: install,nccl,libs,_cuda
- tags: install,pytorch,from.src,_for-nvidia-mlperf-inference-v3.1-gptj
@@ -213,6 +213,8 @@ versions:
docker:
skip_run_cmd: 'no'
all_gpus: 'yes'
+ shm_size: '32gb'
+ extra_run_args: ' --ulimit memlock=-1 --cap-add SYS_ADMIN --cap-add SYS_TIME'
docker_os: ubuntu
docker_real_run: False
interactive: True
@@ -230,8 +232,15 @@ docker:
scratch_path: MLPERF_SCRATCH_PATH
deps:
- tags: get,mlperf,inference,nvidia,scratch,space
+ - tags: get,mlperf,inference,results,dir
+ - tags: get,mlperf,inference,submission,dir
+ pre_run_cmds:
+ - cd CM/repos/ctuning@mlcommons-ck && git pull
+ run_cmd_prefix: sudo apt remove -y cmake && cd CM/repos/ctuning@mlcommons-ck && git pull && cd -
mounts:
- "${{ IMAGENET_PATH }}:/data/imagenet-val"
+ - "${{ CM_MLPERF_INFERENCE_RESULTS_DIR }}:${{ CM_MLPERF_INFERENCE_RESULTS_DIR }}"
+ - "${{ CM_MLPERF_INFERENCE_SUBMISSION_DIR }}:${{ CM_MLPERF_INFERENCE_SUBMISSION_DIR }}"
- "${{ RESULTS_DIR }}:/home/cmuser/results_dir"
- "${{ SUBMISSION_DIR }}:/home/cmuser/submission_dir"
- "${{ CM_CUDNN_TAR_FILE_PATH }}:${{ CM_CUDNN_TAR_FILE_PATH }}"
diff --git a/cm-mlops/script/generate-mlperf-tiny-submission/README.md b/cm-mlops/script/generate-mlperf-tiny-submission/README.md
index 4615fcdaab..cebfa2e361 100644
--- a/cm-mlops/script/generate-mlperf-tiny-submission/README.md
+++ b/cm-mlops/script/generate-mlperf-tiny-submission/README.md
@@ -151,7 +151,6 @@ ___
- CM script: [set-echo-off-win](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/set-echo-off-win)
- CM script: [reproduce-ipol-paper-2022-439](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-ipol-paper-2022-439)
- CM script: [get-mlperf-inference-nvidia-common-code](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-nvidia-common-code)
- - CM script: [install-qaic-compute-sdk-from.src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-qaic-compute-sdk-from.src)
- CM script: [destroy-terraform](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/destroy-terraform)
- CM script: [get-dataset-cnndm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-cnndm)
- CM script: [build-dockerfile](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/build-dockerfile)
@@ -187,7 +186,6 @@ ___
- CM script: [get-google-test](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-google-test)
- CM script: [get-dataset-criteo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-criteo)
- CM script: [truncate-mlperf-inference-accuracy-log](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/truncate-mlperf-inference-accuracy-log)
- - CM script: [install-pytorch-from.src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-pytorch-from.src)
- CM script: [get-ml-model-retinanet-nvidia](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-retinanet-nvidia)
- CM script: [reproduce-micro-paper-2023-victima](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/reproduce-micro-paper-2023-victima)
- CM script: [process-ae-users](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/process-ae-users)
@@ -339,7 +337,6 @@ ___
- CM script: [print-hello-world-py](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/print-hello-world-py)
- CM script: [print-hello-world-java](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/print-hello-world-java)
- CM script: [app-mlperf-training-reference](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-training-reference)
- - CM script: [install-mlperf-logging-from.src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-mlperf-logging-from.src)
- CM script: [get-zephyr-sdk](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-zephyr-sdk)
- CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
- CM script: [reproduce-ieee-acm-micro2023-paper-87](https://github.com/ctuning/cm-reproduce-research-projects/tree/master/script/reproduce-ieee-acm-micro2023-paper-87)
@@ -384,10 +381,21 @@ ___
- CM script: [test-script5](https://github.com/gfursin/cm-tests/tree/master/script/test-script5)
- CM script: [get-preprocessed-dataset-openorca](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-preprocessed-dataset-openorca)
- CM script: [create-conda-env](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/create-conda-env)
- - CM script: [install-ipex-from.src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-ipex-from.src)
- - CM script: [install-onednn-from.src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-onednn-from.src)
- - CM script: [install-onnxruntime-from.src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-onnxruntime-from.src)
- - CM script: [install-transformers-from.src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-transformers-from.src)
+ - CM script: [install-qaic-compute-sdk-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-qaic-compute-sdk-from-src)
+ - CM script: [fail](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/fail)
+ - CM script: [install-onnxruntime-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-onnxruntime-from-src)
+ - CM script: [install-ipex-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-ipex-from-src)
+ - CM script: [install-mlperf-logging-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-mlperf-logging-from-src)
+ - CM script: [install-onednn-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-onednn-from-src)
+ - CM script: [install-pytorch-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-pytorch-from-src)
+ - CM script: [install-transformers-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-transformers-from-src)
+ - CM script: [create-patch](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/create-patch)
+ - CM script: [my-script](my-script)
+ - CM script: [get-mlperf-inference-intel-scratch-space](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-intel-scratch-space)
+ - CM script: [get-mlperf-inference-results-dir](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-results-dir)
+ - CM script: [get-mlperf-inference-submission-dir](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-submission-dir)
+ - CM script: [install-nccl-libs](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-nccl-libs)
+ - CM script: [install-pytorch-kineto-from-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/install-pytorch-kineto-from-src)
___
diff --git a/cm-mlops/script/get-cudnn/README.md b/cm-mlops/script/get-cudnn/README.md
index b1838fb0c6..7e91089015 100644
--- a/cm-mlops/script/get-cudnn/README.md
+++ b/cm-mlops/script/get-cudnn/README.md
@@ -26,6 +26,9 @@
### About
+
+See extra [notes](README-extra.md) from the authors and contributors.
+
#### Summary
* Category: *CUDA automation.*
@@ -165,11 +168,13 @@ ___
* `+DYLD_FALLBACK_LIBRARY_PATH`
* `+LD_LIBRARY_PATH`
* `+PATH`
+* `CM_CUDA_PATH_INCLUDE_CUDNN`
* `CM_CUDA_PATH_LIB_CUDNN`
* `CM_CUDA_PATH_LIB_CUDNN_EXISTS`
* `CM_CUDNN_*`
#### New environment keys auto-detected from customize
+* `CM_CUDA_PATH_INCLUDE_CUDNN`
* `CM_CUDA_PATH_LIB_CUDNN`
* `CM_CUDA_PATH_LIB_CUDNN_EXISTS`
* `CM_CUDNN_VERSION`
diff --git a/cm-mlops/script/get-cudnn/_cm.json b/cm-mlops/script/get-cudnn/_cm.json
index e067bf629a..f339286f7a 100644
--- a/cm-mlops/script/get-cudnn/_cm.json
+++ b/cm-mlops/script/get-cudnn/_cm.json
@@ -48,6 +48,5 @@
],
"uid": "d73ee19baee14df8",
"docker": {
- "run": false
}
}
diff --git a/cm-mlops/script/get-mlperf-inference-loadgen/README.md b/cm-mlops/script/get-mlperf-inference-loadgen/README.md
index 04e049ae99..73229afa74 100644
--- a/cm-mlops/script/get-mlperf-inference-loadgen/README.md
+++ b/cm-mlops/script/get-mlperf-inference-loadgen/README.md
@@ -170,9 +170,9 @@ ___
* get,compiler
* `if (CM_HOST_OS_TYPE != windows)`
* CM names: `--adr.['compiler']...`
+ - CM script: [get-llvm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-llvm)
- CM script: [get-cl](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cl)
- CM script: [get-gcc](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-gcc)
- - CM script: [get-llvm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-llvm)
* get,cl
* `if (CM_HOST_OS_TYPE == windows)`
* CM names: `--adr.['compiler']...`
diff --git a/cm-mlops/script/get-mlperf-inference-nvidia-common-code/README.md b/cm-mlops/script/get-mlperf-inference-nvidia-common-code/README.md
index 208bc78ca6..7dfd1d4c71 100644
--- a/cm-mlops/script/get-mlperf-inference-nvidia-common-code/README.md
+++ b/cm-mlops/script/get-mlperf-inference-nvidia-common-code/README.md
@@ -141,6 +141,7 @@ ___
* get,mlperf,inference,results
* CM names: `--adr.['mlperf-inference-results']...`
- CM script: [get-mlperf-inference-results](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-results)
+ - CM script: [get-mlperf-inference-results-dir](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-results-dir)
1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-nvidia-common-code/customize.py)***
1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-nvidia-common-code/_cm.json)
1. ***Run native script if exists***
diff --git a/cm-mlops/script/get-tensorrt/_cm.json b/cm-mlops/script/get-tensorrt/_cm.json
index 174feeb9aa..b1aa40e06a 100644
--- a/cm-mlops/script/get-tensorrt/_cm.json
+++ b/cm-mlops/script/get-tensorrt/_cm.json
@@ -47,6 +47,5 @@
}
},
"docker": {
- "run": false
}
}
diff --git a/cm-mlops/script/gui/app.py b/cm-mlops/script/gui/app.py
index 0db457dd85..f732599bc1 100644
--- a/cm-mlops/script/gui/app.py
+++ b/cm-mlops/script/gui/app.py
@@ -8,14 +8,7 @@ def main():
var1 = '^' if os.name == 'nt' else '\\'
- compatibility = False
- try:
- query_params = st.query_params
- except:
- compatibility = True
-
- if compatibility:
- query_params = st.experimental_get_query_params()
+ query_params = misc.get_params(st)
script_path = os.environ.get('CM_GUI_SCRIPT_PATH','')
script_alias = os.environ.get('CM_GUI_SCRIPT_ALIAS','')
@@ -50,6 +43,7 @@ def main():
if len(lst)==1:
script = lst[0]
meta = script.meta
+ script_path = script.path
script_alias = meta['alias']
@@ -63,11 +57,22 @@ def main():
script_path = script.path
script_alias = meta['alias']
- if meta.get('gui_title','')!='':
- title = meta['gui_title']
+ gui_meta = meta.get('gui',{})
+
+ gui_func = gui_meta.get('use_customize_func', '')
+ if gui_func!='':
+ ii = {'streamlit_module':st,
+ 'meta':meta}
+ return cmind.utils.call_internal_module(None, os.path.join(script_path, 'dummy') ,
+ 'customize', gui_func, ii)
+
+
+ if gui_meta.get('title','')!='':
+ title = gui_meta['title']
+
# Set title
- st.title('Collective Mind GUI')
+ st.title('[Collective Mind](https://github.com/mlcommons/ck)')
if script_alias!='':
st.markdown('*CM script: "{}"*'.format(script_alias))
@@ -136,33 +141,34 @@ def main():
# Prepare variation_groups
# st.markdown("""---""")
- st.subheader('Script variations')
-
- variation_groups_order = meta.get('variation_groups_order',[])
- for variation in sorted(variation_groups):
- if variation not in variation_groups_order:
- variation_groups_order.append(variation)
-
- for group_key in variation_groups_order:
- group_key_cap = group_key.replace('-',' ').capitalize()
- if not group_key.startswith('*'):
- y = ['']
-
- index = 0
- selected_index = 0
- for variation_key in sorted(variation_groups[group_key]):
- index += 1
- y.append(variation_key)
- if variation_key in default_variations:
- selected_index=index
-
- st_variations['~'+group_key] = st.selectbox(group_key_cap, sorted(y), index=selected_index, key='~'+group_key)
- elif group_key == '*no-group*':
- for variation_key in sorted(variation_groups[group_key]):
- x = False
- if variation_key in default_variations:
- x=True
- st_variations['#'+variation_key] = st.checkbox(variation_key.capitalize(), key='#'+variation_key, value=x)
+ if len(variations)>0:
+ st.subheader('Script variations')
+
+ variation_groups_order = meta.get('variation_groups_order',[])
+ for variation in sorted(variation_groups):
+ if variation not in variation_groups_order:
+ variation_groups_order.append(variation)
+
+ for group_key in variation_groups_order:
+ group_key_cap = group_key.replace('-',' ').capitalize()
+ if not group_key.startswith('*'):
+ y = ['']
+
+ index = 0
+ selected_index = 0
+ for variation_key in sorted(variation_groups[group_key]):
+ index += 1
+ y.append(variation_key)
+ if variation_key in default_variations:
+ selected_index=index
+
+ st_variations['~'+group_key] = st.selectbox(group_key_cap, sorted(y), index=selected_index, key='~'+group_key)
+ elif group_key == '*no-group*':
+ for variation_key in sorted(variation_groups[group_key]):
+ x = False
+ if variation_key in default_variations:
+ x=True
+ st_variations['#'+variation_key] = st.checkbox(variation_key.capitalize(), key='#'+variation_key, value=x)
# Prepare inputs
@@ -293,7 +299,7 @@ def main():
if y!='':
x+=y
- st.text_area('**Install [CM interface](https://github.com/mlcommons/ck) with a few dependencies:**', x, height=170)
+ st.text_area('**Install [MLCommons CM](https://github.com/mlcommons/ck/blob/master/docs/installation.md) with a few dependencies:**', x, height=170)
st.markdown("**Run CM script from Python:**")
diff --git a/cm-mlops/script/gui/graph.py b/cm-mlops/script/gui/graph.py
index 6d779e2d4c..409e350c5b 100644
--- a/cm-mlops/script/gui/graph.py
+++ b/cm-mlops/script/gui/graph.py
@@ -60,14 +60,8 @@ def __init__(self, points, targets=None):
def main():
- compatibility = False
- try:
- params = st.query_params
- except:
- compatibility = True
-
- if compatibility:
- params = st.experimental_get_query_params()
+
+ params = misc.get_params(st)
# Set title
st.title('CM experiment visualization')
diff --git a/cm-mlops/script/gui/misc.py b/cm-mlops/script/gui/misc.py
index a72e369b09..757965c2c5 100644
--- a/cm-mlops/script/gui/misc.py
+++ b/cm-mlops/script/gui/misc.py
@@ -1,5 +1,6 @@
# Support functions
+##########################################################
def make_url(name, alias='', action='contributors', key='name', md=True):
import urllib
@@ -17,6 +18,7 @@ def make_url(name, alias='', action='contributors', key='name', md=True):
return md
+##########################################################
def convert_date(date):
# date: format YYYYMMDD to YYYY month day
@@ -30,3 +32,23 @@ def convert_date(date):
return {'return':1, 'error':'date "{}" is not of format YYYYMMDD: {}'.format(date, format(e))}
return {'return':0, 'string':year+' '+month+' '+day}
+
+##########################################################
+def get_params(st):
+ compatibility = False
+
+ try:
+ params2 = st.query_params
+ # Convert to old style
+ params = {}
+ for k in params2:
+ v = params2[k]
+            # Preserve list values; wrap scalars to match the old API shape
+            params[k] = v if type(v)==list else [v]
+ except:
+ compatibility = True
+
+ if compatibility:
+ params = st.experimental_get_query_params()
+
+ return params
diff --git a/cm-mlops/script/gui/playground.py b/cm-mlops/script/gui/playground.py
index 081bd48341..f994e6eb51 100644
--- a/cm-mlops/script/gui/playground.py
+++ b/cm-mlops/script/gui/playground.py
@@ -13,14 +13,7 @@ def main():
st.set_page_config(layout="wide",
menu_items={})
- compatibility = False
- try:
- params = st.query_params
- except:
- compatibility = True
-
- if compatibility:
- params = st.experimental_get_query_params()
+ params = misc.get_params(st)
# Set style
# Green: background:#7fcf6f;
@@ -56,8 +49,8 @@ def main():
st.write('''
Collective Knowledge Playground
-
-
+
+ Collaborative Benchmarking and Optimization of AI Systems and Applications
{}
@@ -68,6 +61,7 @@ def main():
# Check action and basic menu
action = params.get('action',['contributors'])[0].lower()
+ style_action_howtorun='font-style:italic;font-weight:bold;color:#ffffff' if action=='howtorun' else ''
style_action_challenges='font-style:italic;font-weight:bold;color:#ffffff' if action=='challenges' else ''
style_action_experiments='font-style:italic;font-weight:bold;color:#ffffff' if action=='experiments' else ''
style_action_contributors='font-style:italic;font-weight:bold;color:#ffffff' if action=='contributors' else ''
@@ -76,6 +70,7 @@ def main():
st.write('''
+
@@ -85,6 +80,7 @@ def main():
'''.format(
+ style_action_howtorun,
style_action_contributors,
style_action_challenges,
style_action_experiments,
@@ -100,7 +96,10 @@ def main():
r={'return':0}
- if action == 'challenges':
+ if action == 'howtorun':
+ from playground_howtorun import page
+ r = page(st, params)
+ elif action == 'challenges':
from playground_challenges import page
r = page(st, params)
elif action == 'experiments':
diff --git a/cm-mlops/script/gui/playground_challenges.py b/cm-mlops/script/gui/playground_challenges.py
index f121e317e3..2b61101e36 100644
--- a/cm-mlops/script/gui/playground_challenges.py
+++ b/cm-mlops/script/gui/playground_challenges.py
@@ -125,12 +125,13 @@ def page(st, params):
x = '''
Ongoing reproducibility and optimization challenges