From 05f409c4fe9af9d13c32ff16b076cce20f3ca1f8 Mon Sep 17 00:00:00 2001 From: <> Date: Fri, 20 Dec 2024 15:11:18 +0000 Subject: [PATCH] Deployed b607edb with MkDocs version: 1.6.1 --- .nojekyll | 0 404.html | 607 ++ _generator/api/conf.py | 248 + _generator/api/generate.bat | 16 + _generator/api/generate.sh | 11 + _generator/api/index.rst | 13 + _generator/api/static/css/custom.css | 6 + _generator/api/static/logo.png | Bin 0 -> 9949 bytes _generator/generate_api.bat | 4 + _generator/generate_api.sh | 4 + _generator/generate_toc.cmd | 38 + _generator/list_automations.cmd | 1 + _generator/list_scripts.cmd | 1 + _generator/list_scripts_test.cmd | 1 + archive/taskforce-2022/index.html | 962 +++ artifact-evaluation/checklist/index.html | 627 ++ artifact-evaluation/faq/index.html | 627 ++ .../HotCRP_Settings__Basics__PPoPP'19_AE.pdf | Bin 0 -> 99663 bytes ...otCRP_Settings__Decisions__PPoPP'19_AE.pdf | Bin 0 -> 133405 bytes ...HotCRP_Settings__Messages__PPoPP'19_AE.pdf | Bin 0 -> 100689 bytes ...CRP_Settings__Review_form__PPoPP'19_AE.pdf | Bin 0 -> 164107 bytes .../HotCRP_Settings__Reviews__PPoPP'19_AE.pdf | Bin 0 -> 142220 bytes ...Settings__Submission_form__PPoPP'19_AE.pdf | Bin 0 -> 103299 bytes ...CRP_Settings__Submissions__PPoPP'19_AE.pdf | Bin 0 -> 105277 bytes .../hotcrp-config-acm-ieee-micro-2023-ae.json | 869 +++ artifact-evaluation/hotcrp-config/index.html | 627 ++ .../image-994e7359d7760ab1-cropped.png | Bin 0 -> 74797 bytes .../image-general-workflow1.png | Bin 0 -> 42339 bytes artifact-evaluation/image-pipelines2.png | Bin 0 -> 33566 bytes artifact-evaluation/reviewing/index.html | 627 ++ artifact-evaluation/submission/index.html | 627 ++ artifact-evaluation/template/ae.tex | 1 + artifact-evaluation/template/sigplanconf.cls | 1311 ++++ assets/images/favicon.png | Bin 0 -> 1870 bytes assets/javascripts/bundle.88dd0f4e.min.js | 16 + assets/javascripts/bundle.88dd0f4e.min.js.map | 7 + assets/javascripts/lunr/min/lunr.ar.min.js | 1 + assets/javascripts/lunr/min/lunr.da.min.js | 18 + assets/javascripts/lunr/min/lunr.de.min.js | 18 + assets/javascripts/lunr/min/lunr.du.min.js | 18 + assets/javascripts/lunr/min/lunr.el.min.js | 1 + assets/javascripts/lunr/min/lunr.es.min.js | 18 + assets/javascripts/lunr/min/lunr.fi.min.js | 18 + assets/javascripts/lunr/min/lunr.fr.min.js | 18 + assets/javascripts/lunr/min/lunr.he.min.js | 1 + assets/javascripts/lunr/min/lunr.hi.min.js | 1 + assets/javascripts/lunr/min/lunr.hu.min.js | 18 + assets/javascripts/lunr/min/lunr.hy.min.js | 1 + assets/javascripts/lunr/min/lunr.it.min.js | 18 + assets/javascripts/lunr/min/lunr.ja.min.js | 1 + assets/javascripts/lunr/min/lunr.jp.min.js | 1 + assets/javascripts/lunr/min/lunr.kn.min.js | 1 + assets/javascripts/lunr/min/lunr.ko.min.js | 1 + assets/javascripts/lunr/min/lunr.multi.min.js | 1 + assets/javascripts/lunr/min/lunr.nl.min.js | 18 + assets/javascripts/lunr/min/lunr.no.min.js | 18 + assets/javascripts/lunr/min/lunr.pt.min.js | 18 + assets/javascripts/lunr/min/lunr.ro.min.js | 18 + assets/javascripts/lunr/min/lunr.ru.min.js | 18 + assets/javascripts/lunr/min/lunr.sa.min.js | 1 + .../lunr/min/lunr.stemmer.support.min.js | 1 + assets/javascripts/lunr/min/lunr.sv.min.js | 18 + assets/javascripts/lunr/min/lunr.ta.min.js | 1 + assets/javascripts/lunr/min/lunr.te.min.js | 1 + assets/javascripts/lunr/min/lunr.th.min.js | 1 + assets/javascripts/lunr/min/lunr.tr.min.js | 18 + assets/javascripts/lunr/min/lunr.vi.min.js | 1 + assets/javascripts/lunr/min/lunr.zh.min.js | 1 + assets/javascripts/lunr/tinyseg.js | 206 + 
assets/javascripts/lunr/wordcut.js | 6708 +++++++++++++++++ .../workers/search.6ce7567c.min.js | 42 + .../workers/search.6ce7567c.min.js.map | 7 + assets/stylesheets/main.6f8fc17f.min.css | 1 + assets/stylesheets/main.6f8fc17f.min.css.map | 1 + assets/stylesheets/palette.06af60db.min.css | 1 + .../stylesheets/palette.06af60db.min.css.map | 1 + cm/index.html | 777 ++ debugging/index.html | 673 ++ faq/index.html | 721 ++ getting-started/index.html | 1479 ++++ history/index.html | 655 ++ img/logo_v2.svg | 6 + index.html | 1177 +++ install/index.html | 863 +++ installation-cuda/index.html | 802 ++ installation/index.html | 909 +++ interface/index.html | 905 +++ introduction-ck/index.html | 668 ++ introduction-cm/index.html | 726 ++ list_of_automations/index.html | 989 +++ list_of_scripts/index.html | 1270 ++++ meetings/20240731/index.html | 735 ++ meetings/20240808/index.html | 645 ++ misc/ML/index.html | 635 ++ misc/MLOps/index.html | 714 ++ misc/history/index.html | 725 ++ misc/overview/index.html | 712 ++ mlperf-cm-automation-demo/index.html | 628 ++ mlperf-education-workgroup/index.html | 627 ++ mlperf/index.html | 649 ++ .../3d-unet/README_nvidia/index.html | 744 ++ .../3d-unet/README_reference/index.html | 720 ++ mlperf/inference/3d-unet/index.html | 711 ++ mlperf/inference/README_a100/index.html | 627 ++ mlperf/inference/Submission/index.html | 734 ++ .../all/README_nvidia_4090/index.html | 657 ++ .../all/README_nvidia_a100/index.html | 927 +++ .../bert/README_deepsparse/index.html | 747 ++ mlperf/inference/bert/README_intel/index.html | 709 ++ .../inference/bert/README_nvidia/index.html | 741 ++ .../inference/bert/README_qualcomm/index.html | 716 ++ .../bert/README_reference/index.html | 734 ++ mlperf/inference/bert/index.html | 743 ++ .../inference/bert/run_custom_onnx_models.sh | 43 + mlperf/inference/bert/run_sparse_models.sh | 51 + mlperf/inference/bert/tutorial/index.html | 980 +++ .../dlrm_v2/README_nvidia/index.html | 744 ++ .../dlrm_v2/README_reference/index.html | 727 ++ mlperf/inference/dlrm_v2/index.html | 711 ++ .../inference/gpt-j/README_intel/index.html | 768 ++ .../inference/gpt-j/README_nvidia/index.html | 745 ++ .../gpt-j/README_reference/index.html | 734 ++ mlperf/inference/gpt-j/index.html | 712 ++ mlperf/inference/index.html | 932 +++ .../llama2-70b/README_reference/index.html | 715 ++ mlperf/inference/llama2-70b/index.html | 710 ++ .../resnet50/README_nvidia/index.html | 727 ++ .../resnet50/README_reference/index.html | 717 ++ .../resnet50/README_tflite/index.html | 757 ++ mlperf/inference/resnet50/index.html | 725 ++ .../retinanet/README_nvidia/index.html | 740 ++ .../retinanet/README_reference/index.html | 732 ++ mlperf/inference/retinanet/index.html | 710 ++ .../inference/rnnt/README_nvidia/index.html | 740 ++ .../rnnt/README_reference/index.html | 714 ++ mlperf/inference/rnnt/index.html | 710 ++ .../README_reference/index.html | 715 ++ .../inference/stable-diffusion-xl/index.html | 709 ++ mlperf/setup/setup-aws-instance/index.html | 734 ++ mlperf/setup/setup-gcp-instance/index.html | 711 ++ .../setup/setup-nvidia-jetson-orin/index.html | 890 +++ mlperf/setup/setup-nvidia/index.html | 654 ++ news-mlperf-v3.1/index.html | 713 ++ news/index.html | 1249 +++ requirements.txt | 4 + search/search_index.json | 1 + sitemap.xml | 3 + sitemap.xml.gz | Bin 0 -> 127 bytes specs/cm-automation-script/index.html | 752 ++ specs/cm-cli/index.html | 1192 +++ specs/cm-diagram-v3.5.1.png | Bin 0 -> 331642 bytes specs/cm-python-interface/index.html | 654 ++ 
specs/cm-repository/index.html | 772 ++ specs/cm-tool-architecture/index.html | 679 ++ specs/index.html | 643 ++ taskforce/index.html | 841 +++ tests/index.html | 632 ++ tutorials/automate-mlperf-tiny/index.html | 863 +++ .../index.html | 1018 +++ tutorials/concept/index.html | 1296 ++++ tutorials/hello-world/index.html | 662 ++ tutorials/index.html | 650 ++ .../index.html | 855 +++ .../mlperf-inference-submission/index.html | 877 +++ .../mlperf-language-processing/index.html | 690 ++ .../modular-image-classification/index.html | 770 ++ tutorials/reproduce-mlperf-tiny/index.html | 806 ++ .../reproduce-mlperf-training/index.html | 709 ++ .../reproduce-research-paper-ipol/index.html | 739 ++ tutorials/sc22-scc-mlperf-part2/index.html | 1070 +++ tutorials/sc22-scc-mlperf-part3/index.html | 978 +++ tutorials/sc22-scc-mlperf/index.html | 1202 +++ tutorials/sc22-scc-mlperf2/index.html | 627 ++ tutorials/sc22-scc-mlperf3/index.html | 627 ++ .../scc23-mlperf-inference-bert/index.html | 636 ++ tutorials/scc24-mlperf-inference/index.html | 640 ++ tutorials/scripts/index.html | 741 ++ tutorials/test-spec-ptdaemon/index.html | 678 ++ 178 files changed, 88231 insertions(+) create mode 100644 .nojekyll create mode 100644 404.html create mode 100644 _generator/api/conf.py create mode 100644 _generator/api/generate.bat create mode 100644 _generator/api/generate.sh create mode 100644 _generator/api/index.rst create mode 100644 _generator/api/static/css/custom.css create mode 100644 _generator/api/static/logo.png create mode 100644 _generator/generate_api.bat create mode 100644 _generator/generate_api.sh create mode 100644 _generator/generate_toc.cmd create mode 100644 _generator/list_automations.cmd create mode 100644 _generator/list_scripts.cmd create mode 100644 _generator/list_scripts_test.cmd create mode 100644 archive/taskforce-2022/index.html create mode 100644 artifact-evaluation/checklist/index.html create mode 100644 artifact-evaluation/faq/index.html create mode 100644 artifact-evaluation/hotcrp-config/HotCRP_Settings__Basics__PPoPP'19_AE.pdf create mode 100644 artifact-evaluation/hotcrp-config/HotCRP_Settings__Decisions__PPoPP'19_AE.pdf create mode 100644 artifact-evaluation/hotcrp-config/HotCRP_Settings__Messages__PPoPP'19_AE.pdf create mode 100644 artifact-evaluation/hotcrp-config/HotCRP_Settings__Review_form__PPoPP'19_AE.pdf create mode 100644 artifact-evaluation/hotcrp-config/HotCRP_Settings__Reviews__PPoPP'19_AE.pdf create mode 100644 artifact-evaluation/hotcrp-config/HotCRP_Settings__Submission_form__PPoPP'19_AE.pdf create mode 100644 artifact-evaluation/hotcrp-config/HotCRP_Settings__Submissions__PPoPP'19_AE.pdf create mode 100644 artifact-evaluation/hotcrp-config/hotcrp-config-acm-ieee-micro-2023-ae.json create mode 100644 artifact-evaluation/hotcrp-config/index.html create mode 100644 artifact-evaluation/image-994e7359d7760ab1-cropped.png create mode 100644 artifact-evaluation/image-general-workflow1.png create mode 100644 artifact-evaluation/image-pipelines2.png create mode 100644 artifact-evaluation/reviewing/index.html create mode 100644 artifact-evaluation/submission/index.html create mode 100644 artifact-evaluation/template/ae.tex create mode 100644 artifact-evaluation/template/sigplanconf.cls create mode 100644 assets/images/favicon.png create mode 100644 assets/javascripts/bundle.88dd0f4e.min.js create mode 100644 assets/javascripts/bundle.88dd0f4e.min.js.map create mode 100644 assets/javascripts/lunr/min/lunr.ar.min.js create mode 100644 
assets/javascripts/lunr/min/lunr.da.min.js create mode 100644 assets/javascripts/lunr/min/lunr.de.min.js create mode 100644 assets/javascripts/lunr/min/lunr.du.min.js create mode 100644 assets/javascripts/lunr/min/lunr.el.min.js create mode 100644 assets/javascripts/lunr/min/lunr.es.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.he.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hu.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hy.min.js create mode 100644 assets/javascripts/lunr/min/lunr.it.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ja.min.js create mode 100644 assets/javascripts/lunr/min/lunr.jp.min.js create mode 100644 assets/javascripts/lunr/min/lunr.kn.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ko.min.js create mode 100644 assets/javascripts/lunr/min/lunr.multi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.nl.min.js create mode 100644 assets/javascripts/lunr/min/lunr.no.min.js create mode 100644 assets/javascripts/lunr/min/lunr.pt.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ro.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ru.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sa.min.js create mode 100644 assets/javascripts/lunr/min/lunr.stemmer.support.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sv.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ta.min.js create mode 100644 assets/javascripts/lunr/min/lunr.te.min.js create mode 100644 assets/javascripts/lunr/min/lunr.th.min.js create mode 100644 assets/javascripts/lunr/min/lunr.tr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.vi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.zh.min.js create mode 100644 assets/javascripts/lunr/tinyseg.js create mode 100644 assets/javascripts/lunr/wordcut.js create mode 100644 assets/javascripts/workers/search.6ce7567c.min.js create mode 100644 assets/javascripts/workers/search.6ce7567c.min.js.map create mode 100644 assets/stylesheets/main.6f8fc17f.min.css create mode 100644 assets/stylesheets/main.6f8fc17f.min.css.map create mode 100644 assets/stylesheets/palette.06af60db.min.css create mode 100644 assets/stylesheets/palette.06af60db.min.css.map create mode 100644 cm/index.html create mode 100644 debugging/index.html create mode 100644 faq/index.html create mode 100644 getting-started/index.html create mode 100644 history/index.html create mode 100644 img/logo_v2.svg create mode 100644 index.html create mode 100644 install/index.html create mode 100644 installation-cuda/index.html create mode 100644 installation/index.html create mode 100644 interface/index.html create mode 100644 introduction-ck/index.html create mode 100644 introduction-cm/index.html create mode 100644 list_of_automations/index.html create mode 100644 list_of_scripts/index.html create mode 100644 meetings/20240731/index.html create mode 100644 meetings/20240808/index.html create mode 100644 misc/ML/index.html create mode 100644 misc/MLOps/index.html create mode 100644 misc/history/index.html create mode 100644 misc/overview/index.html create mode 100644 mlperf-cm-automation-demo/index.html create mode 100644 mlperf-education-workgroup/index.html create mode 100644 mlperf/index.html create mode 100644 mlperf/inference/3d-unet/README_nvidia/index.html create mode 100644 
mlperf/inference/3d-unet/README_reference/index.html create mode 100644 mlperf/inference/3d-unet/index.html create mode 100644 mlperf/inference/README_a100/index.html create mode 100644 mlperf/inference/Submission/index.html create mode 100644 mlperf/inference/all/README_nvidia_4090/index.html create mode 100644 mlperf/inference/all/README_nvidia_a100/index.html create mode 100644 mlperf/inference/bert/README_deepsparse/index.html create mode 100644 mlperf/inference/bert/README_intel/index.html create mode 100644 mlperf/inference/bert/README_nvidia/index.html create mode 100644 mlperf/inference/bert/README_qualcomm/index.html create mode 100644 mlperf/inference/bert/README_reference/index.html create mode 100644 mlperf/inference/bert/index.html create mode 100644 mlperf/inference/bert/run_custom_onnx_models.sh create mode 100644 mlperf/inference/bert/run_sparse_models.sh create mode 100644 mlperf/inference/bert/tutorial/index.html create mode 100644 mlperf/inference/dlrm_v2/README_nvidia/index.html create mode 100644 mlperf/inference/dlrm_v2/README_reference/index.html create mode 100644 mlperf/inference/dlrm_v2/index.html create mode 100644 mlperf/inference/gpt-j/README_intel/index.html create mode 100644 mlperf/inference/gpt-j/README_nvidia/index.html create mode 100644 mlperf/inference/gpt-j/README_reference/index.html create mode 100644 mlperf/inference/gpt-j/index.html create mode 100644 mlperf/inference/index.html create mode 100644 mlperf/inference/llama2-70b/README_reference/index.html create mode 100644 mlperf/inference/llama2-70b/index.html create mode 100644 mlperf/inference/resnet50/README_nvidia/index.html create mode 100644 mlperf/inference/resnet50/README_reference/index.html create mode 100644 mlperf/inference/resnet50/README_tflite/index.html create mode 100644 mlperf/inference/resnet50/index.html create mode 100644 mlperf/inference/retinanet/README_nvidia/index.html create mode 100644 mlperf/inference/retinanet/README_reference/index.html create mode 100644 mlperf/inference/retinanet/index.html create mode 100644 mlperf/inference/rnnt/README_nvidia/index.html create mode 100644 mlperf/inference/rnnt/README_reference/index.html create mode 100644 mlperf/inference/rnnt/index.html create mode 100644 mlperf/inference/stable-diffusion-xl/README_reference/index.html create mode 100644 mlperf/inference/stable-diffusion-xl/index.html create mode 100644 mlperf/setup/setup-aws-instance/index.html create mode 100644 mlperf/setup/setup-gcp-instance/index.html create mode 100644 mlperf/setup/setup-nvidia-jetson-orin/index.html create mode 100644 mlperf/setup/setup-nvidia/index.html create mode 100644 news-mlperf-v3.1/index.html create mode 100644 news/index.html create mode 100644 requirements.txt create mode 100644 search/search_index.json create mode 100644 sitemap.xml create mode 100644 sitemap.xml.gz create mode 100644 specs/cm-automation-script/index.html create mode 100644 specs/cm-cli/index.html create mode 100644 specs/cm-diagram-v3.5.1.png create mode 100644 specs/cm-python-interface/index.html create mode 100644 specs/cm-repository/index.html create mode 100644 specs/cm-tool-architecture/index.html create mode 100644 specs/index.html create mode 100644 taskforce/index.html create mode 100644 tests/index.html create mode 100644 tutorials/automate-mlperf-tiny/index.html create mode 100644 tutorials/common-interface-to-reproduce-research-projects/index.html create mode 100644 tutorials/concept/index.html create mode 100644 tutorials/hello-world/index.html create mode 100644 
tutorials/index.html create mode 100644 tutorials/mlperf-inference-power-measurement/index.html create mode 100644 tutorials/mlperf-inference-submission/index.html create mode 100644 tutorials/mlperf-language-processing/index.html create mode 100644 tutorials/modular-image-classification/index.html create mode 100644 tutorials/reproduce-mlperf-tiny/index.html create mode 100644 tutorials/reproduce-mlperf-training/index.html create mode 100644 tutorials/reproduce-research-paper-ipol/index.html create mode 100644 tutorials/sc22-scc-mlperf-part2/index.html create mode 100644 tutorials/sc22-scc-mlperf-part3/index.html create mode 100644 tutorials/sc22-scc-mlperf/index.html create mode 100644 tutorials/sc22-scc-mlperf2/index.html create mode 100644 tutorials/sc22-scc-mlperf3/index.html create mode 100644 tutorials/scc23-mlperf-inference-bert/index.html create mode 100644 tutorials/scc24-mlperf-inference/index.html create mode 100644 tutorials/scripts/index.html create mode 100644 tutorials/test-spec-ptdaemon/index.html diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000000..e69de29bb2 diff --git a/404.html b/404.html new file mode 100644 index 0000000000..9c3f12403e --- /dev/null +++ b/404.html @@ -0,0 +1,607 @@ + + + +
[ Back to index ]

Following our successful community submission to MLPerf inference v3.0, we will set up new weekly conf-calls shortly - please stay tuned for more details! Please add your topics for discussion in the meeting notes or via GitHub tickets. Please join our mailing list here. See our R&D roadmap for Q4 2022 and Q1 2023.

MLCommons is a non-profit consortium of 50+ companies that was originally created to develop a common, reproducible and fair benchmarking methodology for new AI and ML hardware.

MLCommons has developed an open-source, reusable module called loadgen that efficiently and fairly measures the performance of inference systems. It generates traffic for scenarios formulated by a diverse set of experts from MLCommons to emulate the workloads seen in mobile devices, autonomous vehicles, robotics and cloud-based setups.

MLCommons has also prepared several reference ML tasks, models and datasets for vision, recommendation, language processing and speech recognition so that companies can benchmark and compare their new hardware in terms of accuracy, latency, throughput and energy in a reproducible way twice a year.
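To give a concrete feel for how loadgen drives a benchmark, here is a minimal sketch of its Python bindings (mlperf_loadgen), assuming a dummy system under test that simply echoes empty responses; a real submission would plug in an actual model and return real results.

```python
# Minimal sketch of the MLPerf loadgen Python bindings (mlperf_loadgen).
# The "system under test" here only echoes a placeholder byte per query;
# a real SUT would run an actual model. Recent loadgen versions construct
# the SUT from two callbacks (older releases also took a latency callback).
import array
import mlperf_loadgen as lg

def issue_queries(query_samples):
    # Pretend we ran inference and complete each query with a dummy buffer.
    responses = []
    for qs in query_samples:
        data = array.array("B", [0])           # placeholder result buffer
        addr, _ = data.buffer_info()
        responses.append(lg.QuerySampleResponse(qs.id, addr, len(data)))
    lg.QuerySamplesComplete(responses)

def flush_queries():
    pass

def load_samples(sample_indices):
    pass                                        # a real QSL would load data into RAM

def unload_samples(sample_indices):
    pass

settings = lg.TestSettings()
settings.scenario = lg.TestScenario.Offline     # also: SingleStream, MultiStream, Server
settings.mode = lg.TestMode.PerformanceOnly

sut = lg.ConstructSUT(issue_queries, flush_queries)
qsl = lg.ConstructQSL(1024, 128, load_samples, unload_samples)
lg.StartTest(sut, qsl, settings)
lg.DestroyQSL(qsl)
lg.DestroySUT(sut)
```

Loadgen then writes the standard mlperf_log_summary.txt and mlperf_log_detail.txt files that the submission checker consumes.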
The first goal of this open automation and reproducibility taskforce is to develop a light-weight and open-source automation meta-framework that can make MLOps and DevOps more interoperable, reusable, portable, deterministic and reproducible.

We then use this automation meta-framework to develop plug&play workflows for the MLPerf benchmarks to make it easier for newcomers to run them across diverse hardware, software and data, and to automatically plug in their own ML tasks, models, data sets, engines, libraries and tools.

Another goal is to use these portable MLPerf workflows to help students, researchers and engineers participate in crowd-benchmarking and exploration of the design space trade-offs (accuracy, latency, throughput, energy, size, etc.) of their ML systems from the cloud to the edge using the mature MLPerf methodology, while automating the submission of their Pareto-efficient configurations to the open division of the MLPerf inference benchmark.
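To make the notion of Pareto-efficient configurations concrete, the short sketch below (illustrative only, not taken from the CM code base) keeps the benchmark configurations that are not dominated on the latency/accuracy trade-off:

```python
# Illustrative only: keep configurations that are Pareto-efficient for
# (lower latency, higher accuracy). Names and numbers are made up.
from typing import Dict, List

def pareto_front(configs: List[Dict]) -> List[Dict]:
    """Return the configs not dominated by any other config."""
    front = []
    for c in configs:
        dominated = any(
            o["latency_ms"] <= c["latency_ms"] and o["accuracy"] >= c["accuracy"]
            and (o["latency_ms"] < c["latency_ms"] or o["accuracy"] > c["accuracy"])
            for o in configs
        )
        if not dominated:
            front.append(c)
    return front

configs = [
    {"name": "int8-batch8", "latency_ms": 2.1, "accuracy": 75.6},
    {"name": "fp16-batch4", "latency_ms": 3.4, "accuracy": 76.4},
    {"name": "fp32-batch1", "latency_ms": 7.9, "accuracy": 76.5},
    {"name": "int8-batch1", "latency_ms": 2.8, "accuracy": 75.1},  # dominated
]
print([c["name"] for c in pareto_front(configs)])
```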
The final goal is to help end users reproduce MLPerf results and deploy the most suitable ML/SW/HW stacks in production based on their requirements and constraints.
This MLCommons taskforce is developing an open-source and technology-neutral Collective Mind meta-framework (CM) to modularize ML systems and automate their benchmarking, optimization and design space exploration across continuously changing software, hardware and data.

CM is the second generation of the MLCommons CK workflow automation framework that was originally developed to make it easier to reproduce research papers and validate them in the real world.
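As a rough illustration of how CM can be scripted, here is a hedged sketch using the cmind Python package that ships with CM; the repository alias and script tags follow the CM documentation of that period and may differ in newer releases.

```python
# Hedged sketch of the CM (Collective Mind) Python interface.
# Assumes `pip install cmind`; the repository alias and script tags below are
# taken from the CM docs of that time and may not match newer releases.
import cmind

# Pull a repository of portable CM scripts.
r = cmind.access({'action': 'pull', 'automation': 'repo',
                  'artifact': 'mlcommons@ck'})
if r['return'] > 0:
    raise RuntimeError(r.get('error', 'CM repo pull failed'))

# Run a portable script selected by tags; CM resolves and caches its
# dependencies (tools, models, datasets) on the fly.
r = cmind.access({'action': 'run', 'automation': 'script',
                  'tags': 'detect,os', 'out': 'con'})
if r['return'] > 0:
    raise RuntimeError(r.get('error', 'CM script failed'))

print(r.get('new_env', {}))
```

The same calls are available from the command line as `cm pull repo ...` and `cm run script --tags=...`.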
As a proof of concept, this technology was successfully used to automate MLPerf benchmarking and submissions from Qualcomm, HPE, Dell, Lenovo, dividiti, Krai, the cTuning foundation and OctoML. For example, it was used and extended by Arjun Suresh and several other engineers to automate the record-breaking MLPerf inference benchmark submission for Qualcomm AI 100 devices.

The goal of this group is to help users automate all the steps needed to prepare and run MLPerf benchmarks across any ML models, data sets, frameworks, compilers and hardware using the MLCommons CM framework.

Here is an example of the current manual and error-prone MLPerf benchmark preparation steps:

Here is the concept of CM-based automated workflows:
We finished prototyping the new CM framework in the summer of 2022 based on the feedback of CK users, and successfully used it to modularize MLPerf and automate the submission of benchmarking results to MLPerf inference v2.1. See this tutorial for more details.
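For readers who want to try the modular MLPerf workflow themselves, the sketch below drives the cm command line from Python; the tags and flags mirror the CM MLPerf tutorials of that time and should be treated as an assumption rather than a verified, up-to-date recipe.

```python
# Hedged sketch: launch a CM-automated MLPerf inference run (ResNet50,
# ONNX Runtime, CPU, Offline scenario) by invoking the `cm` CLI from Python.
# Tags and flags follow the CM MLPerf tutorials of that period and may have
# changed since; treat them as an assumption, not a verified recipe.
import subprocess

cmd = [
    "cm", "run", "script",
    "--tags=run,mlperf,inference,generate-run-cmds,_find-performance",
    "--model=resnet50",
    "--backend=onnxruntime",
    "--device=cpu",
    "--scenario=Offline",
    "--quiet",
]

# CM resolves and caches all dependencies (model, dataset, loadgen, framework)
# before running the benchmark, so the first invocation can take a while.
subprocess.run(cmd, check=True)
```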
We continue developing CM as an open-source educational toolkit to help the community learn how to modularize, crowd-benchmark, optimize and deploy Pareto-efficient ML systems based on the mature MLPerf methodology and portable CM scripts. Please check the deliverables section to keep track of our community developments, and do not hesitate to join this community effort!

See our R&D roadmap for Q4 2022 and Q1 2023.
HPCA'22 presentation "MLPerf design space exploration and production deployment".
Tools: Google Drive (public access).

This project is supported by MLCommons, OctoML and many great contributors.

Checklist: moved to https://github.com/ctuning/artifact-evaluation/blob/master/docs/checklist.md
FAQ: moved to https://github.com/ctuning/artifact-evaluation/blob/master/docs/faq.md