diff --git a/.all-contributorsrc b/.all-contributorsrc
index ca869029..aa0f1857 100644
--- a/.all-contributorsrc
+++ b/.all-contributorsrc
@@ -13,6 +13,13 @@
"profile": "https://github.com/profvjreddi",
"contributions": []
},
+ {
+ "login": "jasonjabbour",
+ "name": "jasonjabbour",
+ "avatar_url": "https://avatars.githubusercontent.com/jasonjabbour",
+ "profile": "https://github.com/jasonjabbour",
+ "contributions": []
+ },
{
"login": "uchendui",
"name": "Ikechukwu Uchendu",
@@ -28,10 +35,17 @@
"contributions": []
},
{
- "login": "jasonjabbour",
- "name": "jasonjabbour",
- "avatar_url": "https://avatars.githubusercontent.com/jasonjabbour",
- "profile": "https://github.com/jasonjabbour",
+ "login": "Mjrovai",
+ "name": "Marcelo Rovai",
+ "avatar_url": "https://avatars.githubusercontent.com/Mjrovai",
+ "profile": "https://github.com/Mjrovai",
+ "contributions": []
+ },
+ {
+ "login": "Sara-Khosravi",
+ "name": "Sara Khosravi",
+ "avatar_url": "https://avatars.githubusercontent.com/Sara-Khosravi",
+ "profile": "https://github.com/Sara-Khosravi",
"contributions": []
},
{
@@ -49,10 +63,10 @@
"contributions": []
},
{
- "login": "Mjrovai",
- "name": "Marcelo Rovai",
- "avatar_url": "https://avatars.githubusercontent.com/Mjrovai",
- "profile": "https://github.com/Mjrovai",
+ "login": "kai4avaya",
+ "name": "Kai Kleinbard",
+ "avatar_url": "https://avatars.githubusercontent.com/kai4avaya",
+ "profile": "https://github.com/kai4avaya",
"contributions": []
},
{
@@ -62,13 +76,6 @@
"profile": "https://github.com/eliasab16",
"contributions": []
},
- {
- "login": "kai4avaya",
- "name": "kai4avaya",
- "avatar_url": "https://avatars.githubusercontent.com/kai4avaya",
- "profile": "https://github.com/kai4avaya",
- "contributions": []
- },
{
"login": "JaredP94",
"name": "Jared Ping",
@@ -104,13 +111,6 @@
"profile": "https://github.com/jaysonzlin",
"contributions": []
},
- {
- "login": "Sara-Khosravi",
- "name": "Sara Khosravi",
- "avatar_url": "https://avatars.githubusercontent.com/Sara-Khosravi",
- "profile": "https://github.com/Sara-Khosravi",
- "contributions": []
- },
{
"login": "sophiacho1",
"name": "Sophia Cho",
@@ -146,13 +146,6 @@
"profile": "https://github.com/korneelf1",
"contributions": []
},
- {
- "login": "zishenwan",
- "name": "Zishen Wan",
- "avatar_url": "https://avatars.githubusercontent.com/zishenwan",
- "profile": "https://github.com/zishenwan",
- "contributions": []
- },
{
"login": "colbybanbury",
"name": "Colby Banbury",
@@ -161,10 +154,10 @@
"contributions": []
},
{
- "login": "DivyaAmirtharaj",
- "name": "Divya Amirtharaj",
- "avatar_url": "https://avatars.githubusercontent.com/DivyaAmirtharaj",
- "profile": "https://github.com/DivyaAmirtharaj",
+ "login": "zishenwan",
+ "name": "Zishen Wan",
+ "avatar_url": "https://avatars.githubusercontent.com/zishenwan",
+ "profile": "https://github.com/zishenwan",
"contributions": []
},
{
@@ -182,10 +175,17 @@
"contributions": []
},
{
- "login": "James-QiuHaoran",
- "name": "Haoran Qiu",
- "avatar_url": "https://avatars.githubusercontent.com/James-QiuHaoran",
- "profile": "https://github.com/James-QiuHaoran",
+ "login": "DivyaAmirtharaj",
+ "name": "Divya Amirtharaj",
+ "avatar_url": "https://avatars.githubusercontent.com/DivyaAmirtharaj",
+ "profile": "https://github.com/DivyaAmirtharaj",
+ "contributions": []
+ },
+ {
+ "login": "eezike",
+ "name": "Emeka Ezike",
+ "avatar_url": "https://avatars.githubusercontent.com/eezike",
+ "profile": "https://github.com/eezike",
"contributions": []
},
{
@@ -195,6 +195,13 @@
"profile": "https://github.com/aptl26",
"contributions": []
},
+ {
+ "login": "James-QiuHaoran",
+ "name": "Haoran Qiu",
+ "avatar_url": "https://avatars.githubusercontent.com/James-QiuHaoran",
+ "profile": "https://github.com/James-QiuHaoran",
+ "contributions": []
+ },
{
"login": "arnaumarin",
"name": "marin-llobet",
@@ -203,17 +210,17 @@
"contributions": []
},
{
- "login": "MichaelSchnebly",
- "name": "Michael Schnebly",
- "avatar_url": "https://avatars.githubusercontent.com/MichaelSchnebly",
- "profile": "https://github.com/MichaelSchnebly",
+ "login": "Ekhao",
+ "name": "Emil Njor",
+ "avatar_url": "https://avatars.githubusercontent.com/Ekhao",
+ "profile": "https://github.com/Ekhao",
"contributions": []
},
{
- "login": "oishib",
- "name": "oishib",
- "avatar_url": "https://avatars.githubusercontent.com/oishib",
- "profile": "https://github.com/oishib",
+ "login": "AditiR-42",
+ "name": "Aditi Raju",
+ "avatar_url": "https://avatars.githubusercontent.com/AditiR-42",
+ "profile": "https://github.com/AditiR-42",
"contributions": []
},
{
@@ -224,10 +231,17 @@
"contributions": []
},
{
- "login": "AditiR-42",
- "name": "Aditi Raju",
- "avatar_url": "https://avatars.githubusercontent.com/AditiR-42",
- "profile": "https://github.com/AditiR-42",
+ "login": "MichaelSchnebly",
+ "name": "Michael Schnebly",
+ "avatar_url": "https://avatars.githubusercontent.com/MichaelSchnebly",
+ "profile": "https://github.com/MichaelSchnebly",
+ "contributions": []
+ },
+ {
+ "login": "oishib",
+ "name": "oishib",
+ "avatar_url": "https://avatars.githubusercontent.com/oishib",
+ "profile": "https://github.com/oishib",
"contributions": []
},
{
@@ -237,13 +251,6 @@
"profile": "https://github.com/ELSuitorHarvard",
"contributions": []
},
- {
- "login": "Ekhao",
- "name": "Emil Njor",
- "avatar_url": "https://avatars.githubusercontent.com/Ekhao",
- "profile": "https://github.com/Ekhao",
- "contributions": []
- },
{
"login": "BaeHenryS",
"name": "Henry Bae",
@@ -251,6 +258,13 @@
"profile": "https://github.com/BaeHenryS",
"contributions": []
},
+ {
+ "login": "jaywonchung",
+ "name": "Jae-Won Chung",
+ "avatar_url": "https://avatars.githubusercontent.com/jaywonchung",
+ "profile": "https://github.com/jaywonchung",
+ "contributions": []
+ },
{
"login": "leo47007",
"name": "Yu-Shun Hsiao",
@@ -266,24 +280,10 @@
"contributions": []
},
{
- "login": "jaywonchung",
- "name": "Jae-Won Chung",
- "avatar_url": "https://avatars.githubusercontent.com/jaywonchung",
- "profile": "https://github.com/jaywonchung",
- "contributions": []
- },
- {
- "login": "ShvetankPrakash",
- "name": "Shvetank Prakash",
- "avatar_url": "https://avatars.githubusercontent.com/ShvetankPrakash",
- "profile": "https://github.com/ShvetankPrakash",
- "contributions": []
- },
- {
- "login": "pongtr",
- "name": "Pong Trairatvorakul",
- "avatar_url": "https://avatars.githubusercontent.com/pongtr",
- "profile": "https://github.com/pongtr",
+ "login": "marcozennaro",
+ "name": "Marco Zennaro",
+ "avatar_url": "https://avatars.githubusercontent.com/marcozennaro",
+ "profile": "https://github.com/marcozennaro",
"contributions": []
},
{
@@ -300,6 +300,13 @@
"profile": "https://github.com/arbass22",
"contributions": []
},
+ {
+ "login": "pongtr",
+ "name": "Pong Trairatvorakul",
+ "avatar_url": "https://avatars.githubusercontent.com/pongtr",
+ "profile": "https://github.com/pongtr",
+ "contributions": []
+ },
{
"login": "jzhou1318",
"name": "Jennifer Zhou",
@@ -308,17 +315,24 @@
"contributions": []
},
{
- "login": "marcozennaro",
- "name": "Marco Zennaro",
- "avatar_url": "https://avatars.githubusercontent.com/marcozennaro",
- "profile": "https://github.com/marcozennaro",
+ "login": "ShvetankPrakash",
+ "name": "Shvetank Prakash",
+ "avatar_url": "https://avatars.githubusercontent.com/ShvetankPrakash",
+ "profile": "https://github.com/ShvetankPrakash",
"contributions": []
},
{
- "login": "Emeka Ezike",
- "name": "Emeka Ezike",
- "avatar_url": "https://www.gravatar.com/avatar/af39c27c6090c50a1921a9b6366e81cc?d=identicon&s=100",
- "profile": "https://github.com/harvard-edge/cs249r_book/graphs/contributors",
+ "login": "alex-oesterling",
+ "name": "Alex Oesterling",
+ "avatar_url": "https://avatars.githubusercontent.com/alex-oesterling",
+ "profile": "https://github.com/alex-oesterling",
+ "contributions": []
+ },
+ {
+ "login": "aryatschand",
+ "name": "Arya Tschand",
+ "avatar_url": "https://avatars.githubusercontent.com/aryatschand",
+ "profile": "https://github.com/aryatschand",
"contributions": []
},
{
@@ -328,13 +342,6 @@
"profile": "https://github.com/BrunoScaglione",
"contributions": []
},
- {
- "login": "Allen-Kuang",
- "name": "Allen-Kuang",
- "avatar_url": "https://avatars.githubusercontent.com/Allen-Kuang",
- "profile": "https://github.com/Allen-Kuang",
- "contributions": []
- },
{
"login": "Gjain234",
"name": "Gauri Jain",
@@ -342,6 +349,13 @@
"profile": "https://github.com/Gjain234",
"contributions": []
},
+ {
+ "login": "Allen-Kuang",
+ "name": "Allen-Kuang",
+ "avatar_url": "https://avatars.githubusercontent.com/Allen-Kuang",
+ "profile": "https://github.com/Allen-Kuang",
+ "contributions": []
+ },
{
"login": "FinAminToastCrunch",
"name": "Fin Amin",
@@ -356,6 +370,13 @@
"profile": "https://github.com/harvard-edge/cs249r_book/graphs/contributors",
"contributions": []
},
+ {
+ "login": "vitasam",
+ "name": "The Random DIY",
+ "avatar_url": "https://avatars.githubusercontent.com/vitasam",
+ "profile": "https://github.com/vitasam",
+ "contributions": []
+ },
{
"login": "gnodipac886",
"name": "gnodipac886",
@@ -363,13 +384,6 @@
"profile": "https://github.com/gnodipac886",
"contributions": []
},
- {
- "login": "alex-oesterling",
- "name": "Alex Oesterling",
- "avatar_url": "https://avatars.githubusercontent.com/alex-oesterling",
- "profile": "https://github.com/alex-oesterling",
- "contributions": []
- },
{
"login": "serco425",
"name": "Sercan Ayg\u00fcn",
@@ -378,24 +392,31 @@
"contributions": []
},
{
- "login": "emmanuel2406",
- "name": "Emmanuel Rassou",
- "avatar_url": "https://avatars.githubusercontent.com/emmanuel2406",
- "profile": "https://github.com/emmanuel2406",
+ "login": "BravoBaldo",
+ "name": "Baldassarre Cesarano",
+ "avatar_url": "https://avatars.githubusercontent.com/BravoBaldo",
+ "profile": "https://github.com/BravoBaldo",
"contributions": []
},
{
- "login": "jasonlyik",
- "name": "Jason Yik",
- "avatar_url": "https://avatars.githubusercontent.com/jasonlyik",
- "profile": "https://github.com/jasonlyik",
+ "login": "AbenezerKb",
+ "name": "Abenezer",
+ "avatar_url": "https://avatars.githubusercontent.com/AbenezerKb",
+ "profile": "https://github.com/AbenezerKb",
"contributions": []
},
{
- "login": "abigailswallow",
- "name": "abigailswallow",
- "avatar_url": "https://avatars.githubusercontent.com/abigailswallow",
- "profile": "https://github.com/abigailswallow",
+ "login": "bilgeacun",
+ "name": "Bilge Acun",
+ "avatar_url": "https://avatars.githubusercontent.com/bilgeacun",
+ "profile": "https://github.com/bilgeacun",
+ "contributions": []
+ },
+ {
+ "login": "YLab-UChicago",
+ "name": "yanjingl",
+ "avatar_url": "https://avatars.githubusercontent.com/YLab-UChicago",
+ "profile": "https://github.com/YLab-UChicago",
"contributions": []
},
{
@@ -406,10 +427,17 @@
"contributions": []
},
{
- "login": "bilgeacun",
- "name": "Bilge Acun",
- "avatar_url": "https://avatars.githubusercontent.com/bilgeacun",
- "profile": "https://github.com/bilgeacun",
+ "login": "abigailswallow",
+ "name": "abigailswallow",
+ "avatar_url": "https://avatars.githubusercontent.com/abigailswallow",
+ "profile": "https://github.com/abigailswallow",
+ "contributions": []
+ },
+ {
+ "login": "jasonlyik",
+ "name": "Jason Yik",
+ "avatar_url": "https://avatars.githubusercontent.com/jasonlyik",
+ "profile": "https://github.com/jasonlyik",
"contributions": []
},
{
@@ -420,17 +448,24 @@
"contributions": []
},
{
- "login": "jessicaquaye",
- "name": "Jessica Quaye",
- "avatar_url": "https://avatars.githubusercontent.com/jessicaquaye",
- "profile": "https://github.com/jessicaquaye",
+ "login": "ciyer64",
+ "name": "Curren Iyer",
+ "avatar_url": "https://avatars.githubusercontent.com/ciyer64",
+ "profile": "https://github.com/ciyer64",
"contributions": []
},
{
- "login": "vitasam",
- "name": "The Random DIY",
- "avatar_url": "https://avatars.githubusercontent.com/vitasam",
- "profile": "https://github.com/vitasam",
+ "login": "emmanuel2406",
+ "name": "Emmanuel Rassou",
+ "avatar_url": "https://avatars.githubusercontent.com/emmanuel2406",
+ "profile": "https://github.com/emmanuel2406",
+ "contributions": []
+ },
+ {
+ "login": "skmur",
+ "name": "Sonia Murthy",
+ "avatar_url": "https://avatars.githubusercontent.com/skmur",
+ "profile": "https://github.com/skmur",
"contributions": []
},
{
@@ -441,23 +476,23 @@
"contributions": []
},
{
- "login": "skmur",
- "name": "Sonia Murthy",
- "avatar_url": "https://avatars.githubusercontent.com/skmur",
- "profile": "https://github.com/skmur",
+ "login": "jessicaquaye",
+ "name": "Jessica Quaye",
+ "avatar_url": "https://avatars.githubusercontent.com/jessicaquaye",
+ "profile": "https://github.com/jessicaquaye",
"contributions": []
},
{
- "login": "Costin-Andrei Oncescu",
- "name": "Costin-Andrei Oncescu",
- "avatar_url": "https://www.gravatar.com/avatar/fc4f3460cdfb9365ab59bdeafb06413e?d=identicon&s=100",
- "profile": "https://github.com/harvard-edge/cs249r_book/graphs/contributors",
+ "login": "vijay-edu",
+ "name": "Vijay Edupuganti",
+ "avatar_url": "https://avatars.githubusercontent.com/vijay-edu",
+ "profile": "https://github.com/vijay-edu",
"contributions": []
},
{
- "login": "Baldassarre Cesarano",
- "name": "Baldassarre Cesarano",
- "avatar_url": "https://www.gravatar.com/avatar/13b816dd84837bb4700a55f47a70763e?d=identicon&s=100",
+ "login": "Costin-Andrei Oncescu",
+ "name": "Costin-Andrei Oncescu",
+ "avatar_url": "https://www.gravatar.com/avatar/fc4f3460cdfb9365ab59bdeafb06413e?d=identicon&s=100",
"profile": "https://github.com/harvard-edge/cs249r_book/graphs/contributors",
"contributions": []
},
@@ -468,13 +503,6 @@
"profile": "https://github.com/harvard-edge/cs249r_book/graphs/contributors",
"contributions": []
},
- {
- "login": "Vijay Edupuganti",
- "name": "Vijay Edupuganti",
- "avatar_url": "https://www.gravatar.com/avatar/b15b6e0e9adf58099905c1a0fd474cb9?d=identicon&s=100",
- "profile": "https://github.com/harvard-edge/cs249r_book/graphs/contributors",
- "contributions": []
- },
{
"login": "Jothi Ramaswamy",
"name": "Jothi Ramaswamy",
@@ -489,13 +517,6 @@
"profile": "https://github.com/harvard-edge/cs249r_book/graphs/contributors",
"contributions": []
},
- {
- "login": "Curren Iyer",
- "name": "Curren Iyer",
- "avatar_url": "https://www.gravatar.com/avatar/bd53d146aa888548c8db4da02bf81e7a?d=identicon&s=100",
- "profile": "https://github.com/harvard-edge/cs249r_book/graphs/contributors",
- "contributions": []
- },
{
"login": "Fatima Shah",
"name": "Fatima Shah",
@@ -503,13 +524,6 @@
"profile": "https://github.com/harvard-edge/cs249r_book/graphs/contributors",
"contributions": []
},
- {
- "login": "yanjingl",
- "name": "yanjingl",
- "avatar_url": "https://www.gravatar.com/avatar/f5d58ba6aa9b00189d4c018d370e8f43?d=identicon&s=100",
- "profile": "https://github.com/harvard-edge/cs249r_book/graphs/contributors",
- "contributions": []
- },
{
"login": "a-saraf",
"name": "a-saraf",
diff --git a/Machine-Learning-Systems.log b/Machine-Learning-Systems.log
new file mode 100644
index 00000000..db5e5212
--- /dev/null
+++ b/Machine-Learning-Systems.log
@@ -0,0 +1,647 @@
+This is pdfTeX, Version 3.141592653-2.6-1.40.26 (TeX Live 2024/Homebrew) (preloaded format=pdflatex 2024.3.20) 3 NOV 2024 15:47
+entering extended mode
+ restricted \write18 enabled.
+ %&-line parsing enabled.
+**Machine-Learning-Systems.tex
+(./Machine-Learning-Systems.tex
+LaTeX2e <2023-11-01> patch level 1
+L3 programming layer <2024-02-20>
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/koma-script/scrb
+ook.cls
+Document Class: scrbook 2023/07/07 v3.41 KOMA-Script document class (book)
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/koma-script/scrk
+base.sty
+Package: scrkbase 2023/07/07 v3.41 KOMA-Script package (KOMA-Script-dependent b
+asics and keyval usage)
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/koma-script/scrb
+ase.sty
+Package: scrbase 2023/07/07 v3.41 KOMA-Script package (KOMA-Script-independent
+basics and keyval usage)
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/koma-script/scrl
+file.sty
+Package: scrlfile 2023/07/07 v3.41 KOMA-Script package (file load hooks)
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/koma-script/scrl
+file-hook.sty
+Package: scrlfile-hook 2023/07/07 v3.41 KOMA-Script package (using LaTeX hooks)
+
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/koma-script/scrl
+ogo.sty
+Package: scrlogo 2023/07/07 v3.41 KOMA-Script package (logo)
+)))
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/graphics/keyval.
+sty
+Package: keyval 2022/05/29 v1.15 key=value parser (DPC)
+\KV@toks@=\toks17
+)
+Applying: [2021/05/01] Usage of raw or classic option list on input line 252.
+Already applied: [0000/00/00] Usage of raw or classic option list on input line
+ 368.
+))
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/koma-script/tocb
+asic.sty
+Package: tocbasic 2023/07/07 v3.41 KOMA-Script package (handling toc-files)
+\scr@dte@tocline@numberwidth=\skip48
+\scr@dte@tocline@numbox=\box51
+)
+Package tocbasic Info: omitting babel extension for `toc'
+(tocbasic) because of feature `nobabel' available
+(tocbasic) for `toc' on input line 135.
+Package scrbook Info: You've used standard option `10pt'.
+(scrbook) This is correct!
+(scrbook) Internally I'm using `fontsize=10pt'.
+(scrbook) If you'd like to set the option with \KOMAoptions,
+(scrbook) you'd have to use `fontsize=10pt' there
+(scrbook) instead of `10pt', too.
+Class scrbook Info: File `scrsize10pt.clo' used to setup font sizes on input li
+ne 2691.
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/koma-script/scrs
+ize10pt.clo
+File: scrsize10pt.clo 2023/07/07 v3.41 KOMA-Script font size class option (10pt
+)
+)
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/koma-script/type
+area.sty
+Package: typearea 2023/07/07 v3.41 KOMA-Script package (type area)
+\ta@bcor=\skip49
+\ta@div=\count188
+Package typearea Info: You've used standard option `letterpaper'.
+(typearea) This is correct!
+(typearea) Internally I'm using `paper=letter'.
+(typearea) If you'd like to set the option with \KOMAoptions,
+(typearea) you'd have to use `paper=letter' there
+(typearea) instead of `letterpaper', too.
+\ta@hblk=\skip50
+\ta@vblk=\skip51
+\ta@temp=\skip52
+\footheight=\skip53
+Package typearea Info: With paper sizes other than (almost) `A4' predefined
+(typearea) DIV values do not exist. Using DIV calculation for good
+(typearea) line width (unless using `version=3.24` or prior).
+
+DIV calculation for typearea with good linewidth.
+Package typearea Info: These are the values describing the layout:
+(typearea) DIV = 6
+(typearea) BCOR = 0.0pt
+(typearea) \paperwidth = 614.295pt
+(typearea) \textwidth = 307.14752pt
+(typearea) DIV departure = 7%
+(typearea) \evensidemargin = 132.495pt
+(typearea) \oddsidemargin = 30.1125pt
+(typearea) \paperheight = 794.96999pt
+(typearea) \textheight = 406.0pt
+(typearea) \topmargin = 27.225pt
+(typearea) \headheight = 15.0pt
+(typearea) \headsep = 18.0pt
+(typearea) \topskip = 10.0pt
+(typearea) \footskip = 42.0pt
+(typearea) \baselineskip = 12.0pt
+(typearea) on input line 1799.
+)
+\c@part=\count189
+\c@chapter=\count190
+\c@section=\count191
+\c@subsection=\count192
+\c@subsubsection=\count193
+\c@paragraph=\count194
+\c@subparagraph=\count195
+\scr@dte@chapter@maxnumwidth=\skip54
+Class scrbook Info: using compatibility default `afterindent=bysign'
+(scrbook) for `\chapter on input line 5990.
+\scr@dte@section@maxnumwidth=\skip55
+Class scrbook Info: using compatibility default `runin=bysign'
+(scrbook) for `\section on input line 6001.
+Class scrbook Info: using compatibility default `afterindent=bysign'
+(scrbook) for `\section on input line 6001.
+\scr@dte@part@maxnumwidth=\skip56
+Class scrbook Info: using compatibility default `afterindent=true'
+(scrbook) for `\part on input line 6010.
+\scr@dte@subsection@maxnumwidth=\skip57
+Class scrbook Info: using compatibility default `runin=bysign'
+(scrbook) for `\subsection on input line 6020.
+Class scrbook Info: using compatibility default `afterindent=bysign'
+(scrbook) for `\subsection on input line 6020.
+\scr@dte@subsubsection@maxnumwidth=\skip58
+Class scrbook Info: using compatibility default `runin=bysign'
+(scrbook) for `\subsubsection on input line 6030.
+Class scrbook Info: using compatibility default `afterindent=bysign'
+(scrbook) for `\subsubsection on input line 6030.
+\scr@dte@paragraph@maxnumwidth=\skip59
+Class scrbook Info: using compatibility default `runin=bysign'
+(scrbook) for `\paragraph on input line 6041.
+Class scrbook Info: using compatibility default `afterindent=bysign'
+(scrbook) for `\paragraph on input line 6041.
+\scr@dte@subparagraph@maxnumwidth=\skip60
+Class scrbook Info: using compatibility default `runin=bysign'
+(scrbook) for `\subparagraph on input line 6051.
+Class scrbook Info: using compatibility default `afterindent=bysign'
+(scrbook) for `\subparagraph on input line 6051.
+\abovecaptionskip=\skip61
+\belowcaptionskip=\skip62
+\c@pti@nb@sid@b@x=\box52
+Package tocbasic Info: omitting babel extension for `lof'
+(tocbasic) because of feature `nobabel' available
+(tocbasic) for `lof' on input line 7242.
+\scr@dte@figure@maxnumwidth=\skip63
+\c@figure=\count196
+Package tocbasic Info: omitting babel extension for `lot'
+(tocbasic) because of feature `nobabel' available
+(tocbasic) for `lot' on input line 7259.
+\scr@dte@table@maxnumwidth=\skip64
+\c@table=\count197
+Class scrbook Info: Redefining `\numberline' on input line 7430.
+\bibindent=\dimen140
+)
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/amsmath/amsmath.
+sty
+Package: amsmath 2023/05/13 v2.17o AMS math features
+\@mathmargin=\skip65
+
+For additional information on amsmath, use the `?' option.
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/amsmath/amstext.
+sty
+Package: amstext 2021/08/26 v2.01 AMS text
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/amsmath/amsgen.s
+ty
+File: amsgen.sty 1999/11/30 v2.0 generic functions
+\@emptytoks=\toks18
+\ex@=\dimen141
+))
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/amsmath/amsbsy.s
+ty
+Package: amsbsy 1999/11/29 v1.2d Bold Symbols
+\pmbraise@=\dimen142
+)
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/amsmath/amsopn.s
+ty
+Package: amsopn 2022/04/08 v2.04 operator names
+)
+\inf@bad=\count198
+LaTeX Info: Redefining \frac on input line 234.
+\uproot@=\count199
+\leftroot@=\count266
+LaTeX Info: Redefining \overline on input line 399.
+LaTeX Info: Redefining \colon on input line 410.
+\classnum@=\count267
+\DOTSCASE@=\count268
+LaTeX Info: Redefining \ldots on input line 496.
+LaTeX Info: Redefining \dots on input line 499.
+LaTeX Info: Redefining \cdots on input line 620.
+\Mathstrutbox@=\box53
+\strutbox@=\box54
+LaTeX Info: Redefining \big on input line 722.
+LaTeX Info: Redefining \Big on input line 723.
+LaTeX Info: Redefining \bigg on input line 724.
+LaTeX Info: Redefining \Bigg on input line 725.
+\big@size=\dimen143
+LaTeX Font Info: Redeclaring font encoding OML on input line 743.
+LaTeX Font Info: Redeclaring font encoding OMS on input line 744.
+\macc@depth=\count269
+LaTeX Info: Redefining \bmod on input line 905.
+LaTeX Info: Redefining \pmod on input line 910.
+LaTeX Info: Redefining \smash on input line 940.
+LaTeX Info: Redefining \relbar on input line 970.
+LaTeX Info: Redefining \Relbar on input line 971.
+\c@MaxMatrixCols=\count270
+\dotsspace@=\muskip16
+\c@parentequation=\count271
+\dspbrk@lvl=\count272
+\tag@help=\toks19
+\row@=\count273
+\column@=\count274
+\maxfields@=\count275
+\andhelp@=\toks20
+\eqnshift@=\dimen144
+\alignsep@=\dimen145
+\tagshift@=\dimen146
+\tagwidth@=\dimen147
+\totwidth@=\dimen148
+\lineht@=\dimen149
+\@envbody=\toks21
+\multlinegap=\skip66
+\multlinetaggap=\skip67
+\mathdisplay@stack=\toks22
+LaTeX Info: Redefining \[ on input line 2953.
+LaTeX Info: Redefining \] on input line 2954.
+)
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/amsfonts/amssymb
+.sty
+Package: amssymb 2013/01/14 v3.01 AMS font symbols
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/amsfonts/amsfont
+s.sty
+Package: amsfonts 2013/01/14 v3.01 Basic AMSFonts support
+\symAMSa=\mathgroup4
+\symAMSb=\mathgroup5
+LaTeX Font Info: Redeclaring math symbol \hbar on input line 98.
+LaTeX Font Info: Overwriting math alphabet `\mathfrak' in version `bold'
+(Font) U/euf/m/n --> U/euf/b/n on input line 106.
+))
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/generic/iftex/iftex.st
+y
+Package: iftex 2022/02/03 v1.0f TeX engine tests
+)
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/base/fontenc.sty
+Package: fontenc 2021/04/29 v2.0v Standard LaTeX package
+)
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/base/inputenc.st
+y
+Package: inputenc 2021/02/14 v1.3d Input encoding file
+\inpenc@prehook=\toks23
+\inpenc@posthook=\toks24
+)
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/base/textcomp.st
+y
+Package: textcomp 2020/02/02 v2.0n Standard LaTeX package
+)
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/lm/lmodern.sty
+Package: lmodern 2015/05/01 v1.6.1 Latin Modern Fonts
+LaTeX Font Info: Overwriting symbol font `operators' in version `normal'
+(Font) OT1/cmr/m/n --> OT1/lmr/m/n on input line 22.
+LaTeX Font Info: Overwriting symbol font `letters' in version `normal'
+(Font) OML/cmm/m/it --> OML/lmm/m/it on input line 23.
+LaTeX Font Info: Overwriting symbol font `symbols' in version `normal'
+(Font) OMS/cmsy/m/n --> OMS/lmsy/m/n on input line 24.
+LaTeX Font Info: Overwriting symbol font `largesymbols' in version `normal'
+(Font) OMX/cmex/m/n --> OMX/lmex/m/n on input line 25.
+LaTeX Font Info: Overwriting symbol font `operators' in version `bold'
+(Font) OT1/cmr/bx/n --> OT1/lmr/bx/n on input line 26.
+LaTeX Font Info: Overwriting symbol font `letters' in version `bold'
+(Font) OML/cmm/b/it --> OML/lmm/b/it on input line 27.
+LaTeX Font Info: Overwriting symbol font `symbols' in version `bold'
+(Font) OMS/cmsy/b/n --> OMS/lmsy/b/n on input line 28.
+LaTeX Font Info: Overwriting symbol font `largesymbols' in version `bold'
+(Font) OMX/cmex/m/n --> OMX/lmex/m/n on input line 29.
+LaTeX Font Info: Overwriting math alphabet `\mathbf' in version `normal'
+(Font) OT1/cmr/bx/n --> OT1/lmr/bx/n on input line 31.
+LaTeX Font Info: Overwriting math alphabet `\mathsf' in version `normal'
+(Font) OT1/cmss/m/n --> OT1/lmss/m/n on input line 32.
+LaTeX Font Info: Overwriting math alphabet `\mathit' in version `normal'
+(Font) OT1/cmr/m/it --> OT1/lmr/m/it on input line 33.
+LaTeX Font Info: Overwriting math alphabet `\mathtt' in version `normal'
+(Font) OT1/cmtt/m/n --> OT1/lmtt/m/n on input line 34.
+LaTeX Font Info: Overwriting math alphabet `\mathbf' in version `bold'
+(Font) OT1/cmr/bx/n --> OT1/lmr/bx/n on input line 35.
+LaTeX Font Info: Overwriting math alphabet `\mathsf' in version `bold'
+(Font) OT1/cmss/bx/n --> OT1/lmss/bx/n on input line 36.
+LaTeX Font Info: Overwriting math alphabet `\mathit' in version `bold'
+(Font) OT1/cmr/bx/it --> OT1/lmr/bx/it on input line 37.
+LaTeX Font Info: Overwriting math alphabet `\mathtt' in version `bold'
+(Font) OT1/cmtt/m/n --> OT1/lmtt/m/n on input line 38.
+)
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/upquote/upquote.
+sty
+Package: upquote 2012/04/19 v1.3 upright-quote and grave-accent glyphs in verba
+tim
+)
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/microtype/microt
+ype.sty
+Package: microtype 2023/03/13 v3.1a Micro-typographical refinements (RS)
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/etoolbox/etoolbo
+x.sty
+Package: etoolbox 2020/10/05 v2.5k e-TeX tools for LaTeX (JAW)
+\etb@tempcnta=\count276
+)
+\MT@toks=\toks25
+\MT@tempbox=\box55
+\MT@count=\count277
+LaTeX Info: Redefining \noprotrusionifhmode on input line 1059.
+LaTeX Info: Redefining \leftprotrusion on input line 1060.
+\MT@prot@toks=\toks26
+LaTeX Info: Redefining \rightprotrusion on input line 1078.
+LaTeX Info: Redefining \textls on input line 1368.
+\MT@outer@kern=\dimen150
+LaTeX Info: Redefining \textmicrotypecontext on input line 1988.
+\MT@listname@count=\count278
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/microtype/microt
+ype-pdftex.def
+File: microtype-pdftex.def 2023/03/13 v3.1a Definitions specific to pdftex (RS)
+
+LaTeX Info: Redefining \lsstyle on input line 902.
+LaTeX Info: Redefining \lslig on input line 902.
+\MT@outer@space=\skip68
+)
+Package microtype Info: Loading configuration file microtype.cfg.
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/microtype/microt
+ype.cfg
+File: microtype.cfg 2023/03/13 v3.1a microtype main configuration file (RS)
+))
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/xcolor/xcolor.st
+y
+Package: xcolor 2023/11/15 v3.01 LaTeX color extensions (UK)
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/graphics-cfg/col
+or.cfg
+File: color.cfg 2016/01/02 v1.6 sample color configuration
+)
+Package xcolor Info: Driver file: pdftex.def on input line 274.
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/graphics-def/pdf
+tex.def
+File: pdftex.def 2022/09/22 v1.2b Graphics/color driver for pdftex
+)
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/graphics/mathcol
+or.ltx)
+Package xcolor Info: Model `cmy' substituted by `cmy0' on input line 1350.
+Package xcolor Info: Model `hsb' substituted by `rgb' on input line 1354.
+Package xcolor Info: Model `RGB' extended on input line 1366.
+Package xcolor Info: Model `HTML' substituted by `rgb' on input line 1368.
+Package xcolor Info: Model `Hsb' substituted by `hsb' on input line 1369.
+Package xcolor Info: Model `tHsb' substituted by `hsb' on input line 1370.
+Package xcolor Info: Model `HSB' substituted by `hsb' on input line 1371.
+Package xcolor Info: Model `Gray' substituted by `gray' on input line 1372.
+Package xcolor Info: Model `wave' substituted by `hsb' on input line 1373.
+)
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/geometry/geometr
+y.sty
+Package: geometry 2020/01/02 v5.9 Page Geometry
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/generic/iftex/ifvtex.s
+ty
+Package: ifvtex 2019/10/25 v1.7 ifvtex legacy package. Use iftex instead.
+)
+\Gm@cnth=\count279
+\Gm@cntv=\count280
+\c@Gm@tempcnt=\count281
+\Gm@bindingoffset=\dimen151
+\Gm@wd@mp=\dimen152
+\Gm@odd@mp=\dimen153
+\Gm@even@mp=\dimen154
+\Gm@layoutwidth=\dimen155
+\Gm@layoutheight=\dimen156
+\Gm@layouthoffset=\dimen157
+\Gm@layoutvoffset=\dimen158
+\Gm@dimlist=\toks27
+)
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/tools/longtable.
+sty
+Package: longtable 2023-11-01 v4.19 Multi-page Table package (DPC)
+\LTleft=\skip69
+\LTright=\skip70
+\LTpre=\skip71
+\LTpost=\skip72
+\LTchunksize=\count282
+\LTcapwidth=\dimen159
+\LT@head=\box56
+\LT@firsthead=\box57
+\LT@foot=\box58
+\LT@lastfoot=\box59
+\LT@gbox=\box60
+\LT@cols=\count283
+\LT@rows=\count284
+\c@LT@tables=\count285
+\c@LT@chunks=\count286
+\LT@p@ftn=\toks28
+)
+Class scrbook Info: longtable captions redefined on input line 62.
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/booktabs/booktab
+s.sty
+Package: booktabs 2020/01/12 v1.61803398 Publication quality tables
+\heavyrulewidth=\dimen160
+\lightrulewidth=\dimen161
+\cmidrulewidth=\dimen162
+\belowrulesep=\dimen163
+\belowbottomsep=\dimen164
+\aboverulesep=\dimen165
+\abovetopsep=\dimen166
+\cmidrulesep=\dimen167
+\cmidrulekern=\dimen168
+\defaultaddspace=\dimen169
+\@cmidla=\count287
+\@cmidlb=\count288
+\@aboverulesep=\dimen170
+\@belowrulesep=\dimen171
+\@thisruleclass=\count289
+\@lastruleclass=\count290
+\@thisrulewidth=\dimen172
+)
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/tools/array.sty
+Package: array 2023/10/16 v2.5g Tabular extension package (FMi)
+\col@sep=\dimen173
+\ar@mcellbox=\box61
+\extrarowheight=\dimen174
+\NC@list=\toks29
+\extratabsurround=\skip73
+\backup@length=\skip74
+\ar@cellbox=\box62
+) (/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/tools/calc.sty
+Package: calc 2023/07/08 v4.3 Infix arithmetic (KKT,FJ)
+\calc@Acount=\count291
+\calc@Bcount=\count292
+\calc@Adimen=\dimen175
+\calc@Bdimen=\dimen176
+\calc@Askip=\skip75
+\calc@Bskip=\skip76
+LaTeX Info: Redefining \setlength on input line 80.
+LaTeX Info: Redefining \addtolength on input line 81.
+\calc@Ccount=\count293
+\calc@Cskip=\skip77
+)
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/footnotehyper/fo
+otnotehyper.sty
+Package: footnotehyper 2021/08/13 v1.1e hyperref aware footnote.sty (JFB)
+\FNH@notes=\box63
+\FNH@width=\dimen177
+\FNH@toks=\toks30
+)
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/graphics/graphic
+x.sty
+Package: graphicx 2021/09/16 v1.2d Enhanced LaTeX Graphics (DPC,SPQR)
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/graphics/graphic
+s.sty
+Package: graphics 2022/03/10 v1.4e Standard LaTeX Graphics (DPC,SPQR)
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/graphics/trig.st
+y
+Package: trig 2021/08/11 v1.11 sin cos tan (DPC)
+)
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/graphics-cfg/gra
+phics.cfg
+File: graphics.cfg 2016/06/04 v1.11 sample graphics configuration
+)
+Package graphics Info: Driver file: pdftex.def on input line 107.
+)
+\Gin@req@height=\dimen178
+\Gin@req@width=\dimen179
+)
+\pandoc@box=\box64
+\cslhangindent=\skip78
+\csllabelwidth=\skip79
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/generic/babel/babel.st
+y
+Package: babel 2024/02/07 v24.2 The Babel package
+\babel@savecnt=\count294
+\U@D=\dimen180
+\l@unhyphenated=\language89
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/generic/babel/txtbabel
+.def)
+\bbl@readstream=\read2
+\bbl@dirlevel=\count295
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/generic/babel-english/
+english.ldf
+Language: english 2017/06/06 v3.3r English support from the babel system
+Package babel Info: Hyphen rules for 'canadian' set to \l@english
+(babel) (\language0). Reported on input line 102.
+Package babel Info: Hyphen rules for 'australian' set to \l@ukenglish
+(babel) (\language23). Reported on input line 105.
+Package babel Info: Hyphen rules for 'newzealand' set to \l@ukenglish
+(babel) (\language23). Reported on input line 108.
+))
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/generic/babel/locale/e
+n/babel-english.tex
+Package babel Info: Importing font and identification data for english
+(babel) from babel-en.ini. Reported on input line 11.
+)
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/caption/caption.
+sty
+Package: caption 2023/08/05 v3.6o Customizing captions (AR)
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/caption/caption3
+.sty
+Package: caption3 2023/07/31 v2.4d caption3 kernel (AR)
+\caption@tempdima=\dimen181
+\captionmargin=\dimen182
+\caption@leftmargin=\dimen183
+\caption@rightmargin=\dimen184
+\caption@width=\dimen185
+\caption@indent=\dimen186
+\caption@parindent=\dimen187
+\caption@hangindent=\dimen188
+Package caption Info: KOMA-Script document class detected.
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/caption/caption-
+koma.sto
+File: caption-koma.sto 2023/09/08 v2.0e Adaption of the caption package to the
+KOMA-Script document classes (AR)
+))
+\c@caption@flags=\count296
+\c@continuedfloat=\count297
+Package caption Info: longtable package is loaded.
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/caption/ltcaptio
+n.sty
+Package: ltcaption 2021/01/08 v1.4c longtable captions (AR)
+)
+Package caption Info: KOMA-Script scrextend package detected.
+\caption@addmargin@hsize=\dimen189
+\caption@addmargin@linewidth=\dimen190
+)
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/tools/afterpage.
+sty
+Package: afterpage 2023/07/04 v1.08 After-Page Package (DPC)
+\AP@output=\toks31
+\AP@partial=\box65
+\AP@footins=\box66
+)
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/base/atbegshi-lt
+x.sty
+Package: atbegshi-ltx 2021/01/10 v1.0c Emulation of the original atbegshi
+package with kernel methods
+)
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/etex-pkg/etex.st
+y
+Package: etex 2016/08/01 v2.7 eTeX basic definition package (PEB,DPC)
+
+
+Package etex Warning: Extended allocation already in use.
+(etex) etex.sty code will not be used.
+(etex) To force etex package to load, add
+(etex) \RequirePackage{etex}
+(etex) at the start of the document.
+
+)
+
+Class scrbook Warning: Usage of package `fancyhdr' together
+(scrbook) with a KOMA-Script class is not recommended.
+(scrbook) I'd suggest to use
+(scrbook) package `scrlayer' or `scrlayer-scrpage', because
+(scrbook) they support KOMA-Script classes.
+(scrbook) With `fancyhdr' several features of class `scrbook'
+(scrbook) like options `headsepline', `footsepline' or command
+(scrbook) `\MakeMarkcase' and the commands `\setkomafont' and
+(scrbook) `\addtokomafont' for the page style elements need
+(scrbook) explicite user intervention to work.
+(scrbook) Nevertheless, using requested
+(scrbook) package `fancyhdr' on input line 129.
+
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/fancyhdr/fancyhd
+r.sty
+Package: fancyhdr 2022/11/09 v4.1 Extensive control of page headers and footers
+
+\f@nch@headwidth=\skip80
+\f@nch@O@elh=\skip81
+\f@nch@O@erh=\skip82
+\f@nch@O@olh=\skip83
+\f@nch@O@orh=\skip84
+\f@nch@O@elf=\skip85
+\f@nch@O@erf=\skip86
+\f@nch@O@olf=\skip87
+\f@nch@O@orf=\skip88
+)
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/fontspec/fontspe
+c.sty
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/l3packages/xpars
+e/xparse.sty
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/l3kernel/expl3.s
+ty
+Package: expl3 2024-02-20 L3 programming layer (loader)
+
+(/usr/local/Cellar/texlive/20240312/share/texmf-dist/tex/latex/l3backend/l3back
+end-pdftex.def
+File: l3backend-pdftex.def 2024-02-20 L3 backend support: PDF output (pdfTeX)
+\l__color_backend_stack_int=\count298
+\l__pdf_internal_box=\box67
+))
+Package: xparse 2024-02-18 L3 Experimental document command parser
+)
+Package: fontspec 2024/02/13 v2.9a Font selection for XeLaTeX and LuaLaTeX
+
+
+! Fatal Package fontspec Error: The fontspec package requires either XeTeX or
+(fontspec) LuaTeX.
+(fontspec)
+(fontspec) You must change your typesetting engine to,
+(fontspec) e.g., "xelatex" or "lualatex" instead of
+(fontspec) "latex" or "pdflatex".
+
+Type
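The fatal `fontspec` error above is why the log ends here: `fontspec` requires a Unicode engine, but the book was compiled with pdfLaTeX. A minimal sketch of the usual remedies, assuming `latexmk` is available; the `_quarto.yml` keys in the comments are standard Quarto options, not taken from this diff:

```bash
# Option 1: for a Quarto book, set the PDF engine in _quarto.yml:
#   format:
#     pdf:
#       pdf-engine: xelatex   # or lualatex
# Option 2: compile the generated .tex directly with a Unicode engine
latexmk -xelatex Machine-Learning-Systems.tex
```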
diff --git a/README.md b/README.md
--- a/README.md
+++ b/README.md
- ⭐ Help Us Reach 1,000 GitHub Stars! ⭐
- For every 25 stars, Arduino and SEEED will donate a Nicla Vision or XIAO ESP32S3 for AI education.
-
Your ⭐ makes a difference. Click below to support our mission!
+ 🌟 We Hit 1,000 GitHub Stars - Thank You! 🌟
+ Thanks to your support, Arduino and SEEED are donating Nicla Vision and XIAO ESP32S3 boards for AI education.
+
But we’re not stopping here! Every 25 stars from here on helps us bring even more resources to the community.
+ ⭐ Keep the Momentum Going - Star Our Repo! ⭐
+
@@ -68,11 +73,17 @@ If you're unsure where to start or have any questions, feel free to reach out th
Want to build the book locally? Here's how:
1. **Install Quarto**: Follow the [Quarto installation instructions](https://quarto.org/docs/download/).
-2. **Render the Book**:
+2. **Render the Book in all formats**:
```bash
cd cs249r_book
quarto render
```
+3. **Render the Book in a specific format (faster)**:
+ ```bash
+ cd cs249r_book
+ quarto render --to epub
+ ```
+
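For iterative writing, a live-reloading preview is usually faster than repeated full renders. A minimal sketch using Quarto's standard `preview` command (not part of this diff):

```bash
cd cs249r_book
# Serve the book locally and re-render files as they change
quarto preview
```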
---
## Contributors
@@ -86,24 +97,24 @@ This project follows the [all-contributors](https://allcontributors.org) specification
+
Vijay Janapa Reddi
jasonjabbour
Ikechukwu Uchendu
-
Naeem Khoshnevis
-
jasonjabbour
+
Douwe den Blanken
Marcelo Rovai
+
+
Sara Khosravi
Douwe den Blanken
-
shanzehbatool
+
Marcelo Rovai
Kai Kleinbard
-
Elias Nuwara
-
kai4avaya
Jared Ping
+
Jared Ping
Matthew Stewart
Itai Shapira
Maximilian Lam
-
Jayson Lin
Sara Khosravi
@@ -113,79 +124,83 @@ This project follows the [all-contributors](https://allcontributors.org) specifi
Sophia Cho
Korneel Van den Berghe
-
Zishen Wan
-
Colby Banbury
+
Divya Amirtharaj
Zishen Wan
Abdulrahman Mahmoud
+
Srivatsan Krishnan
Divya Amirtharaj
-
+
Haoran Qiu
Emeka Ezike
+
Aghyad Deeb
Haoran Qiu
-
marin-llobet
-
Michael Schnebly
+
oishib
Emil Njor
-
Jared Ni
+
Aditi Raju
+
Jared Ni
+
Michael Schnebly
oishib
-
ELSuitorHarvard
-
Emil Njor
Henry Bae
+
+
Henry Bae
Jae-Won Chung
Yu-Shun Hsiao
-
Mark Mazumder
-
Jae-Won Chung
-
Shvetank Prakash
+
Pong Trairatvorakul
Marco Zennaro
Eura Nofshin
+
Andrew Bass
Pong Trairatvorakul
-
Jennifer Zhou
-
Marco Zennaro
+
Emeka Ezike
Shvetank Prakash
+
+
Alex Oesterling
Arya Tschand
-
Bruno Scaglione
Allen-Kuang
-
Gauri Jain
-
Fin Amin
+
Fatima Shah
Allen-Kuang
+
+
Fin Amin
+
Fatima Shah
The Random DIY
-
gnodipac886
Alex Oesterling
-
Sercan Aygün
-
Emmanuel Rassou
Jason Yik
-
+
-
abigailswallow
+
Yang Zhou
+
Baldassarre Cesarano
Abenezer
+
Bilge Acun
+
yanjingl
+
Yang Zhou
+
+
abigailswallow
Jason Yik
-
happyappledog
+
Jessica Quaye
+
Curren Iyer
Emmanuel Rassou
-
-
The Random DIY
Shreya Johri
+
Sonia Murthy
+
Shreya Johri
+
Jessica Quaye
Vijay Edupuganti
-
Costin-Andrei Oncescu
Baldassarre Cesarano
-
-
Annie Laurie Cook
Vijay Edupuganti
Jothi Ramaswamy
-
Batur Arslan
-
Curren Iyer
+
-
Fatima Shah
yanjingl
+
a-saraf
diff --git a/_quarto.yml b/_quarto.yml
index 988d57ea..47ca125c 100644
--- a/_quarto.yml
+++ b/_quarto.yml
@@ -9,7 +9,7 @@ website:
announcement:
icon: star-half
dismissable: false
- content: 🌟 Help Us Reach 1,000 GitHub Stars! 🌟 For every 25 stars, Arduino and SEEED will donate a NiclaVision or XIAO ESP32S3 for AI education. Click here to ⭐
+ content: ⭐ We Hit 1,000 GitHub Stars 🎉 Thanks to you, Arduino and SEEED donated AI hardware kits for education!
type: info
position: below-navbar
@@ -75,6 +75,22 @@ book:
inference optimization, and benchmarking methodologies. The book also
explores crucial systems considerations in areas like reliability,
privacy, responsible AI, and solution validation. Enjoy reading it!
+
+ ---
+
+ 🎙 Listen to the **AI Podcast**,
+ created using Google's Notebook LM and inspired by insights drawn from our
+ [IEEE education viewpoint paper](https://web.eng.fiu.edu/gaquan/Papers/ESWEEK24Papers/CPS-Proceedings/pdfs/CODES-ISSS/563900a043/563900a043.pdf).
+ This podcast provides an accessible overview of what this book is all about.
+
+
+
+
+ _Acknowledgment:_ Special thanks to [Marco Zennaro](https://www.ictp.it/member/marco-zennaro), one of our early community contributors who helped us with the [AI for Good](./contents/core/ai_for_good/ai_for_good.qmd) chapter, for inspiring the creation of this podcast. Thank you, Marco!
+
+ ----
repo-url: https://github.com/harvard-edge/cs249r_book
repo-branch: dev
@@ -90,54 +106,37 @@ book:
chapters:
- text: "---"
- - part: FRONT MATTER
- chapters:
- - index.qmd
- - contents/dedication.qmd
- - contents/acknowledgements/acknowledgements.qmd
- - contents/contributors.qmd
- - contents/copyright.qmd
- - contents/about.qmd
+ - index.qmd
+ - contents/copyright.qmd
+ - contents/dedication.qmd
+ - contents/core/acknowledgements/acknowledgements.qmd
+ - contents/contributors.qmd
+ - contents/about.qmd
- text: "---"
- - part: MAIN
- - part: Fundamentals
- chapters:
- - contents/introduction/introduction.qmd
- - contents/ml_systems/ml_systems.qmd
- - contents/dl_primer/dl_primer.qmd
- - part: Workflow
- chapters:
- - contents/workflow/workflow.qmd
- - contents/data_engineering/data_engineering.qmd
- - contents/frameworks/frameworks.qmd
- - part: Training
- chapters:
- - contents/training/training.qmd
- - contents/efficient_ai/efficient_ai.qmd
- - contents/optimizations/optimizations.qmd
- - contents/hw_acceleration/hw_acceleration.qmd
- - part: Deployment
- chapters:
- - contents/benchmarking/benchmarking.qmd
- - contents/ondevice_learning/ondevice_learning.qmd
- - contents/ops/ops.qmd
- - part: Advanced Topics
- chapters:
- - contents/privacy_security/privacy_security.qmd
- - contents/responsible_ai/responsible_ai.qmd
- - contents/sustainable_ai/sustainable_ai.qmd
- - contents/robust_ai/robust_ai.qmd
- - contents/generative_ai/generative_ai.qmd
- - part: Social Impact
- chapters:
- - contents/ai_for_good/ai_for_good.qmd
- - part: Closing
- chapters:
- - contents/conclusion/conclusion.qmd
+ - contents/core/introduction/introduction.qmd
+ - contents/core/ml_systems/ml_systems.qmd
+ - contents/core/dl_primer/dl_primer.qmd
+ - contents/core/workflow/workflow.qmd
+ - contents/core/data_engineering/data_engineering.qmd
+ - contents/core/frameworks/frameworks.qmd
+ - contents/core/training/training.qmd
+ - contents/core/efficient_ai/efficient_ai.qmd
+ - contents/core/optimizations/optimizations.qmd
+ - contents/core/hw_acceleration/hw_acceleration.qmd
+ - contents/core/benchmarking/benchmarking.qmd
+ - contents/core/ondevice_learning/ondevice_learning.qmd
+ - contents/core/ops/ops.qmd
+ - contents/core/privacy_security/privacy_security.qmd
+ - contents/core/responsible_ai/responsible_ai.qmd
+ - contents/core/sustainable_ai/sustainable_ai.qmd
+ - contents/core/robust_ai/robust_ai.qmd
+ - contents/core/generative_ai/generative_ai.qmd
+ - contents/core/ai_for_good/ai_for_good.qmd
+ - contents/core/conclusion/conclusion.qmd
- text: "---"
- - part: LABS
+ - part: contents/labs/labs.qmd
chapters:
- - contents/labs/labs.qmd
+ - contents/labs/overview.qmd
- contents/labs/getting_started.qmd
- part: contents/labs/arduino/nicla_vision/nicla_vision.qmd
chapters:
@@ -167,37 +166,29 @@ book:
- part: REFERENCES
chapters:
- references.qmd
- - text: "---"
- appendices:
- - contents/tools.qmd
- - contents/zoo_datasets.qmd
- - contents/zoo_models.qmd
- - contents/learning_resources.qmd
- - contents/community.qmd
- - contents/case_studies.qmd
bibliography:
# main
- - contents/introduction/introduction.bib
- - contents/ai_for_good/ai_for_good.bib
- - contents/benchmarking/benchmarking.bib
- - contents/data_engineering/data_engineering.bib
- - contents/dl_primer/dl_primer.bib
- - contents/efficient_ai/efficient_ai.bib
- - contents/ml_systems/ml_systems.bib
- - contents/frameworks/frameworks.bib
- - contents/generative_ai/generative_ai.bib
- - contents/hw_acceleration/hw_acceleration.bib
- - contents/ondevice_learning/ondevice_learning.bib
- - contents/ops/ops.bib
- - contents/optimizations/optimizations.bib
- - contents/privacy_security/privacy_security.bib
- - contents/responsible_ai/responsible_ai.bib
- - contents/robust_ai/robust_ai.bib
- - contents/sustainable_ai/sustainable_ai.bib
- - contents/training/training.bib
- - contents/workflow/workflow.bib
- - contents/conclusion/conclusion.bib
+ - contents/core/introduction/introduction.bib
+ - contents/core/ai_for_good/ai_for_good.bib
+ - contents/core/benchmarking/benchmarking.bib
+ - contents/core/data_engineering/data_engineering.bib
+ - contents/core/dl_primer/dl_primer.bib
+ - contents/core/efficient_ai/efficient_ai.bib
+ - contents/core/ml_systems/ml_systems.bib
+ - contents/core/frameworks/frameworks.bib
+ - contents/core/generative_ai/generative_ai.bib
+ - contents/core/hw_acceleration/hw_acceleration.bib
+ - contents/core/ondevice_learning/ondevice_learning.bib
+ - contents/core/ops/ops.bib
+ - contents/core/optimizations/optimizations.bib
+ - contents/core/privacy_security/privacy_security.bib
+ - contents/core/responsible_ai/responsible_ai.bib
+ - contents/core/robust_ai/robust_ai.bib
+ - contents/core/sustainable_ai/sustainable_ai.bib
+ - contents/core/training/training.bib
+ - contents/core/workflow/workflow.bib
+ - contents/core/conclusion/conclusion.bib
comments:
giscus:
@@ -273,8 +264,7 @@ format:
include-in-header:
text: |
-#
-#
+
#
#
#
diff --git a/contents/about.qmd b/contents/about.qmd
index 047e1bf6..8d06c17a 100644
--- a/contents/about.qmd
+++ b/contents/about.qmd
@@ -6,30 +6,28 @@ comments: false
## Overview
-Welcome to this collaborative project initiated by the CS249r Machine Learning Systems class at Harvard University. Our goal is to make this book a community resource that assists educators and learners in understanding ML systems. The book will be regularly updated to reflect new insights into ML systems and effective teaching methods.
+Welcome to this collaborative textbook, developed as part of the CS249r Machine Learning Systems class at Harvard University. Our goal is to provide a comprehensive resource for educators and students seeking to understand machine learning systems. This book is continually updated to incorporate the latest insights and effective teaching strategies.
-## Topics Explored
-
-This book offers a comprehensive look at various aspects of machine learning systems. We cover the entire end-to-end ML systems workflow, starting with fundamental concepts and progressing through data engineering, AI frameworks, and model training.
-
-You'll learn about optimizing models for efficiency, deploying AI on various hardware platforms, and benchmarking performance. The book also explores more advanced topics like security, privacy, responsible and sustainable AI, robust and generative AI, and the social impact of AI. By the end, you'll have a solid foundation and practical insights into both the technical and ethical dimensions of machine learning.
+## What's Inside the Book
-By the time you finish this book, we hope that you'll have a foundational understanding of machine learning and its applications. You'll also learn about real-world implementations of machine learning systems and gain practical experience through project-based labs and assignments.
+We explore the technical foundations of machine learning systems, the challenges of building and deploying these systems across the computing continuum, and the vast array of applications they enable. A distinctive feature of this book is that it serves as a conduit to seminal scholarly works and academic research papers, enriching the reader's understanding and encouraging deeper exploration of the subject. This approach bridges the gap between pedagogical materials and cutting-edge research, offering a guide that keeps pace with the evolving field of applied machine learning.
-### **Who Should Read This**
+To improve the learning experience, we have included a variety of supplementary materials. Throughout the book, you will find slides that summarize key concepts, videos that provide in-depth explanations and demonstrations, exercises that reinforce your understanding, and labs that offer hands-on experience with the tools and techniques discussed. These additional resources are designed to cater to different learning styles and help you gain a deeper, more practical understanding of the subject matter.
-This book is tailored for individuals at various stages in their interaction with machine learning systems. It starts with the fundamentals and progresses to more advanced topics pertinent to the ML community and broader research areas. The most relevant audiences include:
+## Topics Explored
-* **Students in Computer Science and Electrical Engineering:** Senior and graduate students in these fields will find this book invaluable. It introduces the techniques used in designing and building ML systems, focusing on fundamentals rather than depth—typically the focus of classroom instruction. This book aims to provide the necessary background and context, enabling instructors to delve deeper into advanced topics. An important aspect is the end-to-end focus, often overlooked in traditional curricula.
+This textbook offers a comprehensive exploration of various aspects of machine learning systems, covering the entire end-to-end workflow. Starting with foundational concepts, it progresses through essential areas such as data engineering, AI frameworks, and model training.
-* **Systems Engineers:** For engineers, this book serves as a guide to understanding the challenges of intelligent applications, especially on resource-constrained ML platforms. It covers the conceptual framework and practical components that constitute an ML system, extending beyond specific areas you might specialize in at your job.
-* **Researchers and Academics:** Researchers will find that this book addresses the unique challenges of running machine learning algorithms on diverse platforms. Efficiency is becoming increasingly important; understanding algorithms alone is not sufficient, as a deeper understanding of systems is necessary to build more efficient models. For researchers, the book cites seminal papers, guiding you towards foundational works that have shaped the field and drawing connections between various areas with significant implications for your work.
+Readers will gain insights into optimizing models for efficiency, deploying AI across different hardware platforms, and benchmarking performance. The book also delves into advanced topics, including security, privacy, responsible and sustainable AI, robust AI, and generative AI. Additionally, it examines the social impact of AI, concluding with an emphasis on the positive contributions AI can make to society.
## Key Learning Outcomes
Readers will acquire skills in training and deploying deep neural network models on various platforms, along with understanding the broader challenges involved in their design, development, and deployment. Specifically, after completing this book, learners will be able to:
+::: {.callout-tip}
+
1. Explain core concepts and their relevance to AI systems.
2. Describe the fundamental components and architecture of AI systems.
@@ -50,6 +48,8 @@ Readers will acquire skills in training and deploying deep neural network models
10. Critically assess the ethical implications and societal impacts of AI systems.
+:::
+
## Prerequisites for Readers
* **Basic Programming Skills:** We recommend that you have some prior programming experience, ideally in Python. A grasp of variables, data types, and control structures will make it easier to engage with the book.
@@ -65,3 +65,128 @@ Readers will acquire skills in training and deploying deep neural network models
* **Resource Availability:** For the hands-on aspects, you'll need a computer with Python and the relevant libraries installed. Optional access to development boards or specific hardware will also be beneficial for experimenting with machine learning model deployment.
By meeting these prerequisites, you'll be well-positioned to deepen your understanding of machine learning systems, engage in coding exercises, and even implement practical applications on various devices.
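+
+To make the programming prerequisite concrete, the short sketch below illustrates the level of Python fluency we assume (variables, data types, and control structures). The snippet is illustrative only and does not come from any chapter:
+
+```python
+# A rough gauge of the Python fluency assumed in this book:
+# variables, data types, and control structures.
+temperatures_c = [18.5, 21.0, 23.4, 19.9]  # a list of floats
+
+for t in temperatures_c:
+    label = "warm" if t > 20.0 else "cool"  # conditional expression
+    print(f"{t:.1f} C: {label}")
+
+# Optional: confirm which Python interpreter you are running
+# before attempting the hands-on labs.
+import sys
+print(sys.version)
+```
+
+If this reads comfortably and runs in your Python environment, you are ready for the hands-on portions of the book.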
+
+## Who Should Read This
+
+This book is designed for individuals at different stages of their journey with machine learning systems, from beginners to those more advanced in the field. It introduces fundamental concepts and progresses to complex topics relevant to both the machine learning community and the broader research landscape. The key audiences for this book include:
+
+* **Students in Computer Science and Electrical Engineering:** Senior and graduate students will find this book particularly valuable. It introduces the techniques essential for designing and building ML systems, focusing on foundational knowledge rather than the exhaustive detail that is typically the focus of classroom instruction. This book provides the necessary background and context, enabling instructors to explore advanced topics more deeply. An essential feature is its end-to-end perspective, which is often overlooked in traditional curricula.
+
+* **Systems Engineers:** This book serves as a guide for engineers seeking to understand the complexities of intelligent systems and applications, particularly those involving ML. It covers the conceptual frameworks and practical components that constitute an ML system, extending beyond the specific areas you might encounter in your professional role.
+
+* **Researchers and Academics:** For researchers, this book addresses the distinct challenges of executing machine learning algorithms across diverse platforms. As efficiency gains importance, a robust understanding of systems, beyond algorithms alone, is crucial for developing more efficient models. The book references seminal papers, directing researchers to works that have influenced the field and establishing connections between various areas with significant implications for their research.
+
+## How to Navigate This Book
+
+To get the most out of this book, we recommend a structured learning approach that leverages the various resources provided. Each chapter includes slides, videos, exercises, and labs to cater to different learning styles and reinforce your understanding.
+
+1. **Fundamentals (Chapters 1-3):** Start by building a strong foundation with the initial chapters, which provide an introduction to AI and cover core topics like AI systems and deep learning.
+
+2. **Workflow (Chapters 4-6):** With that foundation, move on to the chapters focused on practical aspects of the AI model building process like workflows, data engineering, and frameworks.
+
+3. **Training (Chapters 7-10):** These chapters offer insights into effectively training AI models, including techniques for efficiency, optimizations, and acceleration.
+
+4. **Deployment (Chapters 11-13):** Learn about deploying AI on devices and operationalizing it through benchmarking, on-device learning, and MLOps.
+
+5. **Advanced Topics (Chapters 14-18):** Critically examine topics like security, privacy, ethics, sustainability, robustness, and generative AI.
+
+6. **Social Impact (Chapter 19):** Explore the positive applications and potential of AI for societal good.
+
+7. **Conclusion (Chapter 20):** Reflect on the key takeaways and future directions in AI systems.
+
+While the book is designed for progressive learning, we encourage an interconnected learning approach that allows you to navigate chapters based on your interests and needs. Throughout the book, you'll find case studies and hands-on exercises that help you relate theory to real-world applications. We also recommend participating in forums and groups to engage in [discussions](https://github.com/harvard-edge/cs249r_book/discussions), debate concepts, and share insights with fellow learners. Regularly revisiting chapters can help reinforce your learning and offer new perspectives on the concepts covered. By adopting this structured yet flexible approach and actively engaging with the content and the community, you'll embark on a fulfilling and enriching learning experience that maximizes your understanding.
+
+## Chapter-by-Chapter Insights
+
+Here's a closer look at what each chapter covers. We have structured the book into six main sections: Fundamentals, Workflow, Training, Deployment, Advanced Topics, and Impact. These sections closely reflect the major components of a typical machine learning pipeline, from understanding the basic concepts to deploying and maintaining AI systems in real-world applications. By organizing the content in this manner, we aim to provide a logical progression that mirrors the actual process of developing and implementing AI systems.
+
+### Fundamentals
+
+In the Fundamentals section, we lay the groundwork for understanding AI. Rather than offering a thorough deep dive into the algorithms, we aim to introduce key concepts, provide an overview of machine learning systems, and survey the principles and algorithms of deep learning that power AI applications in their associated systems. This section equips you with the essential knowledge needed to grasp the subsequent chapters.
+
+1. **[Introduction:](./core/introduction/introduction.qmd)** This chapter sets the stage, providing an overview of AI and laying the groundwork for the chapters that follow.
+2. **[ML Systems:](./core/ml_systems/ml_systems.qmd)** We introduce the basics of machine learning systems, the platforms where AI algorithms are widely applied.
+3. **[Deep Learning Primer:](./core/dl_primer/dl_primer.qmd)** This chapter offers a brief introduction to the algorithms and principles that underpin AI applications in ML systems.
+
+### Workflow
+
+The Workflow section guides you through the practical aspects of building AI models. We break down the AI workflow, discuss data engineering best practices, and review popular AI frameworks. By the end of this section, you'll have a clear understanding of the steps involved in developing proficient AI applications and the tools available to streamline the process.
+
+4. **[AI Workflow:](./core/workflow/workflow.qmd)** This chapter breaks down the machine learning workflow, offering insights into the steps leading to proficient AI applications.
+5. **[Data Engineering:](./core/data_engineering/data_engineering.qmd)** We focus on the importance of data in AI systems, discussing how to effectively manage and organize data.
+6. **[AI Frameworks:](./core/frameworks/frameworks.qmd)** This chapter reviews different frameworks for developing machine learning models, guiding you in choosing the most suitable one for your projects.
+
+### Training
+
+In the Training section, we explore techniques for training efficient and reliable AI models. We cover strategies for achieving efficiency, model optimizations, and the role of specialized hardware in AI acceleration. This section empowers you with the knowledge to develop high-performing models that can be seamlessly integrated into AI systems.
+
+7. **[AI Training:](./core/training/training.qmd)** This chapter covers model training, exploring techniques for developing efficient and reliable models.
+8. **[Efficient AI:](./core/efficient_ai/efficient_ai.qmd)** Here, we discuss strategies for achieving efficiency in AI applications, from computational resource optimization to performance enhancement.
+9. **[Model Optimizations:](./core/optimizations/optimizations.qmd)** We explore various avenues for optimizing AI models for seamless integration into AI systems.
+10. **[AI Acceleration:](./core/hw_acceleration/hw_acceleration.qmd)** We discuss the role of specialized hardware in enhancing the performance of AI systems.
+
+### Deployment
+
+The Deployment section focuses on the challenges and solutions for deploying AI models. We discuss benchmarking methods to evaluate AI system performance, techniques for on-device learning to improve efficiency and privacy, and the processes involved in ML operations. This section equips you with the skills to effectively deploy and maintain AI functionalities in AI systems.
+
+11. **[Benchmarking AI:](./core/benchmarking/benchmarking.qmd)** This chapter focuses on how to evaluate AI systems through systematic benchmarking methods.
+12. **[On-Device Learning:](./core/ondevice_learning/ondevice_learning.qmd)** We explore techniques for localized learning, which enhances both efficiency and privacy.
+13. **[ML Operations:](./core/ops/ops.qmd)** This chapter looks at the processes involved in the seamless integration, monitoring, and maintenance of AI functionalities.
+
+### Advanced Topics
+
+In the Advanced Topics section, we examine the critical issues surrounding AI. We address privacy and security concerns, explore the ethical principles of responsible AI, discuss strategies for sustainable AI development, cover techniques for building robust AI models, and introduce the exciting field of generative AI. This section broadens your understanding of the complex landscape of AI and prepares you to navigate its challenges.
+
+14. **[Security & Privacy:](./core/privacy_security/privacy_security.qmd)** As AI becomes more ubiquitous, this chapter addresses the crucial aspects of privacy and security in AI systems.
+15. **[Responsible AI:](./core/responsible_ai/responsible_ai.qmd)** We discuss the ethical principles guiding the responsible use of AI, focusing on fairness, accountability, and transparency.
+16. **[Sustainable AI:](./core/sustainable_ai/sustainable_ai.qmd)** This chapter explores practices and strategies for sustainable AI, ensuring long-term viability and reduced environmental impact.
+17. **[Robust AI:](./core/robust_ai/robust_ai.qmd)** We discuss techniques for developing reliable and robust AI models that can perform consistently across various conditions.
+18. **[Generative AI:](./core/generative_ai/generative_ai.qmd)** This chapter explores the algorithms and techniques behind generative AI, opening avenues for innovation and creativity.
+
+### Social Impact
+
+The Impact section highlights the transformative potential of AI in various domains. We showcase real-world applications of TinyML in healthcare, agriculture, conservation, and other areas where AI is making a positive difference. This section inspires you to leverage the power of AI for societal good and to contribute to the development of impactful solutions.
+
+19. **[AI for Good:](./core/ai_for_good/ai_for_good.qmd)** We highlight positive applications of TinyML in areas like healthcare, agriculture, and conservation.
+
+### Closing
+
+In the Closing section, we reflect on the key learnings from the book and look ahead to the future of AI. We synthesize the concepts covered, discuss emerging trends, and provide guidance on continuing your learning journey in this rapidly evolving field. This section leaves you with a comprehensive understanding of AI and the excitement to apply your knowledge in innovative ways.
+
+20. **[Conclusion:](./core/conclusion/conclusion.qmd)** The book concludes with a reflection on the key learnings and future directions in the field of AI.
+
+## Tailored Learning
+
+We understand that readers have diverse interests; some may wish to grasp the fundamentals, while others are eager to delve into advanced topics like hardware acceleration or AI ethics. To help you navigate the book more effectively, we've created a persona-based reading guide tailored to your specific interests and goals. Each persona represents a distinct reader profile with specific objectives. By selecting the persona that resonates with you, you can focus on the chapters and sections most relevant to your needs.
+
++------------------------+--------------------------------------------------------------------------+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------+
+| Persona | Description | Chapters | Focus |
++:=======================+:=========================================================================+:==============================================+:==========================================================================================================+
+| The TinyML Newbie | You are new to the field of TinyML and eager to learn the basics. | 1-3, 8, 9, 10, 12 | Understand the fundamentals, gain insights into efficient and optimized ML, |
+| | | | and learn about on-device learning. |
++------------------------+--------------------------------------------------------------------------+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------+
+| The EdgeML Enthusiast | You have some TinyML knowledge and are interested in exploring | 1-3, 8, 9, 10, 12, 13 | Build a strong foundation, delve into the intricacies of efficient ML, |
+| | the broader world of EdgeML. | | and explore the operational aspects of embedded systems. |
++------------------------+--------------------------------------------------------------------------+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------+
+| The Computer Visionary | You are fascinated by computer vision and its applications in TinyML | 1-3, 5, 8-10, 12, 13, 17, 20 | Start with the basics, explore data engineering, and study methods for optimizing ML |
+| | and EdgeML. | | models. Learn about robustness and the future of ML systems. |
++------------------------+--------------------------------------------------------------------------+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------+
+| The Data Maestro | You are passionate about data and its crucial role in ML systems. | 1-5, 8-13 | Gain a comprehensive understanding of data's role in ML systems, explore the ML |
+| | | | workflow, and dive into model optimization and deployment considerations. |
++------------------------+--------------------------------------------------------------------------+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------+
+| The Hardware Hero | You are excited about the hardware aspects of ML systems and how | 1-3, 6, 8-10, 12, 14, 17, 20 | Build a solid foundation in ML systems and frameworks, explore challenges of |
+| | they impact model performance. | | optimizing models for efficiency, hardware-software co-design, and security aspects. |
++------------------------+--------------------------------------------------------------------------+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------+
+| The Sustainability | You are an advocate for sustainability and want to learn how to | 1-3, 8-10, 12, 15, 16, 20 | Begin with the fundamentals of ML systems and TinyML, explore model optimization |
+| Champion | develop eco-friendly AI systems. | | techniques, and learn about responsible and sustainable AI practices. |
++------------------------+--------------------------------------------------------------------------+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------+
+| The AI Ethicist | You are concerned about the ethical implications of AI and want to | 1-3, 5, 7, 12, 14-16, 19, 20 | Gain insights into the ethical considerations surrounding AI, including fairness, |
+| | ensure responsible development and deployment. | | privacy, sustainability, and responsible development practices. |
++------------------------+--------------------------------------------------------------------------+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------+
+| The Full-Stack ML | You are a seasoned ML expert and want to deepen your understanding | The entire book | Understand the end-to-end process of building and deploying ML systems, from data |
+| Engineer | of the entire ML system stack. | | engineering and model optimization to hardware acceleration and ethical considerations. |
++------------------------+--------------------------------------------------------------------------+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------+
+
+## Join the Community
+
+Learning in the fast-paced world of AI is a collaborative journey. We set out to nurture a vibrant community of learners, innovators, and contributors. As you explore the concepts and engage with the exercises, we encourage you to share your insights and experiences. Whether it's a novel approach, an interesting application, or a thought-provoking question, your contributions can enrich the learning ecosystem. Engage in discussions, offer and seek guidance, and collaborate on projects to foster a culture of mutual growth and learning. By sharing knowledge, you play an important role in fostering a globally connected, informed, and empowered community.
diff --git a/contents/benchmarking/benchmarking.bib b/contents/benchmarking/benchmarking.bib
deleted file mode 100644
index 9b0cd0e6..00000000
--- a/contents/benchmarking/benchmarking.bib
+++ /dev/null
@@ -1,380 +0,0 @@
-%comment{This file was created with betterbib v5.0.11.}
-
-
-@article{bianco2018benchmark,
- author = {Bianco, Simone and Cadene, Remi and Celona, Luigi and Napoletano, Paolo},
- title = {Benchmark analysis of representative deep neural network architectures},
- journal = {IEEE access},
- volume = {6},
- pages = {64270--64277},
- year = {2018},
- publisher = {IEEE},
-}
-
-@inproceedings{adolf2016fathom,
- author = {Adolf, Robert and Rama, Saketh and Reagen, Brandon and Wei, Gu-yeon and Brooks, David},
- booktitle = {2016 IEEE International Symposium on Workload Characterization (IISWC)},
- doi = {10.1109/iiswc.2016.7581275},
- organization = {IEEE},
- pages = {1--10},
- publisher = {IEEE},
- source = {Crossref},
- title = {Fathom: {Reference} workloads for modern deep learning methods},
- url = {https://doi.org/10.1109/iiswc.2016.7581275},
- year = {2016},
- month = sep,
-}
-
-@inproceedings{antol2015vqa,
- author = {Antol, Stanislaw and Agrawal, Aishwarya and Lu, Jiasen and Mitchell, Margaret and Batra, Dhruv and Zitnick, C. Lawrence and Parikh, Devi},
- bibsource = {dblp computer science bibliography, https://dblp.org},
- biburl = {https://dblp.org/rec/conf/iccv/AntolALMBZP15.bib},
- booktitle = {2015 IEEE International Conference on Computer Vision (ICCV)},
- doi = {10.1109/iccv.2015.279},
- pages = {2425--2433},
- publisher = {IEEE},
- timestamp = {Wed, 24 May 2017 01:00:00 +0200},
- title = {{VQA:} {Visual} Question Answering},
- url = {https://doi.org/10.1109/iccv.2015.279},
- year = {2015},
- source = {Crossref},
- month = dec,
-}
-
-@article{banbury2020benchmarking,
- author = {Banbury, Colby R and Reddi, Vijay Janapa and Lam, Max and Fu, William and Fazel, Amin and Holleman, Jeremy and Huang, Xinyuan and Hurtado, Robert and Kanter, David and Lokhmotov, Anton and others},
- journal = {ArXiv preprint},
- title = {Benchmarking tinyml systems: {Challenges} and direction},
- url = {https://arxiv.org/abs/2003.04821},
- volume = {abs/2003.04821},
- year = {2020},
-}
-
-@article{beyer2020we,
- author = {Beyer, Lucas and H\'enaff, Olivier J and Kolesnikov, Alexander and Zhai, Xiaohua and Oord, A\"aron van den},
- journal = {ArXiv preprint},
- title = {Are we done with imagenet?},
- url = {https://arxiv.org/abs/2006.07159},
- volume = {abs/2006.07159},
- year = {2020},
-}
-
-@inproceedings{brown2020language,
- author = {Brown, Tom B. and Mann, Benjamin and Ryder, Nick and Subbiah, Melanie and Kaplan, Jared and Dhariwal, Prafulla and Neelakantan, Arvind and Shyam, Pranav and Sastry, Girish and Askell, Amanda and Agarwal, Sandhini and Herbert-Voss, Ariel and Krueger, Gretchen and Henighan, Tom and Child, Rewon and Ramesh, Aditya and Ziegler, Daniel M. and Wu, Jeffrey and Winter, Clemens and Hesse, Christopher and Chen, Mark and Sigler, Eric and Litwin, Mateusz and Gray, Scott and Chess, Benjamin and Clark, Jack and Berner, Christopher and McCandlish, Sam and Radford, Alec and Sutskever, Ilya and Amodei, Dario},
- editor = {Larochelle, Hugo and Ranzato, Marc'Aurelio and Hadsell, Raia and Balcan, Maria-Florina and Lin, Hsuan-Tien},
- bibsource = {dblp computer science bibliography, https://dblp.org},
- biburl = {https://dblp.org/rec/conf/nips/BrownMRSKDNSSAA20.bib},
- booktitle = {Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual},
- timestamp = {Tue, 19 Jan 2021 00:00:00 +0100},
- title = {Language Models are Few-Shot Learners},
- url = {https://proceedings.neurips.cc/paper/2020/hash/1457c0d6bfcb4967418bfb8ac142f64a-Abstract.html},
- year = {2020},
-}
-
-@inproceedings{chu2021discovering,
- author = {Chu, Grace and Arikan, Okan and Bender, Gabriel and Wang, Weijun and Brighton, Achille and Kindermans, Pieter-Jan and Liu, Hanxiao and Akin, Berkin and Gupta, Suyog and Howard, Andrew},
- bibsource = {dblp computer science bibliography, https://dblp.org},
- biburl = {https://dblp.org/rec/conf/cvpr/ChuABWBKLAG021.bib},
- booktitle = {2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)},
- doi = {10.1109/cvprw53098.2021.00337},
- pages = {3022--3031},
- publisher = {IEEE},
- timestamp = {Mon, 18 Jul 2022 01:00:00 +0200},
- title = {Discovering Multi-Hardware Mobile Models via Architecture Search},
- url = {https://doi.org/10.1109/cvprw53098.2021.00337},
- year = {2021},
- source = {Crossref},
- month = jun,
-}
-
-@article{coleman2017dawnbench,
- author = {Coleman, Cody and Kang, Daniel and Narayanan, Deepak and Nardi, Luigi and Zhao, Tian and Zhang, Jian and Bailis, Peter and Olukotun, Kunle and R\'e, Chris and Zaharia, Matei},
- doi = {10.1145/3352020.3352024},
- issn = {0163-5980},
- journal = {ACM SIGOPS Operating Systems Review},
- number = {1},
- pages = {14--25},
- publisher = {Association for Computing Machinery (ACM)},
- source = {Crossref},
- title = {Analysis of {DAWNBench,} a Time-to-Accuracy Machine Learning Performance Benchmark},
- url = {https://doi.org/10.1145/3352020.3352024},
- volume = {53},
- year = {2019},
- month = jul,
-}
-
-@inproceedings{coleman2022similarity,
- author = {Coleman, Cody and Chou, Edward and Katz-Samuels, Julian and Culatana, Sean and Bailis, Peter and Berg, Alexander C. and Nowak, Robert D. and Sumbaly, Roshan and Zaharia, Matei and Yalniz, I. Zeki},
- bibsource = {dblp computer science bibliography, https://dblp.org},
- biburl = {https://dblp.org/rec/conf/aaai/ColemanCKCBBNSZ22.bib},
- booktitle = {Thirty-Sixth AAAI Conference on Artificial Intelligence, AAAI 2022, Thirty-Fourth Conference on Innovative Applications of Artificial Intelligence, IAAI 2022, The Twelveth Symposium on Educational Advances in Artificial Intelligence, EAAI 2022 Virtual Event, February 22 - March 1, 2022},
- pages = {6402--6410},
- publisher = {AAAI Press},
- timestamp = {Mon, 11 Jul 2022 01:00:00 +0200},
- title = {Similarity Search for Efficient Active Learning and Search of Rare Concepts},
- url = {https://ojs.aaai.org/index.php/AAAI/article/view/20591},
- year = {2022},
-}
-
-@article{david2021tensorflow,
- author = {David, Robert and Duke, Jared and Jain, Advait and Janapa Reddi, Vijay and Jeffries, Nat and Li, Jian and Kreeger, Nick and Nappier, Ian and Natraj, Meghna and Wang, Tiezhen and others},
- journal = {Proceedings of Machine Learning and Systems},
- pages = {800--811},
- title = {Tensorflow lite micro: {Embedded} machine learning for tinyml systems},
- volume = {3},
- year = {2021},
-}
-
-@article{davies2018loihi,
- author = {Davies, Mike and Srinivasa, Narayan and Lin, Tsung-Han and Chinya, Gautham and Cao, Yongqiang and Choday, Sri Harsha and Dimou, Georgios and Joshi, Prasad and Imam, Nabil and Jain, Shweta and Liao, Yuyun and Lin, Chit-Kwan and Lines, Andrew and Liu, Ruokun and Mathaikutty, Deepak and McCoy, Steven and Paul, Arnab and Tse, Jonathan and Venkataramanan, Guruguhanathan and Weng, Yi-Hsin and Wild, Andreas and Yang, Yoonseok and Wang, Hong},
- doi = {10.1109/mm.2018.112130359},
- issn = {0272-1732, 1937-4143},
- journal = {IEEE Micro},
- number = {1},
- pages = {82--99},
- publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
- source = {Crossref},
- title = {Loihi: {A} Neuromorphic Manycore Processor with On-Chip Learning},
- url = {https://doi.org/10.1109/mm.2018.112130359},
- volume = {38},
- year = {2018},
- month = jan,
-}
-
-@inproceedings{devlin2018bert,
- author = {Devlin, Jacob and Chang, Ming-Wei and Lee, Kenton and Toutanova, Kristina},
- address = {Minneapolis, Minnesota},
- booktitle = {Proceedings of the 2019 Conference of the North},
- doi = {10.18653/v1/n19-1423},
- pages = {4171--4186},
- publisher = {Association for Computational Linguistics},
- title = {{BERT:} {Pre-training} of Deep Bidirectional Transformers for Language Understanding},
- url = {https://doi.org/10.18653/v1/n19-1423},
- year = {2019},
- source = {Crossref},
-}
-
-@article{gaviria2022dollar,
- author = {Mattson, Peter and Reddi, Vijay Janapa and Cheng, Christine and Coleman, Cody and Diamos, Greg and Kanter, David and Micikevicius, Paulius and Patterson, David and Schmuelling, Guenther and Tang, Hanlin and Wei, Gu-Yeon and Wu, Carole-Jean},
- doi = {10.1109/mm.2020.2974843},
- issn = {0272-1732, 1937-4143},
- journal = {IEEE Micro},
- number = {2},
- pages = {8--16},
- publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
- source = {Crossref},
- title = {{MLPerf:} {An} Industry Standard Benchmark Suite for Machine Learning Performance},
- url = {https://doi.org/10.1109/mm.2020.2974843},
- volume = {40},
- year = {2020},
- month = mar,
-}
-
-@inproceedings{hendrycks2021natural,
- author = {Hendrycks, Dan and Zhao, Kevin and Basart, Steven and Steinhardt, Jacob and Song, Dawn},
- bibsource = {dblp computer science bibliography, https://dblp.org},
- biburl = {https://dblp.org/rec/conf/cvpr/HendrycksZBSS21.bib},
- booktitle = {2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
- doi = {10.1109/cvpr46437.2021.01501},
- pages = {15262--15271},
- publisher = {IEEE},
- timestamp = {Mon, 18 Jul 2022 01:00:00 +0200},
- title = {Natural Adversarial Examples},
- url = {https://doi.org/10.1109/cvpr46437.2021.01501},
- year = {2021},
- source = {Crossref},
- month = jun,
-}
-
-@inproceedings{ignatov2018ai,
- author = {Ignatov, Andrey and Timofte, Radu and Kulik, Andrei and Yang, Seungsoo and Wang, Ke and Baum, Felix and Wu, Max and Xu, Lirong and Van Gool, Luc},
- booktitle = {2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)},
- doi = {10.1109/iccvw.2019.00447},
- pages = {0--0},
- publisher = {IEEE},
- source = {Crossref},
- title = {{AI} Benchmark: {All} About Deep Learning on Smartphones in 2019},
- url = {https://doi.org/10.1109/iccvw.2019.00447},
- year = {2019},
- month = oct,
-}
-
-@inproceedings{kiela2021dynabench,
- author = {Kiela, Douwe and Bartolo, Max and Nie, Yixin and Kaushik, Divyansh and Geiger, Atticus and Wu, Zhengxuan and Vidgen, Bertie and Prasad, Grusha and Singh, Amanpreet and Ringshia, Pratik and Ma, Zhiyi and Thrush, Tristan and Riedel, Sebastian and Waseem, Zeerak and Stenetorp, Pontus and Jia, Robin and Bansal, Mohit and Potts, Christopher and Williams, Adina},
- address = {Online},
- booktitle = {Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies},
- doi = {10.18653/v1/2021.naacl-main.324},
- pages = {4110--4124},
- publisher = {Association for Computational Linguistics},
- title = {Dynabench: {Rethinking} Benchmarking in {NLP}},
- url = {https://doi.org/10.18653/v1/2021.naacl-main.324},
- year = {2021},
- source = {Crossref},
-}
-
-@inproceedings{koh2021wilds,
- author = {Koh, Pang Wei and Sagawa, Shiori and Marklund, Henrik and Xie, Sang Michael and Zhang, Marvin and Balsubramani, Akshay and Hu, Weihua and Yasunaga, Michihiro and Phillips, Richard Lanas and Gao, Irena and Lee, Tony and David, Etienne and Stavness, Ian and Guo, Wei and Earnshaw, Berton and Haque, Imran S. and Beery, Sara M. and Leskovec, Jure and Kundaje, Anshul and Pierson, Emma and Levine, Sergey and Finn, Chelsea and Liang, Percy},
- editor = {Meila, Marina and Zhang, Tong},
- bibsource = {dblp computer science bibliography, https://dblp.org},
- biburl = {https://dblp.org/rec/conf/icml/KohSMXZBHYPGLDS21.bib},
- booktitle = {Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event},
- pages = {5637--5664},
- publisher = {PMLR},
- series = {Proceedings of Machine Learning Research},
- timestamp = {Tue, 13 Dec 2022 00:00:00 +0100},
- title = {{WILDS:} {A} Benchmark of in-the-Wild Distribution Shifts},
- url = {http://proceedings.mlr.press/v139/koh21a.html},
- volume = {139},
- year = {2021},
-}
-
-@inproceedings{lin2014microsoft,
- author = {Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll\'ar, Piotr and Zitnick, C Lawrence},
- booktitle = {Computer Vision{\textendash}ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13},
- organization = {Springer},
- pages = {740--755},
- title = {Microsoft coco: {Common} objects in context},
- year = {2014},
-}
-
-@inproceedings{lundberg2017unified,
- author = {Lundberg, Scott M. and Lee, Su-In},
- editor = {Guyon, Isabelle and von Luxburg, Ulrike and Bengio, Samy and Wallach, Hanna M. and Fergus, Rob and Vishwanathan, S. V. N. and Garnett, Roman},
- bibsource = {dblp computer science bibliography, https://dblp.org},
- biburl = {https://dblp.org/rec/conf/nips/LundbergL17.bib},
- booktitle = {Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA},
- pages = {4765--4774},
- timestamp = {Thu, 21 Jan 2021 00:00:00 +0100},
- title = {A Unified Approach to Interpreting Model Predictions},
- url = {https://proceedings.neurips.cc/paper/2017/hash/8a20a8621978632d76c43dfd28b67767-Abstract.html},
- year = {2017},
-}
-
-@article{maass1997networks,
- author = {Maass, Wolfgang},
- doi = {10.1016/s0893-6080(97)00011-7},
- issn = {0893-6080},
- journal = {Neural Networks},
- number = {9},
- pages = {1659--1671},
- publisher = {Elsevier BV},
- source = {Crossref},
- title = {Networks of spiking neurons: {The} third generation of neural network models},
- url = {https://doi.org/10.1016/s0893-6080(97)00011-7},
- volume = {10},
- year = {1997},
- month = dec,
-}
-
-@article{mattson2020mlperf,
- author = {Mattson, Peter and Reddi, Vijay Janapa and Cheng, Christine and Coleman, Cody and Diamos, Greg and Kanter, David and Micikevicius, Paulius and Patterson, David and Schmuelling, Guenther and Tang, Hanlin and Wei, Gu-Yeon and Wu, Carole-Jean},
- doi = {10.1109/mm.2020.2974843},
- issn = {0272-1732, 1937-4143},
- journal = {IEEE Micro},
- number = {2},
- pages = {8--16},
- publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
- source = {Crossref},
- title = {{MLPerf:} {An} Industry Standard Benchmark Suite for Machine Learning Performance},
- url = {https://doi.org/10.1109/mm.2020.2974843},
- volume = {40},
- year = {2020},
- month = mar,
-}
-
-@article{modha2023neural,
- author = {Modha, Dharmendra S. and Akopyan, Filipp and Andreopoulos, Alexander and Appuswamy, Rathinakumar and Arthur, John V. and Cassidy, Andrew S. and Datta, Pallab and DeBole, Michael V. and Esser, Steven K. and Otero, Carlos Ortega and Sawada, Jun and Taba, Brian and Amir, Arnon and Bablani, Deepika and Carlson, Peter J. and Flickner, Myron D. and Gandhasri, Rajamohan and Garreau, Guillaume J. and Ito, Megumi and Klamo, Jennifer L. and Kusnitz, Jeffrey A. and McClatchey, Nathaniel J. and McKinstry, Jeffrey L. and Nakamura, Yutaka and Nayak, Tapan K. and Risk, William P. and Schleupen, Kai and Shaw, Ben and Sivagnaname, Jay and Smith, Daniel F. and Terrizzano, Ignacio and Ueda, Takanori},
- doi = {10.1126/science.adh1174},
- issn = {0036-8075, 1095-9203},
- journal = {Science},
- number = {6668},
- pages = {329--335},
- publisher = {American Association for the Advancement of Science (AAAS)},
- source = {Crossref},
- title = {Neural inference at the frontier of energy, space, and time},
- url = {https://doi.org/10.1126/science.adh1174},
- volume = {382},
- year = {2023},
- month = oct,
-}
-
-@inproceedings{reddi2020mlperf,
- author = {Reddi, Vijay Janapa and Cheng, Christine and Kanter, David and Mattson, Peter and Schmuelling, Guenther and Wu, Carole-Jean and Anderson, Brian and Breughe, Maximilien and Charlebois, Mark and Chou, William and Chukka, Ramesh and Coleman, Cody and Davis, Sam and Deng, Pan and Diamos, Greg and Duke, Jared and Fick, Dave and Gardner, J. Scott and Hubara, Itay and Idgunji, Sachin and Jablin, Thomas B. and Jiao, Jeff and John, Tom St. and Kanwar, Pankaj and Lee, David and Liao, Jeffery and Lokhmotov, Anton and Massa, Francisco and Meng, Peng and Micikevicius, Paulius and Osborne, Colin and Pekhimenko, Gennady and Rajan, Arun Tejusve Raghunath and Sequeira, Dilip and Sirasao, Ashish and Sun, Fei and Tang, Hanlin and Thomson, Michael and Wei, Frank and Wu, Ephrem and Xu, Lingjie and Yamada, Koichi and Yu, Bing and Yuan, George and Zhong, Aaron and Zhang, Peizhao and Zhou, Yuchen},
- booktitle = {2020 ACM/IEEE 47th Annual International Symposium on Computer Architecture (ISCA)},
- doi = {10.1109/isca45697.2020.00045},
- organization = {IEEE},
- pages = {446--459},
- publisher = {IEEE},
- source = {Crossref},
- title = {{MLPerf} Inference Benchmark},
- url = {https://doi.org/10.1109/isca45697.2020.00045},
- year = {2020},
- month = may,
-}
-
-@inproceedings{ribeiro2016should,
- author = {Ribeiro, Marco Tulio and Singh, Sameer and Guestrin, Carlos},
- booktitle = {Proceedings of the 22nd ACM SIGKDD international conference on knowledge discovery and data mining},
- pages = {1135--1144},
- title = {{\textquotedblright} Why should i trust you?{\textquotedblright} Explaining the predictions of any classifier},
- year = {2016},
-}
-
-@article{schuman2022opportunities,
- author = {Schuman, Catherine D. and Kulkarni, Shruti R. and Parsa, Maryam and Mitchell, J. Parker and Date, Prasanna and Kay, Bill},
- doi = {10.1038/s43588-021-00184-y},
- issn = {2662-8457},
- journal = {Nature Computational Science},
- number = {1},
- pages = {10--19},
- publisher = {Springer Science and Business Media LLC},
- source = {Crossref},
- title = {Opportunities for neuromorphic computing algorithms and applications},
- url = {https://doi.org/10.1038/s43588-021-00184-y},
- volume = {2},
- year = {2022},
- month = jan,
-}
-
-@article{warden2018speech,
- author = {Warden, Pete},
- journal = {ArXiv preprint},
- title = {Speech commands: {A} dataset for limited-vocabulary speech recognition},
- url = {https://arxiv.org/abs/1804.03209},
- volume = {abs/1804.03209},
- year = {2018},
-}
-
-@inproceedings{xie2020adversarial,
- author = {Xie, Cihang and Tan, Mingxing and Gong, Boqing and Wang, Jiang and Yuille, Alan L. and Le, Quoc V.},
- bibsource = {dblp computer science bibliography, https://dblp.org},
- biburl = {https://dblp.org/rec/conf/cvpr/XieTGWYL20.bib},
- booktitle = {2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
- doi = {10.1109/cvpr42600.2020.00090},
- pages = {816--825},
- publisher = {IEEE},
- timestamp = {Tue, 13 Oct 2020 01:00:00 +0200},
- title = {Adversarial Examples Improve Image Recognition},
- url = {https://doi.org/10.1109/cvpr42600.2020.00090},
- year = {2020},
- source = {Crossref},
- month = jun,
-}
-
-@article{xu2023demystifying,
- author = {Xu, Hu and Xie, Saining and Tan, Xiaoqing Ellen and Huang, Po-Yao and Howes, Russell and Sharma, Vasu and Li, Shang-Wen and Ghosh, Gargi and Zettlemoyer, Luke and Feichtenhofer, Christoph},
- journal = {ArXiv preprint},
- title = {Demystifying {CLIP} Data},
- url = {https://arxiv.org/abs/2309.16671},
- volume = {abs/2309.16671},
- year = {2023},
-}
-
-@misc{yik2023neurobench,
- author = {Yik, Jason and Ahmed, Soikat Hasan and Ahmed, Zergham and Anderson, Brian and Andreou, Andreas G. and Bartolozzi, Chiara and Basu, Arindam and den Blanken, Douwe and Bogdan, Petrut and Bohte, Sander and Bouhadjar, Younes and Buckley, Sonia and Cauwenberghs, Gert and Corradi, Federico and de Croon, Guido and Danielescu, Andreea and Daram, Anurag and Davies, Mike and Demirag, Yigit and Eshraghian, Jason and Forest, Jeremy and Furber, Steve and Furlong, Michael and Gilra, Aditya and Indiveri, Giacomo and Joshi, Siddharth and Karia, Vedant and Khacef, Lyes and Knight, James C. and Kriener, Laura and Kubendran, Rajkumar and Kudithipudi, Dhireesha and Lenz, Gregor and Manohar, Rajit and Mayr, Christian and Michmizos, Konstantinos and Muir, Dylan and Neftci, Emre and Nowotny, Thomas and Ottati, Fabrizio and Ozcelikkale, Ayca and Pacik-Nelson, Noah and Panda, Priyadarshini and Pao-Sheng, Sun and Payvand, Melika and Pehle, Christian and Petrovici, Mihai A. and Posch, Christoph and Renner, Alpha and Sandamirskaya, Yulia and Schaefer, Clemens JS and van Schaik, Andr\'e and Schemmel, Johannes and Schuman, Catherine and Seo, Jae-sun and Sheik, Sadique and Shrestha, Sumit Bam and Sifalakis, Manolis and Sironi, Amos and Stewart, Kenneth and Stewart, Terrence C. and Stratmann, Philipp and Tang, Guangzhi and Timcheck, Jonathan and Verhelst, Marian and Vineyard, Craig M. and Vogginger, Bernhard and Yousefzadeh, Amirreza and Zhou, Biyan and Zohora, Fatima Tuz and Frenkel, Charlotte and Reddi, Vijay Janapa},
- archiveprefix = {arXiv},
- eprint = {2304.04640},
- primaryclass = {cs.AI},
- title = {{NeuroBench:} {Advancing} Neuromorphic Computing through Collaborative, Fair and Representative Benchmarking},
- year = {2023},
-}
diff --git a/contents/case_studies.qmd b/contents/case_studies.qmd
deleted file mode 100644
index e346f64e..00000000
--- a/contents/case_studies.qmd
+++ /dev/null
@@ -1,8 +0,0 @@
-# Case Studies {#sec-case_studies}
-
-::: {.callout-tip}
-
-## Learning Objectives
-
-* *Coming soon.*
-:::
diff --git a/contents/community.qmd b/contents/community.qmd
deleted file mode 100644
index ed282795..00000000
--- a/contents/community.qmd
+++ /dev/null
@@ -1,50 +0,0 @@
-# Communities {#sec-communities}
-
-Welcome to our dedicated hub for TinyML enthusiasts. Whether you are a seasoned developer, a researcher, or a curious hobbyist looking to dive into the world of TinyML, this page is a non-exhaustive list of community resources and forums to help you get started and thrive in this domain. From vibrant online communities and educational platforms to blogs and social media groups, discover a world brimming with knowledge, collaboration, and innovation. Begin your TinyML journey here, where opportunities for learning and networking are just a click away!
-
-## Online Forums
-
-1. **TinyML Forum**
- Website: [TinyML Forum](https://forums.tinyml.org/)
- Description: A dedicated forum for discussions, news, and updates on TinyML.
-
-2. **Reddit**
- Subreddits: r/TinyML
- Description: Reddit community discussing various topics related to TinyML.
-
-## Blogs and Websites
-
-1. **TinyML Foundation**
- Website: [TinyML Foundation](https://tinyml.org/)
- Description: The official website offers a wealth of information including research, news, and events.
-
-2. **Edge Impulse Blog**
- Website: [Blog](https://www.edgeimpulse.com/blog)
- Description: Contains several articles, tutorials, and resources on TinyML.
-
-3. **Tiny Machine Learning Open Education Initiative (TinyMLedu)**
- Website: [TinyML Open Education Initiative](https://tinymledu.org/)
- Description: The website offers links to educational materials on TinyML, training events and research papers.
-
-## Social Media Groups
-
-1. **LinkedIn Groups**
- Description: Join TinyML groups on LinkedIn to connect with professionals and enthusiasts in the field.
-
-2. **Twitter**
- Description: Follow TinyML enthusiasts, organizations, and experts on Twitter for the latest news and updates.
- Example handles to follow:
- - [Twitter](https://twitter.com/tinymlf)
- - [EdgeImpulse](https://twitter.com/EdgeImpulse)
-
-## Conferences and Meetups
-
-1. **TinyML Summit**
- Website: [TinyML Summit](https://www.tinyml.org/)
- Description: Annual event where professionals and enthusiasts gather to discuss the latest developments in TinyML.
-
-2. **Meetup**
- Website: [Meetup](https://www.meetup.com/pro/tinyml)
- Description: Search for TinyML groups on Meetup to find local or virtual gatherings.
-
-Remember to always check the credibility and activity level of the platforms and groups before diving in to ensure a productive experience.
diff --git a/contents/contributors.bib b/contents/contributors.bib
deleted file mode 100644
index e69de29b..00000000
diff --git a/contents/contributors.qmd b/contents/contributors.qmd
index ac5cd3db..a5e4415d 100644
--- a/contents/contributors.qmd
+++ b/contents/contributors.qmd
@@ -73,24 +73,24 @@ We extend our sincere thanks to the diverse group of individuals who have genero
+
Vijay Janapa Reddi
jasonjabbour
Ikechukwu Uchendu
-
Naeem Khoshnevis
-
jasonjabbour
+
Douwe den Blanken
Marcelo Rovai
+
+
Sara Khosravi
Douwe den Blanken
-
shanzehbatool
+
Marcelo Rovai
Kai Kleinbard
-
Elias Nuwara
-
kai4avaya
Jared Ping
+
Jared Ping
Matthew Stewart
Itai Shapira
Maximilian Lam
-
Jayson Lin
Sara Khosravi
@@ -100,79 +100,83 @@ We extend our sincere thanks to the diverse group of individuals who have genero
Sophia Cho
Korneel Van den Berghe
-
Zishen Wan
-
Colby Banbury
+
Divya Amirtharaj
Zishen Wan
Abdulrahman Mahmoud
+
Srivatsan Krishnan
Divya Amirtharaj
-
+
Haoran Qiu
Emeka Ezike
+
Aghyad Deeb
Haoran Qiu
-
marin-llobet
-
Michael Schnebly
+
oishib
Emil Njor
-
Jared Ni
+
Aditi Raju
+
Jared Ni
+
Michael Schnebly
oishib
-
ELSuitorHarvard
-
Emil Njor
Henry Bae
+
+
Henry Bae
Jae-Won Chung
Yu-Shun Hsiao
-
Mark Mazumder
-
Jae-Won Chung
-
Shvetank Prakash
+
Pong Trairatvorakul
Marco Zennaro
Eura Nofshin
+
Andrew Bass
Pong Trairatvorakul
-
Jennifer Zhou
-
Marco Zennaro
+
Emeka Ezike
Shvetank Prakash
+
+
Alex Oesterling
Arya Tschand
-
Bruno Scaglione
Allen-Kuang
-
Gauri Jain
-
Fin Amin
+
Fatima Shah
Allen-Kuang
+
+
Fin Amin
+
Fatima Shah
The Random DIY
-
gnodipac886
Alex Oesterling
-
Sercan Aygün
-
Emmanuel Rassou
Jason Yik
-
+
-
abigailswallow
+
Yang Zhou
+
Baldassarre Cesarano
Abenezer
+
Bilge Acun
+
yanjingl
+
Yang Zhou
+
+
abigailswallow
Jason Yik
-
happyappledog
+
Jessica Quaye
+
Curren Iyer
Emmanuel Rassou
-
-
The Random DIY
Shreya Johri
+
Sonia Murthy
+
Shreya Johri
+
Jessica Quaye
Vijay Edupuganti
-
Costin-Andrei Oncescu
Baldassarre Cesarano
-
-
Annie Laurie Cook
Vijay Edupuganti
Jothi Ramaswamy
-
Batur Arslan
-
Curren Iyer
+
-
Fatima Shah
yanjingl
+
a-saraf
diff --git a/contents/conventions.qmd b/contents/conventions.qmd
deleted file mode 100644
index 109fc7e5..00000000
--- a/contents/conventions.qmd
+++ /dev/null
@@ -1,68 +0,0 @@
-# Conventions Used in this Book
-
-Please follow these conventions as you contribute to this online book:
-
-1. **Clear Structure and Organization:**
-
- - **Chapter Outlines:** Begin each chapter with an outline that provides an
- overview of the topics covered.
- - **Sequential Numbering:** Utilize sequential numbering for chapters,
- sections, and subsections to facilitate easy reference.
-
-2. **Accessible Language:**
-
- - **Glossary:** Include a glossary that defines technical terms and jargon.
- - **Consistent Terminology:** Maintain consistent use of terminology
- throughout the book to avoid confusion.
-
-3. **Learning Aids:**
-
- - **Diagrams and Figures:** Employ diagrams, figures, and tables to visually
- convey complex concepts.
- - **Sidebars:** Use sidebars for additional information, anecdotes, or to
- provide real-world context to the theoretical content.
-
-4. **Interactive Elements:**
-
- - **Colabs and Projects:** Integrate exercises and projects at the end of
- each chapter to encourage active learning and practical application of
- concepts.
- - **Case Studies:** Incorporate case studies to provide a deeper
- understanding of how principles are applied in real-world situations.
-
-5. **References and Further Reading:**
-
- - **Bibliography:** Include a bibliography at the end of each chapter for
- readers who wish to dive deeper into specific topics.
- - **Citations:** Maintain a consistent style for citations, adhering to
- recognized academic standards like APA, MLA, or Chicago.
-
-6. **Supporting Materials:**
-
- - **Supplementary Online Resources:** Provide links to supplementary online
- resources, such as video lectures, webinars, or interactive modules.
- - **Datasets and Code Repositories:** Share datasets and code repositories
- for hands-on practice, particularly for sections dealing with algorithms
- and applications.
-
-7. **Feedback and Community Engagement:**
-
- - **Forums and Discussion Groups:** Establish forums or discussion groups
- where readers can interact, ask questions, and share knowledge.
- - **Open Review Process:** Implement an open review process, inviting
- feedback from the community to continuously improve the content.
-
-8. **Inclusivity and Accessibility:**
-
- - **Inclusive Language:** Utilize inclusive language that respects diversity
- and promotes equality.
- - **Accessible Formats:** Ensure the textbook is available in accessible
- formats, including audio and Braille, to cater to readers with
- disabilities.
-
-9. **Index:**
- - **Comprehensive Index:** Include a comprehensive index at the end of the
- book to help readers quickly locate specific information.
-
-Implementing these conventions can contribute to creating a textbook that is
-comprehensive, accessible, and conducive to effective learning.
diff --git a/contents/acknowledgements/acknowledgements.qmd b/contents/core/acknowledgements/acknowledgements.qmd
similarity index 86%
rename from contents/acknowledgements/acknowledgements.qmd
rename to contents/core/acknowledgements/acknowledgements.qmd
index 7f285aab..5cb77312 100644
--- a/contents/acknowledgements/acknowledgements.qmd
+++ b/contents/core/acknowledgements/acknowledgements.qmd
@@ -10,13 +10,11 @@ Assembling this book has been a long journey, spanning several years of hard wor
We extend our heartfelt gratitude to the open source community of learners, teachers and sharers. Whether you contributed an entire section, a single sentence, or merely corrected a typo, your efforts have enhanced this book. We deeply appreciate everyone's time, expertise, and commitment. This book is as much yours as it is ours.
-Special thanks go to Professor Vijay Janapa Reddi, whose belief in the transformative power of open-source communities and invaluable guidance have been our guiding light from the outset.
-
We also owe a great deal to the team at GitHub and Quarto. You've revolutionized the way people collaborate, and this book stands as a testament to what can be achieved when barriers to global cooperation are removed.
## Funding Agencies and Companies
-We are immensely grateful for the generous support from the various funding agencies and companies that supported the teaching assistants (TAs) involved in this work. The organizations listed below played a crucial role in bringing this project to life with their contributions.
+We are immensely grateful for the generous support from the various funding agencies and companies that supported the teaching assistants (TAs) involved in this work. The organizations listed below played an important role in bringing this project to life with their contributions.
::: {layout-nrow=2}
diff --git a/contents/acknowledgements/images/png/HDSI.png b/contents/core/acknowledgements/images/png/HDSI.png
similarity index 100%
rename from contents/acknowledgements/images/png/HDSI.png
rename to contents/core/acknowledgements/images/png/HDSI.png
diff --git a/contents/acknowledgements/images/png/NSF.png b/contents/core/acknowledgements/images/png/NSF.png
similarity index 100%
rename from contents/acknowledgements/images/png/NSF.png
rename to contents/core/acknowledgements/images/png/NSF.png
diff --git a/contents/acknowledgements/images/png/google.png b/contents/core/acknowledgements/images/png/google.png
similarity index 100%
rename from contents/acknowledgements/images/png/google.png
rename to contents/core/acknowledgements/images/png/google.png
diff --git a/contents/acknowledgements/images/png/harvard-xtension-school.png b/contents/core/acknowledgements/images/png/harvard-xtension-school.png
similarity index 100%
rename from contents/acknowledgements/images/png/harvard-xtension-school.png
rename to contents/core/acknowledgements/images/png/harvard-xtension-school.png
diff --git a/contents/ai_for_good/ai_for_good.bib b/contents/core/ai_for_good/ai_for_good.bib
similarity index 100%
rename from contents/ai_for_good/ai_for_good.bib
rename to contents/core/ai_for_good/ai_for_good.bib
diff --git a/contents/ai_for_good/ai_for_good.qmd b/contents/core/ai_for_good/ai_for_good.qmd
similarity index 98%
rename from contents/ai_for_good/ai_for_good.qmd
rename to contents/core/ai_for_good/ai_for_good.qmd
index 9374f5ab..ca1c9087 100644
--- a/contents/ai_for_good/ai_for_good.qmd
+++ b/contents/core/ai_for_good/ai_for_good.qmd
@@ -5,7 +5,7 @@ bibliography: ai_for_good.bib
# AI for Good {#sec-ai_for_good}
::: {.content-visible when-format="html"}
-Resources: [Slides](#sec-ai-for-good-resource), [Videos](#sec-ai-for-good-resource), [Exercises](#sec-ai-for-good-resource), [Labs](#sec-ai-for-good-resource)
+Resources: [Slides](#sec-ai-for-good-resource), [Videos](#sec-ai-for-good-resource), [Exercises](#sec-ai-for-good-resource)
:::
![_DALL·E 3 Prompt: Illustration of planet Earth wrapped in shimmering neural networks, with diverse humans and AI robots working together on various projects like planting trees, cleaning the oceans, and developing sustainable energy solutions. The positive and hopeful atmosphere represents a united effort to create a better future._](images/png/cover_ai_good.png)
@@ -34,12 +34,12 @@ By aligning AI progress with human values, goals, and ethics, the ultimate goal
To give ourselves a framework around which to think about AI for social good, we will be following the UN Sustainable Development Goals (SDGs). The UN SDGs are a collection of 17 global goals, shown in @fig-sdg, adopted by the United Nations in 2015 as part of the 2030 Agenda for Sustainable Development. The SDGs address global challenges related to poverty, inequality, climate change, environmental degradation, prosperity, and peace and justice.
+![United Nations Sustainable Development Goals (SDG). Source: [United Nations](https://sdgs.un.org/goals).](https://www.un.org/sustainabledevelopment/wp-content/uploads/2015/12/english_SDG_17goals_poster_all_languages_with_UN_emblem_1.png){#fig-sdg}
+
What is special about the SDGs is that they are a collection of interlinked objectives designed to serve as a "shared blueprint for peace and prosperity for people and the planet, now and into the future." The SDGs emphasize sustainable development's interconnected environmental, social, and economic aspects by putting sustainability at their center.
A recent study [@vinuesa2020role] highlights the influence of AI on all aspects of sustainable development, particularly on the 17 Sustainable Development Goals (SDGs) and 169 targets internationally defined in the 2030 Agenda for Sustainable Development. The study shows that AI can act as an enabler for 134 targets through technological improvements, but it also highlights the challenges of AI on some targets. The study shows that AI can benefit 67 targets when considering AI and societal outcomes. Still, it also warns about the issues related to the implementation of AI in countries with different cultural values and wealth.
-![United Nations Sustainable Development Goals (SDG). Source: [United Nations](https://sdgs.un.org/goals).](https://www.un.org/sustainabledevelopment/wp-content/uploads/2015/12/english_SDG_17goals_poster_all_languages_with_UN_emblem_1.png){#fig-sdg}
-
In our book's context, TinyML could help advance at least some of these SDG goals.
* **Goal 1 - No Poverty:** TinyML could help provide low-cost solutions for crop monitoring to improve agricultural yields in developing countries.
@@ -285,15 +285,3 @@ These slides are a valuable tool for instructors to deliver lectures and for stu
* @exr-hc
:::
-
-:::{.callout-warning collapse="false"}
-
-#### Labs
-
-In addition to exercises, we offer a series of hands-on labs allowing students to gain practical experience with embedded AI technologies. These labs provide step-by-step guidance, enabling students to develop their skills in a structured and supportive environment. We are excited to announce that new labs will be available soon, further enriching the learning experience.
-
-* _Coming soon._
-:::
-
-
-
diff --git a/contents/ai_for_good/images/png/cover_ai_good.png b/contents/core/ai_for_good/images/png/cover_ai_good.png
similarity index 100%
rename from contents/ai_for_good/images/png/cover_ai_good.png
rename to contents/core/ai_for_good/images/png/cover_ai_good.png
diff --git a/contents/ai_for_good/images/png/msfarmbeats.png b/contents/core/ai_for_good/images/png/msfarmbeats.png
similarity index 100%
rename from contents/ai_for_good/images/png/msfarmbeats.png
rename to contents/core/ai_for_good/images/png/msfarmbeats.png
diff --git a/contents/core/benchmarking/benchmarking.bib b/contents/core/benchmarking/benchmarking.bib
new file mode 100644
index 00000000..c785dd69
--- /dev/null
+++ b/contents/core/benchmarking/benchmarking.bib
@@ -0,0 +1,454 @@
+%comment{This file was created with betterbib v5.0.11.}
+
+@article{bianco2018benchmark,
+ doi = {10.1109/access.2018.2877890},
+ pages = {64270--64277},
+ source = {Crossref},
+ volume = {6},
+ author = {Bianco, Simone and Cadene, Remi and Celona, Luigi and Napoletano, Paolo},
+ year = {2018},
+ url = {https://doi.org/10.1109/access.2018.2877890},
+ issn = {2169-3536},
+ journal = {IEEE Access},
+ publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
+ title = {Benchmark Analysis of Representative Deep Neural Network Architectures},
+}
+
+@inproceedings{adolf2016fathom,
+ doi = {10.1109/iiswc.2016.7581275},
+ pages = {1--10},
+ source = {Crossref},
+ author = {Adolf, Robert and Rama, Saketh and Reagen, Brandon and Wei, Gu-yeon and Brooks, David},
+ year = {2016},
+ month = sep,
+ url = {https://doi.org/10.1109/iiswc.2016.7581275},
+ booktitle = {2016 IEEE International Symposium on Workload Characterization (IISWC)},
+ publisher = {IEEE},
+ title = {Fathom: reference workloads for modern deep learning methods},
+ organization = {IEEE},
+}
+
+@inproceedings{antol2015vqa,
+ doi = {10.1109/iccv.2015.279},
+ pages = {2425--2433},
+ source = {Crossref},
+ author = {Antol, Stanislaw and Agrawal, Aishwarya and Lu, Jiasen and Mitchell, Margaret and Batra, Dhruv and Zitnick, C. Lawrence and Parikh, Devi},
+ year = {2015},
+ month = dec,
+ url = {https://doi.org/10.1109/iccv.2015.279},
+ booktitle = {2015 IEEE International Conference on Computer Vision (ICCV)},
+ publisher = {IEEE},
+ title = {VQA: Visual Question Answering},
+ bibsource = {dblp computer science bibliography, https://dblp.org},
+ biburl = {https://dblp.org/rec/conf/iccv/AntolALMBZP15.bib},
+ timestamp = {Wed, 24 May 2017 01:00:00 +0200},
+}
+
+@article{banbury2020benchmarking,
+ url = {http://arxiv.org/abs/2003.04821v4},
+ year = {2020},
+ month = mar,
+ title = {Benchmarking TinyML Systems: Challenges and Direction},
+ author = {Banbury, Colby R. and Reddi, Vijay Janapa and Lam, Max and Fu, William and Fazel, Amin and Holleman, Jeremy and Huang, Xinyuan and Hurtado, Robert and Kanter, David and Lokhmotov, Anton and Patterson, David and Pau, Danilo and Seo, Jae-sun and Sieracki, Jeff and Thakker, Urmish and Verhelst, Marian and Yadav, Poonam},
+ primaryclass = {cs.PF},
+ archiveprefix = {arXiv},
+ journal = {ArXiv preprint},
+ volume = {abs/2003.04821},
+}
+
+@article{banbury2021mlperf,
+ url = {http://arxiv.org/abs/2106.07597v4},
+ year = {2021},
+ month = jun,
+ title = {MLPerf Tiny Benchmark},
+ author = {Banbury, Colby and Reddi, Vijay Janapa and Torelli, Peter and Holleman, Jeremy and Jeffries, Nat and Kiraly, Csaba and Montino, Pietro and Kanter, David and Ahmed, Sebastian and Pau, Danilo and Thakker, Urmish and Torrini, Antonio and Warden, Peter and Cordaro, Jay and Guglielmo, Giuseppe Di and Duarte, Javier and Gibellini, Stephen and Parekh, Videet and Tran, Honson and Tran, Nhan and Wenxu, Niu and Xuesong, Xu},
+ primaryclass = {cs.LG},
+ archiveprefix = {arXiv},
+ journal = {ArXiv preprint},
+ volume = {abs/2106.07597},
+}
+
+@article{beyer2020we,
+ url = {http://arxiv.org/abs/2006.07159v1},
+ year = {2020},
+ month = jun,
+ title = {Are we done with ImageNet?},
+ author = {Beyer, Lucas and H\'enaff, Olivier J. and Kolesnikov, Alexander and Zhai, Xiaohua and van den Oord, A\"aron},
+ primaryclass = {cs.CV},
+ archiveprefix = {arXiv},
+ journal = {ArXiv preprint},
+ volume = {abs/2006.07159},
+}
+
+@inproceedings{brown2020language,
+ author = {Brown, Tom B. and Mann, Benjamin and Ryder, Nick and Subbiah, Melanie and Kaplan, Jared and Dhariwal, Prafulla and Neelakantan, Arvind and Shyam, Pranav and Sastry, Girish and Askell, Amanda and Agarwal, Sandhini and Herbert-Voss, Ariel and Krueger, Gretchen and Henighan, Tom and Child, Rewon and Ramesh, Aditya and Ziegler, Daniel M. and Wu, Jeffrey and Winter, Clemens and Hesse, Christopher and Chen, Mark and Sigler, Eric and Litwin, Mateusz and Gray, Scott and Chess, Benjamin and Clark, Jack and Berner, Christopher and McCandlish, Sam and Radford, Alec and Sutskever, Ilya and Amodei, Dario},
+ editor = {Larochelle, Hugo and Ranzato, Marc'Aurelio and Hadsell, Raia and Balcan, Maria-Florina and Lin, Hsuan-Tien},
+ bibsource = {dblp computer science bibliography, https://dblp.org},
+ biburl = {https://dblp.org/rec/conf/nips/BrownMRSKDNSSAA20.bib},
+ booktitle = {Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual},
+ timestamp = {Tue, 19 Jan 2021 00:00:00 +0100},
+ title = {Language Models are Few-Shot Learners},
+ url = {https://proceedings.neurips.cc/paper/2020/hash/1457c0d6bfcb4967418bfb8ac142f64a-Abstract.html},
+ year = {2020},
+}
+
+@article{10.1145/3467017,
+ number = {12},
+ doi = {10.1145/3467017},
+ pages = {58--65},
+ source = {Crossref},
+ volume = {64},
+ author = {Hooker, Sara},
+ year = {2021},
+ month = nov,
+ url = {https://doi.org/10.1145/3467017},
+ issn = {0001-0782,1557-7317},
+ journal = {Communications of the ACM},
+ publisher = {Association for Computing Machinery (ACM)},
+ title = {The hardware lottery},
+ issue_date = {December 2021},
+ address = {New York, NY, USA},
+ abstract = {After decades of incentivizing the isolation of hardware, software, and algorithm development, the catalysts for closer collaboration are changing the paradigm.},
+ numpages = {8},
+}
+
+@inproceedings{chu2021discovering,
+ doi = {10.1109/cvprw53098.2021.00337},
+ pages = {3016--3025},
+ source = {Crossref},
+ author = {Chu, Grace and Arikan, Okan and Bender, Gabriel and Wang, Weijun and Brighton, Achille and Kindermans, Pieter-Jan and Liu, Hanxiao and Akin, Berkin and Gupta, Suyog and Howard, Andrew},
+ year = {2021},
+ month = jun,
+ url = {https://doi.org/10.1109/cvprw53098.2021.00337},
+ booktitle = {2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)},
+ publisher = {IEEE},
+ title = {Discovering Multi-Hardware Mobile Models via Architecture Search},
+ bibsource = {dblp computer science bibliography, https://dblp.org},
+ biburl = {https://dblp.org/rec/conf/cvpr/ChuABWBKLAG021.bib},
+ timestamp = {Mon, 18 Jul 2022 01:00:00 +0200},
+}
+
+@article{coleman2017dawnbench,
+ number = {1},
+ doi = {10.1145/3352020.3352024},
+ pages = {14--25},
+ source = {Crossref},
+ volume = {53},
+ author = {Coleman, Cody and Kang, Daniel and Narayanan, Deepak and Nardi, Luigi and Zhao, Tian and Zhang, Jian and Bailis, Peter and Olukotun, Kunle and R\'e, Chris and Zaharia, Matei},
+ year = {2019},
+ month = jul,
+ url = {https://doi.org/10.1145/3352020.3352024},
+ issn = {0163-5980},
+ journal = {ACM SIGOPS Operating Systems Review},
+ publisher = {Association for Computing Machinery (ACM)},
+ title = {Analysis of DAWNBench, a Time-to-Accuracy Machine Learning Performance Benchmark},
+}
+
+@article{coleman2022similarity,
+ number = {6},
+ doi = {10.1609/aaai.v36i6.20591},
+ pages = {6402--6410},
+ source = {Crossref},
+ volume = {36},
+ author = {Coleman, Cody and Chou, Edward and Katz-Samuels, Julian and Culatana, Sean and Bailis, Peter and Berg, Alexander C. and Nowak, Robert and Sumbaly, Roshan and Zaharia, Matei and Yalniz, I. Zeki},
+ year = {2022},
+ month = jun,
+ url = {https://doi.org/10.1609/aaai.v36i6.20591},
+ issn = {2374-3468,2159-5399},
+ journal = {Proceedings of the AAAI Conference on Artificial Intelligence},
+ publisher = {Association for the Advancement of Artificial Intelligence (AAAI)},
+ title = {Similarity Search for Efficient Active Learning and Search of Rare Concepts},
+ bibsource = {dblp computer science bibliography, https://dblp.org},
+ biburl = {https://dblp.org/rec/conf/aaai/ColemanCKCBBNSZ22.bib},
+ booktitle = {Thirty-Sixth AAAI Conference on Artificial Intelligence, AAAI 2022, Thirty-Fourth Conference on Innovative Applications of Artificial Intelligence, IAAI 2022, The Twelveth Symposium on Educational Advances in Artificial Intelligence, EAAI 2022 Virtual Event, February 22 - March 1, 2022},
+ timestamp = {Mon, 11 Jul 2022 01:00:00 +0200},
+}
+
+@article{david2021tensorflow,
+ author = {David, Robert and Duke, Jared and Jain, Advait and Janapa Reddi, Vijay and Jeffries, Nat and Li, Jian and Kreeger, Nick and Nappier, Ian and Natraj, Meghna and Wang, Tiezhen and others},
+ journal = {Proceedings of Machine Learning and Systems},
+ pages = {800--811},
+ title = {TensorFlow Lite Micro: Embedded Machine Learning for TinyML Systems},
+ volume = {3},
+ year = {2021},
+}
+
+@article{davies2018loihi,
+ number = {1},
+ doi = {10.1109/mm.2018.112130359},
+ pages = {82--99},
+ source = {Crossref},
+ volume = {38},
+ author = {Davies, Mike and Srinivasa, Narayan and Lin, Tsung-Han and Chinya, Gautham and Cao, Yongqiang and Choday, Sri Harsha and Dimou, Georgios and Joshi, Prasad and Imam, Nabil and Jain, Shweta and Liao, Yuyun and Lin, Chit-Kwan and Lines, Andrew and Liu, Ruokun and Mathaikutty, Deepak and McCoy, Steven and Paul, Arnab and Tse, Jonathan and Venkataramanan, Guruguhanathan and Weng, Yi-Hsin and Wild, Andreas and Yang, Yoonseok and Wang, Hong},
+ year = {2018},
+ month = jan,
+ url = {https://doi.org/10.1109/mm.2018.112130359},
+ issn = {0272-1732,1937-4143},
+ journal = {IEEE Micro},
+ publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
+ title = {Loihi: A Neuromorphic Manycore Processor with On-Chip Learning},
+}
+
+@inproceedings{devlin2018bert,
+ doi = {10.18653/v1/n19-1423},
+ source = {Crossref},
+ author = {Devlin, Jacob and Chang, Ming-Wei and Lee, Kenton and Toutanova, Kristina},
+ year = {2019},
+ url = {https://doi.org/10.18653/v1/n19-1423},
+ booktitle = {Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)},
+ publisher = {Association for Computational Linguistics},
+ title = {BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding},
+ address = {Minneapolis, Minnesota},
+ pages = {4171--4186},
+}
+
+@inproceedings{gaviria2022dollar,
+ author = {Gaviria Rojas, William and Diamos, Sudnya and Kini, Keertan and Kanter, David and Reddi, Vijay Janapa and Coleman, Cody},
+ year = {2022},
+ booktitle = {Advances in Neural Information Processing Systems 35 (NeurIPS 2022), Datasets and Benchmarks Track},
+ title = {The Dollar Street Dataset: Images Representing the Geographic and Socioeconomic Diversity of the World},
+}
+
+@inproceedings{hendrycks2021natural,
+ doi = {10.1109/cvpr46437.2021.01501},
+ pages = {15257--15266},
+ source = {Crossref},
+ author = {Hendrycks, Dan and Zhao, Kevin and Basart, Steven and Steinhardt, Jacob and Song, Dawn},
+ year = {2021},
+ month = jun,
+ url = {https://doi.org/10.1109/cvpr46437.2021.01501},
+ booktitle = {2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
+ publisher = {IEEE},
+ title = {Natural Adversarial Examples},
+ bibsource = {dblp computer science bibliography, https://dblp.org},
+ biburl = {https://dblp.org/rec/conf/cvpr/HendrycksZBSS21.bib},
+ timestamp = {Mon, 18 Jul 2022 01:00:00 +0200},
+}
+
+@inproceedings{ignatov2018ai,
+ doi = {10.1109/iccvw.2019.00447},
+ pages = {3617--3635},
+ source = {Crossref},
+ author = {Ignatov, Andrey and Timofte, Radu and Kulik, Andrei and Yang, Seungsoo and Wang, Ke and Baum, Felix and Wu, Max and Xu, Lirong and Van Gool, Luc},
+ year = {2019},
+ month = oct,
+ url = {https://doi.org/10.1109/iccvw.2019.00447},
+ booktitle = {2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)},
+ publisher = {IEEE},
+ title = {AI Benchmark: All About Deep Learning on Smartphones in 2019},
+}
+
+@inproceedings{kiela2021dynabench,
+ doi = {10.18653/v1/2021.naacl-main.324},
+ source = {Crossref},
+ author = {Kiela, Douwe and Bartolo, Max and Nie, Yixin and Kaushik, Divyansh and Geiger, Atticus and Wu, Zhengxuan and Vidgen, Bertie and Prasad, Grusha and Singh, Amanpreet and Ringshia, Pratik and Ma, Zhiyi and Thrush, Tristan and Riedel, Sebastian and Waseem, Zeerak and Stenetorp, Pontus and Jia, Robin and Bansal, Mohit and Potts, Christopher and Williams, Adina},
+ year = {2021},
+ url = {https://doi.org/10.18653/v1/2021.naacl-main.324},
+ booktitle = {Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies},
+ publisher = {Association for Computational Linguistics},
+ title = {Dynabench: Rethinking Benchmarking in NLP},
+ address = {Online},
+ pages = {4110--4124},
+}
+
+@inproceedings{koh2021wilds,
+ author = {Koh, Pang Wei and Sagawa, Shiori and Marklund, Henrik and Xie, Sang Michael and Zhang, Marvin and Balsubramani, Akshay and Hu, Weihua and Yasunaga, Michihiro and Phillips, Richard Lanas and Gao, Irena and Lee, Tony and David, Etienne and Stavness, Ian and Guo, Wei and Earnshaw, Berton and Haque, Imran S. and Beery, Sara M. and Leskovec, Jure and Kundaje, Anshul and Pierson, Emma and Levine, Sergey and Finn, Chelsea and Liang, Percy},
+ title = {WILDS: A Benchmark of in-the-Wild Distribution Shifts},
+ pages = {5637--5664},
+ year = {2021},
+ url = {http://proceedings.mlr.press/v139/koh21a.html},
+ source = {DBLP},
+ editor = {Meila, Marina and Zhang, Tong},
+ bibsource = {dblp computer science bibliography, https://dblp.org},
+ biburl = {https://dblp.org/rec/conf/icml/KohSMXZBHYPGLDS21.bib},
+ booktitle = {Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event},
+ publisher = {PMLR},
+ series = {Proceedings of Machine Learning Research},
+ timestamp = {Tue, 13 Dec 2022 00:00:00 +0100},
+ volume = {139},
+}
+
+@incollection{lin2014microsoft,
+ doi = {10.1007/978-3-319-10602-1\_48},
+ pages = {740--755},
+ source = {Crossref},
+ author = {Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll\'ar, Piotr and Zitnick, C. Lawrence},
+ year = {2014},
+ isbn = {9783319106014,9783319106021},
+ url = {https://doi.org/10.1007/978-3-319-10602-1\_48},
+ issn = {0302-9743,1611-3349},
+ booktitle = {Computer Vision -- ECCV 2014},
+ publisher = {Springer International Publishing},
+ title = {Microsoft COCO: Common Objects in Context},
+ organization = {Springer},
+}
+
+@inproceedings{lundberg2017unified,
+ author = {Lundberg, Scott M. and Lee, Su-In},
+ editor = {Guyon, Isabelle and von Luxburg, Ulrike and Bengio, Samy and Wallach, Hanna M. and Fergus, Rob and Vishwanathan, S. V. N. and Garnett, Roman},
+ bibsource = {dblp computer science bibliography, https://dblp.org},
+ biburl = {https://dblp.org/rec/conf/nips/LundbergL17.bib},
+ booktitle = {Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA},
+ pages = {4765--4774},
+ timestamp = {Thu, 21 Jan 2021 00:00:00 +0100},
+ title = {A Unified Approach to Interpreting Model Predictions},
+ url = {https://proceedings.neurips.cc/paper/2017/hash/8a20a8621978632d76c43dfd28b67767-Abstract.html},
+ year = {2017},
+}
+
+@article{maass1997networks,
+ number = {9},
+ doi = {10.1016/s0893-6080(97)00011-7},
+ pages = {1659--1671},
+ source = {Crossref},
+ volume = {10},
+ author = {Maass, Wolfgang},
+ year = {1997},
+ month = dec,
+ url = {https://doi.org/10.1016/s0893-6080(97)00011-7},
+ issn = {0893-6080},
+ journal = {Neural Networks},
+ publisher = {Elsevier BV},
+ title = {Networks of spiking neurons: The third generation of neural network models},
+}
+
+@article{mattson2020mlperf,
+ number = {2},
+ doi = {10.1109/mm.2020.2974843},
+ pages = {8--16},
+ source = {Crossref},
+ volume = {40},
+ author = {Mattson, Peter and Reddi, Vijay Janapa and Cheng, Christine and Coleman, Cody and Diamos, Greg and Kanter, David and Micikevicius, Paulius and Patterson, David and Schmuelling, Guenther and Tang, Hanlin and Wei, Gu-Yeon and Wu, Carole-Jean},
+ year = {2020},
+ month = mar,
+ url = {https://doi.org/10.1109/mm.2020.2974843},
+ issn = {0272-1732,1937-4143},
+ journal = {IEEE Micro},
+ publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
+ title = {MLPerf: An Industry Standard Benchmark Suite for Machine Learning Performance},
+}
+
+@article{modha2023neural,
+ number = {6668},
+ doi = {10.1126/science.adh1174},
+ pages = {329--335},
+ source = {Crossref},
+ volume = {382},
+ author = {Modha, Dharmendra S. and Akopyan, Filipp and Andreopoulos, Alexander and Appuswamy, Rathinakumar and Arthur, John V. and Cassidy, Andrew S. and Datta, Pallab and DeBole, Michael V. and Esser, Steven K. and Otero, Carlos Ortega and Sawada, Jun and Taba, Brian and Amir, Arnon and Bablani, Deepika and Carlson, Peter J. and Flickner, Myron D. and Gandhasri, Rajamohan and Garreau, Guillaume J. and Ito, Megumi and Klamo, Jennifer L. and Kusnitz, Jeffrey A. and McClatchey, Nathaniel J. and McKinstry, Jeffrey L. and Nakamura, Yutaka and Nayak, Tapan K. and Risk, William P. and Schleupen, Kai and Shaw, Ben and Sivagnaname, Jay and Smith, Daniel F. and Terrizzano, Ignacio and Ueda, Takanori},
+ year = {2023},
+ month = oct,
+ url = {https://doi.org/10.1126/science.adh1174},
+ issn = {0036-8075,1095-9203},
+ journal = {Science},
+ publisher = {American Association for the Advancement of Science (AAAS)},
+ title = {Neural inference at the frontier of energy, space, and time},
+}
+
+@inproceedings{reddi2020mlperf,
+ doi = {10.1109/isca45697.2020.00045},
+ pages = {446--459},
+ source = {Crossref},
+ author = {Reddi, Vijay Janapa and Cheng, Christine and Kanter, David and Mattson, Peter and Schmuelling, Guenther and Wu, Carole-Jean and Anderson, Brian and Breughe, Maximilien and Charlebois, Mark and Chou, William and Chukka, Ramesh and Coleman, Cody and Davis, Sam and Deng, Pan and Diamos, Greg and Duke, Jared and Fick, Dave and Gardner, J. Scott and Hubara, Itay and Idgunji, Sachin and Jablin, Thomas B. and Jiao, Jeff and John, Tom St. and Kanwar, Pankaj and Lee, David and Liao, Jeffery and Lokhmotov, Anton and Massa, Francisco and Meng, Peng and Micikevicius, Paulius and Osborne, Colin and Pekhimenko, Gennady and Rajan, Arun Tejusve Raghunath and Sequeira, Dilip and Sirasao, Ashish and Sun, Fei and Tang, Hanlin and Thomson, Michael and Wei, Frank and Wu, Ephrem and Xu, Lingjie and Yamada, Koichi and Yu, Bing and Yuan, George and Zhong, Aaron and Zhang, Peizhao and Zhou, Yuchen},
+ year = {2020},
+ month = may,
+ url = {https://doi.org/10.1109/isca45697.2020.00045},
+ booktitle = {2020 ACM/IEEE 47th Annual International Symposium on Computer Architecture (ISCA)},
+ publisher = {IEEE},
+ title = {MLPerf Inference Benchmark},
+ organization = {IEEE},
+}
+
+@inproceedings{ribeiro2016should,
+ author = {Ribeiro, Marco Tulio and Singh, Sameer and Guestrin, Carlos},
+ booktitle = {Proceedings of the 22nd ACM SIGKDD international conference on knowledge discovery and data mining},
+ pages = {1135--1144},
+ title = {``Why Should I Trust You?'': Explaining the Predictions of Any Classifier},
+ year = {2016},
+}
+
+@article{schuman2022opportunities,
+ number = {1},
+ doi = {10.1038/s43588-021-00184-y},
+ pages = {10--19},
+ source = {Crossref},
+ volume = {2},
+ author = {Schuman, Catherine D. and Kulkarni, Shruti R. and Parsa, Maryam and Mitchell, J. Parker and Date, Prasanna and Kay, Bill},
+ year = {2022},
+ month = jan,
+ url = {https://doi.org/10.1038/s43588-021-00184-y},
+ issn = {2662-8457},
+ journal = {Nature Computational Science},
+ publisher = {Springer Science and Business Media LLC},
+ title = {Opportunities for neuromorphic computing algorithms and applications},
+}
+
+@article{warden2018speech,
+ url = {http://arxiv.org/abs/1804.03209v1},
+ year = {2018},
+ month = apr,
+ title = {Speech Commands: A Dataset for Limited-Vocabulary Speech Recognition},
+ author = {Warden, Pete},
+ primaryclass = {cs.CL},
+ archiveprefix = {arXiv},
+ journal = {ArXiv preprint},
+ volume = {abs/1804.03209},
+}
+
+@inproceedings{xie2020adversarial,
+ doi = {10.1109/cvpr42600.2020.00090},
+ source = {Crossref},
+ author = {Xie, Cihang and Tan, Mingxing and Gong, Boqing and Wang, Jiang and Yuille, Alan L. and Le, Quoc V.},
+ year = {2020},
+ month = jun,
+ url = {https://doi.org/10.1109/cvpr42600.2020.00090},
+ booktitle = {2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
+ publisher = {IEEE},
+ title = {Adversarial Examples Improve Image Recognition},
+ bibsource = {dblp computer science bibliography, https://dblp.org},
+ biburl = {https://dblp.org/rec/conf/cvpr/XieTGWYL20.bib},
+ pages = {816--825},
+ timestamp = {Tue, 13 Oct 2020 01:00:00 +0200},
+}
+
+@article{xu2023demystifying,
+ url = {http://arxiv.org/abs/2309.16671v4},
+ year = {2023},
+ month = sep,
+ title = {Demystifying CLIP Data},
+ author = {Xu, Hu and Xie, Saining and Tan, Xiaoqing Ellen and Huang, Po-Yao and Howes, Russell and Sharma, Vasu and Li, Shang-Wen and Ghosh, Gargi and Zettlemoyer, Luke and Feichtenhofer, Christoph},
+ primaryclass = {cs.CV},
+ archiveprefix = {arXiv},
+ journal = {ArXiv preprint},
+ volume = {abs/2309.16671},
+}
+
+@article{yik2023neurobench,
+ url = {http://arxiv.org/abs/2304.04640v3},
+ year = {2023},
+ month = apr,
+ title = {NeuroBench: A Framework for Benchmarking Neuromorphic Computing Algorithms and Systems},
+ author = {Yik, Jason and den Berghe, Korneel Van and den Blanken, Douwe and Bouhadjar, Younes and Fabre, Maxime and Hueber, Paul and Kleyko, Denis and Pacik-Nelson, Noah and Sun, Pao-Sheng Vincent and Tang, Guangzhi and Wang, Shenqi and Zhou, Biyan and Ahmed, Soikat Hasan and Joseph, George Vathakkattil and Leto, Benedetto and Micheli, Aurora and Mishra, Anurag Kumar and Lenz, Gregor and Sun, Tao and Ahmed, Zergham and Akl, Mahmoud and Anderson, Brian and Andreou, Andreas G. and Bartolozzi, Chiara and Basu, Arindam and Bogdan, Petrut and Bohte, Sander and Buckley, Sonia and Cauwenberghs, Gert and Chicca, Elisabetta and Corradi, Federico and de Croon, Guido and Danielescu, Andreea and Daram, Anurag and Davies, Mike and Demirag, Yigit and Eshraghian, Jason and Fischer, Tobias and Forest, Jeremy and Fra, Vittorio and Furber, Steve and Furlong, P. Michael and Gilpin, William and Gilra, Aditya and Gonzalez, Hector A. and Indiveri, Giacomo and Joshi, Siddharth and Karia, Vedant and Khacef, Lyes and Knight, James C. and Kriener, Laura and Kubendran, Rajkumar and Kudithipudi, Dhireesha and Liu, Yao-Hong and Liu, Shih-Chii and Ma, Haoyuan and Manohar, Rajit and Margarit-Taul\'e, Josep Maria and Mayr, Christian and Michmizos, Konstantinos and Muir, Dylan and Neftci, Emre and Nowotny, Thomas and Ottati, Fabrizio and Ozcelikkale, Ayca and Panda, Priyadarshini and Park, Jongkil and Payvand, Melika and Pehle, Christian and Petrovici, Mihai A. and Pierro, Alessandro and Posch, Christoph and Renner, Alpha and Sandamirskaya, Yulia and Schaefer, Clemens JS and van Schaik, Andr\'e and Schemmel, Johannes and Schmidgall, Samuel and Schuman, Catherine and Seo, Jae-sun and Sheik, Sadique and Shrestha, Sumit Bam and Sifalakis, Manolis and Sironi, Amos and Stewart, Matthew and Stewart, Kenneth and Stewart, Terrence C. and Stratmann, Philipp and Timcheck, Jonathan and T\"omen, Nergis and Urgese, Gianvito and Verhelst, Marian and Vineyard, Craig M. and Vogginger, Bernhard and Yousefzadeh, Amirreza and Zohora, Fatima Tuz and Frenkel, Charlotte and Reddi, Vijay Janapa},
+ primaryclass = {cs.AI},
+ archiveprefix = {arXiv},
+ eprint = {2304.04640},
+}
+
+@article{tschand2024mlperf,
+ url = {http://arxiv.org/abs/2410.12032v1},
+ year = {2024},
+ month = oct,
+ title = {MLPerf Power: Benchmarking the Energy Efficiency of Machine Learning Systems from Microwatts to Megawatts for Sustainable AI},
+ author = {Tschand, Arya and Rajan, Arun Tejusve Raghunath and Idgunji, Sachin and Ghosh, Anirban and Holleman, Jeremy and Kiraly, Csaba and Ambalkar, Pawan and Borkar, Ritika and Chukka, Ramesh and Cockrell, Trevor and Curtis, Oliver and Fursin, Grigori and Hodak, Miro and Kassa, Hiwot and Lokhmotov, Anton and Miskovic, Dejan and Pan, Yuechao and Manmathan, Manu Prasad and Raymond, Liz and John, Tom St. and Suresh, Arjun and Taubitz, Rowan and Zhan, Sean and Wasson, Scott and Kanter, David and Reddi, Vijay Janapa},
+ primaryclass = {cs.AR},
+ archiveprefix = {arXiv},
+ journal = {ArXiv preprint},
+ volume = {abs/2410.12032},
+}
\ No newline at end of file
diff --git a/contents/benchmarking/benchmarking.qmd b/contents/core/benchmarking/benchmarking.qmd
similarity index 75%
rename from contents/benchmarking/benchmarking.qmd
rename to contents/core/benchmarking/benchmarking.qmd
index febebceb..737d988f 100644
--- a/contents/benchmarking/benchmarking.qmd
+++ b/contents/core/benchmarking/benchmarking.qmd
@@ -5,7 +5,7 @@ bibliography: benchmarking.bib
# Benchmarking AI {#sec-benchmarking_ai}
::: {.content-visible when-format="html"}
-Resources: [Slides](#sec-benchmarking-ai-resource), [Videos](#sec-benchmarking-ai-resource), [Exercises](#sec-benchmarking-ai-resource), [Labs](#sec-benchmarking-ai-resource)
+Resources: [Slides](#sec-benchmarking-ai-resource), [Videos](#sec-benchmarking-ai-resource), [Exercises](#sec-benchmarking-ai-resource)
:::
![_DALL·E 3 Prompt: Photo of a podium set against a tech-themed backdrop. On each tier of the podium, there are AI chips with intricate designs. The top chip has a gold medal hanging from it, the second one has a silver medal, and the third has a bronze medal. Banners with 'AI Olympics' are displayed prominently in the background._](images/png/cover_ai_benchmarking.png)
@@ -20,7 +20,7 @@ This chapter will provide an overview of popular ML benchmarks, best practices f
* Understand the purpose and goals of benchmarking AI systems, including performance assessment, resource evaluation, validation, and more.
-* Learn about key model benchmarks, metrics, and trends, including accuracy, fairness, complexity, and efficiency.
+* Learn about key model benchmarks, metrics, and trends, including accuracy, fairness, complexity, performance, and energy efficiency.
* Become familiar with the key components of an AI benchmark, including datasets, tasks, metrics, baselines, reproducibility rules, and more.
@@ -36,7 +36,7 @@ This chapter will provide an overview of popular ML benchmarks, best practices f
:::
-## Introduction {#sec-benchmarking-ai}
+## Introduction
Benchmarking provides the essential measurements needed to drive machine learning progress and truly understand system performance. As the physicist Lord Kelvin famously said, "To measure is to know." Benchmarks allow us to quantitatively know the capabilities of different models, software, and hardware. They allow ML developers to measure the inference time, memory usage, power consumption, and other metrics that characterize a system. Moreover, benchmarks create standardized processes for measurement, enabling fair comparisons across different solutions.
@@ -46,6 +46,8 @@ Benchmarking has several important goals and objectives that guide its implement
* **Performance assessment.** This involves evaluating key metrics like a given model's speed, accuracy, and efficiency. For instance, in a TinyML context, it is crucial to benchmark how quickly a voice assistant can recognize commands, as this evaluates real-time performance.
+* **Power assessment.** Measuring the power a workload draws alongside its performance yields its energy efficiency. As the environmental impact of ML computing continues to grow, benchmarking energy enables us to better optimize our systems for sustainability.
+
* **Resource evaluation.** This means assessing the model's impact on critical system resources, including battery life, memory usage, and computational overhead. A relevant example is comparing the battery drain of two different image recognition algorithms running on a wearable device.
* **Validation and verification.** Benchmarking helps ensure the system functions correctly and meets specified requirements. One way is by checking the accuracy of an algorithm, like a heart rate monitor on a smartwatch, against readings from medical-grade equipment as a form of clinical validation.
@@ -60,7 +62,7 @@ This chapter will cover the 3 types of AI benchmarks, the standard metrics, tool
## Historical Context
-### Standard Benchmarks
+### Performance Benchmarks
The evolution of benchmarks in computing vividly illustrates the industry's relentless pursuit of excellence and innovation. In the early days of computing during the 1960s and 1970s, benchmarks were rudimentary and designed for mainframe computers. For example, the [Whetstone benchmark](https://en.wikipedia.org/wiki/Whetstone_(benchmark)), named after the Whetstone ALGOL compiler, was one of the first standardized tests to measure the floating-point arithmetic performance of a CPU. These pioneering benchmarks prompted manufacturers to refine their architectures and algorithms to achieve better benchmark scores.
@@ -70,7 +72,25 @@ The 1990s brought the era of graphics-intensive applications and video games. Th
The 2000s saw a surge in mobile phones and portable devices like tablets. With portability came the challenge of balancing performance and power consumption. Benchmarks like [MobileMark](https://bapco.com/products/mobilemark-2014/) by BAPCo evaluated speed and battery life. This drove companies to develop more energy-efficient System-on-Chips (SOCs), leading to the emergence of architectures like ARM that prioritized power efficiency.
-The focus of the recent decade has shifted towards cloud computing, big data, and artificial intelligence. Cloud service providers like Amazon Web Services and Google Cloud compete on performance, scalability, and cost-effectiveness. Tailored cloud benchmarks like [CloudSuite](http://cloudsuite.ch/) have become essential, driving providers to optimize their infrastructure for better services.
+The focus of the recent decade has shifted towards cloud computing, big data, and artificial intelligence. Cloud service providers like Amazon Web Services and Google Cloud compete on performance, scalability, and cost-effectiveness. Tailored cloud benchmarks like [CloudSuite](http://cloudsuite.ch/) have become essential, driving providers to optimize their infrastructure for better services.
+
+### Energy Benchmarks
+
+Energy consumption and environmental concerns have gained prominence in recent years, making power (more precisely, energy) benchmarking increasingly important in the industry. This shift began in the mid-2000s, when processors started hitting cooling limits and the growth of internet-scale services made efficient scaling a crucial aspect of building large systems. Since then, energy considerations have expanded to encompass all areas of computing, from personal devices to large-scale data centers.
+
+Power benchmarking aims to measure the energy efficiency of computing systems, evaluating performance in relation to power consumption. This is crucial for several reasons:
+
+* **Environmental impact:** With the growing carbon footprint of the tech industry, there's a pressing need to reduce energy consumption.
+* **Operational costs:** Energy expenses constitute a significant portion of data center operating costs.
+* **Device longevity:** For mobile devices, power efficiency directly impacts battery life and user experience.
+
+Several key benchmarks have emerged in this space:
+
+* **SPEC Power:** Introduced in 2007, [SPEC Power](https://www.spec.org/power/) was one of the first industry-standard benchmarks for evaluating the power and performance characteristics of computer servers.
+* **Green500:** The [Green500](https://top500.org/lists/green500/) list ranks supercomputers by energy efficiency, complementing the performance-focused TOP500 list.
+* **Energy Star:** While not a benchmark per se, the [ENERGY STAR for Computers](https://www.energystar.gov/products/computers) certification program has driven manufacturers to improve the energy efficiency of consumer electronics.
+
+Power benchmarking faces unique challenges, such as accounting for different workloads and system configurations, and measuring power consumption accurately across hardware that ranges from microwatts to megawatts. As AI and edge computing continue to grow, power benchmarking is likely to become even more critical, driving the development of specialized energy-efficient AI hardware and software optimizations.
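+
+To make the arithmetic concrete, below is a minimal sketch of how a workload's energy efficiency might be estimated, assuming a hypothetical `read_power_watts()` helper that samples an external power meter. Production benchmarks such as SPEC Power or MLPerf Power rely on calibrated meters and strict run rules rather than a simple loop like this.
+
+```python
+import time
+
+def read_power_watts() -> float:
+    # Placeholder: swap in a calibrated power-meter or telemetry reading.
+    return 42.0  # hypothetical constant reading, in watts
+
+def benchmark_energy(run_workload, num_runs=10):
+    power_samples = []
+    start = time.perf_counter()
+    for _ in range(num_runs):
+        run_workload()
+        power_samples.append(read_power_watts())
+    elapsed_s = time.perf_counter() - start
+
+    avg_power_w = sum(power_samples) / len(power_samples)
+    energy_j = avg_power_w * elapsed_s  # energy = average power x time
+    return {
+        "avg_power_w": avg_power_w,
+        "energy_per_run_j": energy_j / num_runs,
+        "runs_per_joule": num_runs / energy_j,
+    }
+
+# Example: measure a dummy compute-bound workload.
+print(benchmark_energy(lambda: sum(i * i for i in range(100_000))))
+```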
### Custom Benchmarks
@@ -88,7 +108,7 @@ A key prerogative for any benchmark to be impactful is that it must reflect the
Furthermore, benchmarks published with broad co-authorship from respected institutions carry authority and validity that convinces the community to adopt them as trusted standards. Benchmarks perceived as biased by particular corporate or institutional interests breed skepticism. Ongoing community engagement through workshops and challenges is also key after the initial release, and that is what, for instance, led to the success of ImageNet. As research progresses, collective participation enables continual refinement and expansion of benchmarks over time.
-Finally, community-developed benchmarks released with open access accelerate adoption and consistent implementation. We shared open-source code, documentation, models, and infrastructure to lower barriers for groups to benchmark solutions on an equal footing using standardized implementations. This consistency is critical for fair comparisons. Without coordination, labs and companies may implement benchmarks differently, reducing result reproducibility.
+Finally, releasing community-developed benchmarks with open access promotes their adoption and consistent use. By providing open-source code, documentation, models, and infrastructure, we reduce barriers to entry, enabling groups to benchmark solutions on an equal footing with standardized implementations. This consistency is essential for fair comparisons. Without coordination, labs and companies might implement benchmarks differently, which can undermine reproducibility and comparability of results.
Community consensus brings benchmarks lasting relevance, while fragmentation confuses. Through collaborative development and transparent operation, benchmarks can become authoritative standards for tracking progress. Several of the benchmarks that we discuss in this chapter were developed and built by the community, for the community, and that is what ultimately led to their success.
@@ -106,7 +126,7 @@ The architecture, size, and complexity of AI models vary widely. Different model
### Data Benchmarks
-AI, particularly machine learning, is inherently data-driven. The quality, size, and diversity of data influence AI models' training efficacy and generalization capability. Data benchmarks focus on the datasets used in AI training and evaluation. They provide standardized datasets the community can use to train and test models, ensuring a level playing field for comparisons. Moreover, these benchmarks highlight data quality, diversity, and representation challenges, pushing the community to address biases and gaps in AI training data. By understanding data benchmarks, researchers can also gauge how models might perform in real-world scenarios, ensuring robustness and reliability.
+In machine learning, data is foundational because the quality, scale, and diversity of datasets directly impact model efficacy and generalization. Data benchmarks focus on the datasets used in training and evaluation. They provide standardized datasets the community can use to train and test models, ensuring a level playing field for comparisons. Moreover, these benchmarks highlight data quality, diversity, and representation challenges, pushing the community to address biases and gaps in training data. By understanding data benchmarks, researchers can also gauge how models might perform in real-world scenarios, ensuring robustness and reliability.
In the remainder of the sections, we will discuss each of these benchmark types. The focus will be an in-depth exploration of system benchmarks, as these are critical to understanding and advancing machine learning system performance. We will briefly cover model and data benchmarks for a comprehensive perspective, but the emphasis and majority of the content will be devoted to system benchmarks.
@@ -123,7 +143,7 @@ Machine learning system benchmarking provides a structured and systematic approa
#### Micro Benchmarks
-Micro-benchmarks in AI are specialized, evaluating distinct components or specific operations within a broader machine learning process. These benchmarks zero in on individual tasks, offering insights into the computational demands of a particular neural network layer, the efficiency of a unique optimization technique, or the throughput of a specific activation function. For instance, practitioners might use micro-benchmarks to measure the computational time required by a convolutional layer in a deep learning model or to evaluate the speed of data preprocessing that feeds data into the model. Such granular assessments are instrumental in fine-tuning and optimizing discrete aspects of AI models, ensuring that each component operates at its peak potential.
+Micro-benchmarks are specialized, evaluating distinct components or specific operations within a broader machine learning process. These benchmarks focus on individual tasks, offering insights into the computational demands of a particular neural network layer, the efficiency of a unique optimization technique, or the throughput of a specific activation function. For instance, practitioners might use micro-benchmarks to measure the computational time required by a convolutional layer in a deep learning model or to evaluate the speed of data preprocessing that feeds data into the model. Such granular assessments are instrumental in fine-tuning and optimizing discrete aspects of models, ensuring that each component operates at its peak potential.
These types of microbenchmarks include zooming into very specific operations or components of the AI pipeline, such as the following:
@@ -133,7 +153,7 @@ These types of microbenchmarks include zooming into very specific operations or
* **Layer Benchmarks:** Evaluations of the computational efficiency of distinct neural network layers, such as LSTM or Transformer blocks, when operating on standardized input sizes.
-Example: [DeepBench](https://github.com/baidu-research/DeepBench), introduced by Baidu, is a good example of something that assesses the above. DeepBench assesses the performance of basic operations in deep learning models, providing insights into how different hardware platforms handle neural network training and inference.
+Example: [DeepBench](https://github.com/baidu-research/DeepBench), introduced by Baidu, is a good example of such a benchmark. It evaluates fundamental operations in deep learning models, such as those listed above, providing insights into how different hardware platforms handle neural network training and inference.
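+
+As an illustration, here is a minimal DeepBench-style micro-benchmark sketch that times the forward pass of a single convolutional layer. It assumes PyTorch is available, and the layer shapes are illustrative rather than DeepBench's official configurations.
+
+```python
+import time
+import torch
+
+layer = torch.nn.Conv2d(in_channels=64, out_channels=128,
+                        kernel_size=3, padding=1)
+x = torch.randn(32, 64, 56, 56)  # batch of 32 intermediate feature maps
+
+with torch.no_grad():
+    for _ in range(5):  # warm-up so one-time setup costs don't skew timing
+        layer(x)
+
+    iters = 50
+    start = time.perf_counter()
+    for _ in range(iters):
+        layer(x)
+    elapsed = time.perf_counter() - start
+
+print(f"Mean forward latency: {1000 * elapsed / iters:.2f} ms")
+```
+
+Note that on a GPU the timed region would also need `torch.cuda.synchronize()` calls, since kernel launches are asynchronous and would otherwise return before the work completes.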
:::{#exr-cuda .callout-caution collapse="true"}
@@ -147,7 +167,7 @@ Ever wonder how your image filters get so fast? Special libraries like cuDNN sup
#### Macro Benchmarks
-Macro benchmarks provide a holistic view, assessing the end-to-end performance of entire machine learning models or comprehensive AI systems. Rather than focusing on individual operations, macro-benchmarks evaluate the collective efficacy of models under real-world scenarios or tasks. For example, a macro-benchmark might assess the complete performance of a deep learning model undertaking image classification on a dataset like [ImageNet](https://www.image-net.org/). This includes gauging accuracy, computational speed, and resource consumption. Similarly, one might measure the cumulative time and resources needed to train a natural language processing model on extensive text corpora or evaluate the performance of an entire recommendation system, from data ingestion to final user-specific outputs.
+Macro benchmarks provide a holistic view, assessing the end-to-end performance of entire machine learning models or comprehensive ML systems. Rather than focusing on individual operations, macro-benchmarks evaluate the collective efficacy of models under real-world scenarios or tasks. For example, a macro-benchmark might assess the complete performance of a deep learning model undertaking image classification on a dataset like [ImageNet](https://www.image-net.org/). This includes gauging accuracy, computational speed, and resource consumption. Similarly, one might measure the cumulative time and resources needed to train a natural language processing model on extensive text corpora or evaluate the performance of an entire recommendation system, from data ingestion to final user-specific outputs.
Examples: These benchmarks evaluate the AI model:
@@ -159,7 +179,7 @@ Examples: These benchmarks evaluate the AI model:
#### End-to-end Benchmarks
-End-to-end benchmarks provide an all-inclusive evaluation that extends beyond the boundaries of the AI model itself. Instead of focusing solely on a machine learning model's computational efficiency or accuracy, these benchmarks encompass the entire pipeline of an AI system. This includes initial data preprocessing, the core model's performance, post-processing of the model's outputs, and other integral components like storage and network interactions.
+End-to-end benchmarks provide an all-inclusive evaluation that extends beyond the boundaries of the ML model itself. Instead of focusing solely on a machine learning model's computational efficiency or accuracy, these benchmarks encompass the entire pipeline of an AI system. This includes initial data preprocessing, the core model's performance, post-processing of the model's outputs, and other integral components like storage and network interactions.
Data preprocessing is the first stage in many AI systems, transforming raw data into a format suitable for model training or inference. These preprocessing steps' efficiency, scalability, and accuracy are vital for the overall system's performance. End-to-end benchmarks assess this phase, ensuring that data cleaning, normalization, augmentation, or any other transformation process doesn't become a bottleneck.
@@ -197,7 +217,7 @@ Example: Tasks for natural language processing benchmarks might include sentimen
#### Evaluation Metrics
-Once a task is defined, benchmarks require metrics to quantify performance. These metrics offer objective measures to compare different models or systems. In classification tasks, metrics like accuracy, precision, recall, and [F1 score](https://en.wikipedia.org/wiki/F-score) are commonly used. Mean squared or absolute errors might be employed for regression tasks.
+Once a task is defined, benchmarks require metrics to quantify performance. These metrics offer objective measures to compare different models or systems. In classification tasks, metrics like accuracy, precision, recall, and [F1 score](https://en.wikipedia.org/wiki/F-score) are commonly used. Mean squared or absolute errors might be employed for regression tasks. We can also measure the power consumed by the benchmark execution to calculate energy efficiency.
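+
+For instance, a benchmark harness might compute these classification metrics from a run's predictions as in the short sketch below, assuming scikit-learn is available; the labels and predictions are illustrative.
+
+```python
+from sklearn.metrics import accuracy_score, precision_recall_fscore_support
+
+y_true = [1, 0, 1, 1, 0, 1, 0, 0]  # ground-truth labels
+y_pred = [1, 0, 1, 0, 0, 1, 1, 0]  # model predictions
+
+accuracy = accuracy_score(y_true, y_pred)
+precision, recall, f1, _ = precision_recall_fscore_support(
+    y_true, y_pred, average="binary")
+print(f"accuracy={accuracy:.2f} precision={precision:.2f} "
+      f"recall={recall:.2f} f1={f1:.2f}")
+```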
#### Baselines and Baseline Models
@@ -234,28 +254,19 @@ Beyond raw scores or metrics, benchmarks often provide guidelines or context to
Example: A benchmark might highlight that while Model A scored higher than Model B in accuracy, it offers better real-time performance, making it more suitable for time-sensitive applications.
-### Training vs. Inference
-
-The development life cycle of a machine learning model involves two critical phases - training and inference. [Training](../training/training.qmd), as you may recall, is the process of learning patterns from data to create the model. Inference refers to the model making predictions on new unlabeled data. Both phases play indispensable yet distinct roles. Consequently, each phase warrants rigorous benchmarking to evaluate performance metrics like speed, accuracy, and computational efficiency.
-
-Benchmarking the training phase provides insights into how different model architectures, hyperparameter values, and optimization algorithms impact the time and resources needed to train the model. For instance, benchmarking shows how neural network depth affects training time on a given dataset. Benchmarking also reveals how hardware accelerators like GPUs and TPUs can speed up training.
-
-On the other hand, benchmarking inference evaluates model performance in real-world conditions after deployment. Key metrics include latency, throughput, memory footprint, and power consumption. This type of benchmarking determines if a model meets the requirements of its target application regarding response time and device constraints. However, we will discuss these broadly to ensure a general understanding.
-
-
### Training Benchmarks
-Training represents the phase where the system processes and ingests raw data to adjust and refine its parameters. Therefore, it is an algorithmic activity and involves system-level considerations, including data pipelines, storage, computing resources, and orchestration mechanisms. The goal is to ensure that the ML system can efficiently learn from data, optimizing both the model's performance and the system's resource utilization.
+The development life cycle of a machine learning model involves two critical phases - training and inference. Training represents the phase where the system processes and ingests raw data to adjust and refine its parameters. Benchmarking the training phase reveals how choices in data pipelines, storage solutions, model architectures, computing resources, hyperparameter settings, and optimization algorithms affect the efficiency and resource demands of model training. The goal is to ensure that the ML system can efficiently learn from data, optimizing both the model's performance and the system's resource utilization.
#### Purpose
-From an ML systems perspective, training benchmarks evaluate how well the system scales with increasing data volumes and computational demands. It's about understanding the interplay between hardware, software, and the data pipeline in the training process.
+From a systems perspective, training machine learning models is resource-intensive, especially when working with large models. These models often contain billions or even trillions of trainable parameters and require enormous amounts of data, often on the scale of many terabytes. For example, [OpenAI's GPT-3](https://arxiv.org/abs/2005.14165) [@brown2020language] has 175 billion parameters, was trained on 45 TB of compressed plaintext data, and required 3,640 petaflop-days of compute for pretraining. ML training benchmarks evaluate the systems and resources required to manage the computational load of training such models.
-Consider a distributed ML system designed to train on vast datasets, like those used in large-scale e-commerce product recommendations. A training benchmark would assess how efficiently the system scales across multiple nodes, manage data sharding and handle failures or node drop-offs during training.
+Efficient data storage and delivery also play a major role in the training process. For instance, in a machine learning model that predicts bounding boxes around objects in an image, thousands of images may be required. However, loading an entire image dataset into memory is typically infeasible, so practitioners rely on data loaders (as discussed in @sec-frameworks-data-loaders) from ML frameworks. Successful model training depends on timely and efficient data delivery, making it essential to benchmark tools like data loaders, data pipelines, preprocessing speed, and storage retrieval times to understand their impact on training performance.
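+
+A minimal sketch of such a measurement, assuming PyTorch and a synthetic in-memory dataset standing in for real storage, might look like the following:
+
+```python
+import time
+import torch
+from torch.utils.data import DataLoader, TensorDataset
+
+# Synthetic stand-in for an image dataset that would normally live on disk.
+dataset = TensorDataset(torch.randn(2_000, 3, 64, 64),
+                        torch.randint(0, 10, (2_000,)))
+loader = DataLoader(dataset, batch_size=64, num_workers=4)
+
+start = time.perf_counter()
+num_samples = 0
+for images, labels in loader:
+    num_samples += images.size(0)  # consume batches without training
+elapsed = time.perf_counter() - start
+
+print(f"Data delivery throughput: {num_samples / elapsed:.0f} samples/s")
+```
+
+Sweeping parameters such as `num_workers`, the batch size, or the storage backend reveals whether data delivery, rather than compute, is the bottleneck in training.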
-Training benchmarks evaluate CPU, GPU, memory, and network utilization during the training phase, guiding system optimizations. When training a model in a cloud-based ML system, it's crucial to understand how resources are being utilized. Are GPUs being fully leveraged? Is there unnecessary memory overhead? Benchmarks can highlight bottlenecks or inefficiencies in resource utilization, leading to cost savings and performance improvements.
+Hardware selection is another key factor in training machine learning systems, as it can significantly impact training time. Training benchmarks evaluate CPU, GPU, memory, and network utilization during the training phase to guide system optimizations. Understanding how resources are used is essential: Are GPUs being fully leveraged? Is there unnecessary memory overhead? Benchmarks can uncover bottlenecks or inefficiencies in resource utilization, leading to cost savings and performance improvements.
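+
+One way to answer such questions is to sample hardware telemetry while training runs. The sketch below polls GPU utilization and memory usage, assuming an NVIDIA GPU and the `pynvml` bindings from the nvidia-ml-py package.
+
+```python
+import time
+import pynvml  # NVIDIA management library bindings (nvidia-ml-py)
+
+pynvml.nvmlInit()
+handle = pynvml.nvmlDeviceGetHandleByIndex(0)  # first GPU
+
+# Sample utilization once per second while a training job runs elsewhere.
+for _ in range(10):
+    util = pynvml.nvmlDeviceGetUtilizationRates(handle)
+    mem = pynvml.nvmlDeviceGetMemoryInfo(handle)
+    print(f"GPU util: {util.gpu}%  memory used: {mem.used / 1e9:.1f} GB")
+    time.sleep(1)
+
+pynvml.nvmlShutdown()
+```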
-Training an ML model is contingent on timely and efficient data delivery. Benchmarks in this context would also assess the efficiency of data pipelines, data preprocessing speed, and storage retrieval times. For real-time analytics systems, like those used in fraud detection, the speed at which training data is ingested, preprocessed, and fed into the model can be critical. Benchmarks would evaluate the latency of data pipelines, the efficiency of storage systems (like SSDs vs. HDDs), and the speed of data augmentation or transformation tasks.
+In many cases, using a single hardware accelerator, such as a single GPU, is insufficient to meet the computational demands of large-scale model training. Machine learning models are often trained in data centers with multiple GPUs or TPUs, where distributed computing enables parallel processing across nodes. Training benchmarks assess how efficiently the system scales across multiple nodes, manages data sharding, and handles challenges like node failures or drop-offs during training.
#### Metrics
@@ -265,13 +276,13 @@ The following metrics are often considered important:
1. **Training Time:** The time it takes to train a model from scratch until it reaches a satisfactory performance level. It directly measures the computational resources required to train a model. For example, [Google's BERT](https://arxiv.org/abs/1810.04805) [@devlin2018bert] is a natural language processing model that requires several days to train on a massive corpus of text data using multiple GPUs. The long training time is a significant resource consumption and cost challenge. In some cases, benchmarks can instead measure the training throughput (training samples per unit of time). Throughput can be calculated much faster and easier than training time but may obscure the metrics we really care about (e.g. time to train).
-2. **Scalability:** How well the training process can handle increases in data size or model complexity. Scalability can be assessed by measuring training time, memory usage, and other resource consumption as data size or model complexity increases. [OpenAI's GPT-3](https://arxiv.org/abs/2005.14165) [@brown2020language] model has 175 billion parameters, making it one of the largest language models in existence. Training GPT-3 required extensive engineering efforts to scale the training process to handle the massive model size. This involved using specialized hardware, distributed training, and other techniques to ensure the model could be trained efficiently.
+2. **Scalability:** How well the training process can handle increases in data size or model complexity. Scalability can be assessed by measuring training time, memory usage, and other resource consumption as data size or model complexity increases. For instance, training OpenAI's GPT-3 required extensive engineering efforts to scale the training process across many GPU nodes to handle the massive model size. This involved using specialized hardware, distributed training, and other techniques to ensure the model could be trained efficiently.
3. **Resource Utilization:** The extent to which the training process utilizes available computational resources such as CPU, GPU, memory, and disk I/O. High resource utilization can indicate an efficient training process, while low utilization can suggest bottlenecks or inefficiencies. For instance, training a convolutional neural network (CNN) for image classification requires significant GPU resources. Utilizing multi-GPU setups and optimizing the training code for GPU acceleration can greatly improve resource utilization and training efficiency.
4. **Memory Consumption:** The amount of memory the training process uses. Memory consumption can be a limiting factor for training large models or datasets. For example, Google researchers faced significant memory consumption challenges when training BERT. The model has hundreds of millions of parameters, requiring large amounts of memory. The researchers had to develop techniques to reduce memory consumption, such as gradient checkpointing and model parallelism.
-5. **Energy Consumption:** The energy consumed during training. As machine learning models become more complex, energy consumption has become an important consideration. Training large machine learning models can consume significant energy, leading to a large carbon footprint. For instance, the training of OpenAI's GPT-3 was estimated to have a carbon footprint equivalent to traveling by car for 700,000 kilometers.
+5. **Energy Consumption:** The energy consumed during training. As machine learning models become more complex, energy consumption has become an important consideration. Training large machine learning models can consume significant energy, leading to a large carbon footprint. For instance, the training of OpenAI's GPT-3 was estimated to have a carbon footprint equivalent to traveling by car for 700,000 kilometers (~435,000 miles).
6. **Throughput:** The number of training samples processed per unit time. Higher throughput generally indicates a more efficient training process. The throughput is an important metric to consider when training a recommendation system for an e-commerce platform. A high throughput ensures that the model can process large volumes of user interaction data promptly, which is crucial for maintaining the relevance and accuracy of the recommendations. But it's also important to understand how to balance throughput with latency bounds. Therefore, a latency-bounded throughput constraint is often imposed on service-level agreements for data center application deployments.
@@ -285,27 +296,13 @@ The following metrics are often considered important:
By benchmarking for these types of metrics, we can obtain a comprehensive view of the training process's performance and efficiency from a systems perspective. This can help identify areas for improvement and ensure that resources are used effectively.
-#### Tasks
-
-Selecting a handful of representative tasks for benchmarking machine learning systems is challenging because machine learning is applied to various domains with unique characteristics and requirements. Here are some of the challenges faced in selecting representative tasks:
-
-1. **Diversity of Applications:** Machine learning is used in numerous fields such as healthcare, finance, natural language processing, computer vision, and many more. Each field has specific tasks that may not be representative of other fields. For example, image classification tasks in computer vision may not be relevant to financial fraud detection.
-2. **Variability in Data Types and Quality:** Different tasks require different data types, such as text, images, videos, or numerical data. Data quality and availability can vary greatly between tasks, making it difficult to select tasks that are representative of the general challenges faced in machine learning.
-3. **Task Complexity and Difficulty:** The complexity of tasks varies greatly. Some are relatively straightforward, while others are highly complex and require sophisticated models and techniques. Selecting representative tasks that cover the complexities encountered in machine learning is challenging.
-4. **Ethical and Privacy Concerns:** Some tasks may involve sensitive or private data, such as medical records or personal information. These tasks may have ethical and privacy concerns that need to be addressed, making them less suitable as representative tasks for benchmarking.
-5. **Scalability and Resource Requirements:** Different tasks may have different scalability and resource requirements. Some tasks may require extensive computational resources, while others can be performed with minimal resources. Selecting tasks that represent the general resource requirements in machine learning is difficult.
-6. **Evaluation Metrics:** The metrics used to evaluate the performance of machine learning models vary between tasks. Some tasks may have well-established evaluation metrics, while others lack clear or standardized metrics. This can make it challenging to compare performance across different tasks.
-7. **Generalizability of Results:** The results obtained from benchmarking on a specific task may not be generalizable to other tasks. This means that a machine learning system's performance on a selected task may not be indicative of its performance on other tasks.
-
-It is important to carefully consider these factors when designing benchmarks to ensure they are meaningful and relevant to the diverse range of tasks encountered in machine learning.
-
#### Benchmarks
Here are some original works that laid the fundamental groundwork for developing systematic benchmarks for training machine learning systems.
-*[MLPerf Training Benchmark](https://github.com/mlcommons/training)*
+**[MLPerf Training Benchmark](https://github.com/mlcommons/training)**: MLPerf is a suite of benchmarks designed to measure the performance of machine learning hardware, software, and services. The MLPerf Training benchmark [@mattson2020mlperf] focuses on the time it takes to train models to a target quality metric. It includes diverse workloads, such as image classification, object detection, translation, and reinforcement learning. @fig-perf-trend highlights the performance improvements in progressive versions of MLPerf Training benchmarks, which have all outpaced Moore's Law. Standardized benchmarks like these make it possible to rigorously track and showcase the rapid evolution of ML computing.
-MLPerf is a suite of benchmarks designed to measure the performance of machine learning hardware, software, and services. The MLPerf Training benchmark [@mattson2020mlperf] focuses on the time it takes to train models to a target quality metric. It includes diverse workloads, such as image classification, object detection, translation, and reinforcement learning.
+![MLPerf Training performance trends. Source: @mattson2020mlperf.](images/png/mlperf_perf_trend.png){#fig-perf-trend}
Metrics:
@@ -313,9 +310,7 @@ Metrics:
* Throughput (examples per second)
* Resource utilization (CPU, GPU, memory, disk I/O)
-*[DAWNBench](https://dawn.cs.stanford.edu/benchmark/)*
-
-DAWNBench [@coleman2017dawnbench] is a benchmark suite focusing on end-to-end deep learning training time and inference performance. It includes common tasks such as image classification and question answering.
+**[DAWNBench](https://dawn.cs.stanford.edu/benchmark/)**: DAWNBench [@coleman2017dawnbench] is a benchmark suite focusing on end-to-end deep learning training time and inference performance. It includes common tasks such as image classification and question answering.
Metrics:
@@ -323,9 +318,7 @@ Metrics:
* Inference latency
* Cost (in terms of cloud computing and storage resources)
-*[Fathom](https://github.com/rdadolf/fathom)*
-
-Fathom [@adolf2016fathom] is a benchmark from Harvard University that evaluates the performance of deep learning models using a diverse set of workloads. These include common tasks such as image classification, speech recognition, and language modeling.
+**[Fathom](https://github.com/rdadolf/fathom)**: Fathom [@adolf2016fathom] is a benchmark from Harvard University that evaluates the performance of deep learning models using a diverse set of workloads. These include common tasks such as image classification, speech recognition, and language modeling.
Metrics:
@@ -335,17 +328,18 @@ Metrics:
#### Example Use Case
-Consider a scenario where we want to benchmark the training of an image classification model on a specific hardware platform.
+Imagine you have been tasked with benchmarking the training performance of an image classification model on a specific hardware platform. Let’s break down how you might approach this:
+
+1. **Define the Task**: First, choose a model and dataset. In this case, you’ll be training a convolutional neural network (CNN) to classify images in the [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset, a widely used benchmark in computer vision.
-1. **Task:** The task is to train a convolutional neural network (CNN) for image classification on the CIFAR-10 dataset.
-2. **Benchmark:** We can use the MLPerf Training benchmark for this task. It includes an image classification workload that is relevant to our task.
-3. **Metrics:** We will measure the following metrics:
+2. **Select the Benchmark**: Choosing a widely accepted benchmark helps ensure your setup is comparable with other real-world evaluations. Here, the MLPerf Training benchmark is a natural fit: it provides a structured image classification workload, and its industry-standard metrics make your results meaningful and comparable to those achieved on other hardware platforms.
-* Training time to reach a target accuracy of 90%.
-* Throughput in terms of images processed per second.
-* GPU and CPU utilization during training.
+3. **Identify Key Metrics**: Now, decide on the metrics that will help you evaluate the system’s training performance. For this example, you might track:
+ - **Training Time**: How long does it take to reach 90% accuracy?
+ - **Throughput**: How many images are processed per second?
+ - **Resource Utilization**: What’s the GPU and CPU usage throughout training?
-By measuring these metrics, we can assess the performance and efficiency of the training process on the selected hardware platform. This information can then be used to identify potential bottlenecks or areas for improvement.
+By analyzing these metrics, you’ll gain insight into the model's training performance on your chosen hardware platform. Consider whether the training time meets your expectations and whether there are bottlenecks, such as underutilized GPUs or slow data loading. This process helps identify areas for potential optimization, like improving data handling or adjusting resource allocation, and can guide future benchmarking decisions.
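
To make this concrete, below is a minimal sketch of such a measurement harness in PyTorch. It is illustrative rather than an MLPerf-compliant implementation: the small `nn.Sequential` CNN is a placeholder that may never reach the 90% target, and in practice you would substitute the benchmark's reference model and rules.

```python
import time
import torch
import torch.nn as nn
import torchvision
from torchvision import transforms

# Illustrative harness: time-to-accuracy and throughput on CIFAR-10.
# Not MLPerf-compliant; the model below is a placeholder, not the
# benchmark's reference implementation.
device = "cuda" if torch.cuda.is_available() else "cpu"

train_set = torchvision.datasets.CIFAR10("data", train=True, download=True,
                                         transform=transforms.ToTensor())
test_set = torchvision.datasets.CIFAR10("data", train=False, download=True,
                                        transform=transforms.ToTensor())
train_loader = torch.utils.data.DataLoader(train_set, batch_size=128, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=256)

model = nn.Sequential(  # placeholder CNN
    nn.Conv2d(3, 32, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
    nn.Conv2d(32, 64, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
    nn.Flatten(), nn.Linear(64 * 8 * 8, 10),
).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
loss_fn = nn.CrossEntropyLoss()

TARGET_ACC = 0.90           # the target quality metric
start = time.perf_counter()
images_seen = 0
for epoch in range(100):
    model.train()
    for x, y in train_loader:
        x, y = x.to(device), y.to(device)
        optimizer.zero_grad()
        loss_fn(model(x), y).backward()
        optimizer.step()
        images_seen += x.size(0)
    model.eval()            # check the quality target after each epoch
    correct = 0
    with torch.no_grad():
        for x, y in test_loader:
            correct += (model(x.to(device)).argmax(1) == y.to(device)).sum().item()
    acc = correct / len(test_set)
    elapsed = time.perf_counter() - start
    # Throughput is approximate here, since elapsed time includes evaluation.
    print(f"epoch {epoch}: acc={acc:.3f}, "
          f"throughput={images_seen / elapsed:.0f} img/s, elapsed={elapsed:.0f}s")
    if acc >= TARGET_ACC:   # training time = time to reach the target
        break
```

Resource utilization, the third metric, is typically sampled out-of-band while this loop runs, for example with `nvidia-smi` for GPU usage or a system monitor for CPU and memory.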
### Inference Benchmarks
@@ -375,26 +369,6 @@ Finally, it is vital to ensure that the model's predictions are not only accurat
6. **Memory Usage:** Memory usage quantifies the volume of RAM a machine learning model needs to carry out inference tasks. For example, if a CNN-based face recognition system requires 150 MB of RAM to process and recognize faces within an image, its memory usage is 150 MB (see the sketch below).
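
On the host side, peak memory during an inference call can be approximated with Python's built-in `tracemalloc`. The sketch below uses a hypothetical `run_inference` stand-in for the model; accelerator memory would instead be read from the runtime (e.g., `torch.cuda.max_memory_allocated()` in PyTorch).

```python
import tracemalloc
import numpy as np

def run_inference(image: np.ndarray) -> np.ndarray:
    """Hypothetical stand-in for a CNN-based face recognition model."""
    return image.mean(axis=-1)  # placeholder computation

image = np.random.rand(1080, 1920, 3).astype(np.float32)

# tracemalloc tracks allocations made through Python's allocator
# (including NumPy arrays); it does not see GPU or native-only memory.
tracemalloc.start()
run_inference(image)
_, peak_bytes = tracemalloc.get_traced_memory()
tracemalloc.stop()

print(f"peak host memory during inference: {peak_bytes / 1e6:.1f} MB")
```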
-#### Tasks
-
-The challenges in picking representative tasks for benchmarking inference machine learning systems are, by and large, somewhat similar to the taxonomy we have provided for training. Nevertheless, to be pedantic, let's discuss those in the context of inference machine learning systems.
-
-1. **Diversity of Applications:** Inference machine learning is employed across numerous domains such as healthcare, finance, entertainment, security, and more. Each domain has unique tasks, and what's representative in one domain might not be in another. For example, an inference task for predicting stock prices in the financial domain might differ from image recognition tasks in the medical domain.
-
-2. **Variability in Data Types:** Different inference tasks require different types of data—text, images, videos, numerical data, etc. Ensuring that benchmarks address the wide variety of data types used in real-world applications is challenging. For example, voice recognition systems process audio data, which is vastly different from the visual data processed by facial recognition systems.
-
-3. **Task Complexity:** The complexity of inference tasks can differ immensely, from basic classification tasks to intricate tasks requiring state-of-the-art models. For example, differentiating between two categories (binary classification) is typically simpler than detecting hundreds of object types in a crowded scene.
-
-4. **Real-time Requirements:** Some applications demand immediate or real-time responses, while others may allow for some delay. In autonomous driving, real-time object detection and decision-making are paramount, whereas a recommendation engine for a shopping website might tolerate slight delays.
-
-5. **Scalability Concerns:** Given the varied scale of applications, from edge devices to cloud-based servers, tasks must represent the diverse computational environments where inference occurs. For example, an inference task running on a smartphone's limited resources differs from a powerful cloud server.
-
-6. **Evaluation Metrics Diversity:** The metrics used to evaluate performance can differ significantly depending on the task. Finding a common ground or universally accepted metric for diverse tasks is challenging. For example, precision and recall might be vital for a medical diagnosis task, whereas throughput (inferences per second) might be more crucial for video processing tasks.
-
-7. **Ethical and Privacy Concerns:** Concerns related to ethics and privacy exist, especially in sensitive areas like facial recognition or personal data processing. These concerns can impact the selection and nature of tasks used for benchmarking. For example, using real-world facial data for benchmarking can raise privacy issues, whereas synthetic data might not replicate real-world challenges.
-
-8. **Hardware Diversity:** With a wide range of devices from GPUs, CPUs, and TPUs to custom ASICs used for inference, ensuring that tasks are representative across varied hardware is challenging. For example, a task optimized for inference on a GPU might perform sub-optimally on an edge device.
-
#### Benchmarks
Here are some original works that laid the fundamental groundwork for developing systematic benchmarks for inference machine learning systems.
@@ -434,20 +408,19 @@ Metrics:
#### Example Use Case
-Consider a scenario where we want to evaluate the inference performance of an object detection model on a specific edge device.
+Suppose you were tasked with evaluating the inference performance of an object detection model on a specific edge device. Here’s how you might approach structuring this benchmark:
-Task: The task is to perform real-time object detection on video streams, detecting and identifying objects such as vehicles, pedestrians, and traffic signs.
+1. **Define the Task**: In this case, the task is real-time object detection on video streams, identifying objects such as vehicles, pedestrians, and traffic signs.
-Benchmark: We can use the AI Benchmark for this task as it evaluates inference performance on edge devices, which suits our scenario.
+2. **Select the Benchmark**: To align with your goal of evaluating inference on an edge device, the AI Benchmark is a suitable choice. It provides a standardized framework specifically for assessing inference performance on edge hardware, making it relevant to this scenario.
-Metrics: We will measure the following metrics:
+3. **Identify Key Metrics**: Now, determine the metrics that will help evaluate the model’s inference performance. For this example, you might track:
+ - **Inference Time**: How long does it take to process each video frame?
+ - **Latency**: What is the delay in generating bounding boxes for detected objects?
+ - **Energy Consumption**: How much power is used during inference?
+ - **Throughput**: How many video frames are processed per second?
-* Inference time to process each video frame
-* Latency to generate the bounding boxes for detected objects
-* Energy consumption during the inference process
-* Throughput in terms of video frames processed per second
-
-By measuring these metrics, we can assess the performance of the object detection model on the edge device and identify any potential bottlenecks or areas for optimization to improve real-time processing capabilities.
+By measuring these metrics, you’ll gain insights into how well the object detection model performs on the edge device. This can help identify any bottlenecks, such as slow frame processing or high energy consumption, and highlight areas for potential optimization to improve real-time performance.
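
As a rough illustration of steps 1 and 3, the sketch below times a hypothetical `detect_objects` function over a stream of frames to report latency percentiles and throughput. Energy consumption cannot be read from the code itself; measuring it requires an external power monitor or the platform's telemetry.

```python
import time
import statistics
import numpy as np

def detect_objects(frame: np.ndarray) -> list:
    """Hypothetical stand-in for the object detection model."""
    return []  # placeholder: a real model would return bounding boxes

frames = [np.zeros((480, 640, 3), dtype=np.uint8) for _ in range(200)]

latencies = []
start = time.perf_counter()
for frame in frames:
    t0 = time.perf_counter()
    detect_objects(frame)               # inference on one video frame
    latencies.append(time.perf_counter() - t0)
elapsed = time.perf_counter() - start

print(f"median latency: {statistics.median(latencies) * 1e3:.2f} ms")
print(f"p95 latency:    {float(np.percentile(latencies, 95)) * 1e3:.2f} ms")
print(f"throughput:     {len(frames) / elapsed:.1f} frames/s")
```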
:::{#exr-perf .callout-caution collapse="true"}
@@ -459,6 +432,31 @@ Get ready to put your AI models to the ultimate test! MLPerf is like the Olympic
:::
+
+### Benchmark Task Selection
+
+Selecting representative tasks for benchmarking machine learning systems is complex due to the varied applications, data types, and requirements across different domains. Machine learning is applied in fields such as healthcare, finance, natural language processing, and computer vision, each with unique tasks that may not be relevant or comparable to others. Key challenges in task selection include:
+
+1. **Diversity of Applications and Data Types:** Tasks across domains involve different data types (e.g., text, images, video) and qualities, making it difficult to find benchmarks that universally represent ML challenges.
+2. **Task Complexity and Resource Needs:** Tasks vary in complexity and resource demands, with some requiring substantial computational power and sophisticated models, while others can be addressed with simpler resources and methods.
+3. **Privacy Concerns:** Tasks involving sensitive data, such as medical records or personal information, introduce ethical and privacy issues, making them unsuitable for general benchmarks.
+4. **Evaluation Metrics:** Performance metrics vary widely across tasks, and results from one task often do not generalize to others, complicating comparisons and limiting insights from one benchmarked task to another.
+
+Addressing these challenges is essential to designing meaningful benchmarks that are relevant across the diverse tasks encountered in machine learning, ensuring benchmarks provide useful, generalizable insights for both training and inference.
+
+
+### Measuring Energy Efficiency
+
+As machine learning capabilities expand, both in training and inference, concerns about increased power consumption and its ecological footprint have intensified. Addressing the sustainability of ML systems, a topic explored in more depth in the [Sustainable AI](../sustainable_ai/sustainable_ai.qmd) chapter, has thus become a key priority. This focus on sustainability has led to the development of standardized benchmarks designed to accurately measure energy efficiency. However, standardizing these methodologies poses challenges due to the need to accommodate vastly different scales—from the microwatt consumption of TinyML devices to the megawatt demands of data center training systems. Moreover, ensuring that benchmarking is fair and reproducible requires accommodating the diverse range of hardware configurations and architectures in use today.
+
+One example is the MLPerf Power benchmarking methodology [@tschand2024mlperf], which tackles these challenges by tailoring the methodologies for datacenter, edge inference, and tiny inference systems while measuring power consumption as comprehensively as possible for each scale. This methodology adapts to a variety of hardware, from general-purpose CPUs to specialized AI accelerators, while maintaining uniform measurement principles to ensure that comparisons are both fair and accurate across different platforms.
+
+@fig-power-diagram illustrates the power measurement boundaries for different system scales, from TinyML devices to inference nodes and training racks. Each example highlights the components within the measurement boundary and those outside it. This setup allows for accurate reflection of the true energy costs associated with running ML workloads across various real-world scenarios, and ensures that the benchmark captures the full spectrum of energy consumption.
+
+![MLPerf Power system measurement diagram. Source: @tschand2024mlperf.](images/png/power_component_diagram.png){#fig-power-diagram}
+
+It is important to note that optimizing a system for performance may not lead to the most energy-efficient execution. Often, sacrificing a small amount of performance or accuracy can yield significant gains in energy efficiency, highlighting the importance of accurately benchmarking power metrics. Future insights from energy efficiency and sustainability benchmarking will enable us to optimize for more sustainable ML systems.
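
As a back-of-the-envelope illustration of this trade-off, energy per inference is simply average power divided by throughput. The operating points below are made-up numbers, not measured results:

```python
# Energy per inference (J) = average power (W) / throughput (inferences/s).
def energy_per_inference(avg_power_watts: float, throughput_ips: float) -> float:
    return avg_power_watts / throughput_ips

# Hypothetical operating points for the same accelerator:
fast = energy_per_inference(300.0, 2000.0)    # maximum clocks
capped = energy_per_inference(180.0, 1600.0)  # power-capped

print(f"max performance: {fast * 1e3:.1f} mJ/inference")    # 150.0 mJ
print(f"power-capped:    {capped * 1e3:.1f} mJ/inference")  # 112.5 mJ
# Giving up 20% throughput cuts energy per inference by 25% in this example.
```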
+
### Benchmark Example
To properly illustrate the components of a systems benchmark, we can look at the keyword spotting benchmark in MLPerf Tiny and explain the motivation behind each decision.
@@ -505,15 +503,13 @@ But of all these, the most important challenge is benchmark engineering.
#### Hardware Lottery
-The ["hardware lottery"](https://arxiv.org/abs/2009.06489) in benchmarking machine learning systems refers to the situation where the success or efficiency of a machine learning model is significantly influenced by the compatibility of the model with the underlying hardware [@chu2021discovering]. In other words, some models perform exceptionally well because they are a good fit for the particular characteristics or capabilities of the hardware they are run on rather than because they are intrinsically superior models.
-
-![Hardware Lottery.](images/png/hardware_lottery.png){#fig-hardware-lottery}
+The hardware lottery, first described by @10.1145/3467017, refers to the situation where a machine learning model's success or efficiency is significantly influenced by its compatibility with the underlying hardware [@chu2021discovering]. Some models perform exceptionally well not because they are intrinsically superior, but because they are optimized for specific hardware characteristics, such as the parallel processing capabilities of Graphics Processing Units (GPUs) or Tensor Processing Units (TPUs).
-For instance, certain machine learning models may be designed and optimized to take advantage of the parallel processing capabilities of specific hardware accelerators, such as Graphics Processing Units (GPUs) or Tensor Processing Units (TPUs). As a result, these models might show superior performance when benchmarked on such hardware compared to other models that are not optimized for the hardware.
+For instance, @fig-hardware-lottery compares the performance of models across different hardware platforms. The multi-hardware models show comparable results to "MobileNetV3 Large min" on both the CPU uint8 and GPU configurations. However, these multi-hardware models demonstrate significant performance improvements over the MobileNetV3 Large baseline when run on the EdgeTPU and DSP hardware. This emphasizes the variable efficiency of multi-hardware models in specialized computing environments.
-For example, a 2018 paper introduced a new convolutional neural network architecture for image classification that achieved state-of-the-art accuracy on ImageNet. However, the paper only mentioned that the model was trained on 8 GPUs without specifying the model, memory size, or other relevant details. A follow-up study tried to reproduce the results but found that training the same model on commonly available GPUs achieved 10% lower accuracy, even after hyperparameter tuning. The original hardware likely had far higher memory bandwidth and compute power. As another example, training times for large language models can vary drastically based on the GPUs used.
+![Accuracy-latency trade-offs of multiple ML models and how they perform on various hardware. Source: @chu2021discovering.](images/png/hardware_lottery.png){#fig-hardware-lottery}
-The "hardware lottery" can introduce challenges and biases in benchmarking machine learning systems, as the model's performance is not solely dependent on the model's architecture or algorithm but also on the compatibility and synergies with the underlying hardware. This can make it difficult to compare different models fairly and to identify the best model based on its intrinsic merits. It can also lead to a situation where the community converges on models that are a good fit for the popular hardware of the day, potentially overlooking other models that might be superior but incompatible with the current hardware trends.
+The hardware lottery can introduce challenges and biases in benchmarking machine learning systems, as a model's performance depends not only on its architecture or algorithm but also on its compatibility and synergies with the underlying hardware. This can make it difficult to compare different models fairly and to identify the best model based on its intrinsic merits. It can also lead to a situation where the community converges on models that are a good fit for the popular hardware of the day, potentially overlooking other models that might be superior but incompatible with current hardware trends.
#### Benchmark Engineering
@@ -521,7 +517,7 @@ Hardware lottery occurs when a machine learning model unintentionally performs e
In contrast to the accidental hardware lottery, benchmark engineering involves deliberately optimizing or designing a machine learning model to perform exceptionally well on specific hardware, often to win benchmarks or competitions. This intentional optimization might include tweaking the model's architecture, algorithms, or parameters to exploit the hardware's features and capabilities fully.
-#### Problem
+##### Problem
Benchmark engineering refers to tweaking or modifying an AI system to optimize performance on specific benchmark tests, often at the expense of generalizability or real-world performance. This can include adjusting hyperparameters, training data, or other aspects of the system specifically to achieve high scores on benchmark metrics without necessarily improving the overall functionality or utility of the system.
@@ -531,7 +527,7 @@ It can lead to several risks and challenges. One of the primary risks is that th
The AI community must prioritize transparency and accountability to mitigate the risks associated with benchmark engineering. This can include disclosing any optimizations or adjustments made specifically for benchmark tests and providing more comprehensive evaluations of AI systems that include real-world performance metrics and benchmark scores. Researchers and developers must prioritize holistic improvements to AI systems that improve their generalizability and functionality across various applications rather than focusing solely on benchmark-specific optimizations.
-#### Issues
+##### Issues
One of the primary problems with benchmark engineering is that it can compromise the real-world performance of AI systems. When developers focus on optimizing their systems to achieve high scores on specific benchmark tests, they may neglect other important aspects of system performance that are crucial in real-world applications. For example, an AI system designed for image recognition might be engineered to perform exceptionally well on a benchmark test that includes a specific set of images but struggle to accurately recognize images that differ slightly from those in the test set.
@@ -539,15 +535,15 @@ Another area for improvement with benchmark engineering is that it can result in
It can also lead to misleading results. When AI systems are engineered to perform well on benchmark tests, the results may not accurately reflect the system's true capabilities. This can be problematic for users or investors who rely on benchmark scores to make informed decisions about which AI systems to use or invest in. For example, an AI system engineered to achieve high scores on a benchmark test for speech recognition might fail to accurately recognize speech in real-world situations, leading users or investors to make decisions based on inaccurate information.
-#### Mitigation
+##### Mitigation
There are several ways to mitigate benchmark engineering. Transparency in the benchmarking process is crucial to maintaining benchmark accuracy and reliability. This involves clearly disclosing the methodologies, data sets, and evaluation criteria used in benchmark tests, as well as any optimizations or adjustments made to the AI system for the purpose of the benchmark.
One way to achieve transparency is through the use of open-source benchmarks. Open-source benchmarks are made publicly available, allowing researchers, developers, and other stakeholders to review, critique, and contribute to them, thereby ensuring their accuracy and reliability. This collaborative approach also facilitates sharing best practices and developing more robust and comprehensive benchmarks.
-One example is the MLPerf Tiny. It's an open-source framework designed to make it easy to compare different solutions in the world of TinyML. Its modular design allows components to be swapped out for comparison or improvement. The reference implementations, shown in green and orange in @fig-ml-perf, act as the baseline for results. TinyML often needs optimization across the entire system, and users can contribute by focusing on specific parts, like quantization. The modular benchmark design allows users to showcase their contributions and competitive advantage by modifying a reference implementation. In short, MLPerf Tiny offers a flexible and modular way to assess and improve TinyML applications, making it easier to compare and improve different aspects of the technology.
+The modular design of MLPerf Tiny connects to the problem of benchmark engineering by providing a structured yet flexible approach that encourages a balanced evaluation of TinyML. In benchmark engineering, systems may be overly optimized for specific benchmarks, leading to inflated performance scores that don’t necessarily translate to real-world effectiveness. MLPerf Tiny’s modular design aims to address this issue by allowing contributors to swap out and test specific components, such as hardware, quantization techniques, or inference models, within a standardized framework. The reference implementations, highlighted in green and orange in @fig-ml-perf, provide a baseline for results, enabling flexible yet controlled testing by specifying which components can be modified. This structure supports transparency and flexibility, enabling a focus on genuine improvements rather than benchmark-specific optimizations.
-![MLPerf Tiny modular design. Source: @mattson2020mlperf.](images/png/mlperf_tiny.png){#fig-ml-perf}
+![Modular design of the MLPerf Tiny benchmark, showing the reference implementation with modifiable components. This modular approach enables flexible, targeted testing while maintaining a standardized baseline. Source: @banbury2021mlperf.](images/png/mlperf_tiny.png){#fig-ml-perf}
Another method for achieving transparency is through peer review of benchmarks. This involves having independent experts review and validate the benchmark's methodology, data sets, and results to ensure their credibility and reliability. Peer review can provide a valuable means of verifying the accuracy of benchmark tests and help build confidence in the results.
@@ -569,17 +565,17 @@ Machine learning datasets have a rich history and have evolved significantly ove
The [MNIST dataset](https://www.tensorflow.org/datasets/catalog/mnist), created by Yann LeCun, Corinna Cortes, and Christopher J.C. Burges in 1998, can be considered a cornerstone in the history of machine learning datasets. It comprises 70,000 labeled 28x28 pixel grayscale images of handwritten digits (0-9). MNIST has been widely used for benchmarking image processing and machine learning algorithms, serving as a starting point for many researchers and practitioners. @fig-mnist shows some examples of handwritten digits.
-![MNIST handwritten digits. Source: [Suvanjanprasai.](https://en.wikipedia.org/wiki/File:MnistExamplesModified.png)](images/png/mnist.png){#fig-mnist}
+![MNIST handwritten digits. Source: [Suvanjanprasai](https://en.wikipedia.org/wiki/File:MnistExamplesModified.png).](images/png/mnist.png){#fig-mnist}
#### ImageNet (2009)
-Fast forward to 2009, and we see the introduction of the [ImageNet dataset](https://www.tensorflow.org/datasets/catalog/imagenet2012), which marked a significant leap in the scale and complexity of datasets. ImageNet consists of over 14 million labeled images spanning more than 20,000 categories. Fei-Fei Li and her team developed it to advance object recognition and computer vision research. The dataset became synonymous with the ImageNet Large Scale Visual Recognition Challenge (ILSVRC), an annual competition crucial in developing deep learning models, including the famous AlexNet in 2012.
+Fast forward to 2009, and we see the introduction of the [ImageNet dataset](https://www.tensorflow.org/datasets/catalog/imagenet2012), which marked a significant leap in the scale and complexity of datasets. ImageNet consists of over 14 million labeled images spanning more than 20,000 categories. Fei-Fei Li and her team developed it to advance object recognition and computer vision research. The dataset became synonymous with the ImageNet [Large Scale Visual Recognition Challenge (LSVRC)](https://www.image-net.org/challenges/LSVRC/), an annual competition crucial in developing deep learning models, including the famous AlexNet in 2012.
#### COCO (2014)
-The [Common Objects in Context (COCO) dataset](https://cocodataset.org/) [@lin2014microsoft], released in 2014, further expanded the landscape of machine learning datasets by introducing a richer set of annotations. COCO consists of images containing complex scenes with multiple objects, and each image is annotated with object bounding boxes, segmentation masks, and captions. This dataset has been instrumental in advancing research in object detection, segmentation, and image captioning.
+The [Common Objects in Context (COCO) dataset](https://cocodataset.org/) [@lin2014microsoft], released in 2014, further expanded the landscape of machine learning datasets by introducing a richer set of annotations. COCO consists of images containing complex scenes with multiple objects, and each image is annotated with object bounding boxes, segmentation masks, and captions, as shown in @fig-coco. This dataset has been instrumental in advancing research in object detection, segmentation, and image captioning.
-![Coco dataset. Source: Coco.](images/png/coco.png)
+![Example images and annotations from the COCO dataset. Source: [COCO](https://cocodataset.org/).](images/png/coco.png){#fig-coco}
ибазыданных\x20уровеньизоляциитранзакций\x20хешфункция\x20частидаты','MenuEvaluator','AbsolutePointSize','eep','locale_name','iany','DateTicksFormat','RightTee','#Date\x20#Month','ProcessInformation','SolveAlways','#Verb\x20[','StringRiffle','esplit','(?:','module_path!','AsymptoticGreaterEqual','paragraph','\x5cb([1-9][0-9_]*[0-9]|[0-9])(\x5c.[0-9][0-9_]*)?([eE]_?[-+]?[0-9_]*)?(_?f(32|64))?(?!_)','ShowShortBoxForm','EulerGamma','secondaryWeaponMagazine','PlotTheme','RemoteConnect','StringToByteArray','view_xport','form_widget','counter-reset','VectorLessEqual','FieldHintStyle','arcsin','EqualTilde','buses','array_resize','Here\x27s\x20something\x20I\x20wanted\x20to\x20share\x20with\x20you','LiteralSearch','font','getDoc','\x27[A-Za-z](_?[A-Za-z0-9])*','agent','#elseif','human','⫮','i-mean','LocatorRegion','MapAt','pool','NUMBER_RE','oil','getObjectMaterials','#NumericValue\x20#PhoneNumber','game_save',')+(-)[\x5cw\x5cd]+','𝕢','fun','assignedDriver','PlanarGraphQ','(AR|P|PAYLOAD|PR|R|SR|RSR|LBL|VR|UALM|MESSAGE|UTOOL|UFRAME|TIMER|TIMER_OVERFLOW|JOINT_MAX_SPEED|RESUME_PROG|DIAG_REC)\x5c[','ads_get_display_width','FeedbackLinearize','set','speed','formatpercent','RegionCongruent','insertAfter','⥋','audio_falloff_none','NotebooksMenu','UniformSumDistribution','$dist_exponential','shader_set','hmd','(LinkifyIt)\x20Invalid\x20schema\x20\x22','HararyGraph','physics_apply_force','Image','fsep','Insphere','ContinuedFractionK','currentThrowable','$EntityStores','ordered_list_close','get_login_async','variants','GaugeFaceStyle','[#Hyphenated\x20(#Hyphenated\x20&&\x20#PastTense)]\x20(#Noun|#Conjunction)','FindHiddenMarkovStates','$Canceled','RomanNumeral','LaTeX','RecursiveCallbackFilterIterator','GeometricDistribution','audio_stop_all','ListVectorDisplacementPlot3D','achievement_leaderboard_info','winphone_tile_back_image_wide','ANSI_CHARSET','musicVolume','mix-blend-mode','seven','normalizeLink','gpu_set_lightingenable','goNext','pt_shape_snow','diag_exportTerrainSVG','volatile','ibits','UnsavedVariables','preloadTitleObj','#Noun\x20[#Determiner]','isHidden','HYPERLINK','$printtimescale','child::','⪢','Texture','template','Periodogram','axapta','OutputGrouping','ListSliceVectorPlot3D','sus','Break','AudioNormalize','⫂','simulationEnabled','RightUpVector','(#Possessive|#Organization|#Place|#Pronoun|@hasTitleCase)','ctrlSetEventHandler','ung','Cylinder','GridBoxOptions','$NewSymbol','\x1b[32m','only-of-type','Pivot','Throwable','percent','𝔮','complex_schur_decompose_t','createSimpleTask','exceedingly','u-pronoun-2','readBytes','CombinatorS','foreign\x20import\x20export\x20ccall\x20stdcall\x20cplusplus\x20jvm\x20dotnet\x20safe\x20unsafe','mikrotik','shadow','(#=>|=>|\x5c|>>|-?->|!->)','emoji-class','given-up-on-x','FactorTerms','for\x20some\x20[#PresentTense]','pure','SixJSymbol','Int8Array','c_long','been-walking','IntegerPart','noun-of-noun','𝕟','SetDateSave','overlap','IntegerDigits','SplMinHeap','MaxWordGap','DNS\x20Zone','move_alloc','title-van-title','#\x5cs+','around','macCatalyst','def','part_type_colour_mix','(were|was)\x20being\x20[#PresentTense]','⊬','ds_map_replace_map','aspect-ratio','toLocaleDateString','TBILLEQ','ds_grid_add_region','SequenceIndicesLayer','ConjugateTranspose','article','#Gerund\x20(giant|capital|center|zone|application)','ChromaticPolynomial','^(was|were)\x20being\x20#Passive','DOLLARFR','steam_activate_overlay_user','vitro','splitEachLine','RegionSize','DoubleLeftTee','$Echo','ev_alarm','Postfix','ctrlEnable','LocalClusteringCoefficient','⨦','QFactorial','ev_user12','gpu
_set_blendmode_ext','GridBoxItemStyle','AudioSplit','path_add','$future_gclk','forceselectorder','RectangleBox','DateDelimiters','ComplexListPlot','only-child','being-born','loadpicture','⪯̸','ї','variable_global_set','logNetwork','StepMonitor','add_diag','thead_close','AutoItalicWords','Ń','getnetbyname','ColorCoverage','putchar','agence','SUMXMY2','HTTP\x20error!\x20Status:\x20','∗','[#Noun+]\x20#Actor','AngelScript','LinkMode','formatted','cr_drag','his','(taught|teaches|learns|learned)\x20[#PresentTense]','TimeDirection','#Value\x20#Value\x20#Value','ImportAutoReplacements','city','mid','StyleBox','observe','vertex_usage_depth','achievement_filter_all_players','fixs','tbl','augment','ODD','import\x20qualified\x20as\x20hiding','min-block-size','shared_future','GeoBoundingBox','insert','PreIncrement','userInputDisabled','high-enough','tvExpandAll','PRINT]','DiscreteAsymptotic','$fwriteo','number_format','з','neg_binomial','nearestLocations','scroll-padding','isIRLaserOn','layer_force_draw_depth','isMultiplayerSolo','late','\x5c$[0-9A-Fa-f]+','VerifySolutions','transform-box','promise','tac','RemoveAlphaChannel','dirty','Ltac','effect_clear','⋟','firstTerms','SquaresR','associatedtype','BoxObject','modelToWorldVisualWorld','#Acronym','ev_collision','AddSubString\x20AdjustLineBreaks\x20AmountInWords\x20Analysis\x20ArrayDimCount\x20ArrayHighBound\x20ArrayLowBound\x20ArrayOf\x20ArrayReDim\x20Assert\x20Assigned\x20BeginOfMonth\x20BeginOfPeriod\x20BuildProfilingOperationAnalysis\x20CallProcedure\x20CanReadFile\x20CArrayElement\x20CDataSetRequisite\x20ChangeDate\x20ChangeReferenceDataset\x20Char\x20CharPos\x20CheckParam\x20CheckParamValue\x20CompareStrings\x20ConstantExists\x20ControlState\x20ConvertDateStr\x20Copy\x20CopyFile\x20CreateArray\x20CreateCachedReference\x20CreateConnection\x20CreateDialog\x20CreateDualListDialog\x20CreateEditor\x20CreateException\x20CreateFile\x20CreateFolderDialog\x20CreateInputDialog\x20CreateLinkFile\x20CreateList\x20CreateLock\x20CreateMemoryDataSet\x20CreateObject\x20CreateOpenDialog\x20CreateProgress\x20CreateQuery\x20CreateReference\x20CreateReport\x20CreateSaveDialog\x20CreateScript\x20CreateSQLPivotFunction\x20CreateStringList\x20CreateTreeListSelectDialog\x20CSelectSQL\x20CSQL\x20CSubString\x20CurrentUserID\x20CurrentUserName\x20CurrentVersion\x20DataSetLocateEx\x20DateDiff\x20DateTimeDiff\x20DateToStr\x20DayOfWeek\x20DeleteFile\x20DirectoryExists\x20DisableCheckAccessRights\x20DisableCheckFullShowingRestriction\x20DisableMassTaskSendingRestrictions\x20DropTable\x20DupeString\x20EditText\x20EnableCheckAccessRights\x20EnableCheckFullShowingRestriction\x20EnableMassTaskSendingRestrictions\x20EndOfMonth\x20EndOfPeriod\x20ExceptionExists\x20ExceptionsOff\x20ExceptionsOn\x20Execute\x20ExecuteProcess\x20Exit\x20ExpandEnvironmentVariables\x20ExtractFileDrive\x20ExtractFileExt\x20ExtractFileName\x20ExtractFilePath\x20ExtractParams\x20FileExists\x20FileSize\x20FindFile\x20FindSubString\x20FirmContext\x20ForceDirectories\x20Format\x20FormatDate\x20FormatNumeric\x20FormatSQLDate\x20FormatString\x20FreeException\x20GetComponent\x20GetComponentLaunchParam\x20GetConstant\x20GetLastException\x20GetReferenceRecord\x20GetRefTypeByRefID\x20GetTableID\x20GetTempFolder\x20IfThen\x20In\x20IndexOf\x20InputDialog\x20InputDialogEx\x20InteractiveMode\x20IsFileLocked\x20IsGraphicFile\x20IsNumeric\x20Length\x20LoadString\x20LoadStringFmt\x20LocalTimeToUTC\x20LowerCase\x20Max\x20MessageBox\x20MessageBoxEx\x20MimeDecodeBinary\x20MimeDecodeString\x20MimeEncodeBinary\x20MimeEncodeString
\x20Min\x20MoneyInWords\x20MoveFile\x20NewID\x20Now\x20OpenFile\x20Ord\x20Precision\x20Raise\x20ReadCertificateFromFile\x20ReadFile\x20ReferenceCodeByID\x20ReferenceNumber\x20ReferenceRequisiteMode\x20ReferenceRequisiteValue\x20RegionDateSettings\x20RegionNumberSettings\x20RegionTimeSettings\x20RegRead\x20RegWrite\x20RenameFile\x20Replace\x20Round\x20SelectServerCode\x20SelectSQL\x20ServerDateTime\x20SetConstant\x20SetManagedFolderFieldsState\x20ShowConstantsInputDialog\x20ShowMessage\x20Sleep\x20Split\x20SQL\x20SQL2XLSTAB\x20SQLProfilingSendReport\x20StrToDate\x20SubString\x20SubStringCount\x20SystemSetting\x20Time\x20TimeDiff\x20Today\x20Transliterate\x20Trim\x20UpperCase\x20UserStatus\x20UTCToLocalTime\x20ValidateXML\x20VarIsClear\x20VarIsEmpty\x20VarIsNull\x20WorkTimeDiff\x20WriteFile\x20WriteFileEx\x20WriteObjectHistory\x20Анализ\x20БазаДанных\x20БлокЕсть\x20БлокЕстьРасш\x20БлокИнфо\x20БлокСнять\x20БлокСнятьРасш\x20БлокУстановить\x20Ввод\x20ВводМеню\x20ВедС\x20ВедСпр\x20ВерхняяГраницаМассива\x20ВнешПрогр\x20Восст\x20ВременнаяПапка\x20Время\x20ВыборSQL\x20ВыбратьЗапись\x20ВыделитьСтр\x20Вызвать\x20Выполнить\x20ВыпПрогр\x20ГрафическийФайл\x20ГруппаДополнительно\x20ДатаВремяСерв\x20ДеньНедели\x20ДиалогДаНет\x20ДлинаСтр\x20ДобПодстр\x20ЕПусто\x20ЕслиТо\x20ЕЧисло\x20ЗамПодстр\x20ЗаписьСправочника\x20ЗначПоляСпр\x20ИДТипСпр\x20ИзвлечьДиск\x20ИзвлечьИмяФайла\x20ИзвлечьПуть\x20ИзвлечьРасширение\x20ИзмДат\x20ИзменитьРазмерМассива\x20ИзмеренийМассива\x20ИмяОрг\x20ИмяПоляСпр\x20Индекс\x20ИндикаторЗакрыть\x20ИндикаторОткрыть\x20ИндикаторШаг\x20ИнтерактивныйРежим\x20ИтогТблСпр\x20КодВидВедСпр\x20КодВидСпрПоИД\x20КодПоAnalit\x20КодСимвола\x20КодСпр\x20КолПодстр\x20КолПроп\x20КонМес\x20Конст\x20КонстЕсть\x20КонстЗнач\x20КонТран\x20КопироватьФайл\x20КопияСтр\x20КПериод\x20КСтрТблСпр\x20Макс\x20МаксСтрТблСпр\x20Массив\x20Меню\x20МенюРасш\x20Мин\x20НаборДанныхНайтиРасш\x20НаимВидСпр\x20НаимПоAnalit\x20НаимСпр\x20НастроитьПереводыСтрок\x20НачМес\x20НачТран\x20НижняяГраницаМассива\x20НомерСпр\x20НПериод\x20Окно\x20Окр\x20Окружение\x20ОтлИнфДобавить\x20ОтлИнфУдалить\x20Отчет\x20ОтчетАнал\x20ОтчетИнт\x20ПапкаСуществует\x20Пауза\x20ПВыборSQL\x20ПереименоватьФайл\x20Переменные\x20ПереместитьФайл\x20Подстр\x20ПоискПодстр\x20ПоискСтр\x20ПолучитьИДТаблицы\x20ПользовательДополнительно\x20ПользовательИД\x20ПользовательИмя\x20ПользовательСтатус\x20Прервать\x20ПроверитьПараметр\x20ПроверитьПараметрЗнач\x20ПроверитьУсловие\x20РазбСтр\x20РазнВремя\x20РазнДат\x20РазнДатаВремя\x20РазнРабВремя\x20РегУстВрем\x20РегУстДат\x20РегУстЧсл\x20РедТекст\x20РеестрЗапись\x20РеестрСписокИменПарам\x20РеестрЧтение\x20РеквСпр\x20РеквСпрПр\x20Сегодня\x20Сейчас\x20Сервер\x20СерверПроцессИД\x20СертификатФайлСчитать\x20СжПроб\x20Символ\x20СистемаДиректумКод\x20СистемаИнформация\x20СистемаКод\x20Содержит\x20СоединениеЗакрыть\x20СоединениеОткрыть\x20СоздатьДиалог\x20СоздатьДиалогВыбораИзДвухСписков\x20СоздатьДиалогВыбораПапки\x20СоздатьДиалогОткрытияФайла\x20СоздатьДиалогСохраненияФайла\x20СоздатьЗапрос\x20СоздатьИндикатор\x20СоздатьИсключение\x20СоздатьКэшированныйСправочник\x20СоздатьМассив\x20СоздатьНаборДанных\x20СоздатьОбъект\x20СоздатьОтчет\x20СоздатьПапку\x20СоздатьРедактор\x20СоздатьСоединение\x20СоздатьСписок\x20СоздатьСписокСтрок\x20СоздатьСправочник\x20СоздатьСценарий\x20СоздСпр\x20СостСпр\x20Сохр\x20СохрСпр\x20СписокСистем\x20Спр\x20Справочник\x20СпрБлокЕсть\x20СпрБлокСнять\x20СпрБлокСнятьРасш\x20СпрБлокУстановить\x20СпрИзмНабДан\x20СпрКод\x20СпрНомер\x20СпрОбновить\x20СпрОткрыть\x20СпрОтменить\x20СпрПарам\x20СпрПолеЗнач\x20СпрПо
леИмя\x20СпрРекв\x20СпрРеквВведЗн\x20СпрРеквНовые\x20СпрРеквПр\x20СпрРеквПредЗн\x20СпрРеквРежим\x20СпрРеквТипТекст\x20СпрСоздать\x20СпрСост\x20СпрСохранить\x20СпрТблИтог\x20СпрТблСтр\x20СпрТблСтрКол\x20СпрТблСтрМакс\x20СпрТблСтрМин\x20СпрТблСтрПред\x20СпрТблСтрСлед\x20СпрТблСтрСозд\x20СпрТблСтрУд\x20СпрТекПредст\x20СпрУдалить\x20СравнитьСтр\x20СтрВерхРегистр\x20СтрНижнРегистр\x20СтрТблСпр\x20СумПроп\x20Сценарий\x20СценарийПарам\x20ТекВерсия\x20ТекОрг\x20Точн\x20Тран\x20Транслитерация\x20УдалитьТаблицу\x20УдалитьФайл\x20УдСпр\x20УдСтрТблСпр\x20Уст\x20УстановкиКонстант\x20ФайлАтрибутСчитать\x20ФайлАтрибутУстановить\x20ФайлВремя\x20ФайлВремяУстановить\x20ФайлВыбрать\x20ФайлЗанят\x20ФайлЗаписать\x20ФайлИскать\x20ФайлКопировать\x20ФайлМожноЧитать\x20ФайлОткрыть\x20ФайлПереименовать\x20ФайлПерекодировать\x20ФайлПереместить\x20ФайлПросмотреть\x20ФайлРазмер\x20ФайлСоздать\x20ФайлСсылкаСоздать\x20ФайлСуществует\x20ФайлСчитать\x20ФайлУдалить\x20ФмтSQLДат\x20ФмтДат\x20ФмтСтр\x20ФмтЧсл\x20Формат\x20ЦМассивЭлемент\x20ЦНаборДанныхРеквизит\x20ЦПодстр\x20','UpEquilibrium','SubstitutionSystem','^#{1,6}','WebSessionObject','NETWORKDAYS','(e|E|u&|U&)\x27','curatorCameraAreaCeiling','translate-x-full','ShannonWavelet','$coverage_get_max','store','[(be|been)]\x20(#Adverb|not)+?\x20#Gerund','ID\x20is\x20required\x20for\x20updating\x20content.','categorical_logit_glm','ShowWindow','delay_mode_unit','datastore','\x5cb(addi?u?|andi?|b(al)?|beql?|bgez(al)?l?|bgtzl?|blezl?|bltz(al)?l?|bnel?|cl[oz]|divu?|ext|ins|j(al)?|jalr(\x5c.hb)?|jr(\x5c.hb)?|lbu?|lhu?|ll|lui|lw[lr]?|maddu?|mfhi|mflo|movn|movz|move|msubu?|mthi|mtlo|mul|multu?|nop|nor|ori?|rotrv?|sb|sc|se[bh]|sh|sllv?|slti?u?|srav?|srlv?|subu?|sw[lr]?|xori?|wsbh|abs\x5c.[sd]|add\x5c.[sd]|alnv.ps|bc1[ft]l?|c\x5c.(s?f|un|u?eq|[ou]lt|[ou]le|ngle?|seq|l[et]|ng[et])\x5c.[sd]|(ceil|floor|round|trunc)\x5c.[lw]\x5c.[sd]|cfc1|cvt\x5c.d\x5c.[lsw]|cvt\x5c.l\x5c.[dsw]|cvt\x5c.ps\x5c.s|cvt\x5c.s\x5c.[dlw]|cvt\x5c.s\x5c.p[lu]|cvt\x5c.w\x5c.[dls]|div\x5c.[ds]|ldx?c1|luxc1|lwx?c1|madd\x5c.[sd]|mfc1|mov[fntz]?\x5c.[ds]|msub\x5c.[sd]|mth?c1|mul\x5c.[ds]|neg\x5c.[ds]|nmadd\x5c.[ds]|nmsub\x5c.[ds]|p[lu][lu]\x5c.ps|recip\x5c.fmt|r?sqrt\x5c.[ds]|sdx?c1|sub\x5c.[ds]|suxc1|swx?c1|break|cache|d?eret|[de]i|ehb|mfc0|mtc0|pause|prefx?|rdhwr|rdpgpr|sdbbp|ssnop|synci?|syscall|teqi?|tgei?u?|tlb(p|r|w[ir])|tlti?u?|tnei?|wait|wrpgpr)','bezierPoint','louse','gesture_get_drag_distance','VolcanoData','LeastSquares','host','TickLabelPositioning','true¦0:2E;1:2L;2:33;a2Ub2Lc29d22e1Rf1Ng1Eh16i11j0Yk0Wl0Rm0Hn0Do0Cp03rZsLt9uran2Jv7w3you\x20gu0E;a5his17i4oo3;d,l;ldlife,ne;rm8t1;apor,ernacul29i3;neg28ol1Otae;eDhBiAo8r4un3yranny;a,gst1B;aff2Oea1Ko4ue\x20nor3;th;o08u3;bleshoot2Ose1Tt;night,othpas1Vwn3;foEsfoE;me\x20off,n;er3und1;e,mod2S;a,nnis;aDcCeBhAi9ki8o7p6t4u3weepstak0;g1Unshi2Hshi;ati08e3;am,el;ace2Keci0;ap,cc1meth2C;n,ttl0;lk;eep,ingl0or1C;lf,na1Gri0;ene1Kisso1C;d0Wfe2l4nd,t3;i0Iurn;m1Ut;abi0e4ic3;e,ke15;c3i01laxa11search;ogni10rea10;a9e8hys7luto,o5re3ut2;amble,mis0s3ten20;en1Zs0L;l3rk;i28l0EyH;\x2016i28;a24tr0F;nt3ti0M;i0s;bstetri24vercrowd1Qxyg09;a5e4owada3utella;ys;ptu1Ows;il\x20poliZtional\x20securi2;aAe8o5u3;m3s1H;ps;n3o1K;ey,o3;gamy;a3cha0Elancholy,rchandi1Htallurgy;sl0t;chine3g1Aj1Hrs,thema1Q;\x20learn1Cry;aught1e6i5ogi4u3;ck,g12;c,s1M;ce,ghtn18nguis1LteratWv1;ath1isVss;ara0EindergartPn3;icke0Aowled0Y;e3upit1;a3llyfiGwel0G;ns;ce,gnor6mp5n3;forma00ter3;net,sta07;atiSort3rov;an18;a7e6isto09o3ung1;ckey,mework,ne4o3rseradi8spitali2use\x20arrest;ky;s2y;adquarteXre;ir,libut,ppiHs3;hi3te;sh;ene8l6o5r3u
m,ymnas11;a3eZ;niUss;lf,re;ut3yce0F;en;\x203ti0W;edit0Hpo3;ol;aNicFlour,o4urnit3;ure;od,rgive3uri1wl;ness;arCcono0LducaBlectr9n7quip8thi0Pvery6x3;ist4per3;ti0B;en0J;body,o08th07;joy3tertain3;ment;ici2o3;ni0H;tiS;nings,th;emi02i6o4raugh3ynas2;ts;pe,wnstai3;rs;abet0ce,s3;honZrepu3;te;aDelciChAivi07l8o3urrency;al,ld\x20w6mmenta5n3ral,ttIuscoB;fusiHt\x203;ed;ry;ar;assi01oth0;es;aos,e3;eMwK;us;d,rO;a8i6lood,owlHread5u3;ntGtt1;er;!th;lliarJs3;on;g3ss;ga3;ge;cKdviJeroGirFmBn6ppeal\x20court,r4spi3thleL;rin;ithmet3sen3;ic;i6y3;o4th3;ing;ne;se;en5n3;es2;ty;ds;craft;bi8d3nau7;yna3;mi6;ce;id,ous3;ti3;cs','iap_status_available','convenience','IndentMaxFraction','^facet\x20','hintSilent','$CurrentWebSession','noexcept','physics_fixture_set_density','HammingWindow','ain\x27t','$1es','putc','⊫','view_vborder','grammar-error','Comparable','buldozer_IsEnabledRoadDiag','z-index','AGGREGATE','𝔏','Wire','hyphens','value-bucks','⪸','⊁','AUTO|0','SelectionMove','(#Pronoun|#Person)\x20(had|#Adverb)?\x20[better]\x20#PresentTense','DoubleContourIntegral','FinancialData','ClassPriors','.overlay-settings','FILE_ATTRIBUTE_ARCHIVE','sens','camCommand','transform-style','@[A-z0-9_]+','being-driven','PaddingLayer','speaker','(\x5c([^()]*(\x5c([^()]*(\x5c([^()]*\x5c)[^()]*)*\x5c)[^()]*)*\x5c)|','ctrlStyle','TOOL_OFFSET','Delimiter','ConicHullRegion','hcLeader','ds_queue_dequeue','cache\x20database_names\x20database_schemanames\x20database_tablenames\x20define_tag\x20define_type\x20email_batch\x20encode_set\x20html_comment\x20handle\x20handle_error\x20header\x20if\x20inline\x20iterate\x20ljax_target\x20link\x20link_currentaction\x20link_currentgroup\x20link_currentrecord\x20link_detail\x20link_firstgroup\x20link_firstrecord\x20link_lastgroup\x20link_lastrecord\x20link_nextgroup\x20link_nextrecord\x20link_prevgroup\x20link_prevrecord\x20log\x20loop\x20namespace_using\x20output_none\x20portal\x20private\x20protect\x20records\x20referer\x20referrer\x20repeating\x20resultset\x20rows\x20search_args\x20search_arguments\x20select\x20sort_args\x20sort_arguments\x20thread_atomic\x20value_list\x20while\x20abort\x20case\x20else\x20fail_if\x20fail_ifnot\x20fail\x20if_empty\x20if_false\x20if_null\x20if_true\x20loop_abort\x20loop_continue\x20loop_count\x20params\x20params_up\x20return\x20return_value\x20run_children\x20soap_definetag\x20soap_lastrequest\x20soap_lastresponse\x20tag_name\x20ascending\x20average\x20by\x20define\x20descending\x20do\x20equals\x20frozen\x20group\x20handle_failure\x20import\x20in\x20into\x20join\x20let\x20match\x20max\x20min\x20on\x20order\x20parent\x20protected\x20provide\x20public\x20require\x20returnhome\x20skip\x20split_thread\x20sum\x20take\x20thread\x20to\x20trait\x20type\x20where\x20with\x20yield\x20yieldhome','#Copula\x20(pretty|dead|full|well|sure)\x20(#Adjective|#Noun)','getNth','lbData','CirculantGraph','p-4\x20md:p-6\x20lg:p-8\x20overflow-auto','updateDrawIcon','date_get_timezone','cjs','enter-btn','environment_get_variable','abbr_class','loadAbs','(^|(?![.:/\x5c-_@])(?:[$+<=>^`||]|','sput','DiscretePlot3D','estuary','Map','LogitModelFit','script_exists','IncludeFileExtension','parents','$cast','DeleteSearchIndex','browser_windows_store','ctrlAngle','baseNameOf','setText','»','_|0','Flatten','readwrite','џ','FindGraphIsomorphism','dclose','ttsabort','layer_get_x','vertex_format_add_normal','Differences','Ю','form_theme','physics_particle_group_add_point','BinLists','GetCurInstType','\x5cb(?!','Small','VerticalBar','integrate_1d','WolframAlphaDate','window_center','[{;]','src_xn','Ė','LeftTeeArrow','
Given\x20the\x20following\x20BACKGROUND_KNOWLEDGE,\x20and\x20a\x20student\x27s\x20QUESTION,\x20answer\x20the\x20QUESTION.\x20You\x20may\x20use\x20the\x20\x20BACKGROUND_KNOWLEDGE\x20as\x20reference.\x20Otherwise,\x20you\x20can\x20be\x20creative\x20and\x20respond\x20in\x20a\x20way\x20that\x20always\x20ensure\x20you\x20MUST\x20answer\x20the\x20question.\x20For\x20important\x20keywords\x20or\x20phrases\x20in\x20your\x20response\x20that\x20also\x20have\x20wikipedia\x20pages,\x20you\x20must\x20surround\x20them\x20with\x20double\x20slashes.\x20For\x20example\x20\x27\x5cmachine\x20learning\x5c\x27.\x20Each\x20response\x20MUST\x20have\x20at\x20least\x20two\x20keywords\x20or\x20phrases.\x20At\x20the\x20end\x20of\x20your\x20response,\x20place\x20\x27%%%\x27\x203\x20percent\x20signs,\x20and\x20then\x20write\x202\x20to\x203\x20followup\x20questions\x20that\x20the\x20user\x20might\x20ask\x20to\x20expand\x20their\x20knowledge.\x20Return\x20as\x20markdown','iap_storeload_failed','AstroGraphics','path_get_x','⋐','mixin|10','ODDFPRICE','alumni','define','For','EmbeddingLayer','TrackDistanceAt','Links','TreeSize','$right','instance_number','SequenceReplace','TranslationTransform','SmallCircle','ScheduledTask','“”\x22❝❞','GetFeatureSet','getservbyname','gp_axisrh','NoncentralBetaDistribution','HyperbolicDistribution','TEMP','lol','╙','$AudioEncoders','really-mark','vk_f10','$SoundDisplay','▫','th_open','SearchIndexObject','caret-color','border-inline-color','winphone_tile_back_content_wide','FORECAST.LINEAR','SinIntegral','dlgama','≭','geq','jinja','TAB','possessives','matrix_stack_multiply','DefaultFontProperties','rify','AcceptanceThreshold','VoigtDistribution','path_orientation','TubeBezierCurveBoxOptions','Undelimit','house','swamp','ds_grid_height','lnbSetColumnsPos','current_time','resources','FaceGridsStyle','layer_tile_alpha','startScope','NetEvaluationMode','gpu_get_blendmode','Url','or-heightened-emotion','ϖ','ButtonFrame','PolygonCoordinates','prognoses','CloudExport','MixtureDistribution','Previous','Nothing','HelpViewerSettings','display_set_gui_size','s_until_with','isa','nee','police','severity','figcaption','text','ds_grid_add_grid_region','audio_exists','\x5c|#','\x5c[no_square_brackets','CoxIngersollRossProcess','rank','septillion','TransitiveReductionGraph','atomic_noexcept','errors','units','buffer_u64','numeric','FilledCurveBoxOptions','\x5cs*[\x5c$\x5cw_][\x5cw_-]*','nextMenuItemIndex','\x27\x5c\x5c?.','magazinesAmmoFull','failTo','url_open_full','FormatRules','SpellingCorrection','NotImplemented','steam_ugc_query_set_match_any_tag','lchoose','(0[xX](','diag_activeSQFScripts','$PipeSupported','CVD','ToDiscreteTimeModel','contra','cancelSimpleTaskDestination','AudioGenerator','abstracts','case_eq','$InitializationContexts','setVehicleReportOwnPosition','Evaluated','poisson','MapIndexed','RightComposition','valid','vk_return','DimensionalCombinations','date_current_datetime','mixin','fieldset','missionNameSource','3-[fallback]','breakpoint','▽','DAYS360','AxiomaticTheory','⌍','audio_sync_group_get_track_pos','dfn','strrchr','VideoTrim','INTERNAL2V56','FileInformation','and\x20#Value\x20#Fraction','frant','WiFiClient','keys','idnint','change','QnDispersion','dasin','[object\x20Object]','tastes-good','comparguments','revealed','_walk','Select','TabViewBoxOptions','MixedUnit','scoreSide','gpu_set_tex_max_aniso','Unequal','TreeLeafQ','LCHColor','ctrlSetFontH6B','д','BlackmanNuttallWindow','PersistenceLocation','LoadLanguageFile','String','DirichletCharacter','inv_wishart','pageY','bm_dest_color
','ResourceSubmit','Declare','VideoStop','```+[\x20]*$','IdentityMatrix','cov_matrix|10','⤃','FindHamiltonianCycle','LearningRate','WaveletImagePlot','helpers','LucasL','ropeDetach','env','form-action','QuantityMagnitude','argument8','CellLabelPositioning','TrimmedMean','Min','vk_escape','private_constant','dims','curatorRegisteredObjects','MemberQ','ScaleRangeStyle','default','month','𝒦','showCompass','work','setycomp','Skewness','Wednesday','NetworkPacketTrace','audio_3d','ctrlSetChecked','AsynchronousTasks','physics_world_create','airportSide','≪̸','ropesAttachedTo','ªÀÁÂÃÄÅàáâãäåĀāĂ㥹ǍǎǞǟǠǡǺǻȀȁȂȃȦȧȺΆΑΔΛάαλАаѦѧӐӑӒӓƛæ','sysopen','MeshConnectivityGraph','GeoGridLinesStyle','cexist','Alignment','Bold','SubClass','IMCSCH','DESKTOP','clickable_change_ext','$RootDirectory','SystemGet','cornish','\x5cn/{4,}$','FitAll','FrontEndValueCache','Text3DBox','#FirstName\x20[#Determiner\x20#Noun]\x20#LastName','Firmata','(800)\x20PhoneNumber','asin','win8_search_add_suggestions','vk_f3','DirichletCondition','TextureCoordinateFunction','⊹','^[#Infinitive]\x20it\x20(please|now|again|plz)','NumericHex','add','ѓ','GegenbauerC','was','ds_grid_create','1pm-sun','¬','passthrough','POINT','HomePage','Checkbox','ClearSystemCache','tvSetSelected','Databins','TildeFullEqual','enableAudioFeature','twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|fourty','marginBottom','ToContinuousTimeModel','\x5cbproperty\x5cb','assert_eq!','physics_joint_rope_create','HasKey','(\x27|\x5c.\x27)+','CelestialSystem','⊅','adj-presentTense','view_get_yport','/?>','YEARFRAC','EdgeCycleMatrix','ifnull','(#Particle|#PhrasalVerb)','))?\x5cb','[\x5c.#:&\x5c[>]','SLN','display_mouse_get_x','〈','^dn(?=:)','maxWidth','ChebyshevDistance','eightieth','setOpticsMode','achievement_load_friends','physics_particle_set_category_flags','organizations','locally','string_insert','GrammarApply','add\x20remove\x20enable\x20disable\x20set\x20get\x20print\x20export\x20edit\x20find\x20run\x20debug\x20error\x20info\x20warning','AggregatedEntityClass','OptionsPacket','gesture_get_drag_time','endclass','CompoundExpression','singly','emphasis','(?:re)newenvironment','ctAddHeader','pushPending','⟵','MittagLefflerE','named-character','(have|has|had)\x20been','will\x20be','(impersonating|practicing|considering|assuming)','steam_ugc_query_set_return_long_description','vertex_color','isValid','classNameAliases','TaskExecute','left-her-boots','inside','ev_step_normal','vk_up','\x5cs*(:=|;|\x5c)|=>|$)','onDoubleClick','Coefficient','GroupElements','xs:int','PowerModList','prefecture','CompilerOptions','Geometry','keyboard_key','justify-content','phy_particle_data_flag_colour','𝕋','went','\x5c*(\x5c.[a-z\x5c-]+)+','LinkReadHeld','true¦c5lets,m4ought3sh1w0;ill,o5;a0o4;ll,nt;!\x20to,a;ight,ust;an,o0;uld','radix','$cos','getPlayerChannel','IfErrors','CounterEvaluator','Grid','∻','playMission','NAME','DATEVALUE','vectorMagnitude','\x5c$[0-9][0-9A-Fa-f]*','AxesStyle','TakeLargestBy','surface_set_target_ext','ListPickerBoxOptions','$strobe','gp_padd','caller','[A-Za-z_]\x5cw*(::\x5cw+)*(\x5c?|!)?','layer_sprite_xscale','value-of-month','detail','sun-the-5th','⨿','gpu_get_blendenable','moveInGunner','LinkFunction','TemplateArgBox','Ρ','menuChecked','Backslash','^(is|are|am)\x20#Gerund$','uninstConfirm','i\x20(#Adverb|do)?\x20not?\x20[mean]','RegExp','routeros','mint','RootTree','DimensionReduce','view_get_visible','physics_fixture_set_circle_shape','Struckthrough','UnhandledMatchError','mouse_clear','FontColor','tpl_host_fuzzy_test','[A-Za-zА-Яа-яёЁ_][A-Za-zА-Яа-яёЁ_0-9]+
','⇘','JavaScript','ResponseForm','FilterIterator','library','IncludeSingularSolutions','vectorDistanceSqr','park','setWindForce','wants','MandelbrotSetMemberQ','shader_current','(is|was|am|are|will|were|be)','text-align:','press','remoteExecCall','modifies','unassignItem','#Copula','win8_secondarytile_pin','gpu_set_colorwriteenable','rehash','honorific','utime','jthread','≧̸','c_olive','co_yield','vk_delete','vinformatwx','NumberFieldDiscriminant','td_close','0[xX]','reference','¨','^[\x20]*\x5c.[a-zA-Z]*','MeshCells','maskr','SystemError','debugPrint','transformed','RESET|0','#Plural\x20on\x20[#Infinitive]','(^|[><|]|\x22|\x5c(|','getBleedingRemaining','gauss','triggerStatements','Directory','lnbData','(//|\x22|#|/\x5c*|\x5cs+/\x5cw+)','LeftRightArrow','encodeURI','style','shownMap','works-for-me','Ś','elemental','!%&*+-/<=>@^|~?','Dashed','lcase','StringQ','clear','EXP','RadonTransform','FieldCompletionFunction','nothing','Existential','univ','ImageCacheValid','RegionPlot','EdgeRules','INTERCEPT','ContinuedFraction','observatory','(?![a-zA-Z@:_])','addWeaponWithAttachmentsCargo','highest','ROUND','a-warhol','PShape','rep','file_text_open_read','PropertyList','EuclideanDistance','random_set_seed','setLocalWindParams','ChromaKey','gas','assurance','distanceSqr','OpenerView','GeometricTest','BINARY_NUMBER_MODE',')[jJ]?(?=','toPresent','whims','border-block-style','GaugeLabels','draw_set_alpha_test_ref_value','jldoctest','is_bool','ÿ','TelegraphProcess','$UserBaseDirectory','quad_form_sym','vk_decimal','camera_get_end_script','errorlevel','will\x20#Adverb?\x20not?\x20#Adverb?\x20[be]\x20#Adjective','getEditorCamera','GeneratedAssetFormat','XIRR','selectWeaponTurret','Coarse','TextSearchReport','macroend','inner','CylindricalDecompositionFunction','std-time','Ê','LegendLabel','date_leap_year','CoreNilpotentDecomposition','TextPosition','⨍','⟪','AstroRange','ListContourPlot','tlds','OEM_CHARSET','HypothesisTestData','MeanNeighborDegree','#Adjective\x20[#Adjective]\x20#Copula','NSEC3PARAM','dtan','⥳','SolarSystemFeatureData','sci','PacletSiteRegister','blockquote','$tan','(#Verb\x20&&\x20!#Auxiliary)\x20#Adverb+?\x20#Copula','|[()[\x5c]{}.,\x22\x27?!\x5c-;]).|\x5c[(?:(?!','LowerCaseQ','tokens','scale','wantarray','byval','$changing_gclk','scroll-snap-align','PrintingStartingPageNumber','⤎','phy_particle_flag_powder','GeneratorDescription','octet_length','buffer_seek_start','Conjectures','TravelDistanceList','c_f_pointer','PartitionsQ','event_data','no-doubt','true¦0:BR;1:BF;2:B5;3:BH;4:AX;5:9Y;6:B6;7:BK;8:B0;9:AV;A:AL;B:8Q;C:8G;D:7K;E:BM;F:AH;aBDb9Zc8Wd88e81f7Kg6Wh64i60j5Lk4Vl4Dm39n2Wo2Op25quispe,r1Ls0Pt0Ev03wTxSyKzG;aIhGimmerm6A;aGou,u;ng,o;khar5ytsE;aKeun9BiHoGun;koya32shiBU;!lG;diGmaz;rim,z;maGng;da,g52mo83sGzaC;aChiBV;iao,u;aLeJiHoGright,u;jcA5lff,ng;lGmm0nkl0sniewsC;kiB1liams33s3;bGiss,lt0;b,er,st0;a6Vgn0lHtG;anabe,s3;k0sh,tG;e2Non;aLeKiHoGukD;gt,lk5roby5;dHllalGnogr3Kr1Css0val3S;ba,ob1W;al,ov4;lasHsel8W;lJn\x20dIrgBEsHzG;qu7;ilyEqu7siljE;en\x20b6Aijk,yk;enzueAIverde;aPeix1VhKi2j8ka43oJrIsui,uG;om5UrG;c2n0un1;an,emblA7ynisC;dorAMlst3Km4rrAth;atch0i8UoG;mHrG;are84laci79;ps3sG;en,on;hirDkah9Mnaka,te,varA;a06ch01eYhUiRmOoMtIuHvGzabo;en9Jobod3N;ar7bot4lliv2zuC;aIeHoG;i7Bj4AyanAB;ele,in2FpheBvens25;l8rm0;kol5lovy5re7Tsa,to,uG;ng,sa;iGy72;rn5tG;!h;l71mHnGrbu;at9cla9Egh;moBo7M;aIeGimizu;hu,vchG;en8Luk;la,r1G;gu9infe5YmGoh,pulveA7rra5P;jGyG;on5;evi6iltz,miHneid0roed0uGwarz;be3Elz;dHtG;!t,z;!t;ar4Th8ito,ka4OlJnGr4saCto,unde19v4;ch7dHtGz;a5Le,os;b53e16;as,ihDm4Po0Y;aVeSiPoJuHyG;a6oo,u;bio,iz,sG;so,u
;bKc8Fdrigue67ge10j9YmJosevelt,sItHux,wG;e,li6;a9Ch;enb4Usi;a54e4L;erts15i93;bei4JcHes,vGzzo;as,e9;ci,hards12;ag2es,iHut0yG;es,nol5N;s,t0;dImHnGsmu97v6C;tan1;ir7os;ic,u;aUeOhMiJoHrGut8;asad,if6Zochazk27;lishc2GpGrti72u10we76;e3Aov51;cHe45nG;as,to;as70hl0;aGillips;k,m,n6I;a3Hde3Wete0Bna,rJtG;ersHrovGters54;!a,ic;!en,on;eGic,kiBss3;i9ra,tz,z;h86k,padopoulIrk0tHvG;ic,l4N;el,te39;os;bMconn2Ag2TlJnei6PrHsbor6XweBzG;dem7Rturk;ella4DtGwe6N;ega,iz;iGof7Hs8I;vGyn1R;ei9;aSri1;aPeNiJoGune50ym2;rHvGwak;ak4Qik5otn66;odahl,r4S;cholsZeHkolGls4Jx3;ic,ov84;ls1miG;!n1;ils3mG;co4Xec;gy,kaGray2sh,var38;jiGmu9shiG;ma;a07c04eZiWoMuHyeG;rs;lJnIrGssoli6S;atGp03r7C;i,ov4;oz,te58;d0l0;h2lOnNo0RrHsGza1A;er,s;aKeJiIoz5risHtG;e56on;!on;!n7K;au,i9no,t5J;!lA;r1Btgome59;i3El0;cracFhhail5kkeHlG;l0os64;ls1;hmeJiIj30lHn3Krci0ssiGyer2N;!er;n0Po;er,j0;dDti;cartHlG;aughl8e2;hy;dQe7Egnu68i0jer3TkPmNnMrItHyG;er,r;ei,ic,su21thews;iHkDquAroqu8tinG;ez,s;a5Xc,nG;!o;ci5Vn;a5UmG;ad5;ar5e6Kin1;rig77s1;aVeOiLoJuHyG;!nch;k4nGo;d,gu;mbarGpe3Fvr4we;di;!nGu,yana2B;coln,dG;b21holm,strom;bedEfeKhIitn0kaHn8rGw35;oy;!j;m11tG;in1on1;bvGvG;re;iGmmy,ng,rs2Qu,voie,ws3;ne,t1F;aZeYh2iWlUnez50oNrJuHvar2woG;k,n;cerGmar68znets5;a,o34;aHem0isGyeziu;h23t3O;m0sni4Fus3KvG;ch4O;bay57ch,rh0Usk16vaIwalGzl5;czGsC;yk;cIlG;!cGen4K;huk;!ev4ic,s;e8uiveG;rt;eff0kGl4mu9nnun1;ucF;ll0nnedy;hn,llKminsCne,pIrHstra3Qto,ur,yGzl5;a,s0;j0Rls22;l2oG;or;oe;aPenOha6im14oHuG;ng,r4;e32hInHrge32u6vG;anD;es,ss3;anHnsG;en,on,t3;nesGs1R;en,s1;kiBnings,s1;cJkob4EnGrv0E;kDsG;en,sG;en0Ion;ks3obs2A;brahimDglesi5Nke5Fl0Qno07oneIshikHto,vanoG;u,v54;awa;scu;aVeOiNjaltal8oIrist50uG;!aGb0ghAynh;m2ng;a6dz4fIjgaa3Hk,lHpUrGwe,x3X;ak1Gvat;mAt;er,fm3WmG;ann;ggiBtchcock;iJmingw4BnHrGss;nand7re9;deGriks1;rs3;kkiHnG;on1;la,n1;dz4g1lvoQmOns0ZqNrMsJuIwHyG;asFes;kiB;g1ng;anHhiG;mo14;i,ov0J;di6p0r10t;ue;alaG;in1;rs1;aVeorgUheorghe,iSjonRoLrJuGw3;errGnnar3Co,staf3Ctierr7zm2;a,eG;ro;ayli6ee2Lg4iffithGub0;!s;lIme0UnHodGrbachE;e,m2;calvAzale0S;dGubE;bGs0E;erg;aj,i;bs3l,mGordaO;en7;iev3U;gnMlJmaIndFo,rGsFuthi0;cGdn0za;ia;ge;eaHlG;agh0i,o;no;e,on;aVerQiLjeldsted,lKoIrHuG;chs,entAji41ll0;eem2iedm2;ntaGrt8urni0wl0;na;emi6orA;lipIsHtzgeraG;ld;ch0h0;ovG;!ic;hatDnanIrG;arGei9;a,i;deY;ov4;b0rre1D;dKinsJriksIsGvaB;cob3GpGtra3D;inoza,osiQ;en,s3;te8;er,is3warG;ds;aXePiNjurhuMoKrisco15uHvorakG;!oT;arte,boHmitru,nn,rGt3C;and,ic;is;g2he0Omingu7nErd1ItG;to;us;aGcki2Hmitr2Ossanayake,x3;s,z;\x20JbnaIlHmirGrvisFvi,w2;!ov4;gado,ic;th;bo0groot,jo6lHsilGvriA;va;a\x20cruz,e3uG;ca;hl,mcevsCnIt2WviG;dGes,s;ov,s3;ielsGku22;!en;ki;a0Be06hRiobQlarkPoIrGunningh1H;awfo0RivGuz;elli;h1lKntJoIrGs2Nx;byn,reG;a,ia;ke,p0;i,rer2K;em2liB;ns;!e;anu;aOeMiu,oIristGu6we;eGiaG;ns1;i,ng,p9uHwGy;!dH;dGng;huJ;!n,onGu6;!g;kJnIpm2ttHudhGv7;ry;erjee,o14;!d,g;ma,raboG;rty;bJl0Cng4rG;eghetHnG;a,y;ti;an,ota1C;cerAlder3mpbeLrIstGvadi0B;iGro;llo;doHl0Er,t0uGvalho;so;so,zo;ll;a0Fe01hYiXlUoNrKuIyG;rLtyG;qi;chan2rG;ke,ns;ank5iem,oGyant;oks,wG;ne;gdan5nIruya,su,uchaHyKziG;c,n5;rd;darGik;enG;ko;ov;aGond15;nco,zG;ev4;ancFshw16;a08oGuiy2;umGwmG;ik;ckRethov1gu,ktPnNrG;gJisInG;ascoGds1;ni;ha;er,mG;anG;!n;gtGit7nP;ss3;asF;hi;er,hG;am;b4ch,ez,hRiley,kk0ldw8nMrIshHtAu0;es;ir;bInHtlGua;ett;es,i0;ieYosa;dGik;a9yoG;padhyG;ay;ra;k,ng;ic;bb0Acos09d07g04kht05lZnPrLsl2tJyG;aHd8;in;la;chis3kiG;ns3;aImstro6sl2;an;ng;ujo,ya;dJgelHsaG;ri;ovG;!a;ersJov,reG;aGjEws;ss1;en;en,on,s3;on;eksejEiyEmeiIvG;ar7es;ez;da;ev;arwHuilG;ar;al;ams,l0;er;ta;as','emitNotTerminatedNamedEntity','IntervalUnion','thousand','ħ','binary_semaphore','ListCorrelate','LindleyDistribution','herald','scrollbar-colo
r','HeunC','fog','ο','TrigFactorList','TaskFailedException','c_double','HarmonicNumber','TreeElementSizeFunction','interp','ℂ','≕','wor','beginScope','fabs','commander','BETADIST','tvSetCurSel','ibclr','infixr','arrayIntersect','beforeMatch\x20cannot\x20be\x20used\x20with\x20starts','[cause]\x20#Pronoun\x20#Verb','#PresentTense\x20the\x20[#Gerund]','BubbleScale','GeneratedParameters','Byte','DescriptorStateSpace','atomic_commit','\','Inset3DBoxOptions','strncat','cite','set_local','getLines','date_add_str','steam_download_scores_around_user','zmodload','Dust','indent','⫛','apply','BigUint64Array','CircularOrthogonalMatrixDistribution','FileTemplateApply','ContentsBoundingBox','$InitialDirectory','JankoGroupJ1','AudioRecord','GrayLevel','GreaterTilde','grouping\x20sets','idint','⪺','nth-child','AnyOrder','consulting','symbol','physics_particle_draw_ext','allVariables','#f0f0f0','MaxMemoryUsed','pickWeaponPool','LinkCreate','Љ','foo-off','twenty','country_timezones','\x5c[noprocess\x5c]|','six','Unhandled\x20action\x20type:\x20','mas','GraphDifference','[#PresentTense]\x20(music|class|lesson|night|party|festival|league|ceremony)','waypointLoiterAltitude','\x5cb(0b[01\x27]+)','LOAD','⁠','divmod','@throw','ctrlTooltip','ChannelSubscribers','debriefingText','aside','appendfile','Ą','Equals','diag_recordTurretLimits','os_windows','there-are','node','ils','⋼','⩾̸','blue','dcosh','can_close','time_vector','(\x5cb0o[0-7_]+)|(\x5cb0b[01_]+)|(\x5cb0x[0-9a-fA-F_]+)|(-?\x5cb[0-9][0-9_]*(\x5c.[0-9_]+([eE][-+]?[0-9]+)?)?)','⦌','RollPitchYawMatrix','DenseVecOrMat','WaitAsynchronousTask','prefers-reduced-transparency','filled','port\x20effect\x20module\x20where\x20command\x20subscription\x20exposing','359838kSYsKI','Reset','badRule','NamedTuple','#PresentTense\x20#Plural','clock_str','Polynomials','otherwise','tagged','uptime','to_array_1d','soundex','⌶','animation-play-state','src_email_name','(we|they)','window_set_fullscreen','SierpinskiCurve','excludeEnd','to\x20#Infinitive\x20[#PresentTense]','ListVectorPlot3D','classList','(\x5c(|\x5cs+|$)','shoulda','cursor_sprite','colón','#Infinitive\x20and\x20[%Noun|Verb%]','television','pointwiseMax','[\x5c.,]','HadamardMatrix','\x5cb(\x5cd[\x5cd_]*(\x5c.[0-9_]+)?([eE][+-]?[0-9_]+)?)','TimeZone','datepart','sbyte','isNotEqualTo','^[\x5c-\x5c.]{4,}\x5cn','ev_joystick1_button5','msgrcv','GrammarToken','perl','#responseTextarea','quickSplit','NextDate','profileNameSteam','GenerateDigitalSignature','image_blend','2-titlecase','#Copula\x20#Adjective+\x20(and|or)\x20[#PastTense]$','EEPROM','phy_joint_translation','window_set_cursor','pt_shape_circle','add3DENConnection','Subtract','been-told','PROPER','SecurityCertificate','error-notice','foundation','LUBackSubstitution','allUnitsUAV','esq','PairedSmoothHistogram','ds_list_replace','BackedEnum','(is|are|am)','CauchyPointProcess','VIFileVersion','get3DENLayerEntities','}','^Content','Timestamp','CNT','PauliMatrix','⦒','AnomalyDetectorFunction','today','⥅','StackComplete','(he|she|they|it|we)\x20is','FileWriteWord','GeoBackground','clearWeaponPool','Magenta','disableDebriefingStats','LogSeriesDistribution','^come\x20#Infinitive','json_arrayagg','BusinessDayQ','mstr','TrackCurrentAcceleration','win8_secondarytile_badge_notification','setTimeout','collision_line','waypointLoiterRadius','Positive','ini_open','RulePlot','mutable','xor_eq','𝔶','⟼','Ű','barely-even','skeleton_get_minmax','win8_appbar_add_element','WeightedGraphQ','$TimeZone','GeometricTransformationBoxOptions','.True.','FrameRate','Demonym','str','isFrozen','d
raw_vertex','unhex','ExpirationDate','physics_fixture_set_polygon_shape','say3D','SechDistribution','weaponsItemsCargo','⥖','rewrite_strat','subjects','constructor','[%Person|Place%]\x20(harbor|harbour|pier|town|city|place|dump|landfill)','vectorDB_','intersection','roboconf','SectorSpacing','BilateralZTransform','org-abbrv','true¦aXbTcReNhowMiEjust00noBo9p8supposing,t5wh0yet;e1il0o3;e,st;n1re0thN;\x20if,by,vM;evL;h0il,o;erefOo0;!uU;lus,rovided\x20th9;r0therwiM;!\x20not;\x20mattEr,w0;!\x200;since,th4w7;f4n0;\x200asmuch;as\x20mIcaForder\x20t0;h0o;at;!\x200;only,t0w0;hen;!ev3;ith2ven0;!\x200;if,tB;er;o0uz;s,z;e0ut,y\x20the\x20time;cau1f0;ore;se;lt3nd,s\x200;far1if,m0soon1t2;uch0;\x20as;hou0;gh','/dev/poll','eprintf','#Negative','buffer_delete','Permutations','View','OpenSpecialOptions','layer','ImageForwardTransformation','ptx','room_get_viewport','MathieuS','squadParams','ArcCosh','^(have|must)\x20you','obj-c++','ControllerDuration','file_exists','WhitePoint','Timeout','rayleigh','RootIntervals','$rtoi','renderInlineAsText','include\x20use','HypergeometricDistribution','red','enddeclare','createDiarySubject','ImageForestingComponents','localIP','frame-ancestors','surface_get_width','[sun]\x20#Date','#Honorific\x20#Acronym','HYPGEOM.DIST','ClassifierInformation','layer_add_instance','_Noreturn','GeometricTransformationBox','array_push','RiemannXi','ATTRIBUTE_SELECTOR_MODE','OutputSizeLimit','ShearingMatrix','drawLink',':^$','ev_joystick2_button4','SilentInstall','chan','CSGRegionQ','EventSeries','ServiceConnect','ł','view_set_surface_id','target-within','setActualCollectiveRTD','screen_save','Simplify','win8_settingscharm_get_xaml_property','TabView','stack','đ','UInt64','ps_shape_line','audio_destroy_stream','give','LocalEvaluate','part_system_get_layer','bg-gray-600\x20rounded-lg\x20p-2\x20text-white\x20hover:bg-gray-300','upcast','emails','gully','ordered_list_open','if\x20else\x20elif\x20endif\x20define\x20undef\x20ifdef\x20ifndef','beep','layer_x','SphericalRegion','ive','backpackItems','MonomialList','outerHTML','macromodule','tbody_close','CoordinateChartData','ize','GM_build_date','AugmentedSymmetricPolynomial','LinearGradientFilling','LinearOffsetFunction','pinMode','lsl','cheatsEnabled','createTreeWalker','steam_ugc_request_item_details','(going|to)','apacheconf','LibraryUnload','Darker','willSet','setPlayerVoNVolume','Hold','felt-loved','PatternFilling','GAMMAINV','lbClear','createSite','title.function','reflexpr','mp_linear_step_object','AnimatorBoxOptions','colour_get_value','nettype','do\x20not\x20(forget|omit|neglect)\x20to\x20[#Infinitive]','to-swears','surg','inRangeOfArtillery','ENDSEC','subroutine\x20function\x20program','AudioFade','beginShape','^#QuestionWord','devnull','NotCupCap','scroll','#Month\x20#Date\x20#Date','Piecewise','WeightedAdjacencyGraph','/[a-z]*','setPosASL','LineOpacity','corp','#Money\x20#Currency','UndefKeywordError','setSuppression','ClipRange','doFirst','(-?)(\x5cb0[xX][a-fA-F0-9]+|(\x5cb\x5cd+(\x5c.\x5cd*)?|\x5c.\x5cd+)([eE][-+]?\x5cd+)?)','1:mt¦2:llen¦3:iven,aken¦:ne¦y:in','Self','profile','ProgressIndicatorBox','matched','do\x20while\x20select\x20delete\x20by\x20update\x20from','Cuintmax_t','gpu_get_tex_min_mip','unlink','EvenQ','sendTaskResult','dst','MessageName','
','effect','peek','cacheDoc','kqueue','satir','EntityProperty','Annotation','LanguageData','celldefine','current_user','draw_enable_alphablend','vk_alt','AbstractFloat','BoundaryMeshRegionQ','his-fine','Conditioned','Colorize','huge','percent_rank','HKEY_CURRENT_USER','HEBREW_CHARSET','vk_control','diag_enable','sessionStorage','`[cwd]?','column-rule-style','Form','AllowVersionUpdate','Subsequences','stata','ReadEnvStr','bunked','nearObjects','DirectionalLight','vertex_texcoord','CreateDialog','invalidOp','ds_list_find_value','gpu_set_tex_filter_ext','rapids','SphereBox','|case|contractions|parentheses|quotations|emoji|honorifics|debullet','PrincipalValue','src_ZPCc','InString','ctrlSetFontPB','nounconnected_drive','GraphDensity','text_join','ISERROR','md5_file','^[(well|so|okay|now)]\x20!#Adjective?','²','forceSpeed','casex','ExpIntegralE','completedFSM','GeoVectorXYZ','CopyFiles','os_win32','gamepad_button_check_pressed','diag_frameno','deref','RightUpDownVector','program_directory','achievement_show_profile','toUpper','tokens_meta','PreviousDate','bquote','(supposing|although)',')|r)?i?\x5cb','lapply','ToEntity','ImageLevels','VBG','Timer','forests','fn\x20function','MatchQ','StringDrop','inspect','gesture_get_rotate_angle','timeunit','UndirectedGraph','sha224sum','ev_user10','nearest','add3DENEventHandler','Conjunction\x20Adjective\x20Noun','xhtmlOut','TeXForm','LineIndent','overcastForecast','cmpfunc_lessequal','vertex_format_add_colour','loadFile','true¦director1field\x20marsh2lieutenant1rear0sergeant\x20major,vice0;\x20admir1;\x20gener0;al','NotSquareSubset','toboolean','showUAVFeed','u32','PERCENTILE','surface_depth_disable','jan','[;@]','DualSystemsModel','tcl_wordBreakBefore','datetime','ParameterEstimator','⦐','SatisfiabilityCount','PDF','#Actor','genera','build','kbv_returnkey_emergency','@hasContraction','⊋','physics_particle_group_begin','PaperWidth','TogglerBox','isAbleToBreathe','PetersenGraph','plains','design','unpack','BetweennessCentrality','PowerExpand','%[Qwi]?\x5c{','ProbabilityPr','worldName','eal','ParseError','Blur','VARPTR','withStream','ReadINIStr','Bearing','TaskRemove','writeMessage','setWaypointForceBehaviour','%Person|Date%\x20#Acronym?\x20#ProperNoun','Paneled','setViewDistance','class\x20interface','BigFloat','getAllEnvSoundControllers','WebElementObject','audio_falloff_exponent_distance','$ConditionHold','discrR','num_elements','enginesPowerRTD','explains_wrapper','DeleteMissing','log_diff_exp',')|\x5c.)?|(','as\x20#Pronoun\x20[please]','BipartiteGraphQ','NS_AVAILABLE','xquery','HarmonicMeanFilter','MeanDeviation','trigger','Mizar','isLightOn','ImagingDevice','any\x20[#Infinitive]','regr_sxy','(health|school|commerce)\x20board','msgcat','magazineCargo','phy_joint_upper_angle_limit','ev_user5','QuotientRemainder','endl','primaryWeaponMagazine','FieldMasked','our','Hexahedron','log1p','^C$','vertex_float1','physics_particle_delete_region_poly','backticksScanned','pascal','localtime','selectOutput','(i|we|they)\x20have','openNextFile','FormulaLookup','markerAlpha','removeMagazineTurret','endcelldefine','FunctionContinuous','gfail','thing-doer','GeometricTransformation','#filePath','c_null_ptr','FunctionCompileExportLibrary','pre\x20code.hljs\x20{\x0a\x20\x20display:\x20block;\x0a\x20\x20overflow-x:\x20auto;\x0a\x20\x20padding:\x201em\x0a}\x0acode.hljs\x20{\x0a\x20\x20padding:\x203px\x205px\x0a}\x0a/*!\x0a\x20\x20Theme:\x20GitHub\x0a\x20\x20Description:\x20Light\x20theme\x20as\x20seen\x20on\x20github.com\x0a\x20\x20Author:\x20github.com\x0a\x20\x20Maintainer:\
x20@Hirse\x0a\x20\x20Updated:\x202021-05-15\x0a\x0a\x20\x20Outdated\x20base\x20version:\x20https://github.com/primer/github-syntax-light\x0a\x20\x20Current\x20colors\x20taken\x20from\x20GitHub\x27s\x20CSS\x0a*/\x0a.hljs\x20{\x0a\x20\x20color:\x20#24292e;\x0a\x20\x20background:\x20#ffffff\x0a}\x0a.hljs-doctag,\x0a.hljs-keyword,\x0a.hljs-meta\x20.hljs-keyword,\x0a.hljs-template-tag,\x0a.hljs-template-variable,\x0a.hljs-type,\x0a.hljs-variable.language_\x20{\x0a\x20\x20/*\x20prettylights-syntax-keyword\x20*/\x0a\x20\x20color:\x20#d73a49\x0a}\x0a.hljs-title,\x0a.hljs-title.class_,\x0a.hljs-title.class_.inherited__,\x0a.hljs-title.function_\x20{\x0a\x20\x20/*\x20prettylights-syntax-entity\x20*/\x0a\x20\x20color:\x20#6f42c1\x0a}\x0a.hljs-attr,\x0a.hljs-attribute,\x0a.hljs-literal,\x0a.hljs-meta,\x0a.hljs-number,\x0a.hljs-operator,\x0a.hljs-variable,\x0a.hljs-selector-attr,\x0a.hljs-selector-class,\x0a.hljs-selector-id\x20{\x0a\x20\x20/*\x20prettylights-syntax-constant\x20*/\x0a\x20\x20color:\x20#005cc5\x0a}\x0a.hljs-regexp,\x0a.hljs-string,\x0a.hljs-meta\x20.hljs-string\x20{\x0a\x20\x20/*\x20prettylights-syntax-string\x20*/\x0a\x20\x20color:\x20#032f62\x0a}\x0a.hljs-built_in,\x0a.hljs-symbol\x20{\x0a\x20\x20/*\x20prettylights-syntax-variable\x20*/\x0a\x20\x20color:\x20#e36209\x0a}\x0a.hljs-comment,\x0a.hljs-code,\x0a.hljs-formula\x20{\x0a\x20\x20/*\x20prettylights-syntax-comment\x20*/\x0a\x20\x20color:\x20#6a737d\x0a}\x0a.hljs-name,\x0a.hljs-quote,\x0a.hljs-selector-tag,\x0a.hljs-selector-pseudo\x20{\x0a\x20\x20/*\x20prettylights-syntax-entity-tag\x20*/\x0a\x20\x20color:\x20#22863a\x0a}\x0a.hljs-subst\x20{\x0a\x20\x20/*\x20prettylights-syntax-storage-modifier-import\x20*/\x0a\x20\x20color:\x20#24292e\x0a}\x0a.hljs-section\x20{\x0a\x20\x20/*\x20prettylights-syntax-markup-heading\x20*/\x0a\x20\x20color:\x20#005cc5;\x0a\x20\x20font-weight:\x20bold\x0a}\x0a.hljs-bullet\x20{\x0a\x20\x20/*\x20prettylights-syntax-markup-list\x20*/\x0a\x20\x20color:\x20#735c0f\x0a}\x0a.hljs-emphasis\x20{\x0a\x20\x20/*\x20prettylights-syntax-markup-italic\x20*/\x0a\x20\x20color:\x20#24292e;\x0a\x20\x20font-style:\x20italic\x0a}\x0a.hljs-strong\x20{\x0a\x20\x20/*\x20prettylights-syntax-markup-bold\x20*/\x0a\x20\x20color:\x20#24292e;\x0a\x20\x20font-weight:\x20bold\x0a}\x0a.hljs-addition\x20{\x0a\x20\x20/*\x20prettylights-syntax-markup-inserted\x20*/\x0a\x20\x20color:\x20#22863a;\x0a\x20\x20background-color:\x20#f0fff4\x0a}\x0a.hljs-deletion\x20{\x0a\x20\x20/*\x20prettylights-syntax-markup-deleted\x20*/\x0a\x20\x20color:\x20#b31d28;\x0a\x20\x20background-color:\x20#ffeef0\x0a}\x0a.hljs-char.escape_,\x0a.hljs-link,\x0a.hljs-params,\x0a.hljs-property,\x0a.hljs-punctuation,\x0a.hljs-tag\x20{\x0a\x20\x20/*\x20purposely\x20ignored\x20*/\x0a\x20\x20\x0a}','running','≚','PRP$','TuringMachine','Saturday','assignedCargo','abbreviations','exponent','GaussianSymplecticMatrixDistribution','aget','nouns','tile_get_empty','timeprecision','scroll-margin-top','(?:<','one','ImageRangeCache','',';[\x20\x5ct]*#','Touches','GaborFilter','ev_user2','endRaw','move3DENCamera','ERB','^do\x20not?\x20[#Infinitive\x20#Particle?]','UTF8','java','border-2','SelectedCells','TreeChildren','vehicleChat','(=(?!>))?|[-+*/%](?!>)','\x5cb0b([01_]+)','sysfunc','$1ae','uname','vectored','ActiveStyle','readBytesUntil','AnatomySkinStyle','IMSIN','push','endScope\x20must\x20be\x20object','NebulaData','[','FunctionSpace','AudioOverlay','camera_set_view_pos','SamplingPeriod','authors','GeoPositionXYZ','ren','õ','curatorCameraArea','HTML','csr_extract_u','getEnv3DSou
ndController','CHIINV','beforeTags','http..','university-of-Foo','show_question','MoleculeModify','CepstrumArray','HoldPattern','CounterStyle','mountain','ACOT','layer_get_script_end','ugc_match_IntegratedGuides','AutoScaling','audio_falloff_set_model','CellContext','⊻','namespace-node','NonlinearStateSpaceModel','token','date_hour_span','text/xml','path','modelToWorldVisual','gpu_get_zfunc','NumberMultiplier','FoldList','factory','keydef','ActionDelay','batch','SampleDepth','hurried','$countbits','not','est','Scrollbars','part_type_colour3','⊿','audio_new_system','a-bit-confused','would','ordering','$q_add','isFormationLeader','alarm_get','forceGeneratorRTD','⋹̸','DefaultMenuStyle','moonIntensity','some-sort-of','TreeMap','buffer_s32','regs','Quantity','border-top-width','^of\x20.','frozenLex','vehicleMoveInfo','ExternalIdentifier','PairedHistogram','schema-attribute','Initial','RecalibrationFunction','languageDetectRe','BlackmanWindow','LongEqual','InstTypeGetText','setTrafficDensity','getAttribute','generate','NotebookWrite','VideoExtractFrames','vars','camPrepareFov','NotLeftTriangleBar','ParallelArray','layer_destroy','ceiling','LightBlue','object_index','achievement_login','$dumpvars','$ProcessorCount','clog','ibessel','comparatives','lsr','buffer_wrap','q[qwxr]?\x5cs*<','KeyFreeQ','MultivariateHypergeometricDistribution','audio_emitter_free','ctrlAutoScrollSpeed','prefers-contrast','steam_set_stat_avg_rate','enableAttack','semctl','AssociateTo','audio_stop_sound','cutObj','MenuSuite','n1ql','multiRegexes','BINOMDIST','setFromEditor','⋷','Normalize','%[0-9]+','os_get_info','$Off','classpath','ApplyTo','will-adj','datan','targetsAggregate','[#Cardinal+\x20#Ordinal]\x20of\x20.','deleteMarkerLocal','setStatValue','exists','(so|very|extremely)\x20[#Gerund]','TextWords','FindMaximumFlow','RightDownVectorBar','TANH','^\x5cs*#\x5cw+','concat','AsynchronousTaskObject','RemoteKernelObject','image_alpha','MaxTrainingRounds','BitOr','ous','CellEventActions','caption_lives','Tomorrow','HeatTransferValue','⇥','FromCharacterCode','isAutoTrimOnRTD','BitLength','Returning\x20results','specify','phy_joint_reaction_force_x','HTMLInjectionError','parseInline','(first|second|third|1st|2nd|3rd)\x20#Actor','PrimitiveRoot','river','SphericalHankelH2','$1zes','┐','≜','ReplicateLayer','Median','Flat','@[^@\x5cs]+','covergroup','notif0','achievement_post','AutoNumberFormatting','getEnvSoundController','skeleton_animation_get_ext','lightSpecular','Into','std_normal_log','BinomialPointProcess','answers','⩼','ComplexInfinity','Now','scss','$MessageList','ℨ','UnitStep','AbsoluteCurrentValue','(url|data-uri)\x5c(','Verb','MandelbrotSetDistance','allowDammage','⇀','TextElement','DebugStop','(\x5cs*\x5c(.*?\x5c))?[;{]','Kurtosis','IdDict','timeline_size','encodeURIComponent','PageWidth','PMT','stringify!','Illegal','darcsin','MassImpermeableBoundaryValue','≤⃒','HTAB','line_color','#Infinitive\x20#Pronoun\x20[like]','removeAllUserActionEventHandlers','Assuming','mapping','UpValues','narrows','CUMPRINC','#Noun\x20[that]\x20#Verb\x20#Adjective','missing','LineSpacing','%r\x5c(','platform','MonomialOrder','diag_toggle','NoTrayIcon','%Person|Verb%\x20#Acronym?\x20#ProperNoun','HornerForm','Modal','ListStreamDensityPlot','xs:unsignedShort','BACKSLASH_ESCAPE','EntityTypeName','mouseDragged','transition-duration','font-display','BraKet','emissive','addr','AudioPan','registerTask','⊡','early','pushBackUnique','NotebookInformation','#Value\x20[(buck|bucks|grand)]','TracyWidomDistribution','consuming','shebang','LongestCommonSe